Diffstat:
-rw-r--r-- docs/MIRLangRef.rst | 16
-rw-r--r-- docs/NVPTXUsage.rst | 8
-rw-r--r-- include/llvm/CodeGen/MachineBasicBlock.h | 9
-rw-r--r-- lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 3
-rw-r--r-- lib/CodeGen/BranchFolding.cpp | 35
-rw-r--r-- lib/CodeGen/BranchRelaxation.cpp | 19
-rw-r--r-- lib/CodeGen/EarlyIfConversion.cpp | 24
-rw-r--r-- lib/CodeGen/EdgeBundles.cpp | 12
-rw-r--r-- lib/CodeGen/ExecutionDepsFix.cpp | 4
-rw-r--r-- lib/CodeGen/IfConversion.cpp | 31
-rw-r--r-- lib/CodeGen/LiveDebugVariables.cpp | 4
-rw-r--r-- lib/CodeGen/LiveIntervalAnalysis.cpp | 2
-rw-r--r-- lib/CodeGen/LiveRangeCalc.cpp | 2
-rw-r--r-- lib/CodeGen/MIRParser/MILexer.cpp | 3
-rw-r--r-- lib/CodeGen/MIRParser/MIParser.cpp | 2
-rw-r--r-- lib/CodeGen/MIRPrinter.cpp | 22
-rw-r--r-- lib/CodeGen/MachineBasicBlock.cpp | 23
-rw-r--r-- lib/CodeGen/MachineBlockPlacement.cpp | 2
-rw-r--r-- lib/CodeGen/MachineBranchProbabilityInfo.cpp | 2
-rw-r--r-- lib/CodeGen/MachineFunction.cpp | 4
-rw-r--r-- lib/CodeGen/MachineLICM.cpp | 12
-rw-r--r-- lib/CodeGen/MachineOperand.cpp | 2
-rw-r--r-- lib/CodeGen/MachineScheduler.cpp | 29
-rw-r--r-- lib/CodeGen/MachineSink.cpp | 46
-rw-r--r-- lib/CodeGen/MachineTraceMetrics.cpp | 49
-rw-r--r-- lib/CodeGen/MachineVerifier.cpp | 47
-rw-r--r-- lib/CodeGen/PHIElimination.cpp | 6
-rw-r--r-- lib/CodeGen/PostRASchedulerList.cpp | 4
-rw-r--r-- lib/CodeGen/ProcessImplicitDefs.cpp | 2
-rw-r--r-- lib/CodeGen/RegAllocGreedy.cpp | 2
-rw-r--r-- lib/CodeGen/RegisterCoalescer.cpp | 10
-rw-r--r-- lib/CodeGen/ScheduleDAGInstrs.cpp | 2
-rw-r--r-- lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 5
-rw-r--r-- lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp | 5
-rw-r--r-- lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 65
-rw-r--r-- lib/CodeGen/SlotIndexes.cpp | 2
-rw-r--r-- lib/CodeGen/SplitKit.cpp | 36
-rw-r--r-- lib/CodeGen/StackColoring.cpp | 2
-rw-r--r-- lib/CodeGen/TailDuplicator.cpp | 23
-rw-r--r-- lib/Target/AArch64/AArch64ConditionOptimizer.cpp | 2
-rw-r--r-- lib/Target/AArch64/AArch64ConditionalCompares.cpp | 16
-rw-r--r-- lib/Target/AArch64/AArch64RedundantCopyElimination.cpp | 10
-rw-r--r-- lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp | 85
-rw-r--r-- lib/Target/AMDGPU/GCNIterativeScheduler.cpp | 4
-rw-r--r-- lib/Target/AMDGPU/GCNSchedStrategy.cpp | 5
-rw-r--r-- lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 25
-rw-r--r-- lib/Target/AMDGPU/SIMachineScheduler.cpp | 10
-rw-r--r-- lib/Target/AMDGPU/SIWholeQuadMode.cpp | 5
-rw-r--r-- lib/Target/ARM/ARMConstantIslandPass.cpp | 32
-rw-r--r-- lib/Target/ARM/ARMConstantPoolValue.cpp | 2
-rw-r--r-- lib/Target/BPF/BPFISelDAGToDAG.cpp | 8
-rw-r--r-- lib/Target/Hexagon/BitTracker.cpp | 15
-rw-r--r-- lib/Target/Hexagon/HexagonBitSimplify.cpp | 2
-rw-r--r-- lib/Target/Hexagon/HexagonConstPropagation.cpp | 31
-rw-r--r-- lib/Target/Hexagon/HexagonEarlyIfConv.cpp | 32
-rw-r--r-- lib/Target/Hexagon/HexagonExpandCondsets.cpp | 2
-rw-r--r-- lib/Target/Hexagon/HexagonFrameLowering.cpp | 27
-rw-r--r-- lib/Target/Hexagon/HexagonGenInsert.cpp | 2
-rw-r--r-- lib/Target/Hexagon/HexagonHardwareLoops.cpp | 4
-rw-r--r-- lib/Target/Hexagon/HexagonInstrInfo.cpp | 18
-rw-r--r-- lib/Target/Hexagon/HexagonMachineScheduler.cpp | 14
-rw-r--r-- lib/Target/Hexagon/HexagonOptAddrMode.cpp | 4
-rw-r--r-- lib/Target/Hexagon/HexagonPeephole.cpp | 5
-rw-r--r-- lib/Target/Hexagon/HexagonSplitDouble.cpp | 2
-rw-r--r-- lib/Target/Hexagon/RDFGraph.cpp | 10
-rw-r--r-- lib/Target/Hexagon/RDFGraph.h | 2
-rw-r--r-- lib/Target/Hexagon/RDFLiveness.cpp | 4
-rw-r--r-- lib/Target/MSP430/MSP430BranchSelector.cpp | 10
-rw-r--r-- lib/Target/Mips/MipsConstantIslandPass.cpp | 30
-rw-r--r-- lib/Target/PowerPC/PPCBranchCoalescing.cpp | 58
-rw-r--r-- lib/Target/PowerPC/PPCCTRLoops.cpp | 19
-rw-r--r-- lib/Target/PowerPC/PPCExpandISEL.cpp | 8
-rw-r--r-- lib/Target/PowerPC/PPCMIPeephole.cpp | 7
-rw-r--r-- lib/Target/PowerPC/PPCVSXSwapRemoval.cpp | 2
-rw-r--r-- lib/Target/PowerPC/README.txt | 2
-rw-r--r-- lib/Target/PowerPC/README_ALTIVEC.txt | 2
-rw-r--r-- lib/Target/README.txt | 6
-rw-r--r-- lib/Target/SystemZ/SystemZMachineScheduler.cpp | 8
-rw-r--r-- lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp | 3
-rw-r--r-- lib/Target/X86/README.txt | 14
-rw-r--r-- lib/Target/X86/X86FixupBWInsts.cpp | 13
-rw-r--r-- lib/Target/X86/X86FloatingPoint.cpp | 4
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll | 94
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll | 6
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll | 4
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/legalize-simple.mir | 10
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir | 18
-rw-r--r-- test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll | 4
-rw-r--r-- test/CodeGen/AArch64/aarch64-stp-cluster.ll | 14
-rw-r--r-- test/CodeGen/AArch64/analyze-branch.ll | 20
-rw-r--r-- test/CodeGen/AArch64/arm64-ccmp.ll | 25
-rw-r--r-- test/CodeGen/AArch64/arm64-fp128.ll | 4
-rw-r--r-- test/CodeGen/AArch64/arm64-icmp-opt.ll | 2
-rw-r--r-- test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll | 2
-rw-r--r-- test/CodeGen/AArch64/arm64-ldp-cluster.ll | 28
-rw-r--r-- test/CodeGen/AArch64/arm64-misched-basic-A53.ll | 10
-rw-r--r-- test/CodeGen/AArch64/arm64-misched-basic-A57.ll | 4
-rw-r--r-- test/CodeGen/AArch64/arm64-misched-memdep-bug.ll | 2
-rw-r--r-- test/CodeGen/AArch64/arm64-variadic-aapcs.ll | 2
-rw-r--r-- test/CodeGen/AArch64/bics.ll | 6
-rw-r--r-- test/CodeGen/AArch64/branch-relax-cbz.ll | 2
-rw-r--r-- test/CodeGen/AArch64/fast-isel-assume.ll | 2
-rw-r--r-- test/CodeGen/AArch64/fast-isel-atomic.ll | 48
-rw-r--r-- test/CodeGen/AArch64/fast-isel-cmp-vec.ll | 24
-rw-r--r-- test/CodeGen/AArch64/fast-isel-cmpxchg.ll | 8
-rw-r--r-- test/CodeGen/AArch64/fcvt-int.ll | 4
-rw-r--r-- test/CodeGen/AArch64/local_vars.ll | 2
-rw-r--r-- test/CodeGen/AArch64/max-jump-table.ll | 8
-rw-r--r-- test/CodeGen/AArch64/neon-bitcast.ll | 122
-rw-r--r-- test/CodeGen/AArch64/nest-register.ll | 2
-rw-r--r-- test/CodeGen/AArch64/recp-fastmath.ll | 28
-rw-r--r-- test/CodeGen/AArch64/selectcc-to-shiftand.ll | 18
-rw-r--r-- test/CodeGen/AArch64/sibling-call.ll | 4
-rw-r--r-- test/CodeGen/AArch64/sqrt-fastmath.ll | 56
-rw-r--r-- test/CodeGen/AArch64/tail-call.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/branch-relaxation.ll | 8
-rw-r--r-- test/CodeGen/AMDGPU/callee-frame-setup.ll | 8
-rw-r--r-- test/CodeGen/AMDGPU/cf-loop-on-constant.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/control-flow-fastregalloc.ll | 6
-rw-r--r-- test/CodeGen/AMDGPU/convergent-inlineasm.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/early-if-convert.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/else.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/fence-amdgiz.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/i1-copy-implicit-def.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/invert-br-undef-vcc.mir | 6
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll | 10
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll | 4
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll | 4
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll | 4
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll | 4
-rw-r--r-- test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll | 4
-rw-r--r-- test/CodeGen/AMDGPU/loop_break.ll | 4
-rw-r--r-- test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll | 40
-rw-r--r-- test/CodeGen/AMDGPU/multilevel-break.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/optimize-if-exec-masking.mir | 48
-rw-r--r-- test/CodeGen/AMDGPU/ret_jump.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/sgpr-control-flow.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/skip-if-dead.ll | 48
-rw-r--r-- test/CodeGen/AMDGPU/smrd.ll | 8
-rw-r--r-- test/CodeGen/AMDGPU/uniform-cfg.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/valu-i1.ll | 2
-rw-r--r-- test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir | 12
-rw-r--r-- test/CodeGen/ARM/Windows/dbzchk.ll | 48
-rw-r--r-- test/CodeGen/ARM/and-load-combine.ll | 168
-rw-r--r-- test/CodeGen/ARM/arm-and-tst-peephole.ll | 10
-rw-r--r-- test/CodeGen/ARM/atomic-ops-v8.ll | 8
-rw-r--r-- test/CodeGen/ARM/bool-ext-inc.ll | 8
-rw-r--r-- test/CodeGen/ARM/cmpxchg-weak.ll | 10
-rw-r--r-- test/CodeGen/ARM/cortex-a57-misched-alu.ll | 2
-rw-r--r-- test/CodeGen/ARM/cortex-a57-misched-basic.ll | 4
-rw-r--r-- test/CodeGen/ARM/cortex-a57-misched-vadd.ll | 4
-rw-r--r-- test/CodeGen/ARM/cortex-a57-misched-vfma.ll | 12
-rw-r--r-- test/CodeGen/ARM/cortex-a57-misched-vsub.ll | 4
-rw-r--r-- test/CodeGen/ARM/cortexr52-misched-basic.ll | 4
-rw-r--r-- test/CodeGen/ARM/crash-on-pow2-shufflevector.ll | 2
-rw-r--r-- test/CodeGen/ARM/deprecated-asm.s | 2
-rw-r--r-- test/CodeGen/ARM/ifcvt-branch-weight-bug.ll | 4
-rw-r--r-- test/CodeGen/ARM/ifcvt-branch-weight.ll | 4
-rw-r--r-- test/CodeGen/ARM/ifcvt-iter-indbr.ll | 8
-rw-r--r-- test/CodeGen/ARM/illegal-bitfield-loadstore.ll | 24
-rw-r--r-- test/CodeGen/ARM/jump-table-tbh.ll | 4
-rw-r--r-- test/CodeGen/ARM/machine-licm.ll | 2
-rw-r--r-- test/CodeGen/ARM/misched-copy-arm.ll | 4
-rw-r--r-- test/CodeGen/ARM/negate-i1.ll | 4
-rw-r--r-- test/CodeGen/ARM/neon_vabs.ll | 26
-rw-r--r-- test/CodeGen/ARM/nest-register.ll | 2
-rw-r--r-- test/CodeGen/ARM/noopt-dmb-v7.ll | 2
-rw-r--r-- test/CodeGen/ARM/select_const.ll | 52
-rw-r--r-- test/CodeGen/ARM/setcc-logic.ll | 8
-rw-r--r-- test/CodeGen/ARM/tail-merge-branch-weight.ll | 6
-rw-r--r-- test/CodeGen/ARM/taildup-branch-weight.ll | 4
-rw-r--r-- test/CodeGen/ARM/v8m.base-jumptable_alignment.ll | 2
-rw-r--r-- test/CodeGen/ARM/vbits.ll | 118
-rw-r--r-- test/CodeGen/ARM/vcvt.ll | 56
-rw-r--r-- test/CodeGen/ARM/vext.ll | 42
-rw-r--r-- test/CodeGen/ARM/vpadd.ll | 62
-rw-r--r-- test/CodeGen/ARM/vtrn.ll | 44
-rw-r--r-- test/CodeGen/ARM/vuzp.ll | 54
-rw-r--r-- test/CodeGen/ARM/vzip.ll | 46
-rw-r--r-- test/CodeGen/AVR/atomics/fence.ll | 2
-rw-r--r-- test/CodeGen/AVR/select-must-add-unconditional-jump.ll | 24
-rw-r--r-- test/CodeGen/Generic/MachineBranchProb.ll | 20
-rw-r--r-- test/CodeGen/Hexagon/branch-folder-hoist-kills.mir | 2
-rw-r--r-- test/CodeGen/Hexagon/hwloop-redef-imm.mir | 4
-rw-r--r-- test/CodeGen/Hexagon/ifcvt-edge-weight.ll | 4
-rw-r--r-- test/CodeGen/MIR/X86/frame-info-save-restore-points.mir | 18
-rw-r--r-- test/CodeGen/MIR/X86/implicit-register-flag.mir | 6
-rw-r--r-- test/CodeGen/MIR/X86/jump-table-info.mir | 18
-rw-r--r-- test/CodeGen/MIR/X86/machine-basic-block-operands.mir | 6
-rw-r--r-- test/CodeGen/MIR/X86/newline-handling.mir | 16
-rw-r--r-- test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir | 6
-rw-r--r-- test/CodeGen/MSP430/BranchSelector.ll | 2
-rw-r--r-- test/CodeGen/Mips/compactbranches/empty-block.mir | 6
-rw-r--r-- test/CodeGen/Mips/lcb4a.ll | 2
-rw-r--r-- test/CodeGen/Mips/prevent-hoisting.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/addegluecrash.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/andc.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/atomics-constant.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/atomics-regression.ll | 2276
-rw-r--r-- test/CodeGen/PowerPC/branch_coalesce.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/fabs.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/fma-aggr-FMF.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/fp64-to-int16.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/hello-reloc.s | 2
-rw-r--r-- test/CodeGen/PowerPC/licm-remat.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/licm-tocReg.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/logic-ops-on-compares.ll | 12
-rw-r--r-- test/CodeGen/PowerPC/machine-combiner.ll | 18
-rw-r--r-- test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll | 28
-rw-r--r-- test/CodeGen/PowerPC/memcmp.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/negate-i1.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/ppc32-nest.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/ppc64-nest.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/pr32140.ll | 6
-rw-r--r-- test/CodeGen/PowerPC/pr33093.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/select-addrRegRegOnly.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/select_const.ll | 150
-rw-r--r-- test/CodeGen/PowerPC/setcc-logic.ll | 72
-rw-r--r-- test/CodeGen/PowerPC/setcc-to-sub.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/shift_mask.ll | 48
-rw-r--r-- test/CodeGen/PowerPC/sjlj.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/tail-dup-layout.ll | 20
-rw-r--r-- test/CodeGen/PowerPC/testBitReverse.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/testComparesi32gtu.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/testComparesi32leu.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/testComparesi32ltu.ll | 4
-rw-r--r-- test/CodeGen/PowerPC/testComparesieqsc.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesieqsi.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesieqsll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesieqss.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesiequc.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesiequi.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesiequll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesiequs.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesigesc.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesigesi.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesigesll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesigess.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesigtsc.ll | 12
-rw-r--r-- test/CodeGen/PowerPC/testComparesigtsi.ll | 12
-rw-r--r-- test/CodeGen/PowerPC/testComparesigtsll.ll | 12
-rw-r--r-- test/CodeGen/PowerPC/testComparesigtss.ll | 14
-rw-r--r-- test/CodeGen/PowerPC/testComparesilesc.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesilesi.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesilesll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesiless.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesiltsc.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesiltsi.ll | 10
-rw-r--r-- test/CodeGen/PowerPC/testComparesiltsll.ll | 10
-rw-r--r-- test/CodeGen/PowerPC/testComparesiltss.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesinesll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesineuc.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesineull.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesineus.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testCompareslleqsc.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testCompareslleqsi.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testCompareslleqsll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testCompareslleqss.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllequc.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllequi.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllequll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllequs.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllgesc.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesllgesi.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesllgesll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllgess.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesllgtsll.ll | 12
-rw-r--r-- test/CodeGen/PowerPC/testComparesllgtus.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testCompareslllesc.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testCompareslllesi.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testCompareslllesll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllless.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesllltsll.ll | 10
-rw-r--r-- test/CodeGen/PowerPC/testComparesllltuc.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/testComparesllltui.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllltus.ll | 6
-rw-r--r-- test/CodeGen/PowerPC/testComparesllnesll.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/testComparesllneull.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/vec_add_sub_quadword.ll | 2
-rw-r--r-- test/CodeGen/PowerPC/vec_extract_p9.ll | 36
-rw-r--r-- test/CodeGen/PowerPC/vec_extract_p9_2.ll | 48
-rw-r--r-- test/CodeGen/PowerPC/vec_int_ext.ll | 48
-rw-r--r-- test/CodeGen/PowerPC/vec_revb.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/vselect-constants.ll | 28
-rw-r--r-- test/CodeGen/RISCV/addc-adde-sube-subc.ll | 4
-rw-r--r-- test/CodeGen/RISCV/alu32.ll | 38
-rw-r--r-- test/CodeGen/RISCV/bare-select.ll | 4
-rw-r--r-- test/CodeGen/RISCV/blockaddress.ll | 2
-rw-r--r-- test/CodeGen/RISCV/branch.ll | 2
-rw-r--r-- test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll | 30
-rw-r--r-- test/CodeGen/RISCV/calls.ll | 12
-rw-r--r-- test/CodeGen/RISCV/div.ll | 20
-rw-r--r-- test/CodeGen/RISCV/i32-icmp.ll | 20
-rw-r--r-- test/CodeGen/RISCV/imm.ll | 10
-rw-r--r-- test/CodeGen/RISCV/indirectbr.ll | 4
-rw-r--r-- test/CodeGen/RISCV/jumptable.ll | 2
-rw-r--r-- test/CodeGen/RISCV/mem.ll | 24
-rw-r--r-- test/CodeGen/RISCV/mul.ll | 12
-rw-r--r-- test/CodeGen/RISCV/rem.ll | 4
-rw-r--r-- test/CodeGen/RISCV/rotl-rotr.ll | 4
-rw-r--r-- test/CodeGen/RISCV/select-cc.ll | 22
-rw-r--r-- test/CodeGen/RISCV/sext-zext-trunc.ll | 60
-rw-r--r-- test/CodeGen/RISCV/shifts.ll | 6
-rw-r--r-- test/CodeGen/RISCV/wide-mem.ll | 4
-rw-r--r-- test/CodeGen/SPARC/analyze-branch.ll | 4
-rw-r--r-- test/CodeGen/SPARC/vector-extract-elt.ll | 2
-rw-r--r-- test/CodeGen/SystemZ/DAGCombiner_isAlias.ll | 2
-rw-r--r-- test/CodeGen/SystemZ/dag-combine-02.ll | 2
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-51.ll | 2
-rw-r--r-- test/CodeGen/SystemZ/pr32372.ll | 2
-rw-r--r-- test/CodeGen/SystemZ/pr32505.ll | 2
-rw-r--r-- test/CodeGen/SystemZ/strcmp-01.ll | 6
-rw-r--r-- test/CodeGen/SystemZ/strlen-01.ll | 4
-rw-r--r-- test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll | 84
-rw-r--r-- test/CodeGen/SystemZ/vec-cmpsel.ll | 78
-rw-r--r-- test/CodeGen/SystemZ/vec-trunc-to-i1.ll | 4
-rw-r--r-- test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll | 2
-rw-r--r-- test/CodeGen/WebAssembly/dbgvalue.ll | 4
-rw-r--r-- test/CodeGen/WebAssembly/signext-arg.ll | 2
-rw-r--r-- test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll | 2
-rw-r--r-- test/CodeGen/X86/2006-03-01-InstrSchedBug.ll | 2
-rw-r--r-- test/CodeGen/X86/2008-02-14-BitMiscompile.ll | 2
-rw-r--r-- test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll | 2
-rw-r--r-- test/CodeGen/X86/2010-05-12-FastAllocKills.ll | 16
-rw-r--r-- test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll | 4
-rw-r--r-- test/CodeGen/X86/2011-10-19-widen_vselect.ll | 16
-rw-r--r-- test/CodeGen/X86/2011-10-21-widen-cmp.ll | 6
-rw-r--r-- test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll | 4
-rw-r--r-- test/CodeGen/X86/2011-12-8-bitcastintprom.ll | 4
-rw-r--r-- test/CodeGen/X86/2011-20-21-zext-ui2fp.ll | 2
-rw-r--r-- test/CodeGen/X86/2012-01-11-split-cv.ll | 2
-rw-r--r-- test/CodeGen/X86/2012-01-12-extract-sv.ll | 2
-rw-r--r-- test/CodeGen/X86/2012-04-26-sdglue.ll | 2
-rw-r--r-- test/CodeGen/X86/2012-07-10-extload64.ll | 6
-rw-r--r-- test/CodeGen/X86/2012-08-16-setcc.ll | 8
-rw-r--r-- test/CodeGen/X86/2012-1-10-buildvector.ll | 4
-rw-r--r-- test/CodeGen/X86/2012-12-1-merge-multiple.ll | 2
-rw-r--r-- test/CodeGen/X86/3dnow-schedule.ll | 54
-rw-r--r-- test/CodeGen/X86/GlobalISel/GV.ll | 16
-rw-r--r-- test/CodeGen/X86/GlobalISel/add-scalar.ll | 20
-rw-r--r-- test/CodeGen/X86/GlobalISel/add-vec.ll | 56
-rw-r--r-- test/CodeGen/X86/GlobalISel/and-scalar.ll | 10
-rw-r--r-- test/CodeGen/X86/GlobalISel/binop.ll | 44
-rw-r--r-- test/CodeGen/X86/GlobalISel/br.ll | 2
-rw-r--r-- test/CodeGen/X86/GlobalISel/brcond.ll | 16
-rw-r--r-- test/CodeGen/X86/GlobalISel/callingconv.ll | 72
-rw-r--r-- test/CodeGen/X86/GlobalISel/cmp.ll | 26
-rw-r--r-- test/CodeGen/X86/GlobalISel/constant.ll | 14
-rw-r--r-- test/CodeGen/X86/GlobalISel/ext-x86-64.ll | 6
-rw-r--r-- test/CodeGen/X86/GlobalISel/ext.ll | 28
-rw-r--r-- test/CodeGen/X86/GlobalISel/fadd-scalar.ll | 4
-rw-r--r-- test/CodeGen/X86/GlobalISel/fconstant.ll | 6
-rw-r--r-- test/CodeGen/X86/GlobalISel/fdiv-scalar.ll | 4
-rw-r--r-- test/CodeGen/X86/GlobalISel/fmul-scalar.ll | 4
-rw-r--r-- test/CodeGen/X86/GlobalISel/fpext-scalar.ll | 2
-rw-r--r-- test/CodeGen/X86/GlobalISel/frameIndex.ll | 6
-rw-r--r-- test/CodeGen/X86/GlobalISel/fsub-scalar.ll | 4
-rw-r--r-- test/CodeGen/X86/GlobalISel/gep.ll | 32
-rw-r--r-- test/CodeGen/X86/GlobalISel/legalize-brcond.mir | 14
-rw-r--r-- test/CodeGen/X86/GlobalISel/legalize-phi.mir | 210
-rw-r--r-- test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll | 20
-rw-r--r-- test/CodeGen/X86/GlobalISel/memop-scalar.ll | 40
-rw-r--r-- test/CodeGen/X86/GlobalISel/memop-vec.ll | 24
-rw-r--r-- test/CodeGen/X86/GlobalISel/mul-scalar.ll | 6
-rw-r--r-- test/CodeGen/X86/GlobalISel/mul-vec.ll | 18
-rw-r--r-- test/CodeGen/X86/GlobalISel/or-scalar.ll | 10
-rw-r--r-- test/CodeGen/X86/GlobalISel/phi.ll | 24
-rw-r--r-- test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir | 32
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-br.mir | 12
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-brcond.mir | 14
-rw-r--r-- test/CodeGen/X86/GlobalISel/select-phi.mir | 96
-rw-r--r-- test/CodeGen/X86/GlobalISel/sub-scalar.ll | 10
-rw-r--r-- test/CodeGen/X86/GlobalISel/sub-vec.ll | 24
-rw-r--r-- test/CodeGen/X86/GlobalISel/trunc.ll | 12
-rw-r--r-- test/CodeGen/X86/GlobalISel/undef.ll | 8
-rw-r--r-- test/CodeGen/X86/GlobalISel/xor-scalar.ll | 10
-rw-r--r-- test/CodeGen/X86/MachineBranchProb.ll | 8
-rw-r--r-- test/CodeGen/X86/MergeConsecutiveStores.ll | 72
-rw-r--r-- test/CodeGen/X86/SwizzleShuff.ll | 10
-rw-r--r-- test/CodeGen/X86/TruncAssertSext.ll | 2
-rw-r--r-- test/CodeGen/X86/TruncAssertZext.ll | 4
-rw-r--r-- test/CodeGen/X86/WidenArith.ll | 4
-rw-r--r-- test/CodeGen/X86/add-ext.ll | 22
-rw-r--r-- test/CodeGen/X86/add-of-carry.ll | 4
-rw-r--r-- test/CodeGen/X86/add-sub-nsw-nuw.ll | 2
-rw-r--r-- test/CodeGen/X86/add.ll | 90
-rw-r--r-- test/CodeGen/X86/addcarry.ll | 18
-rw-r--r-- test/CodeGen/X86/adx-intrinsics.ll | 26
-rw-r--r-- test/CodeGen/X86/aes-schedule.ll | 96
-rw-r--r-- test/CodeGen/X86/aes_intrinsics.ll | 24
-rw-r--r-- test/CodeGen/X86/all-ones-vector.ll | 248
-rw-r--r-- test/CodeGen/X86/and-sink.ll | 30
-rw-r--r-- test/CodeGen/X86/anyext.ll | 8
-rw-r--r-- test/CodeGen/X86/atom-fixup-lea2.ll | 2
-rw-r--r-- test/CodeGen/X86/atomic-eflags-reuse.ll | 56
-rw-r--r-- test/CodeGen/X86/atomic-minmax-i6432.ll | 40
-rw-r--r-- test/CodeGen/X86/atomic128.ll | 50
-rw-r--r-- test/CodeGen/X86/avg-mask.ll | 48
-rw-r--r-- test/CodeGen/X86/avg.ll | 190
-rw-r--r-- test/CodeGen/X86/avx-arith.ll | 62
-rw-r--r-- test/CodeGen/X86/avx-basic.ll | 20
-rw-r--r-- test/CodeGen/X86/avx-bitcast.ll | 2
-rw-r--r-- test/CodeGen/X86/avx-cast.ll | 14
-rw-r--r-- test/CodeGen/X86/avx-cmp.ll | 32
-rw-r--r-- test/CodeGen/X86/avx-cvt-2.ll | 8
-rw-r--r-- test/CodeGen/X86/avx-cvt-3.ll | 24
-rw-r--r-- test/CodeGen/X86/avx-cvt.ll | 32
-rw-r--r-- test/CodeGen/X86/avx-gfni-intrinsics.ll | 12
-rw-r--r-- test/CodeGen/X86/avx-insertelt.ll | 16
-rw-r--r-- test/CodeGen/X86/avx-intrinsics-fast-isel.ll | 760
-rw-r--r-- test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll | 110
-rw-r--r-- test/CodeGen/X86/avx-intrinsics-x86.ll | 210
-rw-r--r-- test/CodeGen/X86/avx-intrinsics-x86_64.ll | 8
-rw-r--r-- test/CodeGen/X86/avx-load-store.ll | 52
-rw-r--r-- test/CodeGen/X86/avx-logic.ll | 44
-rw-r--r-- test/CodeGen/X86/avx-schedule.ll | 1440
-rw-r--r-- test/CodeGen/X86/avx-select.ll | 16
-rw-r--r-- test/CodeGen/X86/avx-shift.ll | 32
-rw-r--r-- test/CodeGen/X86/avx-shuffle-x86_32.ll | 4
-rw-r--r-- test/CodeGen/X86/avx-splat.ll | 26
-rw-r--r-- test/CodeGen/X86/avx-trunc.ll | 6
-rw-r--r-- test/CodeGen/X86/avx-unpack.ll | 40
-rw-r--r-- test/CodeGen/X86/avx-vbroadcast.ll | 164
-rw-r--r-- test/CodeGen/X86/avx-vbroadcastf128.ll | 52
-rw-r--r-- test/CodeGen/X86/avx-vextractf128.ll | 20
-rw-r--r-- test/CodeGen/X86/avx-vinsertf128.ll | 20
-rw-r--r-- test/CodeGen/X86/avx-vpclmulqdq.ll | 2
-rw-r--r-- test/CodeGen/X86/avx-vperm2x128.ll | 144
-rw-r--r-- test/CodeGen/X86/avx-vzeroupper.ll | 34
-rw-r--r-- test/CodeGen/X86/avx1-logical-load-folding.ll | 16
-rw-r--r-- test/CodeGen/X86/avx2-arith.ll | 96
-rw-r--r-- test/CodeGen/X86/avx2-cmp.ll | 32
-rw-r--r-- test/CodeGen/X86/avx2-conversions.ll | 60
-rw-r--r-- test/CodeGen/X86/avx2-fma-fneg-combine.ll | 24
-rw-r--r-- test/CodeGen/X86/avx2-gather.ll | 24
-rw-r--r-- test/CodeGen/X86/avx2-intrinsics-fast-isel.ll | 470
-rw-r--r-- test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll | 110
-rw-r--r-- test/CodeGen/X86/avx2-intrinsics-x86.ll | 430
-rw-r--r-- test/CodeGen/X86/avx2-logic.ll | 28
-rw-r--r-- test/CodeGen/X86/avx2-masked-gather.ll | 160
-rw-r--r-- test/CodeGen/X86/avx2-nontemporal.ll | 4
-rw-r--r-- test/CodeGen/X86/avx2-phaddsub.ll | 32
-rw-r--r-- test/CodeGen/X86/avx2-pmovxrm.ll | 48
-rw-r--r-- test/CodeGen/X86/avx2-schedule.ll | 1908
-rw-r--r-- test/CodeGen/X86/avx2-shift.ll | 148
-rw-r--r-- test/CodeGen/X86/avx2-vbroadcast.ll | 270
-rw-r--r-- test/CodeGen/X86/avx2-vbroadcasti128.ll | 52
-rw-r--r-- test/CodeGen/X86/avx2-vector-shifts.ll | 140
-rw-r--r-- test/CodeGen/X86/avx2-vperm.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512-adc-sbb.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-any_extend_load.ll | 12
-rw-r--r-- test/CodeGen/X86/avx512-arith.ll | 224
-rw-r--r-- test/CodeGen/X86/avx512-bugfix-23634.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-bugfix-25270.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-bugfix-26264.ll | 4
-rw-r--r-- test/CodeGen/X86/avx512-build-vector.ll | 4
-rw-r--r-- test/CodeGen/X86/avx512-calling-conv.ll | 70
-rw-r--r-- test/CodeGen/X86/avx512-cmp-kor-sequence.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-cmp.ll | 28
-rw-r--r-- test/CodeGen/X86/avx512-cvt.ll | 348
-rw-r--r-- test/CodeGen/X86/avx512-ext.ll | 336
-rw-r--r-- test/CodeGen/X86/avx512-extract-subvector-load-store.ll | 212
-rw-r--r-- test/CodeGen/X86/avx512-extract-subvector.ll | 138
-rw-r--r-- test/CodeGen/X86/avx512-fma-commute.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512-fma-intrinsics.ll | 128
-rw-r--r-- test/CodeGen/X86/avx512-fma.ll | 34
-rw-r--r-- test/CodeGen/X86/avx512-fsel.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-gather-scatter-intrin.ll | 104
-rw-r--r-- test/CodeGen/X86/avx512-gfni-intrinsics.ll | 18
-rw-r--r-- test/CodeGen/X86/avx512-hadd-hsub.ll | 48
-rw-r--r-- test/CodeGen/X86/avx512-i1test.ll | 8
-rw-r--r-- test/CodeGen/X86/avx512-insert-extract.ll | 216
-rw-r--r-- test/CodeGen/X86/avx512-insert-extract_i1.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-intrinsics-fast-isel.ll | 364
-rw-r--r-- test/CodeGen/X86/avx512-intrinsics-upgrade.ll | 504
-rw-r--r-- test/CodeGen/X86/avx512-intrinsics.ll | 748
-rw-r--r-- test/CodeGen/X86/avx512-load-store.ll | 40
-rw-r--r-- test/CodeGen/X86/avx512-logic.ll | 160
-rw-r--r-- test/CodeGen/X86/avx512-mask-op.ll | 472
-rw-r--r-- test/CodeGen/X86/avx512-mask-spills.ll | 10
-rwxr-xr-x test/CodeGen/X86/avx512-mask-zext-bugfix.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-masked-memop-64-32.ll | 40
-rw-r--r-- test/CodeGen/X86/avx512-masked_memop-16-8.ll | 24
-rw-r--r-- test/CodeGen/X86/avx512-memfold.ll | 10
-rw-r--r-- test/CodeGen/X86/avx512-mov.ll | 94
-rw-r--r-- test/CodeGen/X86/avx512-pmovxrm.ll | 48
-rw-r--r-- test/CodeGen/X86/avx512-regcall-Mask.ll | 88
-rw-r--r-- test/CodeGen/X86/avx512-regcall-NoMask.ll | 174
-rw-r--r-- test/CodeGen/X86/avx512-rotate.ll | 40
-rw-r--r-- test/CodeGen/X86/avx512-scalarIntrinsics.ll | 28
-rw-r--r-- test/CodeGen/X86/avx512-scalar_mask.ll | 20
-rwxr-xr-x test/CodeGen/X86/avx512-schedule.ll | 1882
-rw-r--r-- test/CodeGen/X86/avx512-select.ll | 64
-rw-r--r-- test/CodeGen/X86/avx512-shift.ll | 36
-rwxr-xr-x test/CodeGen/X86/avx512-shuffle-schedule.ll | 3400
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll | 180
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll | 416
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll | 180
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll | 216
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/duplicate-high.ll | 132
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/duplicate-low.ll | 240
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll | 292
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/partial_permute.ll | 592
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/permute.ll | 472
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll | 220
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll | 320
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/shuffle.ll | 468
-rw-r--r-- test/CodeGen/X86/avx512-shuffles/unpack.ll | 440
-rw-r--r-- test/CodeGen/X86/avx512-skx-insert-subvec.ll | 24
-rw-r--r-- test/CodeGen/X86/avx512-trunc.ll | 154
-rw-r--r-- test/CodeGen/X86/avx512-unsafe-fp-math.ll | 24
-rw-r--r-- test/CodeGen/X86/avx512-vbroadcast.ll | 66
-rw-r--r-- test/CodeGen/X86/avx512-vbroadcasti128.ll | 38
-rw-r--r-- test/CodeGen/X86/avx512-vbroadcasti256.ll | 20
-rw-r--r-- test/CodeGen/X86/avx512-vec-cmp.ll | 140
-rw-r--r-- test/CodeGen/X86/avx512-vec3-crash.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-vpclmulqdq.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-vpermv3-commute.ll | 58
-rw-r--r-- test/CodeGen/X86/avx512-vpternlog-commute.ll | 176
-rw-r--r-- test/CodeGen/X86/avx512-vselect-crash.ll | 2
-rw-r--r-- test/CodeGen/X86/avx512-vselect.ll | 4
-rw-r--r-- test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll | 128
-rw-r--r-- test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll | 360
-rw-r--r-- test/CodeGen/X86/avx512bw-intrinsics.ll | 400
-rw-r--r-- test/CodeGen/X86/avx512bw-mask-op.ll | 28
-rw-r--r-- test/CodeGen/X86/avx512bw-mov.ll | 32
-rw-r--r-- test/CodeGen/X86/avx512bw-vec-cmp.ll | 24
-rw-r--r-- test/CodeGen/X86/avx512bw-vec-test-testn.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll | 144
-rw-r--r-- test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll | 468
-rw-r--r-- test/CodeGen/X86/avx512bwvl-intrinsics.ll | 396
-rw-r--r-- test/CodeGen/X86/avx512bwvl-mov.ll | 32
-rw-r--r-- test/CodeGen/X86/avx512bwvl-vec-cmp.ll | 48
-rw-r--r-- test/CodeGen/X86/avx512bwvl-vec-test-testn.ll | 32
-rw-r--r-- test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll | 4
-rw-r--r-- test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll | 12
-rw-r--r-- test/CodeGen/X86/avx512cd-intrinsics.ll | 14
-rw-r--r-- test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512cdvl-intrinsics.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll | 36
-rw-r--r-- test/CodeGen/X86/avx512dq-intrinsics.ll | 58
-rw-r--r-- test/CodeGen/X86/avx512dq-mask-op.ll | 8
-rw-r--r-- test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll | 298
-rw-r--r-- test/CodeGen/X86/avx512dqvl-intrinsics.ll | 84
-rw-r--r-- test/CodeGen/X86/avx512er-intrinsics.ll | 42
-rw-r--r-- test/CodeGen/X86/avx512f-vec-test-testn.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512ifma-intrinsics.ll | 32
-rw-r--r-- test/CodeGen/X86/avx512ifmavl-intrinsics.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512vbmi-intrinsics.ll | 10
-rw-r--r-- test/CodeGen/X86/avx512vbmi2-intrinsics.ll | 40
-rw-r--r-- test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll | 80
-rw-r--r-- test/CodeGen/X86/avx512vbmivl-intrinsics.ll | 20
-rwxr-xr-x test/CodeGen/X86/avx512vl-arith.ll | 146
-rw-r--r-- test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll | 384
-rw-r--r-- test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll | 746
-rw-r--r-- test/CodeGen/X86/avx512vl-intrinsics.ll | 706
-rw-r--r-- test/CodeGen/X86/avx512vl-logic.ll | 160
-rw-r--r-- test/CodeGen/X86/avx512vl-mov.ll | 128
-rw-r--r-- test/CodeGen/X86/avx512vl-vbroadcast.ll | 28
-rw-r--r-- test/CodeGen/X86/avx512vl-vec-cmp.ll | 192
-rw-r--r-- test/CodeGen/X86/avx512vl-vec-masked-cmp.ll | 3138
-rw-r--r-- test/CodeGen/X86/avx512vl-vec-test-testn.ll | 64
-rw-r--r-- test/CodeGen/X86/avx512vl-vpclmulqdq.ll | 4
-rw-r--r-- test/CodeGen/X86/avx512vl_vnni-intrinsics.ll | 16
-rw-r--r-- test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll | 8
-rw-r--r-- test/CodeGen/X86/avx512vnni-intrinsics.ll | 8
-rw-r--r-- test/CodeGen/X86/avx512vpopcntdq-intrinsics.ll | 16
-rw-r--r-- test/CodeGen/X86/bc-extract.ll | 12
-rw-r--r-- test/CodeGen/X86/bigstructret.ll | 8
-rw-r--r-- test/CodeGen/X86/bitcast-and-setcc-128.ll | 102
-rw-r--r-- test/CodeGen/X86/bitcast-and-setcc-256.ll | 56
-rw-r--r-- test/CodeGen/X86/bitcast-and-setcc-512.ll | 56
-rw-r--r-- test/CodeGen/X86/bitcast-i256.ll | 4
-rw-r--r-- test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll | 98
-rw-r--r-- test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll | 122
-rw-r--r-- test/CodeGen/X86/bitcast-int-to-vector-bool.ll | 48
-rw-r--r-- test/CodeGen/X86/bitcast-int-to-vector.ll | 6
-rw-r--r-- test/CodeGen/X86/bitcast-mmx.ll | 16
-rw-r--r-- test/CodeGen/X86/bitcast-setcc-128.ll | 102
-rw-r--r-- test/CodeGen/X86/bitcast-setcc-256.ll | 56
-rw-r--r-- test/CodeGen/X86/bitcast-setcc-512.ll | 60
-rw-r--r-- test/CodeGen/X86/bitreverse.ll | 60
-rw-r--r-- test/CodeGen/X86/block-placement.ll | 6
-rw-r--r-- test/CodeGen/X86/block-placement.mir | 14
-rw-r--r-- test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll | 24
-rw-r--r-- test/CodeGen/X86/bmi-intrinsics-fast-isel.ll | 64
-rw-r--r-- test/CodeGen/X86/bmi-schedule.ll | 168
-rw-r--r-- test/CodeGen/X86/bmi.ll | 144
-rw-r--r-- test/CodeGen/X86/bmi2-schedule.ll | 180
-rw-r--r-- test/CodeGen/X86/bmi2.ll | 18
-rw-r--r-- test/CodeGen/X86/bool-ext-inc.ll | 14
-rw-r--r-- test/CodeGen/X86/bool-simplify.ll | 20
-rw-r--r-- test/CodeGen/X86/bool-vector.ll | 24
-rw-r--r-- test/CodeGen/X86/bool-zext.ll | 12
-rw-r--r-- test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll | 384
-rw-r--r-- test/CodeGen/X86/broadcastm-lowering.ll | 36
-rw-r--r-- test/CodeGen/X86/bswap-rotate.ll | 4
-rw-r--r-- test/CodeGen/X86/bswap-vector.ll | 106
-rw-r--r-- test/CodeGen/X86/bswap-wide-int.ll | 24
-rw-r--r-- test/CodeGen/X86/bswap_tree.ll | 8
-rw-r--r-- test/CodeGen/X86/bswap_tree2.ll | 12
-rw-r--r-- test/CodeGen/X86/bt.ll | 240
-rw-r--r-- test/CodeGen/X86/btq.ll | 8
-rw-r--r-- test/CodeGen/X86/build-vector-128.ll | 60
-rw-r--r-- test/CodeGen/X86/build-vector-256.ll | 36
-rw-r--r-- test/CodeGen/X86/build-vector-512.ll | 32
-rw-r--r-- test/CodeGen/X86/buildvec-insertvec.ll | 62
-rw-r--r-- test/CodeGen/X86/bypass-slow-division-32.ll | 32
-rw-r--r-- test/CodeGen/X86/bypass-slow-division-64.ll | 12
-rw-r--r-- test/CodeGen/X86/cast-vsel.ll | 68
-rw-r--r-- test/CodeGen/X86/catchpad-weight.ll | 2
-rw-r--r-- test/CodeGen/X86/chain_order.ll | 2
-rw-r--r-- test/CodeGen/X86/clear_upper_vector_element_bits.ll | 142
-rw-r--r-- test/CodeGen/X86/clflushopt-schedule.ll | 10
-rw-r--r-- test/CodeGen/X86/clflushopt.ll | 4
-rw-r--r-- test/CodeGen/X86/clwb.ll | 2
-rw-r--r-- test/CodeGen/X86/clz.ll | 224
-rw-r--r-- test/CodeGen/X86/clzero.ll | 4
-rw-r--r-- test/CodeGen/X86/cmov-into-branch.ll | 22
-rw-r--r-- test/CodeGen/X86/cmov-promotion.ll | 84
-rw-r--r-- test/CodeGen/X86/cmov.ll | 26
-rw-r--r-- test/CodeGen/X86/cmovcmov.ll | 14
-rw-r--r-- test/CodeGen/X86/cmp.ll | 64
-rw-r--r-- test/CodeGen/X86/coalesce_commute_movsd.ll | 16
-rw-r--r-- test/CodeGen/X86/combine-64bit-vec-binop.ll | 42
-rw-r--r-- test/CodeGen/X86/combine-abs.ll | 24
-rw-r--r-- test/CodeGen/X86/combine-add.ll | 68
-rw-r--r-- test/CodeGen/X86/combine-and.ll | 52
-rw-r--r-- test/CodeGen/X86/combine-avx-intrinsics.ll | 12
-rw-r--r-- test/CodeGen/X86/combine-avx2-intrinsics.ll | 18
-rw-r--r-- test/CodeGen/X86/combine-fcopysign.ll | 48
-rw-r--r-- test/CodeGen/X86/combine-mul.ll | 68
-rw-r--r-- test/CodeGen/X86/combine-multiplies.ll | 8
-rw-r--r-- test/CodeGen/X86/combine-or.ll | 74
-rw-r--r-- test/CodeGen/X86/combine-pmuldq.ll | 16
-rw-r--r-- test/CodeGen/X86/combine-rotates.ll | 12
-rw-r--r-- test/CodeGen/X86/combine-sdiv.ll | 34
-rw-r--r-- test/CodeGen/X86/combine-sext-in-reg.ll | 8
-rw-r--r-- test/CodeGen/X86/combine-shl.ll | 128
-rw-r--r-- test/CodeGen/X86/combine-sra.ll | 56
-rw-r--r-- test/CodeGen/X86/combine-srem.ll | 18
-rw-r--r-- test/CodeGen/X86/combine-srl.ll | 80
-rw-r--r-- test/CodeGen/X86/combine-sse41-intrinsics.ll | 18
-rw-r--r-- test/CodeGen/X86/combine-sub.ll | 60
-rw-r--r-- test/CodeGen/X86/combine-testm-and.ll | 8
-rw-r--r-- test/CodeGen/X86/combine-udiv.ll | 36
-rw-r--r-- test/CodeGen/X86/combine-urem.ll | 42
-rw-r--r-- test/CodeGen/X86/commute-3dnow.ll | 36
-rw-r--r-- test/CodeGen/X86/commute-blend-avx2.ll | 16
-rw-r--r-- test/CodeGen/X86/commute-blend-sse41.ll | 6
-rw-r--r-- test/CodeGen/X86/commute-clmul.ll | 16
-rw-r--r-- test/CodeGen/X86/commute-fcmp.ll | 192
-rw-r--r-- test/CodeGen/X86/commute-vpclmulqdq-avx.ll | 6
-rw-r--r-- test/CodeGen/X86/commute-vpclmulqdq-avx512.ll | 18
-rw-r--r-- test/CodeGen/X86/commute-xop.ll | 80
-rw-r--r-- test/CodeGen/X86/complex-fastmath.ll | 24
-rw-r--r-- test/CodeGen/X86/compress_expand.ll | 70
-rw-r--r-- test/CodeGen/X86/computeKnownBits_urem.ll | 4
-rw-r--r-- test/CodeGen/X86/conditional-indecrement.ll | 18
-rw-r--r-- test/CodeGen/X86/conditional-tailcall-samedest.mir | 20
-rw-r--r-- test/CodeGen/X86/constant-combines.ll | 2
-rw-r--r-- test/CodeGen/X86/copysign-constant-magnitude.ll | 16
-rw-r--r-- test/CodeGen/X86/critical-edge-split-2.ll | 4
-rw-r--r-- test/CodeGen/X86/ctpop-combine.ll | 8
-rw-r--r-- test/CodeGen/X86/cvtv2f32.ll | 8
-rw-r--r-- test/CodeGen/X86/dag-fmf-cse.ll | 2
-rw-r--r-- test/CodeGen/X86/dag-merge-fast-accesses.ll | 12
-rw-r--r-- test/CodeGen/X86/dagcombine-buildvector.ll | 4
-rw-r--r-- test/CodeGen/X86/dagcombine-cse.ll | 4
-rw-r--r-- test/CodeGen/X86/debugloc-no-line-0.ll | 2
-rw-r--r-- test/CodeGen/X86/div-rem-simplify.ll | 40
-rw-r--r-- test/CodeGen/X86/divide-by-constant.ll | 56
-rw-r--r-- test/CodeGen/X86/divrem.ll | 32
-rw-r--r-- test/CodeGen/X86/divrem8_ext.ll | 36
-rw-r--r-- test/CodeGen/X86/domain-reassignment.mir | 16
-rw-r--r-- test/CodeGen/X86/exedeps-movq.ll | 16
-rw-r--r-- test/CodeGen/X86/exedepsfix-broadcast.ll | 16
-rw-r--r-- test/CodeGen/X86/extract-store.ll | 202
-rw-r--r-- test/CodeGen/X86/extractelement-index.ll | 150
-rw-r--r-- test/CodeGen/X86/extractelement-legalization-store-ordering.ll | 2
-rw-r--r-- test/CodeGen/X86/extractelement-load.ll | 24
-rw-r--r-- test/CodeGen/X86/f16c-intrinsics-fast-isel.ll | 24
-rw-r--r-- test/CodeGen/X86/f16c-intrinsics.ll | 96
-rw-r--r-- test/CodeGen/X86/f16c-schedule.ll | 56
-rw-r--r-- test/CodeGen/X86/fadd-combines.ll | 36
-rw-r--r-- test/CodeGen/X86/fast-isel-cmp.ll | 320
-rw-r--r-- test/CodeGen/X86/fast-isel-constpool.ll | 16
-rw-r--r-- test/CodeGen/X86/fast-isel-fptrunc-fpext.ll | 24
-rw-r--r-- test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll | 24
-rw-r--r-- test/CodeGen/X86/fast-isel-int-float-conversion.ll | 48
-rw-r--r-- test/CodeGen/X86/fast-isel-load-i1.ll | 4
-rw-r--r-- test/CodeGen/X86/fast-isel-nontemporal.ll | 338
-rw-r--r-- test/CodeGen/X86/fast-isel-select-cmov.ll | 12
-rw-r--r-- test/CodeGen/X86/fast-isel-select-cmov2.ll | 84
-rw-r--r-- test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll | 96
-rw-r--r-- test/CodeGen/X86/fast-isel-select-sse.ll | 144
-rw-r--r-- test/CodeGen/X86/fast-isel-sext-zext.ll | 80
-rw-r--r-- test/CodeGen/X86/fast-isel-shift.ll | 72
-rw-r--r-- test/CodeGen/X86/fast-isel-store.ll | 184
-rw-r--r-- test/CodeGen/X86/fast-isel-vecload.ll | 278
-rw-r--r-- test/CodeGen/X86/fastisel-softfloat.ll | 2
-rw-r--r-- test/CodeGen/X86/fdiv-combine.ll | 12
-rw-r--r-- test/CodeGen/X86/fdiv.ll | 12
-rw-r--r-- test/CodeGen/X86/fixup-bw-copy.ll | 18
-rw-r--r-- test/CodeGen/X86/fma-commute-x86.ll | 144
-rw-r--r-- test/CodeGen/X86/fma-fneg-combine.ll | 54
-rw-r--r-- test/CodeGen/X86/fma-intrinsics-x86.ll | 240
-rw-r--r-- test/CodeGen/X86/fma-scalar-memfold.ll | 32
-rw-r--r-- test/CodeGen/X86/fma-schedule.ll | 448
-rw-r--r-- test/CodeGen/X86/fma.ll | 96
-rw-r--r-- test/CodeGen/X86/fma4-commute-x86.ll | 108
-rw-r--r-- test/CodeGen/X86/fma4-fneg-combine.ll | 20
-rw-r--r-- test/CodeGen/X86/fma4-intrinsics-x86.ll | 56
-rw-r--r-- test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll | 20
-rw-r--r-- test/CodeGen/X86/fma4-scalar-memfold.ll | 8
-rw-r--r-- test/CodeGen/X86/fma_patterns.ll | 432
-rw-r--r-- test/CodeGen/X86/fma_patterns_wide.ll | 282
-rw-r--r-- test/CodeGen/X86/fmaddsub-combine.ll | 28
-rw-r--r-- test/CodeGen/X86/fmf-flags.ll | 16
-rw-r--r-- test/CodeGen/X86/fmsubadd-combine.ll | 30
-rw-r--r-- test/CodeGen/X86/fold-load-binops.ll | 32
-rw-r--r-- test/CodeGen/X86/fold-load-unops.ll | 48
-rw-r--r-- test/CodeGen/X86/fold-rmw-ops.ll | 304
-rw-r--r-- test/CodeGen/X86/fold-vector-sext-crash.ll | 2
-rw-r--r-- test/CodeGen/X86/fold-vector-sext-crash2.ll | 16
-rw-r--r-- test/CodeGen/X86/fold-vector-sext-zext.ll | 80
-rw-r--r-- test/CodeGen/X86/fp-fast.ll | 22
-rw-r--r-- test/CodeGen/X86/fp-load-trunc.ll | 16
-rw-r--r-- test/CodeGen/X86/fp-logic-replace.ll | 24
-rw-r--r-- test/CodeGen/X86/fp-logic.ll | 42
-rw-r--r-- test/CodeGen/X86/fp-select-cmp-and.ll | 36
-rw-r--r-- test/CodeGen/X86/fp-trunc.ll | 16
-rw-r--r-- test/CodeGen/X86/fp-une-cmp.ll | 8
-rw-r--r-- test/CodeGen/X86/fp128-cast.ll | 12
-rw-r--r-- test/CodeGen/X86/fp128-i128.ll | 30
-rw-r--r-- test/CodeGen/X86/fp128-select.ll | 6
-rw-r--r-- test/CodeGen/X86/gfni-intrinsics.ll | 6
-rw-r--r-- test/CodeGen/X86/gpr-to-mask.ll | 40
-rw-r--r-- test/CodeGen/X86/haddsub-2.ll | 124
-rw-r--r-- test/CodeGen/X86/haddsub-shuf.ll | 32
-rw-r--r-- test/CodeGen/X86/haddsub-undef.ll | 76
-rw-r--r-- test/CodeGen/X86/haddsub.ll | 96
-rw-r--r-- test/CodeGen/X86/half.ll | 106
-rw-r--r-- test/CodeGen/X86/horizontal-reduce-smax.ll | 196
-rw-r--r-- test/CodeGen/X86/horizontal-reduce-smin.ll | 196
-rw-r--r-- test/CodeGen/X86/horizontal-reduce-umax.ll | 196
-rw-r--r-- test/CodeGen/X86/horizontal-reduce-umin.ll | 196
-rw-r--r-- test/CodeGen/X86/horizontal-shuffle.ll | 96
-rw-r--r-- test/CodeGen/X86/i256-add.ll | 8
-rw-r--r-- test/CodeGen/X86/i64-mem-copy.ll | 10
-rw-r--r-- test/CodeGen/X86/i64-to-float.ll | 40
-rw-r--r-- test/CodeGen/X86/iabs.ll | 20
-rw-r--r-- test/CodeGen/X86/illegal-bitfield-loadstore.ll | 24
-rw-r--r-- test/CodeGen/X86/immediate_merging.ll | 16
-rw-r--r-- test/CodeGen/X86/immediate_merging64.ll | 4
-rw-r--r-- test/CodeGen/X86/implicit-null-checks.mir | 134
-rw-r--r-- test/CodeGen/X86/imul-lea-2.ll | 4
-rw-r--r-- test/CodeGen/X86/imul-lea.ll | 2
-rw-r--r-- test/CodeGen/X86/imul.ll | 36
-rw-r--r-- test/CodeGen/X86/inline-0bh.ll | 2
-rw-r--r-- test/CodeGen/X86/inline-asm-fpstack.ll | 46
-rw-r--r-- test/CodeGen/X86/inline-sse.ll | 4
-rw-r--r-- test/CodeGen/X86/insert-into-constant-vector.ll | 112
-rw-r--r-- test/CodeGen/X86/insertelement-duplicates.ll | 8
-rw-r--r-- test/CodeGen/X86/insertelement-ones.ll | 106
-rw-r--r-- test/CodeGen/X86/insertelement-shuffle.ll | 24
-rw-r--r-- test/CodeGen/X86/insertelement-zero.ll | 128
-rw-r--r-- test/CodeGen/X86/insertps-combine.ll | 60
-rw-r--r-- test/CodeGen/X86/insertps-from-constantpool.ll | 4
-rw-r--r-- test/CodeGen/X86/insertps-unfold-load-bug.ll | 4
-rw-r--r-- test/CodeGen/X86/jump_sign.ll | 58
-rw-r--r-- test/CodeGen/X86/known-bits-vector.ll | 124
-rw-r--r-- test/CodeGen/X86/known-bits.ll | 20
-rw-r--r-- test/CodeGen/X86/known-signbits-vector.ll | 44
-rw-r--r-- test/CodeGen/X86/lea-3.ll | 24
-rw-r--r-- test/CodeGen/X86/lea-opt-cse1.ll | 4
-rw-r--r-- test/CodeGen/X86/lea-opt-cse2.ll | 8
-rw-r--r-- test/CodeGen/X86/lea-opt-cse3.ll | 24
-rw-r--r-- test/CodeGen/X86/lea-opt-cse4.ll | 12
-rw-r--r-- test/CodeGen/X86/lea32-schedule.ll | 198
-rw-r--r-- test/CodeGen/X86/lea64-schedule.ll | 198
-rw-r--r-- test/CodeGen/X86/legalize-shift-64.ll | 20
-rw-r--r-- test/CodeGen/X86/legalize-shl-vec.ll | 12
-rw-r--r-- test/CodeGen/X86/live-out-reg-info.ll | 4
-rw-r--r-- test/CodeGen/X86/load-combine.ll | 148
-rw-r--r-- test/CodeGen/X86/logical-load-fold.ll | 8
-rw-r--r-- test/CodeGen/X86/longlong-deadload.ll | 2
-rw-r--r-- test/CodeGen/X86/loop-search.ll | 10
-rw-r--r-- test/CodeGen/X86/lower-bitcast.ll | 36
-rw-r--r-- test/CodeGen/X86/lower-vec-shift-2.ll | 32
-rw-r--r-- test/CodeGen/X86/lower-vec-shift.ll | 48
-rw-r--r-- test/CodeGen/X86/lower-vec-shuffle-bug.ll | 8
-rw-r--r-- test/CodeGen/X86/lwp-intrinsics-x86_64.ll | 8
-rw-r--r-- test/CodeGen/X86/lwp-intrinsics.ll | 24
-rw-r--r-- test/CodeGen/X86/lzcnt-schedule.ll | 36
-rw-r--r-- test/CodeGen/X86/lzcnt-zext-cmp.ll | 40
-rw-r--r-- test/CodeGen/X86/machine-combiner-int-vec.ll | 18
-rw-r--r-- test/CodeGen/X86/machine-combiner-int.ll | 24
-rw-r--r-- test/CodeGen/X86/machine-combiner.ll | 100
-rw-r--r-- test/CodeGen/X86/machine-cp.ll | 12
-rw-r--r-- test/CodeGen/X86/machine-cse.ll | 24
-rw-r--r-- test/CodeGen/X86/machine-region-info.mir | 16
-rw-r--r-- test/CodeGen/X86/madd.ll | 36
-rw-r--r-- test/CodeGen/X86/mask-negated-bool.ll | 12
-rw-r--r-- test/CodeGen/X86/masked_gather_scatter.ll | 422
-rw-r--r-- test/CodeGen/X86/masked_memop.ll | 266
-rw-r--r-- test/CodeGen/X86/memcmp-minsize.ll | 124
-rw-r--r-- test/CodeGen/X86/memcmp-optsize.ll | 184
-rw-r--r-- test/CodeGen/X86/memcmp.ll | 216
-rw-r--r-- test/CodeGen/X86/memset-2.ll | 8
-rw-r--r-- test/CodeGen/X86/memset-nonzero.ll | 70
-rw-r--r-- test/CodeGen/X86/memset.ll | 12
-rw-r--r-- test/CodeGen/X86/memset64-on-x86-32.ll | 6
-rw-r--r-- test/CodeGen/X86/merge-consecutive-loads-128.ll | 226
-rw-r--r-- test/CodeGen/X86/merge-consecutive-loads-256.ll | 112
-rw-r--r-- test/CodeGen/X86/merge-consecutive-loads-512.ll | 110
-rw-r--r-- test/CodeGen/X86/merge-consecutive-stores.ll | 2
-rw-r--r-- test/CodeGen/X86/merge-store-constants.ll | 16
-rw-r--r-- test/CodeGen/X86/merge-store-partially-alias-loads.ll | 2
-rw-r--r-- test/CodeGen/X86/merge_store.ll | 6
-rw-r--r-- test/CodeGen/X86/merge_store_duplicated_loads.ll | 6
-rw-r--r-- test/CodeGen/X86/mfence.ll | 8
-rw-r--r-- test/CodeGen/X86/misched-copy.ll | 2
-rw-r--r-- test/CodeGen/X86/mmx-arg-passing-x86-64.ll | 6
-rw-r--r-- test/CodeGen/X86/mmx-arg-passing.ll | 8
-rw-r--r-- test/CodeGen/X86/mmx-bitcast.ll | 14
-rw-r--r-- test/CodeGen/X86/mmx-coalescing.ll | 2
-rw-r--r-- test/CodeGen/X86/mmx-cvt.ll | 40
-rw-r--r-- test/CodeGen/X86/mmx-fold-load.ll | 72
-rw-r--r-- test/CodeGen/X86/mmx-schedule.ll | 1760
-rw-r--r-- test/CodeGen/X86/movbe-schedule.ll | 48
-rw-r--r-- test/CodeGen/X86/movgs.ll | 16
-rw-r--r-- test/CodeGen/X86/movmsk.ll | 14
-rw-r--r-- test/CodeGen/X86/mul-constant-i16.ll | 132
-rw-r--r-- test/CodeGen/X86/mul-constant-i32.ll | 528
-rw-r--r-- test/CodeGen/X86/mul-constant-i64.ll | 528
-rw-r--r-- test/CodeGen/X86/mul-constant-result.ll | 16
-rw-r--r-- test/CodeGen/X86/mul-i1024.ll | 4
-rw-r--r-- test/CodeGen/X86/mul-i256.ll | 4
-rw-r--r-- test/CodeGen/X86/mul-i512.ll | 4
-rw-r--r-- test/CodeGen/X86/mul128.ll | 4
-rw-r--r-- test/CodeGen/X86/mul64.ll | 4
-rw-r--r-- test/CodeGen/X86/mulvi32.ll | 58
-rw-r--r-- test/CodeGen/X86/mulx32.ll | 4
-rw-r--r-- test/CodeGen/X86/mulx64.ll | 4
-rw-r--r-- test/CodeGen/X86/neg_cmp.ll | 8
-rw-r--r-- test/CodeGen/X86/negate-i1.ll | 32
-rw-r--r-- test/CodeGen/X86/negate-shift.ll | 8
-rw-r--r-- test/CodeGen/X86/negate.ll | 12
-rw-r--r-- test/CodeGen/X86/negative-sin.ll | 12
-rw-r--r-- test/CodeGen/X86/no-sse2-avg.ll | 2
-rw-r--r-- test/CodeGen/X86/nontemporal-2.ll | 326
-rw-r--r-- test/CodeGen/X86/nontemporal-loads.ll | 478
-rw-r--r-- test/CodeGen/X86/nontemporal.ll | 8
-rw-r--r-- test/CodeGen/X86/nosse-vector.ll | 28
-rw-r--r-- test/CodeGen/X86/not-and-simplify.ll | 8
-rw-r--r-- test/CodeGen/X86/oddshuffles.ll | 184
-rw-r--r-- test/CodeGen/X86/optimize-max-1.ll | 16
-rw-r--r-- test/CodeGen/X86/optimize-max-2.ll | 4
-rw-r--r-- test/CodeGen/X86/or-branch.ll | 18
-rw-r--r-- test/CodeGen/X86/or-lea.ll | 16
-rw-r--r-- test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll | 24
-rw-r--r-- test/CodeGen/X86/overflow.ll | 4
-rw-r--r-- test/CodeGen/X86/packss.ll | 20
-rw-r--r-- test/CodeGen/X86/palignr.ll | 50
-rw-r--r-- test/CodeGen/X86/pause.ll | 2
-rw-r--r-- test/CodeGen/X86/peep-setb.ll | 18
-rw-r--r-- test/CodeGen/X86/peep-test-4.ll | 54
-rw-r--r-- test/CodeGen/X86/peephole-cvt-sse.ll | 8
-rw-r--r-- test/CodeGen/X86/peephole-na-phys-copy-folding.ll | 72
-rw-r--r-- test/CodeGen/X86/peephole-recurrence.mir | 64
-rw-r--r-- test/CodeGen/X86/phaddsub.ll | 64
-rw-r--r-- test/CodeGen/X86/pku.ll | 4
-rw-r--r-- test/CodeGen/X86/pmovsx-inreg.ll | 84
-rw-r--r-- test/CodeGen/X86/pmul.ll | 160
-rw-r--r-- test/CodeGen/X86/pointer-vector.ll | 24
-rw-r--r-- test/CodeGen/X86/popcnt-schedule.ll | 48
-rw-r--r-- test/CodeGen/X86/popcnt.ll | 32
-rw-r--r-- test/CodeGen/X86/post-ra-sched.ll | 2
-rw-r--r-- test/CodeGen/X86/powi.ll | 6
-rw-r--r-- test/CodeGen/X86/pr11334.ll | 20
-rw-r--r-- test/CodeGen/X86/pr11985.ll | 4
-rw-r--r-- test/CodeGen/X86/pr12312.ll | 48
-rw-r--r-- test/CodeGen/X86/pr13577.ll | 4
-rw-r--r-- test/CodeGen/X86/pr14161.ll | 4
-rw-r--r-- test/CodeGen/X86/pr14204.ll | 2
-rw-r--r-- test/CodeGen/X86/pr14314.ll | 4
-rw-r--r-- test/CodeGen/X86/pr15267.ll | 8
-rw-r--r-- test/CodeGen/X86/pr15309.ll | 2
-rw-r--r-- test/CodeGen/X86/pr15705.ll | 10
-rw-r--r-- test/CodeGen/X86/pr15981.ll | 14
-rw-r--r-- test/CodeGen/X86/pr16031.ll | 2
-rw-r--r-- test/CodeGen/X86/pr16360.ll | 2
-rw-r--r-- test/CodeGen/X86/pr17764.ll | 2
-rw-r--r-- test/CodeGen/X86/pr18014.ll | 2
-rw-r--r-- test/CodeGen/X86/pr18344.ll | 4
-rw-r--r-- test/CodeGen/X86/pr20011.ll | 4
-rw-r--r-- test/CodeGen/X86/pr20012.ll | 4
-rw-r--r-- test/CodeGen/X86/pr21792.ll | 2
-rw-r--r-- test/CodeGen/X86/pr22338.ll | 8
-rw-r--r-- test/CodeGen/X86/pr22774.ll | 2
-rw-r--r-- test/CodeGen/X86/pr22970.ll | 8
-rw-r--r-- test/CodeGen/X86/pr23603.ll | 4
-rw-r--r-- test/CodeGen/X86/pr24602.ll | 2
-rw-r--r-- test/CodeGen/X86/pr2585.ll | 4
-rw-r--r-- test/CodeGen/X86/pr26350.ll | 2
-rw-r--r-- test/CodeGen/X86/pr2656.ll | 4
-rw-r--r-- test/CodeGen/X86/pr27591.ll | 4
-rw-r--r-- test/CodeGen/X86/pr28129.ll | 16
-rw-r--r-- test/CodeGen/X86/pr28173.ll | 10
-rw-r--r-- test/CodeGen/X86/pr28472.ll | 2
-rw-r--r-- test/CodeGen/X86/pr29061.ll | 4
-rw-r--r-- test/CodeGen/X86/pr29112.ll | 2
-rw-r--r-- test/CodeGen/X86/pr29170.ll | 6
-rw-r--r-- test/CodeGen/X86/pr30284.ll | 2
-rw-r--r-- test/CodeGen/X86/pr30430.ll | 2
-rw-r--r-- test/CodeGen/X86/pr30511.ll | 2
-rw-r--r-- test/CodeGen/X86/pr31045.ll | 2
-rw-r--r-- test/CodeGen/X86/pr31088.ll | 12
-rw-r--r-- test/CodeGen/X86/pr31323.ll | 4
-rw-r--r-- test/CodeGen/X86/pr31773.ll | 8
-rw-r--r-- test/CodeGen/X86/pr31956.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32108.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32241.ll | 6
-rw-r--r-- test/CodeGen/X86/pr32256.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32282.ll | 6
-rw-r--r-- test/CodeGen/X86/pr32284.ll | 32
-rw-r--r-- test/CodeGen/X86/pr32329.ll | 4
-rw-r--r-- test/CodeGen/X86/pr32340.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32345.ll | 12
-rw-r--r-- test/CodeGen/X86/pr32368.ll | 24
-rw-r--r-- test/CodeGen/X86/pr32420.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32451.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32484.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32659.ll | 2
-rw-r--r-- test/CodeGen/X86/pr32907.ll | 8
-rw-r--r-- test/CodeGen/X86/pr33290.ll | 4
-rw-r--r-- test/CodeGen/X86/pr33349.ll | 4
-rw-r--r-- test/CodeGen/X86/pr33828.ll | 8
-rw-r--r-- test/CodeGen/X86/pr33844.ll | 2
-rw-r--r-- test/CodeGen/X86/pr33960.ll | 4
-rw-r--r-- test/CodeGen/X86/pr34080.ll | 8
-rw-r--r-- test/CodeGen/X86/pr34088.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34137.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34139.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34149.ll | 6
-rw-r--r-- test/CodeGen/X86/pr34177.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34271-1.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34271.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34381.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34421.ll | 4
-rw-r--r-- test/CodeGen/X86/pr34605.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34629.ll | 4
-rw-r--r-- test/CodeGen/X86/pr34634.ll | 4
-rw-r--r-- test/CodeGen/X86/pr34653.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34657.ll | 2
-rw-r--r-- test/CodeGen/X86/pr34855.ll | 4
-rw-r--r-- test/CodeGen/X86/pr35272.ll | 2
-rw-r--r-- test/CodeGen/X86/pr35399.ll | 2
-rw-r--r-- test/CodeGen/X86/pr35443.ll | 2
-rw-r--r-- test/CodeGen/X86/pre-coalesce.mir | 10
-rw-r--r-- test/CodeGen/X86/promote-vec3.ll | 16
-rw-r--r-- test/CodeGen/X86/pseudo_cmov_lower2.ll | 4
-rw-r--r-- test/CodeGen/X86/pshufb-mask-comments.ll | 12
-rw-r--r-- test/CodeGen/X86/psubus.ll | 260
-rw-r--r-- test/CodeGen/X86/rdrand-x86_64.ll | 2
-rw-r--r-- test/CodeGen/X86/rdrand.ll | 18
-rw-r--r-- test/CodeGen/X86/rdseed-x86_64.ll | 2
-rw-r--r-- test/CodeGen/X86/rdseed.ll | 8
-rw-r--r-- test/CodeGen/X86/recip-fastmath.ll | 162
-rw-r--r-- test/CodeGen/X86/recip-fastmath2.ll | 216
-rw-r--r-- test/CodeGen/X86/recip-pic.ll | 2
-rw-r--r-- test/CodeGen/X86/reduce-trunc-shl.ll | 32
-rw-r--r-- test/CodeGen/X86/rem.ll | 10
-rw-r--r-- test/CodeGen/X86/replace-load-and-with-bzhi.ll | 16
-rw-r--r-- test/CodeGen/X86/ret-mmx.ll | 8
-rw-r--r-- test/CodeGen/X86/rot16.ll | 32
-rw-r--r-- test/CodeGen/X86/rot32.ll | 36
-rw-r--r-- test/CodeGen/X86/rot64.ll | 36
-rw-r--r-- test/CodeGen/X86/rotate.ll | 120
-rw-r--r-- test/CodeGen/X86/rotate4.ll | 32
-rw-r--r-- test/CodeGen/X86/rotate_vec.ll | 8
-rw-r--r-- test/CodeGen/X86/rounding-ops.ll | 60
-rw-r--r-- test/CodeGen/X86/rtm.ll | 20
-rw-r--r-- test/CodeGen/X86/sad.ll | 96
-rw-r--r-- test/CodeGen/X86/sad_variations.ll | 42
-rw-r--r-- test/CodeGen/X86/sandybridge-loads.ll | 4
-rw-r--r-- test/CodeGen/X86/sar_fold.ll | 8
-rw-r--r-- test/CodeGen/X86/sar_fold64.ll | 12
-rw-r--r-- test/CodeGen/X86/sbb.ll | 32
-rw-r--r-- test/CodeGen/X86/scalar-int-to-fp.ll | 124
-rw-r--r-- test/CodeGen/X86/scatter-schedule.ll | 2
-rw-r--r-- test/CodeGen/X86/schedule-x86_32.ll | 120
-rw-r--r-- test/CodeGen/X86/schedule-x86_64.ll | 300
-rw-r--r-- test/CodeGen/X86/select-mmx.ll | 12
-rw-r--r-- test/CodeGen/X86/select-with-and-or.ll | 28
-rw-r--r-- test/CodeGen/X86/select.ll | 202
-rw-r--r-- test/CodeGen/X86/select_const.ll | 84
-rw-r--r-- test/CodeGen/X86/setcc-combine.ll | 24
-rw-r--r-- test/CodeGen/X86/setcc-logic.ll | 84
-rw-r--r-- test/CodeGen/X86/setcc-lowering.ll | 12
-rw-r--r-- test/CodeGen/X86/setcc-narrowing.ll | 2
-rw-r--r-- test/CodeGen/X86/setcc-wide-types.ll | 16
-rw-r--r-- test/CodeGen/X86/setcc.ll | 12
-rw-r--r-- test/CodeGen/X86/sext-i1.ll | 32
-rw-r--r-- test/CodeGen/X86/sext-setcc-self.ll | 12
-rw-r--r-- test/CodeGen/X86/sha-schedule.ll | 56
-rw-r--r-- test/CodeGen/X86/shift-and.ll | 38
-rw-r--r-- test/CodeGen/X86/shift-bmi2.ll | 66
-rw-r--r-- test/CodeGen/X86/shift-codegen.ll | 4
-rw-r--r-- test/CodeGen/X86/shift-combine.ll | 28
-rw-r--r-- test/CodeGen/X86/shift-double-x86_64.ll | 14
-rw-r--r-- test/CodeGen/X86/shift-double.ll | 74
-rw-r--r-- test/CodeGen/X86/shift-folding.ll | 10
-rw-r--r-- test/CodeGen/X86/shift-pcmp.ll | 8
-rw-r--r-- test/CodeGen/X86/shl-crash-on-legalize.ll | 2
-rw-r--r-- test/CodeGen/X86/shrink-compare.ll | 56
-rw-r--r-- test/CodeGen/X86/shrink_vmul.ll | 96
-rw-r--r-- test/CodeGen/X86/shrink_vmul_sse.ll | 2
-rw-r--r-- test/CodeGen/X86/shuffle-combine-crash-2.ll | 4
-rw-r--r-- test/CodeGen/X86/shuffle-of-insert.ll | 48
-rw-r--r-- test/CodeGen/X86/shuffle-of-splat-multiuses.ll | 16
-rw-r--r-- test/CodeGen/X86/shuffle-strided-with-offset-128.ll | 216
-rw-r--r-- test/CodeGen/X86/shuffle-strided-with-offset-256.ll | 172
-rw-r--r-- test/CodeGen/X86/shuffle-strided-with-offset-512.ll | 122
-rw-r--r-- test/CodeGen/X86/shuffle-vs-trunc-128.ll | 160
-rw-r--r-- test/CodeGen/X86/shuffle-vs-trunc-256.ll | 142
-rw-r--r-- test/CodeGen/X86/shuffle-vs-trunc-512.ll | 84
-rw-r--r-- test/CodeGen/X86/sincos.ll | 12
-rw-r--r-- test/CodeGen/X86/sink-blockfreq.ll | 2
-rw-r--r-- test/CodeGen/X86/sink-out-of-loop.ll | 2
-rw-r--r-- test/CodeGen/X86/slow-incdec.ll | 12
-rw-r--r-- test/CodeGen/X86/slow-pmulld.ll | 16
-rw-r--r-- test/CodeGen/X86/slow-unaligned-mem.ll | 4
-rw-r--r-- test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll | 2
-rw-r--r-- test/CodeGen/X86/splat-for-size.ll | 40
-rw-r--r-- test/CodeGen/X86/split-extend-vector-inreg.ll | 8
-rw-r--r-- test/CodeGen/X86/split-store.ll | 34
-rw-r--r-- test/CodeGen/X86/sqrt-fastmath-tune.ll | 12
-rw-r--r-- test/CodeGen/X86/sqrt-fastmath.ll | 44
-rw-r--r-- test/CodeGen/X86/sqrt-partial.ll | 8
-rw-r--r-- test/CodeGen/X86/sse-align-12.ll | 8
-rw-r--r-- test/CodeGen/X86/sse-fcopysign.ll | 24
-rw-r--r-- test/CodeGen/X86/sse-fsignum.ll | 24
-rw-r--r-- test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll | 6
-rw-r--r-- test/CodeGen/X86/sse-intrinsics-fast-isel.ll | 432
-rw-r--r-- test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll | 38
-rw-r--r-- test/CodeGen/X86/sse-intrinsics-x86.ll | 166
-rw-r--r-- test/CodeGen/X86/sse-intrinsics-x86_64.ll | 24
-rw-r--r-- test/CodeGen/X86/sse-minmax.ll | 308
-rw-r--r-- test/CodeGen/X86/sse-only.ll | 2
-rw-r--r-- test/CodeGen/X86/sse-scalar-fp-arith-unary.ll | 16
-rw-r--r-- test/CodeGen/X86/sse-scalar-fp-arith.ll | 284
-rw-r--r-- test/CodeGen/X86/sse-schedule.ll | 980
-rw-r--r-- test/CodeGen/X86/sse1.ll | 24
-rw-r--r-- test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll | 14
-rw-r--r-- test/CodeGen/X86/sse2-intrinsics-fast-isel.ll | 844
-rw-r--r-- test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll | 68
-rw-r--r-- test/CodeGen/X86/sse2-intrinsics-x86.ll | 484
-rw-r--r-- test/CodeGen/X86/sse2-intrinsics-x86_64.ll | 24
-rw-r--r-- test/CodeGen/X86/sse2-schedule.ll | 2500
-rw-r--r-- test/CodeGen/X86/sse2-vector-shifts.ll | 74
-rw-r--r-- test/CodeGen/X86/sse2.ll | 84
-rw-r--r-- test/CodeGen/X86/sse3-avx-addsub-2.ll | 68
-rw-r--r-- test/CodeGen/X86/sse3-avx-addsub.ll | 60
-rw-r--r-- test/CodeGen/X86/sse3-intrinsics-fast-isel.ll | 44
-rw-r--r-- test/CodeGen/X86/sse3-intrinsics-x86.ll | 32
-rw-r--r-- test/CodeGen/X86/sse3-schedule.ll | 240
-rw-r--r-- test/CodeGen/X86/sse3.ll | 72
-rw-r--r-- test/CodeGen/X86/sse41-intrinsics-fast-isel.ll | 244
-rw-r--r-- test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll | 56
-rw-r--r-- test/CodeGen/X86/sse41-intrinsics-x86.ll | 142
-rw-r--r-- test/CodeGen/X86/sse41-pmovxrm.ll | 48
-rw-r--r-- test/CodeGen/X86/sse41-schedule.ll | 882
-rw-r--r-- test/CodeGen/X86/sse41.ll | 232
-rw-r--r-- test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll | 4
-rw-r--r-- test/CodeGen/X86/sse42-intrinsics-fast-isel.ll | 72
-rw-r--r-- test/CodeGen/X86/sse42-intrinsics-x86.ll | 82
-rw-r--r-- test/CodeGen/X86/sse42-intrinsics-x86_64.ll | 4
-rw-r--r-- test/CodeGen/X86/sse42-schedule.ll | 198
-rw-r--r-- test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll | 24
-rw-r--r-- test/CodeGen/X86/sse4a-schedule.ll | 36
-rw-r--r-- test/CodeGen/X86/sse4a-upgrade.ll | 8
-rw-r--r-- test/CodeGen/X86/sse4a.ll | 48
-rw-r--r-- test/CodeGen/X86/sse_partial_update.ll | 14
-rw-r--r-- test/CodeGen/X86/ssse3-intrinsics-fast-isel.ll | 68
-rw-r--r-- test/CodeGen/X86/ssse3-intrinsics-x86.ll | 78
-rw-r--r-- test/CodeGen/X86/ssse3-schedule.ll | 320
-rw-r--r-- test/CodeGen/X86/stack-folding-bmi.ll | 4
-rw-r--r-- test/CodeGen/X86/stack-folding-lwp.ll | 8
-rw-r--r-- test/CodeGen/X86/stack-folding-tbm.ll | 4
-rw-r--r-- test/CodeGen/X86/stack-protector-weight.ll | 12
-rw-r--r-- test/CodeGen/X86/statepoint-live-in.ll | 12
-rw-r--r-- test/CodeGen/X86/stores-merging.ll | 10
-rw-r--r-- test/CodeGen/X86/subcarry.ll | 4
-rw-r--r-- test/CodeGen/X86/subvector-broadcast.ll | 412
-rw-r--r-- test/CodeGen/X86/switch-edge-weight.ll | 102
-rw-r--r-- test/CodeGen/X86/switch-jump-table.ll | 6
-rw-r--r-- test/CodeGen/X86/switch-lower-peel-top-case.ll | 20
-rw-r--r-- test/CodeGen/X86/switch.ll | 4
-rw-r--r-- test/CodeGen/X86/swizzle-2.ll | 84
-rw-r--r-- test/CodeGen/X86/swizzle-avx2.ll | 14
-rw-r--r-- test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll | 20
-rw-r--r-- test/CodeGen/X86/tbm-intrinsics-fast-isel.ll | 40
-rw-r--r-- test/CodeGen/X86/tbm-intrinsics-x86_64.ll | 12
-rw-r--r-- test/CodeGen/X86/tbm_patterns.ll | 136
-rw-r--r-- test/CodeGen/X86/tls-pie.ll | 24
-rw-r--r-- test/CodeGen/X86/tls-shrink-wrapping.ll | 2
-rw-r--r-- test/CodeGen/X86/trunc-ext-ld-st.ll | 24
-rw-r--r-- test/CodeGen/X86/trunc-store.ll | 4
-rw-r--r-- test/CodeGen/X86/trunc-to-bool.ll | 18
-rw-r--r-- test/CodeGen/X86/uint64-to-float.ll | 6
-rw-r--r-- test/CodeGen/X86/uint_to_fp-2.ll | 4
-rw-r--r-- test/CodeGen/X86/uint_to_fp-3.ll | 16
-rw-r--r-- test/CodeGen/X86/uint_to_fp.ll | 4
-rw-r--r-- test/CodeGen/X86/umul-with-overflow.ll | 12
-rw-r--r-- test/CodeGen/X86/unaligned-32-byte-memops.ll | 60
-rw-r--r-- test/CodeGen/X86/urem-i8-constant.ll | 2
-rw-r--r-- test/CodeGen/X86/urem-power-of-two.ll | 24
-rw-r--r-- test/CodeGen/X86/use-add-flags.ll | 20
-rw-r--r-- test/CodeGen/X86/v2f32.ll | 20
-rw-r--r-- test/CodeGen/X86/v4f32-immediate.ll | 4
-rw-r--r-- test/CodeGen/X86/v8i1-masks.ll | 8
-rw-r--r-- test/CodeGen/X86/vaargs.ll | 2
-rw-r--r-- test/CodeGen/X86/vaes-intrinsics-avx-x86.ll | 2
-rw-r--r-- test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll | 8
-rw-r--r-- test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll | 16
-rw-r--r-- test/CodeGen/X86/var-permute-128.ll | 26
-rw-r--r-- test/CodeGen/X86/var-permute-256.ll | 48
-rw-r--r-- test/CodeGen/X86/var-permute-512.ll | 16
-rw-r--r-- test/CodeGen/X86/vec-copysign-avx512.ll | 24
-rw-r--r-- test/CodeGen/X86/vec-copysign.ll | 16
-rw-r--r-- test/CodeGen/X86/vec-trunc-store.ll | 4
-rw-r--r-- test/CodeGen/X86/vec3.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_cast2.ll | 24
-rw-r--r-- test/CodeGen/X86/vec_cmp_sint-128.ll | 168
-rw-r--r-- test/CodeGen/X86/vec_cmp_uint-128.ll | 200
-rw-r--r-- test/CodeGen/X86/vec_compare-sse4.ll | 12
-rw-r--r-- test/CodeGen/X86/vec_ctbits.ll | 12
-rw-r--r-- test/CodeGen/X86/vec_ext_inreg.ll | 24
-rw-r--r-- test/CodeGen/X86/vec_extract-avx.ll | 32
-rw-r--r-- test/CodeGen/X86/vec_extract-mmx.ll | 20
-rw-r--r-- test/CodeGen/X86/vec_extract-sse4.ll | 16
-rw-r--r-- test/CodeGen/X86/vec_extract.ll | 16
-rw-r--r-- test/CodeGen/X86/vec_fabs.ll | 80
-rw-r--r-- test/CodeGen/X86/vec_floor.ll | 216
-rw-r--r-- test/CodeGen/X86/vec_fneg.ll | 16
-rw-r--r-- test/CodeGen/X86/vec_fp_to_int.ll | 340
-rw-r--r-- test/CodeGen/X86/vec_fpext.ll | 48
-rw-r--r-- test/CodeGen/X86/vec_fptrunc.ll | 48
-rw-r--r-- test/CodeGen/X86/vec_i64.ll | 8
-rw-r--r-- test/CodeGen/X86/vec_ins_extract-1.ll | 16
-rw-r--r-- test/CodeGen/X86/vec_insert-2.ll | 16
-rw-r--r-- test/CodeGen/X86/vec_insert-3.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_insert-4.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_insert-5.ll | 36
-rw-r--r-- test/CodeGen/X86/vec_insert-7.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_insert-8.ll | 8
-rw-r--r-- test/CodeGen/X86/vec_insert-9.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_insert-mmx.ll | 16
-rw-r--r-- test/CodeGen/X86/vec_int_to_fp.ll | 836
-rw-r--r-- test/CodeGen/X86/vec_loadsingles.ll | 18
-rw-r--r-- test/CodeGen/X86/vec_logical.ll | 20
-rw-r--r-- test/CodeGen/X86/vec_minmax_match.ll | 36
-rw-r--r-- test/CodeGen/X86/vec_minmax_sint.ll | 416
-rw-r--r-- test/CodeGen/X86/vec_minmax_uint.ll | 416
-rw-r--r-- test/CodeGen/X86/vec_partial.ll | 12
-rw-r--r-- test/CodeGen/X86/vec_reassociate.ll | 40
-rw-r--r-- test/CodeGen/X86/vec_return.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_sdiv_to_shift.ll | 32
-rw-r--r-- test/CodeGen/X86/vec_set-2.ll | 8
-rw-r--r-- test/CodeGen/X86/vec_set-3.ll | 12
-rw-r--r-- test/CodeGen/X86/vec_set-4.ll | 8
-rw-r--r-- test/CodeGen/X86/vec_set-6.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_set-7.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_set-8.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_set-A.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_set-B.ll | 8
-rw-r--r-- test/CodeGen/X86/vec_set-C.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_set-D.ll | 2
-rw-r--r-- test/CodeGen/X86/vec_set-F.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_set-H.ll | 2
-rw-r--r-- test/CodeGen/X86/vec_set.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_setcc.ll | 44
-rw-r--r-- test/CodeGen/X86/vec_shift.ll | 12
-rw-r--r-- test/CodeGen/X86/vec_shift2.ll | 8
-rw-r--r-- test/CodeGen/X86/vec_shift3.ll | 12
-rw-r--r-- test/CodeGen/X86/vec_shift4.ll | 8
-rw-r--r-- test/CodeGen/X86/vec_shift5.ll | 64
-rw-r--r-- test/CodeGen/X86/vec_shift6.ll | 54
-rw-r--r-- test/CodeGen/X86/vec_shift7.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_ss_load_fold.ll | 84
-rw-r--r-- test/CodeGen/X86/vec_trunc_sext.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_uint_to_fp-fastmath.ll | 24
-rw-r--r-- test/CodeGen/X86/vec_unsafe-fp-math.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_zero_cse.ll | 16
-rw-r--r-- test/CodeGen/X86/vector-bitreverse.ll | 200
-rw-r--r-- test/CodeGen/X86/vector-blend.ll | 234
-rw-r--r-- test/CodeGen/X86/vector-compare-all_of.ll | 116
-rw-r--r-- test/CodeGen/X86/vector-compare-any_of.ll | 116
-rw-r--r-- test/CodeGen/X86/vector-compare-combines.ll | 8
-rw-r--r-- test/CodeGen/X86/vector-compare-results.ll | 270
-rw-r--r-- test/CodeGen/X86/vector-extend-inreg.ll | 8
-rw-r--r-- test/CodeGen/X86/vector-half-conversions.ll | 230
-rw-r--r-- test/CodeGen/X86/vector-idiv-sdiv-128.ll | 56
-rw-r--r-- test/CodeGen/X86/vector-idiv-sdiv-256.ll | 36
-rw-r--r-- test/CodeGen/X86/vector-idiv-sdiv-512.ll | 24
-rw-r--r-- test/CodeGen/X86/vector-idiv-udiv-128.ll | 56
-rw-r--r-- test/CodeGen/X86/vector-idiv-udiv-256.ll | 36
-rw-r--r-- test/CodeGen/X86/vector-idiv-udiv-512.ll | 24
-rw-r--r-- test/CodeGen/X86/vector-idiv.ll | 12
-rw-r--r-- test/CodeGen/X86/vector-interleave.ll | 6
-rw-r--r-- test/CodeGen/X86/vector-lzcnt-128.ll | 204
-rw-r--r-- test/CodeGen/X86/vector-lzcnt-256.ll | 136
-rw-r--r-- test/CodeGen/X86/vector-lzcnt-512.ll | 64
-rw-r--r-- test/CodeGen/X86/vector-merge-store-fp-constants.ll | 4
-rw-r--r-- test/CodeGen/X86/vector-mul.ll | 208
-rw-r--r-- test/CodeGen/X86/vector-narrow-binop.ll | 12
-rw-r--r-- test/CodeGen/X86/vector-pcmp.ll | 96
-rw-r--r-- test/CodeGen/X86/vector-popcnt-128.ll | 104
-rw-r--r-- test/CodeGen/X86/vector-popcnt-256.ll | 48
-rw-r--r-- test/CodeGen/X86/vector-popcnt-512.ll | 36
-rw-r--r-- test/CodeGen/X86/vector-rem.ll | 6
-rw-r--r-- test/CodeGen/X86/vector-rotate-128.ll | 208
-rw-r--r-- test/CodeGen/X86/vector-rotate-256.ll | 180
-rw-r--r-- test/CodeGen/X86/vector-rotate-512.ll | 80
-rw-r--r-- test/CodeGen/X86/vector-sext.ll | 586
-rw-r--r-- test/CodeGen/X86/vector-shift-ashr-128.ll | 248
-rw-r--r-- test/CodeGen/X86/vector-shift-ashr-256.ll | 276
-rw-r--r-- test/CodeGen/X86/vector-shift-ashr-512.ll | 52
-rw-r--r--test/CodeGen/X86/vector-shift-lshr-128.ll246
-rw-r--r--test/CodeGen/X86/vector-shift-lshr-256.ll276
-rw-r--r--test/CodeGen/X86/vector-shift-lshr-512.ll48
-rw-r--r--test/CodeGen/X86/vector-shift-shl-128.ll242
-rw-r--r--test/CodeGen/X86/vector-shift-shl-256.ll276
-rw-r--r--test/CodeGen/X86/vector-shift-shl-512.ll48
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v16.ll446
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v2.ll426
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v4.ll666
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v8.ll706
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v16.ll888
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v32.ll514
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v4.ll404
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v8.ll528
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v16.ll142
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v32.ll76
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v64.ll138
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v8.ll668
-rw-r--r--test/CodeGen/X86/vector-shuffle-avx512.ll192
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx.ll116
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx2.ll244
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll272
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll24
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll32
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-sse41.ll4
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-sse4a.ll18
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-ssse3.ll208
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-xop.ll112
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining.ll726
-rw-r--r--test/CodeGen/X86/vector-shuffle-masked.ll276
-rw-r--r--test/CodeGen/X86/vector-shuffle-mmx.ll12
-rw-r--r--test/CodeGen/X86/vector-shuffle-sse1.ll56
-rw-r--r--test/CodeGen/X86/vector-shuffle-sse41.ll12
-rw-r--r--test/CodeGen/X86/vector-shuffle-sse4a.ll108
-rw-r--r--test/CodeGen/X86/vector-shuffle-v1.ll90
-rw-r--r--test/CodeGen/X86/vector-shuffle-v48.ll2
-rw-r--r--test/CodeGen/X86/vector-shuffle-variable-128.ll68
-rw-r--r--test/CodeGen/X86/vector-shuffle-variable-256.ll28
-rw-r--r--test/CodeGen/X86/vector-sqrt.ll4
-rw-r--r--test/CodeGen/X86/vector-trunc-math.ll664
-rw-r--r--test/CodeGen/X86/vector-trunc.ll374
-rw-r--r--test/CodeGen/X86/vector-tzcnt-128.ll258
-rw-r--r--test/CodeGen/X86/vector-tzcnt-256.ll192
-rw-r--r--test/CodeGen/X86/vector-tzcnt-512.ll80
-rw-r--r--test/CodeGen/X86/vector-unsigned-cmp.ll72
-rw-r--r--test/CodeGen/X86/vector-zext.ll494
-rw-r--r--test/CodeGen/X86/vector-zmov.ll8
-rw-r--r--test/CodeGen/X86/viabs.ll174
-rw-r--r--test/CodeGen/X86/vmovq.ll4
-rw-r--r--test/CodeGen/X86/vpshufbitqbm-intrinsics.ll6
-rw-r--r--test/CodeGen/X86/vselect-2.ll28
-rw-r--r--test/CodeGen/X86/vselect-avx.ll14
-rw-r--r--test/CodeGen/X86/vselect-constants.ll56
-rw-r--r--test/CodeGen/X86/vselect-minmax.ll1632
-rw-r--r--test/CodeGen/X86/vselect-packss.ll56
-rw-r--r--test/CodeGen/X86/vselect-pcmp.ll64
-rw-r--r--test/CodeGen/X86/vselect-zero.ll12
-rw-r--r--test/CodeGen/X86/vselect.ll148
-rw-r--r--test/CodeGen/X86/vshift-1.ll24
-rw-r--r--test/CodeGen/X86/vshift-2.ll24
-rw-r--r--test/CodeGen/X86/vshift-3.ll20
-rw-r--r--test/CodeGen/X86/vshift-4.ll28
-rw-r--r--test/CodeGen/X86/vshift-5.ll16
-rw-r--r--test/CodeGen/X86/vshift-6.ll4
-rw-r--r--test/CodeGen/X86/vsplit-and.ll4
-rw-r--r--test/CodeGen/X86/vzero-excess.ll8
-rw-r--r--test/CodeGen/X86/wide-fma-contraction.ll4
-rw-r--r--test/CodeGen/X86/wide-integer-cmp.ll22
-rw-r--r--test/CodeGen/X86/widen_arith-1.ll4
-rw-r--r--test/CodeGen/X86/widen_arith-2.ll4
-rw-r--r--test/CodeGen/X86/widen_arith-3.ll4
-rw-r--r--test/CodeGen/X86/widen_arith-4.ll4
-rw-r--r--test/CodeGen/X86/widen_arith-5.ll4
-rw-r--r--test/CodeGen/X86/widen_arith-6.ll4
-rw-r--r--test/CodeGen/X86/widen_bitops-0.ll48
-rw-r--r--test/CodeGen/X86/widen_bitops-1.ll48
-rw-r--r--test/CodeGen/X86/widen_cast-1.ll4
-rw-r--r--test/CodeGen/X86/widen_cast-2.ll2
-rw-r--r--test/CodeGen/X86/widen_cast-3.ll4
-rw-r--r--test/CodeGen/X86/widen_cast-4.ll8
-rw-r--r--test/CodeGen/X86/widen_cast-5.ll4
-rw-r--r--test/CodeGen/X86/widen_cast-6.ll4
-rw-r--r--test/CodeGen/X86/widen_compare-1.ll4
-rw-r--r--test/CodeGen/X86/widen_conv-1.ll12
-rw-r--r--test/CodeGen/X86/widen_conv-2.ll4
-rw-r--r--test/CodeGen/X86/widen_conv-3.ll14
-rw-r--r--test/CodeGen/X86/widen_conv-4.ll16
-rw-r--r--test/CodeGen/X86/widen_conversions.ll4
-rw-r--r--test/CodeGen/X86/widen_extract-1.ll4
-rw-r--r--test/CodeGen/X86/widen_load-0.ll4
-rw-r--r--test/CodeGen/X86/widen_load-2.ll44
-rw-r--r--test/CodeGen/X86/widen_load-3.ll16
-rw-r--r--test/CodeGen/X86/widen_shuffle-1.ll20
-rw-r--r--test/CodeGen/X86/widened-broadcast.ll168
-rw-r--r--test/CodeGen/X86/win64_sibcall.ll4
-rw-r--r--test/CodeGen/X86/win_chkstk.ll8
-rw-r--r--test/CodeGen/X86/win_coreclr_chkstk.ll4
-rw-r--r--test/CodeGen/X86/x32-cet-intrinsics.ll16
-rw-r--r--test/CodeGen/X86/x32-lea-1.ll2
-rw-r--r--test/CodeGen/X86/x64-cet-intrinsics.ll24
-rw-r--r--test/CodeGen/X86/x86-64-intrcc-nosse.ll2
-rw-r--r--test/CodeGen/X86/x86-fold-pshufb.ll4
-rw-r--r--test/CodeGen/X86/x86-interleaved-access.ll106
-rw-r--r--test/CodeGen/X86/x86-interleaved-check.ll2
-rw-r--r--test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll2
-rw-r--r--test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll10
-rw-r--r--test/CodeGen/X86/x86-shifts.ll64
-rw-r--r--test/CodeGen/X86/x86-shrink-wrapping.ll6
-rw-r--r--test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll6
-rw-r--r--test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll2
-rw-r--r--test/CodeGen/X86/x87-schedule.ll940
-rw-r--r--test/CodeGen/X86/xaluo.ll294
-rw-r--r--test/CodeGen/X86/xchg-nofold.ll4
-rw-r--r--test/CodeGen/X86/xmulo.ll144
-rw-r--r--test/CodeGen/X86/xop-ifma.ll22
-rw-r--r--test/CodeGen/X86/xop-intrinsics-fast-isel.ll256
-rw-r--r--test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll154
-rw-r--r--test/CodeGen/X86/xop-intrinsics-x86_64.ll166
-rw-r--r--test/CodeGen/X86/xop-mask-comments.ll48
-rw-r--r--test/CodeGen/X86/xop-pcmov.ll24
-rw-r--r--test/CodeGen/X86/xor-icmp.ll16
-rw-r--r--test/CodeGen/X86/xor-select-i1-combine.ll4
-rw-r--r--test/CodeGen/X86/zext-shl.ll4
-rw-r--r--test/CodeGen/X86/zext-trunc.ll2
-rw-r--r--test/DebugInfo/COFF/asan-module-ctor.ll2
-rw-r--r--test/DebugInfo/COFF/inlining-header.ll2
-rw-r--r--test/DebugInfo/COFF/local-variable-gap.ll2
-rw-r--r--test/DebugInfo/COFF/local-variables.ll4
-rw-r--r--test/DebugInfo/COFF/multifile.ll4
-rw-r--r--test/DebugInfo/COFF/multifunction.ll12
-rw-r--r--test/DebugInfo/COFF/pieces.ll2
-rw-r--r--test/DebugInfo/COFF/register-variables.ll4
-rw-r--r--test/DebugInfo/COFF/simple.ll4
-rw-r--r--test/DebugInfo/MIR/X86/live-debug-values-3preds.mir4
-rw-r--r--test/DebugInfo/MIR/X86/live-debug-values.mir4
-rw-r--r--test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir8
-rw-r--r--test/DebugInfo/SystemZ/variable-loc.s2
-rw-r--r--test/DebugInfo/X86/dbg-value-transfer-order.ll2
-rw-r--r--test/DebugInfo/X86/live-debug-values.ll4
-rw-r--r--test/ExecutionEngine/RuntimeDyld/PowerPC/ppc32_elf_rel_addr16.s2
-rw-r--r--test/ExecutionEngine/RuntimeDyld/X86/COFF_x86_64.s2
-rw-r--r--test/ExecutionEngine/RuntimeDyld/X86/ELF_x64-64_PIC_relocations.s2
-rw-r--r--test/ExecutionEngine/RuntimeDyld/X86/ELF_x86_64_StubBuf.s2
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/asm_mov.s4
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s2
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s2
-rw-r--r--test/MC/AArch64/arm64-ilp32.s2
-rw-r--r--test/MC/AArch64/arm64-leaf-compact-unwind.s16
-rw-r--r--test/MC/AArch64/basic-pic.s10
-rw-r--r--test/MC/AArch64/elf-extern.s2
-rw-r--r--test/MC/AArch64/inline-asm-modifiers.s12
-rw-r--r--test/MC/AArch64/jump-table.s4
-rw-r--r--test/MC/ARM/2010-11-30-reloc-movt.s2
-rw-r--r--test/MC/ARM/elf-eflags-eabi.s2
-rw-r--r--test/MC/ARM/elf-movt.s2
-rw-r--r--test/MC/AsmParser/seh-directive-errors.s2
-rw-r--r--test/MC/COFF/basic-coff-64.s2
-rw-r--r--test/MC/COFF/basic-coff.s2
-rw-r--r--test/MC/COFF/cv-def-range.s2
-rw-r--r--test/MC/COFF/cv-empty-linetable.s2
-rw-r--r--test/MC/COFF/cv-inline-linetable-unreachable.s2
-rw-r--r--test/MC/COFF/cv-inline-linetable.s2
-rw-r--r--test/MC/COFF/diff.s2
-rw-r--r--test/MC/COFF/seh-linkonce.s2
-rw-r--r--test/MC/COFF/seh-section-2.s4
-rw-r--r--test/MC/COFF/simple-fixups.s6
-rw-r--r--test/MC/COFF/symbol-alias.s2
-rw-r--r--test/MC/COFF/symbol-fragment-offset-64.s2
-rw-r--r--test/MC/COFF/symbol-fragment-offset.s2
-rw-r--r--test/MC/COFF/weak.s4
-rw-r--r--test/MC/ELF/ARM/clang-section.s12
-rw-r--r--test/MC/ELF/basic-elf-32.s2
-rw-r--r--test/MC/ELF/basic-elf-64.s2
-rw-r--r--test/MC/ELF/call-abs.s2
-rw-r--r--test/MC/ELF/fde.s2
-rw-r--r--test/MC/MachO/debug_frame.s2
-rw-r--r--test/MC/Mips/do_switch1.s2
-rw-r--r--test/MC/Mips/do_switch2.s2
-rw-r--r--test/MC/Mips/do_switch3.s2
-rw-r--r--test/MC/Mips/elf-N64.s2
-rw-r--r--test/MC/Mips/elf-gprel-32-64.s2
-rw-r--r--test/MC/Mips/elf-relsym.s2
-rw-r--r--test/MC/Mips/elf-tls.s6
-rw-r--r--test/MC/Mips/mips_gprel16.s4
-rw-r--r--test/MC/Mips/r-mips-got-disp.s2
-rw-r--r--test/MC/Mips/xgot.s2
-rw-r--r--test/MC/PowerPC/tls-gd-obj.s2
-rw-r--r--test/MC/PowerPC/tls-ie-obj.s2
-rw-r--r--test/MC/PowerPC/tls-ld-obj.s2
-rw-r--r--test/MC/X86/compact-unwind.s4
-rw-r--r--test/tools/llvm-cfi-verify/X86/Inputs/unprotected-fullinfo.s8
-rw-r--r--test/tools/llvm-cfi-verify/X86/Inputs/unprotected-lineinfo.s8
-rw-r--r--test/tools/llvm-cfi-verify/X86/Inputs/unprotected-nolineinfo.s8
-rw-r--r--test/tools/llvm-dwarfdump/X86/brief.s2
-rw-r--r--test/tools/llvm-dwarfdump/X86/debugloc.s4
-rw-r--r--test/tools/llvm-dwarfdump/X86/lookup.s2
-rw-r--r--test/tools/llvm-dwarfdump/X86/verify_debug_info.s2
-rw-r--r--test/tools/llvm-dwarfdump/X86/verify_die_ranges.s2
1435 files changed, 50355 insertions, 50304 deletions
diff --git a/docs/MIRLangRef.rst b/docs/MIRLangRef.rst
index b4ca8f2347a..fff0d3ef0eb 100644
--- a/docs/MIRLangRef.rst
+++ b/docs/MIRLangRef.rst
@@ -246,13 +246,25 @@ blocks are referenced using the following syntax:
.. code-block:: text
- %bb.<id>[.<name>]
+ %bb.<id>
-Examples:
+Example:
.. code-block:: llvm
%bb.0
+
+The following syntax is also supported, but the former syntax is preferred for
+block references:
+
+.. code-block:: text
+
+ %bb.<id>[.<name>]
+
+Example:
+
+.. code-block:: llvm
+
%bb.1.then
Successors
diff --git a/docs/NVPTXUsage.rst b/docs/NVPTXUsage.rst
index 159fe078653..38222afbc63 100644
--- a/docs/NVPTXUsage.rst
+++ b/docs/NVPTXUsage.rst
@@ -499,7 +499,7 @@ The output we get from ``llc`` (as of LLVM 3.4):
.reg .s32 %r<2>;
.reg .s64 %rl<8>;
- // BB#0: // %entry
+ // %bb.0: // %entry
ld.param.u64 %rl1, [kernel_param_0];
mov.u32 %r1, %tid.x;
mul.wide.s32 %rl2, %r1, 4;
@@ -897,7 +897,7 @@ This gives us the following PTX (excerpt):
.reg .s32 %r<21>;
.reg .s64 %rl<8>;
- // BB#0: // %entry
+ // %bb.0: // %entry
ld.param.u64 %rl2, [kernel_param_0];
mov.u32 %r3, %tid.x;
ld.param.u64 %rl3, [kernel_param_1];
@@ -921,7 +921,7 @@ This gives us the following PTX (excerpt):
abs.f32 %f4, %f1;
setp.gtu.f32 %p4, %f4, 0f7F800000;
@%p4 bra BB0_4;
- // BB#3: // %__nv_isnanf.exit5.i
+ // %bb.3: // %__nv_isnanf.exit5.i
abs.f32 %f5, %f2;
setp.le.f32 %p5, %f5, 0f7F800000;
@%p5 bra BB0_5;
@@ -953,7 +953,7 @@ This gives us the following PTX (excerpt):
selp.f32 %f110, 0f7F800000, %f99, %p16;
setp.eq.f32 %p17, %f110, 0f7F800000;
@%p17 bra BB0_28;
- // BB#27:
+ // %bb.27:
fma.rn.f32 %f110, %f110, %f108, %f110;
BB0_28: // %__internal_accurate_powf.exit.i
setp.lt.f32 %p18, %f1, 0f00000000;
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index 0f5b04d9045..0730eb763e1 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -25,6 +25,7 @@
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Printable.h"
#include <cassert>
#include <cstdint>
#include <functional>
@@ -771,6 +772,14 @@ private:
raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);
+/// Prints a machine basic block reference.
+///
+/// The format is:
+/// %bb.5 - a machine basic block with MBB.getNumber() == 5.
+///
+/// Usage: OS << printMBBReference(MBB) << '\n';
+Printable printMBBReference(const MachineBasicBlock &MBB);
+
// This is useful when building IndexedMaps keyed on basic block pointers.
struct MBB2NumberFunctor {
using argument_type = const MachineBasicBlock *;
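[Note] For orientation, a minimal sketch of how the new helper composes with any raw_ostream, assuming the usual LLVM headers; emitBlockNote is an illustrative name, not part of the patch:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Support/Printable.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // Streams the new "%bb.<N>" spelling instead of the old "BB#<N>".
    static void emitBlockNote(raw_ostream &OS, const MachineBasicBlock &MBB) {
      OS << printMBBReference(MBB) << '\n';
    }

Because Printable carries its own print callback, the same call works for dbgs(), errs(), or a string stream, which is what lets the patch below delete the per-printer helpers.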
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 2d82f81ed70..1bc8b4eee0f 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2710,7 +2710,8 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const {
(isBlockOnlyReachableByFallthrough(&MBB) && !MBB.isEHFuncletEntry())) {
if (isVerbose()) {
// NOTE: Want this comment at start of line, don't emit with AddComment.
- OutStreamer->emitRawComment(" BB#" + Twine(MBB.getNumber()) + ":", false);
+ OutStreamer->emitRawComment(" %bb." + Twine(MBB.getNumber()) + ":",
+ false);
}
} else {
OutStreamer->EmitLabel(MBB.getSymbol());
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index d31260e767f..92e73cb502c 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -613,8 +613,8 @@ ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2,
CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2);
if (CommonTailLen == 0)
return false;
- DEBUG(dbgs() << "Common tail length of BB#" << MBB1->getNumber()
- << " and BB#" << MBB2->getNumber() << " is " << CommonTailLen
+ DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1)
+ << " and " << printMBBReference(*MBB2) << " is " << CommonTailLen
<< '\n');
// It's almost always profitable to merge any number of non-terminator
@@ -770,7 +770,7 @@ bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
SameTails[commonTailIndex].getTailStartPos();
MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
- DEBUG(dbgs() << "\nSplitting BB#" << MBB->getNumber() << ", size "
+ DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size "
<< maxCommonTailLength);
// If the split block unconditionally falls-thru to SuccBB, it will be
@@ -920,20 +920,17 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
bool MadeChange = false;
DEBUG(dbgs() << "\nTryTailMergeBlocks: ";
- for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
- dbgs() << "BB#" << MergePotentials[i].getBlock()->getNumber()
- << (i == e-1 ? "" : ", ");
- dbgs() << "\n";
- if (SuccBB) {
- dbgs() << " with successor BB#" << SuccBB->getNumber() << '\n';
+ for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) dbgs()
+ << printMBBReference(*MergePotentials[i].getBlock())
+ << (i == e - 1 ? "" : ", ");
+ dbgs() << "\n"; if (SuccBB) {
+ dbgs() << " with successor " << printMBBReference(*SuccBB) << '\n';
if (PredBB)
- dbgs() << " which has fall-through from BB#"
- << PredBB->getNumber() << "\n";
- }
- dbgs() << "Looking for common tails of at least "
- << MinCommonTailLength << " instruction"
- << (MinCommonTailLength == 1 ? "" : "s") << '\n';
- );
+ dbgs() << " which has fall-through from "
+ << printMBBReference(*PredBB) << "\n";
+ } dbgs() << "Looking for common tails of at least "
+ << MinCommonTailLength << " instruction"
+ << (MinCommonTailLength == 1 ? "" : "s") << '\n';);
// Sort by hash value so that blocks with identical end sequences sort
// together.
@@ -1013,13 +1010,13 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
// MBB is common tail. Adjust all other BB's to jump to this one.
// Traversal must be forwards so erases work.
- DEBUG(dbgs() << "\nUsing common tail in BB#" << MBB->getNumber()
+ DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB)
<< " for ");
for (unsigned int i=0, e = SameTails.size(); i != e; ++i) {
if (commonTailIndex == i)
continue;
- DEBUG(dbgs() << "BB#" << SameTails[i].getBlock()->getNumber()
- << (i == e-1 ? "" : ", "));
+ DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock())
+ << (i == e - 1 ? "" : ", "));
// Hack the end off BB i, making it jump to BB commonTailIndex instead.
replaceTailWithBranchTo(SameTails[i].getTailStartPos(), *MBB);
// BB i is no longer a predecessor of SuccBB; remove it from the worklist.
diff --git a/lib/CodeGen/BranchRelaxation.cpp b/lib/CodeGen/BranchRelaxation.cpp
index 99fa4dc6791..0d87f142c7c 100644
--- a/lib/CodeGen/BranchRelaxation.cpp
+++ b/lib/CodeGen/BranchRelaxation.cpp
@@ -143,7 +143,7 @@ void BranchRelaxation::verify() {
LLVM_DUMP_METHOD void BranchRelaxation::dumpBBs() {
for (auto &MBB : *MF) {
const BasicBlockInfo &BBI = BlockInfo[MBB.getNumber()];
- dbgs() << format("BB#%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset)
+    dbgs() << format("%%bb.%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset)
<< format("size=%#x\n", BBI.Size);
}
}
@@ -287,13 +287,10 @@ bool BranchRelaxation::isBlockInRange(
if (TII->isBranchOffsetInRange(MI.getOpcode(), DestOffset - BrOffset))
return true;
- DEBUG(
- dbgs() << "Out of range branch to destination BB#" << DestBB.getNumber()
- << " from BB#" << MI.getParent()->getNumber()
- << " to " << DestOffset
- << " offset " << DestOffset - BrOffset
- << '\t' << MI
- );
+ DEBUG(dbgs() << "Out of range branch to destination "
+ << printMBBReference(DestBB) << " from "
+ << printMBBReference(*MI.getParent()) << " to " << DestOffset
+ << " offset " << DestOffset - BrOffset << '\t' << MI);
return false;
}
@@ -366,9 +363,9 @@ bool BranchRelaxation::fixupConditionalBranch(MachineInstr &MI) {
// just created), so we can invert the condition.
MachineBasicBlock &NextBB = *std::next(MachineFunction::iterator(MBB));
- DEBUG(dbgs() << " Insert B to BB#" << TBB->getNumber()
- << ", invert condition and change dest. to BB#"
- << NextBB.getNumber() << '\n');
+ DEBUG(dbgs() << " Insert B to " << printMBBReference(*TBB)
+ << ", invert condition and change dest. to "
+ << printMBBReference(NextBB) << '\n');
unsigned &MBBSize = BlockInfo[MBB->getNumber()].Size;
diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp
index bb181b7e165..461da8f138f 100644
--- a/lib/CodeGen/EarlyIfConversion.cpp
+++ b/lib/CodeGen/EarlyIfConversion.cpp
@@ -185,7 +185,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
// Reject any live-in physregs. It's probably CPSR/EFLAGS, and very hard to
// get right.
if (!MBB->livein_empty()) {
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n");
+ DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n");
return false;
}
@@ -199,7 +199,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
continue;
if (++InstrCount > BlockInstrLimit && !Stress) {
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than "
+ DEBUG(dbgs() << printMBBReference(*MBB) << " has more than "
<< BlockInstrLimit << " instructions.\n");
return false;
}
@@ -246,7 +246,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
if (!DefMI || DefMI->getParent() != Head)
continue;
if (InsertAfter.insert(DefMI).second)
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << " depends on " << *DefMI);
+ DEBUG(dbgs() << printMBBReference(*MBB) << " depends on " << *DefMI);
if (DefMI->isTerminator()) {
DEBUG(dbgs() << "Can't insert instructions below terminator.\n");
return false;
@@ -361,10 +361,10 @@ bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
if (Succ1->pred_size() != 1 || Succ1->succ_size() != 1 ||
Succ1->succ_begin()[0] != Tail)
return false;
- DEBUG(dbgs() << "\nDiamond: BB#" << Head->getNumber()
- << " -> BB#" << Succ0->getNumber()
- << "/BB#" << Succ1->getNumber()
- << " -> BB#" << Tail->getNumber() << '\n');
+ DEBUG(dbgs() << "\nDiamond: " << printMBBReference(*Head) << " -> "
+ << printMBBReference(*Succ0) << "/"
+ << printMBBReference(*Succ1) << " -> "
+ << printMBBReference(*Tail) << '\n');
// Live-in physregs are tricky to get right when speculating code.
if (!Tail->livein_empty()) {
@@ -372,9 +372,9 @@ bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
return false;
}
} else {
- DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber()
- << " -> BB#" << Succ0->getNumber()
- << " -> BB#" << Tail->getNumber() << '\n');
+ DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> "
+ << printMBBReference(*Succ0) << " -> "
+ << printMBBReference(*Tail) << '\n');
}
// This is a triangle or a diamond.
@@ -563,8 +563,8 @@ void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
assert(Head->succ_empty() && "Additional head successors?");
if (!ExtraPreds && Head->isLayoutSuccessor(Tail)) {
// Splice Tail onto the end of Head.
- DEBUG(dbgs() << "Joining tail BB#" << Tail->getNumber()
- << " into head BB#" << Head->getNumber() << '\n');
+ DEBUG(dbgs() << "Joining tail " << printMBBReference(*Tail) << " into head "
+ << printMBBReference(*Head) << '\n');
Head->splice(Head->end(), Tail,
Tail->begin(), Tail->end());
Head->transferSuccessorsAndUpdatePHIs(Tail);
diff --git a/lib/CodeGen/EdgeBundles.cpp b/lib/CodeGen/EdgeBundles.cpp
index b3a25544be3..54c53eb1631 100644
--- a/lib/CodeGen/EdgeBundles.cpp
+++ b/lib/CodeGen/EdgeBundles.cpp
@@ -80,13 +80,15 @@ raw_ostream &WriteGraph<>(raw_ostream &O, const EdgeBundles &G,
O << "digraph {\n";
for (const auto &MBB : *MF) {
unsigned BB = MBB.getNumber();
- O << "\t\"BB#" << BB << "\" [ shape=box ]\n"
- << '\t' << G.getBundle(BB, false) << " -> \"BB#" << BB << "\"\n"
- << "\t\"BB#" << BB << "\" -> " << G.getBundle(BB, true) << '\n';
+ O << "\t\"" << printMBBReference(MBB) << "\" [ shape=box ]\n"
+ << '\t' << G.getBundle(BB, false) << " -> \"" << printMBBReference(MBB)
+ << "\"\n"
+ << "\t\"" << printMBBReference(MBB) << "\" -> " << G.getBundle(BB, true)
+ << '\n';
for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(),
SE = MBB.succ_end(); SI != SE; ++SI)
- O << "\t\"BB#" << BB << "\" -> \"BB#" << (*SI)->getNumber()
- << "\" [ color=lightgray ]\n";
+ O << "\t\"" << printMBBReference(MBB) << "\" -> \""
+ << printMBBReference(**SI) << "\" [ color=lightgray ]\n";
}
O << "}\n";
return O;
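[Note] For a concrete sense of the DOT output after this change, here is roughly the fragment WriteGraph would now emit for a hypothetical block %bb.3 whose ingoing bundle is 6 and outgoing bundle is 7; all numbers are invented for illustration:

    // Illustrative only: bundle numbers (6, 7) and the block number are made up.
    const char *SampleDotFragment = "\t\"%bb.3\" [ shape=box ]\n"
                                    "\t6 -> \"%bb.3\"\n"
                                    "\t\"%bb.3\" -> 7\n";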
diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp
index 73c4b6a145d..df51ecc0001 100644
--- a/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/lib/CodeGen/ExecutionDepsFix.cpp
@@ -200,7 +200,7 @@ void ExecutionDepsFix::enterBasicBlock(MachineBasicBlock *MBB) {
LiveRegs[rx].Def = -1;
}
}
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": entry\n");
+ DEBUG(dbgs() << printMBBReference(*MBB) << ": entry\n");
return;
}
@@ -246,7 +246,7 @@ void ExecutionDepsFix::enterBasicBlock(MachineBasicBlock *MBB) {
}
}
DEBUG(
- dbgs() << "BB#" << MBB->getNumber()
+ dbgs() << printMBBReference(*MBB)
<< (!isBlockDone(MBB) ? ": incomplete\n" : ": all preds known\n"));
}
diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp
index 567461c1945..1bac5685ec5 100644
--- a/lib/CodeGen/IfConversion.cpp
+++ b/lib/CodeGen/IfConversion.cpp
@@ -406,12 +406,12 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
case ICSimpleFalse: {
bool isFalse = Kind == ICSimpleFalse;
if ((isFalse && DisableSimpleF) || (!isFalse && DisableSimple)) break;
- DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ?
- " false" : "")
- << "): BB#" << BBI.BB->getNumber() << " ("
- << ((Kind == ICSimpleFalse)
- ? BBI.FalseBB->getNumber()
- : BBI.TrueBB->getNumber()) << ") ");
+ DEBUG(dbgs() << "Ifcvt (Simple"
+ << (Kind == ICSimpleFalse ? " false" : "")
+ << "): " << printMBBReference(*BBI.BB) << " ("
+ << ((Kind == ICSimpleFalse) ? BBI.FalseBB->getNumber()
+ : BBI.TrueBB->getNumber())
+ << ") ");
RetVal = IfConvertSimple(BBI, Kind);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
@@ -435,9 +435,9 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << " false");
if (isRev)
DEBUG(dbgs() << " rev");
- DEBUG(dbgs() << "): BB#" << BBI.BB->getNumber() << " (T:"
- << BBI.TrueBB->getNumber() << ",F:"
- << BBI.FalseBB->getNumber() << ") ");
+ DEBUG(dbgs() << "): " << printMBBReference(*BBI.BB)
+ << " (T:" << BBI.TrueBB->getNumber()
+ << ",F:" << BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertTriangle(BBI, Kind);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
@@ -453,9 +453,9 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
}
case ICDiamond:
if (DisableDiamond) break;
- DEBUG(dbgs() << "Ifcvt (Diamond): BB#" << BBI.BB->getNumber() << " (T:"
- << BBI.TrueBB->getNumber() << ",F:"
- << BBI.FalseBB->getNumber() << ") ");
+ DEBUG(dbgs() << "Ifcvt (Diamond): " << printMBBReference(*BBI.BB)
+ << " (T:" << BBI.TrueBB->getNumber()
+ << ",F:" << BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2,
Token->TClobbersPred,
Token->FClobbersPred);
@@ -464,10 +464,9 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
break;
case ICForkedDiamond:
if (DisableForkedDiamond) break;
- DEBUG(dbgs() << "Ifcvt (Forked Diamond): BB#"
- << BBI.BB->getNumber() << " (T:"
- << BBI.TrueBB->getNumber() << ",F:"
- << BBI.FalseBB->getNumber() << ") ");
+ DEBUG(dbgs() << "Ifcvt (Forked Diamond): " << printMBBReference(*BBI.BB)
+ << " (T:" << BBI.TrueBB->getNumber()
+ << ",F:" << BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertForkedDiamond(BBI, Kind, NumDups, NumDups2,
Token->TClobbersPred,
Token->FClobbersPred);
diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp
index 97bb7c712f6..5f0559b7d99 100644
--- a/lib/CodeGen/LiveDebugVariables.cpp
+++ b/lib/CodeGen/LiveDebugVariables.cpp
@@ -1174,7 +1174,7 @@ void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS,
MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start)->getIterator();
SlotIndex MBBEnd = LIS.getMBBEndIdx(&*MBB);
- DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd);
+ DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd);
insertDebugValue(&*MBB, Start, Stop, Loc, Spilled, LIS, TII, TRI);
// This interval may span multiple basic blocks.
// Insert a DBG_VALUE into each one.
@@ -1184,7 +1184,7 @@ void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS,
if (++MBB == MFEnd)
break;
MBBEnd = LIS.getMBBEndIdx(&*MBB);
- DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd);
+ DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd);
insertDebugValue(&*MBB, Start, Stop, Loc, Spilled, LIS, TII, TRI);
}
DEBUG(dbgs() << '\n');
diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index fb7fbe7f1c2..06807542b34 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -323,7 +323,7 @@ void LiveIntervals::computeLiveInRegUnits() {
// Create phi-defs at Begin for all live-in registers.
SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
- DEBUG(dbgs() << Begin << "\tBB#" << MBB.getNumber());
+ DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
for (const auto &LI : MBB.liveins()) {
for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
unsigned Unit = *Units;
diff --git a/lib/CodeGen/LiveRangeCalc.cpp b/lib/CodeGen/LiveRangeCalc.cpp
index 0074a9fd907..352d75ec3ae 100644
--- a/lib/CodeGen/LiveRangeCalc.cpp
+++ b/lib/CodeGen/LiveRangeCalc.cpp
@@ -377,7 +377,7 @@ bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
MBB->getParent()->verify();
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
errs() << "The register " << printReg(PhysReg, TRI)
- << " needs to be live in to BB#" << MBB->getNumber()
+ << " needs to be live in to " << printMBBReference(*MBB)
<< ", but is missing from the live-in list.\n";
report_fatal_error("Invalid global physical register");
}
diff --git a/lib/CodeGen/MIRParser/MILexer.cpp b/lib/CodeGen/MIRParser/MILexer.cpp
index d23df9c137b..ac696923794 100644
--- a/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/lib/CodeGen/MIRParser/MILexer.cpp
@@ -277,6 +277,9 @@ static Cursor maybeLexMachineBasicBlock(Cursor C, MIToken &Token,
C.advance();
StringRef Number = NumberRange.upto(C);
unsigned StringOffset = PrefixLength + Number.size(); // Drop '%bb.<id>'
+ // TODO: The format bb.<id>.<irname> is supported only when it's not a
+ // reference. Once we deprecate the format where the irname shows up, we
+ // should only lex forward if it is a reference.
if (C.peek() == '.') {
C.advance(); // Skip '.'
++StringOffset;
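[Note] The TODO above concerns the transitional "%bb.<id>.<irname>" form. A self-contained sketch of the split it describes; this deliberately does not use the real Cursor API, and splitBlockToken is a hypothetical name:

    #include <cstdlib>
    #include <string>

    // Splits "%bb.<id>" or the transitional "%bb.<id>.<irname>" form.
    // Illustrative only: MILexer works on a Cursor over the MIR buffer.
    static bool splitBlockToken(const std::string &Tok, unsigned &ID,
                                std::string &IRName) {
      const std::string Prefix = "%bb.";
      if (Tok.compare(0, Prefix.size(), Prefix) != 0)
        return false;
      std::string::size_type Dot = Tok.find('.', Prefix.size());
      std::string Num =
          Tok.substr(Prefix.size(), Dot == std::string::npos
                                        ? std::string::npos
                                        : Dot - Prefix.size());
      ID = static_cast<unsigned>(std::strtoul(Num.c_str(), nullptr, 10));
      IRName = (Dot == std::string::npos) ? std::string() : Tok.substr(Dot + 1);
      return true;
    }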
diff --git a/lib/CodeGen/MIRParser/MIParser.cpp b/lib/CodeGen/MIRParser/MIParser.cpp
index 9b5539a7ca7..de951e42c8f 100644
--- a/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1344,6 +1344,8 @@ bool MIParser::parseMBBReference(MachineBasicBlock *&MBB) {
return error(Twine("use of undefined machine basic block #") +
Twine(Number));
MBB = MBBInfo->second;
+ // TODO: Only parse the name if it's a MachineBasicBlockLabel. Deprecate once
+ // we drop the <irname> from the bb.<id>.<irname> format.
if (!Token.stringValue().empty() && Token.stringValue() != MBB->getName())
return error(Twine("the name of machine basic block #") + Twine(Number) +
" isn't '" + Token.stringValue() + "'");
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
index 92dc0aea421..aa0f38036b1 100644
--- a/lib/CodeGen/MIRPrinter.cpp
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -157,7 +157,6 @@ public:
void print(const MachineBasicBlock &MBB);
void print(const MachineInstr &MI);
- void printMBBReference(const MachineBasicBlock &MBB);
void printIRBlockReference(const BasicBlock &BB);
void printIRValueReference(const Value &V);
void printStackObjectReference(int FrameIndex);
@@ -338,13 +337,11 @@ void MIRPrinter::convert(ModuleSlotTracker &MST,
YamlMFI.HasMustTailInVarArgFunc = MFI.hasMustTailInVarArgFunc();
if (MFI.getSavePoint()) {
raw_string_ostream StrOS(YamlMFI.SavePoint.Value);
- MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping)
- .printMBBReference(*MFI.getSavePoint());
+ StrOS << printMBBReference(*MFI.getSavePoint());
}
if (MFI.getRestorePoint()) {
raw_string_ostream StrOS(YamlMFI.RestorePoint.Value);
- MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping)
- .printMBBReference(*MFI.getRestorePoint());
+ StrOS << printMBBReference(*MFI.getRestorePoint());
}
}
@@ -493,8 +490,7 @@ void MIRPrinter::convert(ModuleSlotTracker &MST,
Entry.ID = ID++;
for (const auto *MBB : Table.MBBs) {
raw_string_ostream StrOS(Str);
- MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping)
- .printMBBReference(*MBB);
+ StrOS << printMBBReference(*MBB);
Entry.Blocks.push_back(StrOS.str());
Str.clear();
}
@@ -616,7 +612,7 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) {
if (I != MBB.succ_begin())
OS << ", ";
- printMBBReference(**I);
+ OS << printMBBReference(**I);
if (!SimplifyMIR || !canPredictProbs)
OS << '('
<< format("0x%08" PRIx32, MBB.getSuccProbability(I).getNumerator())
@@ -764,14 +760,6 @@ void MIPrinter::print(const MachineInstr &MI) {
}
}
-void MIPrinter::printMBBReference(const MachineBasicBlock &MBB) {
- OS << "%bb." << MBB.getNumber();
- if (const auto *BB = MBB.getBasicBlock()) {
- if (BB->hasName())
- OS << '.' << BB->getName();
- }
-}
-
static void printIRSlotNumber(raw_ostream &OS, int Slot) {
if (Slot == -1)
OS << "<badref>";
@@ -967,7 +955,7 @@ void MIPrinter::print(const MachineInstr &MI, unsigned OpIdx,
Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST);
break;
case MachineOperand::MO_MachineBasicBlock:
- printMBBReference(*Op.getMBB());
+ OS << printMBBReference(*Op.getMBB());
break;
case MachineOperand::MO_FrameIndex:
printStackObjectReference(Op.getIndex());
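[Note] After this refactor, rendering a block reference into a string no longer requires constructing a MIPrinter: the free function streams straight into a raw_string_ostream, as the save/restore-point and jump-table conversions above now do. A minimal sketch; blockRefAsString is an illustrative name:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    static std::string blockRefAsString(const llvm::MachineBasicBlock &MBB) {
      std::string Str;
      llvm::raw_string_ostream StrOS(Str);
      StrOS << llvm::printMBBReference(MBB); // e.g. "%bb.5"
      return StrOS.str();                    // flushes into Str and returns it
    }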
diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp
index 8863ac23607..5522d6694e0 100644
--- a/lib/CodeGen/MachineBasicBlock.cpp
+++ b/lib/CodeGen/MachineBasicBlock.cpp
@@ -70,6 +70,10 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineBasicBlock &MBB) {
return OS;
}
+Printable llvm::printMBBReference(const MachineBasicBlock &MBB) {
+ return Printable([&MBB](raw_ostream &OS) { return MBB.printAsOperand(OS); });
+}
+
/// When an MBB is added to an MF, we need to update the parent pointer of the
/// MBB, the MBB numbering, and any instructions in the MBB to be on the right
/// operand list for registers.
@@ -281,7 +285,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
if (Indexes)
OS << Indexes->getMBBStartIdx(this) << '\t';
- OS << "BB#" << getNumber() << ": ";
+ OS << printMBBReference(*this) << ": ";
const char *Comma = "";
if (const BasicBlock *LBB = getBasicBlock()) {
@@ -313,7 +317,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
if (Indexes) OS << '\t';
OS << " Predecessors according to CFG:";
for (const_pred_iterator PI = pred_begin(), E = pred_end(); PI != E; ++PI)
- OS << " BB#" << (*PI)->getNumber();
+ OS << " " << printMBBReference(*(*PI));
OS << '\n';
}
@@ -334,7 +338,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
if (Indexes) OS << '\t';
OS << " Successors according to CFG:";
for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI) {
- OS << " BB#" << (*SI)->getNumber();
+ OS << " " << printMBBReference(*(*SI));
if (!Probs.empty())
OS << '(' << *getProbabilityIterator(SI) << ')';
}
@@ -350,7 +354,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
void MachineBasicBlock::printAsOperand(raw_ostream &OS,
bool /*PrintType*/) const {
- OS << "BB#" << getNumber();
+ OS << "%bb." << getNumber();
}
void MachineBasicBlock::removeLiveIn(MCPhysReg Reg, LaneBitmask LaneMask) {
@@ -767,10 +771,9 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
MF->insert(std::next(MachineFunction::iterator(this)), NMBB);
- DEBUG(dbgs() << "Splitting critical edge:"
- " BB#" << getNumber()
- << " -- BB#" << NMBB->getNumber()
- << " -- BB#" << Succ->getNumber() << '\n');
+ DEBUG(dbgs() << "Splitting critical edge: " << printMBBReference(*this)
+ << " -- " << printMBBReference(*NMBB) << " -- "
+ << printMBBReference(*Succ) << '\n');
LiveIntervals *LIS = P.getAnalysisIfAvailable<LiveIntervals>();
SlotIndexes *Indexes = P.getAnalysisIfAvailable<SlotIndexes>();
@@ -1023,8 +1026,8 @@ bool MachineBasicBlock::canSplitCriticalEdge(
// case that we can't handle. Since this never happens in properly optimized
// code, just skip those edges.
if (TBB && TBB == FBB) {
- DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
- << getNumber() << '\n');
+ DEBUG(dbgs() << "Won't split critical edge after degenerate "
+ << printMBBReference(*this) << '\n');
return false;
}
return true;
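[Note] The definition added above leans on the Printable idiom: wrap a print callback so it can be chained with operator<<. A simplified, self-contained re-creation using std::ostream in place of raw_ostream (the real class lives in llvm/Support/Printable.h):

    #include <functional>
    #include <iostream>
    #include <utility>

    // Stripped-down stand-in for llvm::Printable.
    struct Printable {
      std::function<void(std::ostream &)> Print;
      explicit Printable(std::function<void(std::ostream &)> P)
          : Print(std::move(P)) {}
    };

    inline std::ostream &operator<<(std::ostream &OS, const Printable &P) {
      P.Print(OS); // printing is deferred to the stream-insertion point
      return OS;
    }

    // Mirrors printMBBReference: capture by reference, format lazily.
    Printable printNumberRef(const int &N) {
      return Printable([&N](std::ostream &OS) { OS << "%bb." << N; });
    }

    int main() {
      int Num = 5;
      std::cout << printNumberRef(Num) << '\n'; // prints "%bb.5"
    }

Capturing the block by reference and deferring the formatting is what lets one helper serve the MIR printer, the verifier, and the DEBUG output below without intermediate strings.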
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index f0285ea8f8e..87af9533b32 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -546,7 +546,7 @@ INITIALIZE_PASS_END(MachineBlockPlacement, DEBUG_TYPE,
static std::string getBlockName(const MachineBasicBlock *BB) {
std::string Result;
raw_string_ostream OS(Result);
- OS << "BB#" << BB->getNumber();
+ OS << printMBBReference(*BB);
OS << " ('" << BB->getName() << "')";
OS.flush();
return Result;
diff --git a/lib/CodeGen/MachineBranchProbabilityInfo.cpp b/lib/CodeGen/MachineBranchProbabilityInfo.cpp
index 21eff9dfff9..e4952aaaba0 100644
--- a/lib/CodeGen/MachineBranchProbabilityInfo.cpp
+++ b/lib/CodeGen/MachineBranchProbabilityInfo.cpp
@@ -84,7 +84,7 @@ raw_ostream &MachineBranchProbabilityInfo::printEdgeProbability(
const MachineBasicBlock *Dst) const {
const BranchProbability Prob = getEdgeProbability(Src, Dst);
- OS << "edge MBB#" << Src->getNumber() << " -> MBB#" << Dst->getNumber()
+ OS << "edge " << printMBBReference(*Src) << " -> " << printMBBReference(*Dst)
<< " probability is " << Prob
<< (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 1f55b8fa495..f0d5eec4dea 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -546,7 +546,7 @@ namespace llvm {
raw_string_ostream OSS(OutStr);
if (isSimple()) {
- OSS << "BB#" << Node->getNumber();
+ OSS << printMBBReference(*Node);
if (const BasicBlock *BB = Node->getBasicBlock())
OSS << ": " << BB->getName();
} else
@@ -908,7 +908,7 @@ void MachineJumpTableInfo::print(raw_ostream &OS) const {
for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
OS << " jt#" << i << ": ";
for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
- OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
+ OS << ' ' << printMBBReference(*JumpTables[i].MBBs[j]);
}
OS << '\n';
diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index 3e622b4a23c..a251a08a516 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -563,8 +563,8 @@ void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
// Now move the instructions to the predecessor, inserting it before any
// terminator instructions.
- DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
- << MI->getParent()->getNumber() << ": " << *MI);
+ DEBUG(dbgs() << "Hoisting to " << printMBBReference(*Preheader) << " from "
+ << printMBBReference(*MI->getParent()) << ": " << *MI);
// Splice the instruction to the preheader.
MachineBasicBlock *MBB = MI->getParent();
@@ -601,14 +601,14 @@ bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
}
void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Entering BB#" << MBB->getNumber() << '\n');
+ DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n');
// Remember livein register pressure.
BackTrace.push_back(RegPressure);
}
void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Exiting BB#" << MBB->getNumber() << '\n');
+ DEBUG(dbgs() << "Exiting " << printMBBReference(*MBB) << '\n');
BackTrace.pop_back();
}
@@ -1336,9 +1336,9 @@ bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
DEBUG({
dbgs() << "Hoisting " << *MI;
if (MI->getParent()->getBasicBlock())
- dbgs() << " from BB#" << MI->getParent()->getNumber();
+ dbgs() << " from " << printMBBReference(*MI->getParent());
if (Preheader->getBasicBlock())
- dbgs() << " to BB#" << Preheader->getNumber();
+ dbgs() << " to " << printMBBReference(*Preheader);
dbgs() << "\n";
});
diff --git a/lib/CodeGen/MachineOperand.cpp b/lib/CodeGen/MachineOperand.cpp
index 2dbf57fc16a..acd83dde455 100644
--- a/lib/CodeGen/MachineOperand.cpp
+++ b/lib/CodeGen/MachineOperand.cpp
@@ -428,7 +428,7 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
}
break;
case MachineOperand::MO_MachineBasicBlock:
- OS << "<BB#" << getMBB()->getNumber() << ">";
+ OS << printMBBReference(*getMBB());
break;
case MachineOperand::MO_FrameIndex:
OS << "<fi#" << getIndex() << '>';
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index 6aaacb479fe..3e796cc3add 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -98,7 +98,7 @@ static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
- cl::desc("Only schedule this MBB#"));
+ cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG
@@ -548,15 +548,14 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
continue;
}
DEBUG(dbgs() << "********** MI Scheduling **********\n");
- DEBUG(dbgs() << MF->getName()
- << ":BB#" << MBB->getNumber() << " " << MBB->getName()
- << "\n From: " << *I << " To: ";
+ DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB) << " "
+ << MBB->getName() << "\n From: " << *I << " To: ";
if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
else dbgs() << "End";
dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
if (DumpCriticalPathLength) {
errs() << MF->getName();
- errs() << ":BB# " << MBB->getNumber();
+ errs() << ":%bb. " << MBB->getNumber();
errs() << " " << MBB->getName() << " \n";
}
@@ -823,11 +822,11 @@ void ScheduleDAGMI::schedule() {
placeDebugValues();
DEBUG({
- unsigned BBNum = begin()->getParent()->getNumber();
- dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
- dumpSchedule();
- dbgs() << '\n';
- });
+ dbgs() << "*** Final schedule for "
+ << printMBBReference(*begin()->getParent()) << " ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
}
/// Apply each ScheduleDAGMutation step in order.
@@ -1261,11 +1260,11 @@ void ScheduleDAGMILive::schedule() {
placeDebugValues();
DEBUG({
- unsigned BBNum = begin()->getParent()->getNumber();
- dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
- dumpSchedule();
- dbgs() << '\n';
- });
+ dbgs() << "*** Final schedule for "
+ << printMBBReference(*begin()->getParent()) << " ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
}
/// Build the DAG and setup three register pressure trackers.
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp
index 11257d98e37..7857084c4e6 100644
--- a/lib/CodeGen/MachineSink.cpp
+++ b/lib/CodeGen/MachineSink.cpp
@@ -243,17 +243,17 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
// into and they are all PHI nodes. In this case, machine-sink must break
// the critical edge first. e.g.
//
- // BB#1: derived from LLVM BB %bb4.preheader
- // Predecessors according to CFG: BB#0
+ // %bb.1: derived from LLVM BB %bb4.preheader
+ // Predecessors according to CFG: %bb.0
// ...
// %reg16385<def> = DEC64_32r %reg16437, %eflags<imp-def,dead>
// ...
- // JE_4 <BB#37>, %eflags<imp-use>
- // Successors according to CFG: BB#37 BB#2
+ // JE_4 <%bb.37>, %eflags<imp-use>
+ // Successors according to CFG: %bb.37 %bb.2
//
- // BB#2: derived from LLVM BB %bb.nph
- // Predecessors according to CFG: BB#0 BB#1
- // %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
+ // %bb.2: derived from LLVM BB %bb.nph
+ // Predecessors according to CFG: %bb.0 %bb.1
+ // %reg16386<def> = PHI %reg16434, %bb.0, %reg16385, %bb.1
BreakPHIEdge = true;
for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
MachineInstr *UseInst = MO.getParent();
@@ -321,10 +321,10 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
for (auto &Pair : ToSplit) {
auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
if (NewSucc != nullptr) {
- DEBUG(dbgs() << " *** Splitting critical edge:"
- " BB#" << Pair.first->getNumber()
- << " -- BB#" << NewSucc->getNumber()
- << " -- BB#" << Pair.second->getNumber() << '\n');
+ DEBUG(dbgs() << " *** Splitting critical edge: "
+ << printMBBReference(*Pair.first) << " -- "
+ << printMBBReference(*NewSucc) << " -- "
+ << printMBBReference(*Pair.second) << '\n');
MadeChange = true;
++NumSplit;
} else
@@ -460,33 +460,33 @@ bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
// It's not always legal to break critical edges and sink the computation
// to the edge.
//
- // BB#1:
+ // %bb.1:
// v1024
- // Beq BB#3
+ // Beq %bb.3
// <fallthrough>
- // BB#2:
+ // %bb.2:
// ... no uses of v1024
// <fallthrough>
- // BB#3:
+ // %bb.3:
// ...
// = v1024
//
- // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
+ // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted:
//
- // BB#1:
+ // %bb.1:
// ...
- // Bne BB#2
- // BB#4:
+ // Bne %bb.2
+ // %bb.4:
// v1024 =
- // B BB#3
- // BB#2:
+ // B %bb.3
+ // %bb.2:
// ... no uses of v1024
// <fallthrough>
- // BB#3:
+ // %bb.3:
// ...
// = v1024
//
- // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
+ // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3
// flow. We need to ensure the new basic block where the computation is
// sunk to dominates all the uses.
// It's only legal to break critical edge and sink the computation to the
diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp
index 453b47b71f7..d81c6f8a31e 100644
--- a/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/lib/CodeGen/MachineTraceMetrics.cpp
@@ -396,7 +396,8 @@ MachineTraceMetrics::getEnsemble(MachineTraceMetrics::Strategy strategy) {
}
void MachineTraceMetrics::invalidate(const MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Invalidate traces through BB#" << MBB->getNumber() << '\n');
+ DEBUG(dbgs() << "Invalidate traces through " << printMBBReference(*MBB)
+ << '\n');
BlockInfo[MBB->getNumber()].invalidate();
for (unsigned i = 0; i != TS_NumStrategies; ++i)
if (Ensembles[i])
@@ -476,8 +477,8 @@ public:
/// Compute the trace through MBB.
void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Computing " << getName() << " trace through BB#"
- << MBB->getNumber() << '\n');
+ DEBUG(dbgs() << "Computing " << getName() << " trace through "
+ << printMBBReference(*MBB) << '\n');
// Set up loop bounds for the backwards post-order traversal.
LoopBounds Bounds(BlockInfo, MTM.Loops);
@@ -485,13 +486,13 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
Bounds.Downward = false;
Bounds.Visited.clear();
for (auto I : inverse_post_order_ext(MBB, Bounds)) {
- DEBUG(dbgs() << " pred for BB#" << I->getNumber() << ": ");
+ DEBUG(dbgs() << " pred for " << printMBBReference(*I) << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the predecessors have been visited, pick the preferred one.
TBI.Pred = pickTracePred(I);
DEBUG({
if (TBI.Pred)
- dbgs() << "BB#" << TBI.Pred->getNumber() << '\n';
+ dbgs() << printMBBReference(*TBI.Pred) << '\n';
else
dbgs() << "null\n";
});
@@ -503,13 +504,13 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
Bounds.Downward = true;
Bounds.Visited.clear();
for (auto I : post_order_ext(MBB, Bounds)) {
- DEBUG(dbgs() << " succ for BB#" << I->getNumber() << ": ");
+ DEBUG(dbgs() << " succ for " << printMBBReference(*I) << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the successors have been visited, pick the preferred one.
TBI.Succ = pickTraceSucc(I);
DEBUG({
if (TBI.Succ)
- dbgs() << "BB#" << TBI.Succ->getNumber() << '\n';
+ dbgs() << printMBBReference(*TBI.Succ) << '\n';
else
dbgs() << "null\n";
});
@@ -530,8 +531,8 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
WorkList.push_back(BadMBB);
do {
const MachineBasicBlock *MBB = WorkList.pop_back_val();
- DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName()
- << " height.\n");
+ DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' '
+ << getName() << " height.\n");
// Find any MBB predecessors that have MBB as their preferred successor.
// They are the only ones that need to be invalidated.
for (const MachineBasicBlock *Pred : MBB->predecessors()) {
@@ -555,8 +556,8 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
WorkList.push_back(BadMBB);
do {
const MachineBasicBlock *MBB = WorkList.pop_back_val();
- DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName()
- << " depth.\n");
+ DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' '
+ << getName() << " depth.\n");
// Find any MBB successors that have MBB as their preferred predecessor.
// They are the only ones that need to be invalidated.
for (const MachineBasicBlock *Succ : MBB->successors()) {
@@ -859,7 +860,7 @@ computeInstrDepths(const MachineBasicBlock *MBB) {
// Go through trace blocks in top-down order, stopping after the center block.
while (!Stack.empty()) {
MBB = Stack.pop_back_val();
- DEBUG(dbgs() << "\nDepths for BB#" << MBB->getNumber() << ":\n");
+ DEBUG(dbgs() << "\nDepths for " << printMBBReference(*MBB) << ":\n");
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
TBI.HasValidInstrDepths = true;
TBI.CriticalPath = 0;
@@ -1044,7 +1045,7 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
SmallVector<DataDep, 8> Deps;
for (;!Stack.empty(); Stack.pop_back()) {
MBB = Stack.back();
- DEBUG(dbgs() << "Heights for BB#" << MBB->getNumber() << ":\n");
+ DEBUG(dbgs() << "Heights for " << printMBBReference(*MBB) << ":\n");
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
TBI.HasValidInstrHeights = true;
TBI.CriticalPath = 0;
@@ -1131,7 +1132,7 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
// Update virtual live-in heights. They were added by addLiveIns() with a 0
// height because the final height isn't known until now.
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << " Live-ins:");
+ DEBUG(dbgs() << printMBBReference(*MBB) << " Live-ins:");
for (LiveInReg &LIR : TBI.LiveIns) {
const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
LIR.Height = Heights.lookup(DefMI);
@@ -1289,7 +1290,7 @@ bool MachineTraceMetrics::Trace::isDepInTrace(const MachineInstr &DefMI,
void MachineTraceMetrics::Ensemble::print(raw_ostream &OS) const {
OS << getName() << " ensemble:\n";
for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) {
- OS << " BB#" << i << '\t';
+ OS << " %bb." << i << '\t';
BlockInfo[i].print(OS);
OS << '\n';
}
@@ -1299,10 +1300,10 @@ void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const {
if (hasValidDepth()) {
OS << "depth=" << InstrDepth;
if (Pred)
- OS << " pred=BB#" << Pred->getNumber();
+ OS << " pred=" << printMBBReference(*Pred);
else
OS << " pred=null";
- OS << " head=BB#" << Head;
+ OS << " head=%bb." << Head;
if (HasValidInstrDepths)
OS << " +instrs";
} else
@@ -1311,10 +1312,10 @@ void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const {
if (hasValidHeight()) {
OS << "height=" << InstrHeight;
if (Succ)
- OS << " succ=BB#" << Succ->getNumber();
+ OS << " succ=" << printMBBReference(*Succ);
else
OS << " succ=null";
- OS << " tail=BB#" << Tail;
+ OS << " tail=%bb." << Tail;
if (HasValidInstrHeights)
OS << " +instrs";
} else
@@ -1326,18 +1327,18 @@ void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const {
void MachineTraceMetrics::Trace::print(raw_ostream &OS) const {
unsigned MBBNum = &TBI - &TE.BlockInfo[0];
- OS << TE.getName() << " trace BB#" << TBI.Head << " --> BB#" << MBBNum
- << " --> BB#" << TBI.Tail << ':';
+ OS << TE.getName() << " trace %bb." << TBI.Head << " --> %bb." << MBBNum
+ << " --> %bb." << TBI.Tail << ':';
if (TBI.hasValidHeight() && TBI.hasValidDepth())
OS << ' ' << getInstrCount() << " instrs.";
if (TBI.HasValidInstrDepths && TBI.HasValidInstrHeights)
OS << ' ' << TBI.CriticalPath << " cycles.";
const MachineTraceMetrics::TraceBlockInfo *Block = &TBI;
- OS << "\nBB#" << MBBNum;
+ OS << "\n%bb." << MBBNum;
while (Block->hasValidDepth() && Block->Pred) {
unsigned Num = Block->Pred->getNumber();
- OS << " <- BB#" << Num;
+ OS << " <- " << printMBBReference(*Block->Pred);
Block = &TE.BlockInfo[Num];
}
@@ -1345,7 +1346,7 @@ void MachineTraceMetrics::Trace::print(raw_ostream &OS) const {
OS << "\n ";
while (Block->hasValidHeight() && Block->Succ) {
unsigned Num = Block->Succ->getNumber();
- OS << " -> BB#" << Num;
+ OS << " -> " << printMBBReference(*Block->Succ);
Block = &TE.BlockInfo[Num];
}
OS << '\n';
diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp
index 2d138298a94..fd00a2d7501 100644
--- a/lib/CodeGen/MachineVerifier.cpp
+++ b/lib/CodeGen/MachineVerifier.cpp
@@ -471,9 +471,8 @@ void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
assert(MBB);
report(msg, MBB->getParent());
- errs() << "- basic block: BB#" << MBB->getNumber()
- << ' ' << MBB->getName()
- << " (" << (const void*)MBB << ')';
+ errs() << "- basic block: " << printMBBReference(*MBB) << ' '
+ << MBB->getName() << " (" << (const void *)MBB << ')';
if (Indexes)
errs() << " [" << Indexes->getMBBStartIdx(MBB)
<< ';' << Indexes->getMBBEndIdx(MBB) << ')';
@@ -619,8 +618,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB has successor that isn't part of the function.", MBB);
if (!MBBInfoMap[*I].Preds.count(MBB)) {
report("Inconsistent CFG", MBB);
- errs() << "MBB is not in the predecessor list of the successor BB#"
- << (*I)->getNumber() << ".\n";
+ errs() << "MBB is not in the predecessor list of the successor "
+ << printMBBReference(*(*I)) << ".\n";
}
}
@@ -631,8 +630,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB has predecessor that isn't part of the function.", MBB);
if (!MBBInfoMap[*I].Succs.count(MBB)) {
report("Inconsistent CFG", MBB);
- errs() << "MBB is not in the successor list of the predecessor BB#"
- << (*I)->getNumber() << ".\n";
+ errs() << "MBB is not in the successor list of the predecessor "
+ << printMBBReference(*(*I)) << ".\n";
}
}
@@ -1663,8 +1662,8 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
for (MachineBasicBlock *Pred : MBB.predecessors()) {
if (!seen.count(Pred)) {
report("Missing PHI operand", &Phi);
- errs() << "BB#" << Pred->getNumber()
- << " is a predecessor according to the CFG.\n";
+ errs() << printMBBReference(*Pred)
+ << " is a predecessor according to the CFG.\n";
}
}
}
@@ -2038,8 +2037,8 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
report("Register not marked live out of predecessor", *PI);
report_context(LR, Reg, LaneMask);
report_context(*VNI);
- errs() << " live into BB#" << MFI->getNumber()
- << '@' << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
+ errs() << " live into " << printMBBReference(*MFI) << '@'
+ << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
<< PEnd << '\n';
continue;
}
@@ -2048,9 +2047,9 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
if (!IsPHI && PVNI != VNI) {
report("Different value live out of predecessor", *PI);
report_context(LR, Reg, LaneMask);
- errs() << "Valno #" << PVNI->id << " live out of BB#"
- << (*PI)->getNumber() << '@' << PEnd << "\nValno #" << VNI->id
- << " live into BB#" << MFI->getNumber() << '@'
+ errs() << "Valno #" << PVNI->id << " live out of "
+ << printMBBReference(*(*PI)) << '@' << PEnd << "\nValno #"
+ << VNI->id << " live into " << printMBBReference(*MFI) << '@'
<< LiveInts->getMBBStartIdx(&*MFI) << '\n';
}
}
@@ -2201,11 +2200,11 @@ void MachineVerifier::verifyStackFrame() {
(SPState[(*I)->getNumber()].ExitValue != BBState.EntryValue ||
SPState[(*I)->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
report("The exit stack state of a predecessor is inconsistent.", MBB);
- errs() << "Predecessor BB#" << (*I)->getNumber() << " has exit state ("
- << SPState[(*I)->getNumber()].ExitValue << ", "
- << SPState[(*I)->getNumber()].ExitIsSetup
- << "), while BB#" << MBB->getNumber() << " has entry state ("
- << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
+ errs() << "Predecessor " << printMBBReference(*(*I))
+ << " has exit state (" << SPState[(*I)->getNumber()].ExitValue
+ << ", " << SPState[(*I)->getNumber()].ExitIsSetup << "), while "
+ << printMBBReference(*MBB) << " has entry state ("
+ << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
}
}
@@ -2217,11 +2216,11 @@ void MachineVerifier::verifyStackFrame() {
(SPState[(*I)->getNumber()].EntryValue != BBState.ExitValue ||
SPState[(*I)->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
report("The entry stack state of a successor is inconsistent.", MBB);
- errs() << "Successor BB#" << (*I)->getNumber() << " has entry state ("
- << SPState[(*I)->getNumber()].EntryValue << ", "
- << SPState[(*I)->getNumber()].EntryIsSetup
- << "), while BB#" << MBB->getNumber() << " has exit state ("
- << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
+ errs() << "Successor " << printMBBReference(*(*I))
+ << " has entry state (" << SPState[(*I)->getNumber()].EntryValue
+ << ", " << SPState[(*I)->getNumber()].EntryIsSetup << "), while "
+ << printMBBReference(*MBB) << " has exit state ("
+ << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
}
}
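
For a sense of the resulting diagnostics, the stack-state report above changes shape as follows (block numbers and state values are hypothetical, shown only to illustrate the rename):

    before: Predecessor BB#1 has exit state (16, 1), while BB#2 has entry state (0, 0).
    after:  Predecessor %bb.1 has exit state (16, 1), while %bb.2 has entry state (0, 0).
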
diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp
index 864d6d547ca..82bbe1528c8 100644
--- a/lib/CodeGen/PHIElimination.cpp
+++ b/lib/CodeGen/PHIElimination.cpp
@@ -593,9 +593,9 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
if (!ShouldSplit && !NoPhiElimLiveOutEarlyExit)
continue;
if (ShouldSplit) {
- DEBUG(dbgs() << printReg(Reg) << " live-out before critical edge BB#"
- << PreMBB->getNumber() << " -> BB#" << MBB.getNumber()
- << ": " << *BBI);
+ DEBUG(dbgs() << printReg(Reg) << " live-out before critical edge "
+ << printMBBReference(*PreMBB) << " -> "
+ << printMBBReference(MBB) << ": " << *BBI);
}
// If Reg is not live-in to MBB, it means it must be live-in to some
diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp
index 673dc37904f..81dd6ef2444 100644
--- a/lib/CodeGen/PostRASchedulerList.cpp
+++ b/lib/CodeGen/PostRASchedulerList.cpp
@@ -322,8 +322,8 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
static int bbcnt = 0;
if (bbcnt++ % DebugDiv != DebugMod)
continue;
- dbgs() << "*** DEBUG scheduling " << Fn.getName()
- << ":BB#" << MBB.getNumber() << " ***\n";
+ dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":"
+ << printMBBReference(MBB) << " ***\n";
}
#endif
diff --git a/lib/CodeGen/ProcessImplicitDefs.cpp b/lib/CodeGen/ProcessImplicitDefs.cpp
index 7fbf7ddde0b..48b48c5f649 100644
--- a/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -154,7 +154,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &MF) {
if (WorkList.empty())
continue;
- DEBUG(dbgs() << "BB#" << MFI->getNumber() << " has " << WorkList.size()
+ DEBUG(dbgs() << printMBBReference(*MFI) << " has " << WorkList.size()
<< " implicit defs.\n");
Changed = true;
diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp
index 131cd5a17ef..7aa998bc07f 100644
--- a/lib/CodeGen/RegAllocGreedy.cpp
+++ b/lib/CodeGen/RegAllocGreedy.cpp
@@ -1612,7 +1612,7 @@ void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
// Create separate intervals for isolated blocks with multiple uses.
if (!IntvIn && !IntvOut) {
- DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
+ DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
SE->splitSingleBlock(BI);
continue;
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index 2bbefa97132..09875d336fd 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -991,8 +991,8 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
// Now ok to move copy.
if (CopyLeftBB) {
- DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to BB#"
- << CopyLeftBB->getNumber() << '\t' << CopyMI);
+ DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to "
+ << printMBBReference(*CopyLeftBB) << '\t' << CopyMI);
// Insert new copy to CopyLeftBB.
auto InsPos = CopyLeftBB->getFirstTerminator();
@@ -1010,8 +1010,8 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
// the deleted list.
ErasedInstrs.erase(NewCopyMI);
} else {
- DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from BB#"
- << MBB.getNumber() << '\t' << CopyMI);
+ DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from "
+ << printMBBReference(MBB) << '\t' << CopyMI);
}
// Remove CopyMI.
@@ -2376,7 +2376,7 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
if (OtherV.ErasableImplicitDef && DefMI &&
DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) {
DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
- << " extends into BB#" << DefMI->getParent()->getNumber()
+ << " extends into " << printMBBReference(*DefMI->getParent())
<< ", keeping it.\n");
OtherV.ErasableImplicitDef = false;
}
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index b1a48514910..4b45edcf0de 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -1043,7 +1043,7 @@ static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
}
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
- DEBUG(dbgs() << "Fixup kills for BB#" << MBB.getNumber() << '\n');
+ DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');
LiveRegs.init(*TRI);
LiveRegs.addLiveOuts(MBB);
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index a83f4eff383..49f304c8cc8 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -346,9 +346,8 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
- DEBUG(dbgs()
- << "********** List Scheduling BB#" << BB->getNumber()
- << " '" << BB->getName() << "' **********\n");
+ DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB)
+ << " '" << BB->getName() << "' **********\n");
CurCycle = 0;
IssueCount = 0;
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
index 54c1531a018..07b46b9183a 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
@@ -93,9 +93,8 @@ private:
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGVLIW::Schedule() {
- DEBUG(dbgs()
- << "********** List Scheduling BB#" << BB->getNumber()
- << " '" << BB->getName() << "' **********\n");
+ DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB)
+ << " '" << BB->getName() << "' **********\n");
// Build the scheduling graph.
BuildSchedGraph(AA);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index cb37137d547..8f47c5b40ba 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -730,8 +730,9 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
BlockName =
(MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
}
- DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Initial selection DAG: " << printMBBReference(*FuncInfo->MBB)
+ << " '" << BlockName << "'\n";
+ CurDAG->dump());
if (ViewDAGCombine1 && MatchFilterBB)
CurDAG->viewGraph("dag-combine1 input for " + BlockName);
@@ -743,8 +744,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized lowered selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Optimized lowered selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
@@ -758,8 +761,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
Changed = CurDAG->LegalizeTypes();
}
- DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Type-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
// Only allow creation of legal node types.
CurDAG->NewNodesMustHaveLegalTypes = true;
@@ -775,8 +780,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized type-legalized selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
}
{
@@ -786,8 +793,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
}
if (Changed) {
- DEBUG(dbgs() << "Vector-legalized selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Vector-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
{
NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
@@ -795,8 +804,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->LegalizeTypes();
}
- DEBUG(dbgs() << "Vector/type-legalized selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Vector/type-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (ViewDAGCombineLT && MatchFilterBB)
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
@@ -808,8 +819,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized vector-legalized selection DAG: BB#"
- << BlockNumber << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Optimized vector-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
}
if (ViewLegalizeDAGs && MatchFilterBB)
@@ -821,8 +834,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Legalize();
}
- DEBUG(dbgs() << "Legalized selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (ViewDAGCombine2 && MatchFilterBB)
CurDAG->viewGraph("dag-combine2 input for " + BlockName);
@@ -834,8 +849,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized legalized selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Optimized legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (OptLevel != CodeGenOpt::None)
ComputeLiveOutVRegInfo();
@@ -851,8 +868,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
DoInstructionSelection();
}
- DEBUG(dbgs() << "Selected selection DAG: BB#" << BlockNumber
- << " '" << BlockName << "'\n"; CurDAG->dump());
+ DEBUG(dbgs() << "Selected selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (ViewSchedDAGs && MatchFilterBB)
CurDAG->viewGraph("scheduler input for " + BlockName);
@@ -919,9 +938,9 @@ public:
} // end anonymous namespace
void SelectionDAGISel::DoInstructionSelection() {
- DEBUG(dbgs() << "===== Instruction selection begins: BB#"
- << FuncInfo->MBB->getNumber()
- << " '" << FuncInfo->MBB->getName() << "'\n");
+ DEBUG(dbgs() << "===== Instruction selection begins: "
+ << printMBBReference(*FuncInfo->MBB) << " '"
+ << FuncInfo->MBB->getName() << "'\n");
PreprocessISelDAG();
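
Every per-phase banner in CodeGenAndEmitDAG now derives the block reference from FuncInfo->MBB rather than the cached BlockNumber, while BlockName is still assembled from the function and IR block names as before. For a hypothetical function f whose entry block is number 0, the first banner would read:

    Initial selection DAG: %bb.0 'f:entry'
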
diff --git a/lib/CodeGen/SlotIndexes.cpp b/lib/CodeGen/SlotIndexes.cpp
index 25a1c37b145..184329a659c 100644
--- a/lib/CodeGen/SlotIndexes.cpp
+++ b/lib/CodeGen/SlotIndexes.cpp
@@ -264,7 +264,7 @@ LLVM_DUMP_METHOD void SlotIndexes::dump() const {
}
for (unsigned i = 0, e = MBBRanges.size(); i != e; ++i)
- dbgs() << "BB#" << i << "\t[" << MBBRanges[i].first << ';'
+ dbgs() << "%bb." << i << "\t[" << MBBRanges[i].first << ';'
<< MBBRanges[i].second << ")\n";
}
#endif
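
Note the two spellings the patch uses: where a MachineBasicBlock is in hand, the helper is preferred, but where only a block number survives (the MBBRanges index above, or the tail number in MachineTraceMetrics), the "%bb." prefix is written out literally. A small sketch of both forms, assuming the printMBBReference helper this patch introduces:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Support/raw_ostream.h"

    void printBoth(llvm::raw_ostream &OS, const llvm::MachineBasicBlock &MBB,
                   unsigned Num) {
      OS << llvm::printMBBReference(MBB); // preferred: the block is available
      OS << " %bb." << Num;               // fallback: only the number is known
    }
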
diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp
index 49f31333acf..fc85ea3d166 100644
--- a/lib/CodeGen/SplitKit.cpp
+++ b/lib/CodeGen/SplitKit.cpp
@@ -729,7 +729,8 @@ SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) {
assert(OpenIdx && "openIntv not called before enterIntvAtEnd");
SlotIndex End = LIS.getMBBEndIdx(&MBB);
SlotIndex Last = End.getPrevSlot();
- DEBUG(dbgs() << " enterIntvAtEnd BB#" << MBB.getNumber() << ", " << Last);
+ DEBUG(dbgs() << " enterIntvAtEnd " << printMBBReference(MBB) << ", "
+ << Last);
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Last);
if (!ParentVNI) {
DEBUG(dbgs() << ": not live\n");
@@ -808,7 +809,8 @@ SlotIndex SplitEditor::leaveIntvBefore(SlotIndex Idx) {
SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
assert(OpenIdx && "openIntv not called before leaveIntvAtTop");
SlotIndex Start = LIS.getMBBStartIdx(&MBB);
- DEBUG(dbgs() << " leaveIntvAtTop BB#" << MBB.getNumber() << ", " << Start);
+ DEBUG(dbgs() << " leaveIntvAtTop " << printMBBReference(MBB) << ", "
+ << Start);
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start);
if (!ParentVNI) {
@@ -906,15 +908,15 @@ SplitEditor::findShallowDominator(MachineBasicBlock *MBB,
// MBB isn't in a loop, it doesn't get any better. All dominators have a
// higher frequency by definition.
if (!Loop) {
- DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#"
- << MBB->getNumber() << " at depth 0\n");
+ DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates "
+ << printMBBReference(*MBB) << " at depth 0\n");
return MBB;
}
// We'll never be able to exit the DefLoop.
if (Loop == DefLoop) {
- DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#"
- << MBB->getNumber() << " in the same loop\n");
+ DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates "
+ << printMBBReference(*MBB) << " in the same loop\n");
return MBB;
}
@@ -923,8 +925,8 @@ SplitEditor::findShallowDominator(MachineBasicBlock *MBB,
if (Depth < BestDepth) {
BestMBB = MBB;
BestDepth = Depth;
- DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#"
- << MBB->getNumber() << " at depth " << Depth << '\n');
+ DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates "
+ << printMBBReference(*MBB) << " at depth " << Depth << '\n');
}
// Leave loop by going to the immediate dominator of the loop header.
@@ -1063,7 +1065,7 @@ void SplitEditor::hoistCopies() {
DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@' << VNI->def
<< " for parent " << ParentVNI->id << '@' << ParentVNI->def
- << " hoist to BB#" << Dom.first->getNumber() << ' '
+ << " hoist to " << printMBBReference(*Dom.first) << ' '
<< Dom.second << '\n');
}
@@ -1173,7 +1175,7 @@ bool SplitEditor::transferValues() {
if (Start != BlockStart) {
VNInfo *VNI = LI.extendInBlock(BlockStart, std::min(BlockEnd, End));
assert(VNI && "Missing def for complex mapped value");
- DEBUG(dbgs() << ':' << VNI->id << "*BB#" << MBB->getNumber());
+ DEBUG(dbgs() << ':' << VNI->id << "*" << printMBBReference(*MBB));
// MBB has its own def. Is it also live-out?
if (BlockEnd <= End)
LRC.setLiveOutValue(&*MBB, VNI);
@@ -1186,7 +1188,7 @@ bool SplitEditor::transferValues() {
// Handle the live-in blocks covered by [Start;End).
assert(Start <= BlockStart && "Expected live-in block");
while (BlockStart < End) {
- DEBUG(dbgs() << ">BB#" << MBB->getNumber());
+ DEBUG(dbgs() << ">" << printMBBReference(*MBB));
BlockEnd = LIS.getMBBEndIdx(&*MBB);
if (BlockStart == ParentVNI->def) {
// This block has the def of a parent PHI, so it isn't live-in.
@@ -1329,7 +1331,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
unsigned RegIdx = RegAssign.lookup(Idx);
LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx));
MO.setReg(LI.reg);
- DEBUG(dbgs() << " rewr BB#" << MI->getParent()->getNumber() << '\t'
+ DEBUG(dbgs() << " rewr " << printMBBReference(*MI->getParent()) << '\t'
<< Idx << ':' << RegIdx << '\t' << *MI);
// Extend liveness to Idx if the instruction reads reg.
@@ -1563,9 +1565,9 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(MBBNum);
- DEBUG(dbgs() << "BB#" << MBBNum << " [" << Start << ';' << Stop
- << ") intf " << LeaveBefore << '-' << EnterAfter
- << ", live-through " << IntvIn << " -> " << IntvOut);
+ DEBUG(dbgs() << "%bb." << MBBNum << " [" << Start << ';' << Stop << ") intf "
+ << LeaveBefore << '-' << EnterAfter << ", live-through "
+ << IntvIn << " -> " << IntvOut);
assert((IntvIn || IntvOut) && "Use splitSingleBlock for isolated blocks");
@@ -1665,7 +1667,7 @@ void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
- DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop
+ DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';' << Stop
<< "), uses " << BI.FirstInstr << '-' << BI.LastInstr
<< ", reg-in " << IntvIn << ", leave before " << LeaveBefore
<< (BI.LiveOut ? ", stack-out" : ", killed in block"));
@@ -1757,7 +1759,7 @@ void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
- DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop
+ DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';' << Stop
<< "), uses " << BI.FirstInstr << '-' << BI.LastInstr
<< ", reg-out " << IntvOut << ", enter after " << EnterAfter
<< (BI.LiveIn ? ", stack-in" : ", defined in block"));
diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp
index 0a7be1d12fa..18bba42907b 100644
--- a/lib/CodeGen/StackColoring.cpp
+++ b/lib/CodeGen/StackColoring.cpp
@@ -739,7 +739,7 @@ unsigned StackColoring::collectMarkers(unsigned NumSlot) {
} else {
for (auto Slot : slots) {
DEBUG(dbgs() << "Found a use of slot #" << Slot);
- DEBUG(dbgs() << " at BB#" << MBB->getNumber() << " index ");
+ DEBUG(dbgs() << " at " << printMBBReference(*MBB) << " index ");
DEBUG(Indexes->getInstructionIndex(MI).print(dbgs()));
const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
if (Allocation) {
diff --git a/lib/CodeGen/TailDuplicator.cpp b/lib/CodeGen/TailDuplicator.cpp
index 7adf9b037b5..63eb6cc651b 100644
--- a/lib/CodeGen/TailDuplicator.cpp
+++ b/lib/CodeGen/TailDuplicator.cpp
@@ -111,9 +111,10 @@ static void VerifyPHIs(MachineFunction &MF, bool CheckExtra) {
}
}
if (!Found) {
- dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI;
- dbgs() << " missing input from predecessor BB#"
- << PredBB->getNumber() << '\n';
+ dbgs() << "Malformed PHI in " << printMBBReference(*MBB) << ": "
+ << *MI;
+ dbgs() << " missing input from predecessor "
+ << printMBBReference(*PredBB) << '\n';
llvm_unreachable(nullptr);
}
}
@@ -121,15 +122,16 @@ static void VerifyPHIs(MachineFunction &MF, bool CheckExtra) {
for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
MachineBasicBlock *PHIBB = MI->getOperand(i + 1).getMBB();
if (CheckExtra && !Preds.count(PHIBB)) {
- dbgs() << "Warning: malformed PHI in BB#" << MBB->getNumber() << ": "
- << *MI;
- dbgs() << " extra input from predecessor BB#" << PHIBB->getNumber()
- << '\n';
+ dbgs() << "Warning: malformed PHI in " << printMBBReference(*MBB)
+ << ": " << *MI;
+ dbgs() << " extra input from predecessor "
+ << printMBBReference(*PHIBB) << '\n';
llvm_unreachable(nullptr);
}
if (PHIBB->getNumber() < 0) {
- dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI;
- dbgs() << " non-existing BB#" << PHIBB->getNumber() << '\n';
+ dbgs() << "Malformed PHI in " << printMBBReference(*MBB) << ": "
+ << *MI;
+ dbgs() << " non-existing " << printMBBReference(*PHIBB) << '\n';
llvm_unreachable(nullptr);
}
}
@@ -783,7 +785,8 @@ bool TailDuplicator::tailDuplicate(bool IsSimple, MachineBasicBlock *TailBB,
MachineBasicBlock *ForcedLayoutPred,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallVectorImpl<MachineInstr *> &Copies) {
- DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n');
+ DEBUG(dbgs() << "\n*** Tail-duplicating " << printMBBReference(*TailBB)
+ << '\n');
DenseSet<unsigned> UsedByPhi;
getRegsUsedByPHIs(*TailBB, &UsedByPhi);
diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index d1bcd3dcaec..f765825cdee 100644
--- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -207,7 +207,7 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
return nullptr;
}
}
- DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n');
+ DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n');
return nullptr;
}
diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 668d21d0b16..f7c97117ba5 100644
--- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -369,7 +369,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
return nullptr;
}
}
- DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n');
+ DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n');
return nullptr;
}
@@ -383,7 +383,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
// Reject any live-in physregs. It's probably NZCV/EFLAGS, and very hard to
// get right.
if (!MBB->livein_empty()) {
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n");
+ DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n");
return false;
}
@@ -396,7 +396,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
continue;
if (++InstrCount > BlockInstrLimit && !Stress) {
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than "
+ DEBUG(dbgs() << printMBBReference(*MBB) << " has more than "
<< BlockInstrLimit << " instructions.\n");
return false;
}
@@ -458,8 +458,9 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
return false;
// The CFG topology checks out.
- DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber() << " -> BB#"
- << CmpBB->getNumber() << " -> BB#" << Tail->getNumber() << '\n');
+ DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> "
+ << printMBBReference(*CmpBB) << " -> "
+ << printMBBReference(*Tail) << '\n');
++NumConsidered;
// Tail is allowed to have many predecessors, but we can't handle PHIs yet.
@@ -562,8 +563,9 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
}
void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
- DEBUG(dbgs() << "Merging BB#" << CmpBB->getNumber() << " into BB#"
- << Head->getNumber() << ":\n" << *CmpBB);
+ DEBUG(dbgs() << "Merging " << printMBBReference(*CmpBB) << " into "
+ << printMBBReference(*Head) << ":\n"
+ << *CmpBB);
// All CmpBB instructions are moved into Head, and CmpBB is deleted.
// Update the CFG first.
diff --git a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index ec98980fa0b..98480835376 100644
--- a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -12,9 +12,9 @@
// 1. For BBs that are targets of CBZ/CBNZ instructions, we know the value of
// the CBZ/CBNZ source register is zero on the taken/not-taken path. For
// instance, the copy instruction in the code below can be removed because
-// the CBZW jumps to BB#2 when w0 is zero.
+// the CBZW jumps to %bb.2 when w0 is zero.
//
-// BB#1:
+// %bb.1:
// cbz w0, .LBB0_2
// .LBB0_2:
// mov w0, wzr ; <-- redundant
@@ -22,11 +22,11 @@
// 2. If the flag setting instruction defines a register other than WZR/XZR, we
// can remove a zero copy in some cases.
//
-// BB#0:
+// %bb.0:
// subs w0, w1, w2
// str w0, [x1]
// b.ne .LBB0_2
-// BB#1:
+// %bb.1:
// mov w0, wzr ; <-- redundant
// str w0, [x2]
// .LBB0_2
@@ -35,7 +35,7 @@
// constant (i.e., ADDS[W|X]ri, SUBS[W|X]ri), we can remove a mov immediate
// in some cases.
//
-// BB#0:
+// %bb.0:
// subs xzr, x0, #1
// b.eq .LBB0_1
// .LBB0_1:
diff --git a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index 879f65e1228..5ff82c5d1e0 100644
--- a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -270,8 +270,8 @@ LLVM_DUMP_METHOD void PHILinearize::dump(MachineRegisterInfo *MRI) {
dbgs() << "Dest: " << printReg(Element.DestReg, TRI)
<< " Sources: {";
for (auto &SI : Element.Sources) {
- dbgs() << printReg(SI.first, TRI) << "(BB#"
- << SI.second->getNumber() << "),";
+ dbgs() << printReg(SI.first, TRI) << '(' << printMBBReference(*SI.second)
+ << "),";
}
dbgs() << "}\n";
}
@@ -658,7 +658,7 @@ RegionMRT *MRT::buildMRT(MachineFunction &MF,
continue;
}
- DEBUG(dbgs() << "Visiting BB#" << MBB->getNumber() << "\n");
+ DEBUG(dbgs() << "Visiting " << printMBBReference(*MBB) << "\n");
MBBMRT *NewMBB = new MBBMRT(MBB);
MachineRegion *Region = RegionInfo->getRegionFor(MBB);
@@ -705,7 +705,7 @@ void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
// If this is live out of the MBB
for (auto &UI : MRI->use_operands(Reg)) {
if (UI.getParent()->getParent() != MBB) {
- DEBUG(dbgs() << "Add LiveOut (MBB BB#" << MBB->getNumber()
+ DEBUG(dbgs() << "Add LiveOut (MBB " << printMBBReference(*MBB)
<< "): " << printReg(Reg, TRI) << "\n");
addLiveOut(Reg);
} else {
@@ -749,7 +749,8 @@ void LinearizedRegion::storeLiveOuts(MachineBasicBlock *MBB,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
- DEBUG(dbgs() << "-Store Live Outs Begin (BB#" << MBB->getNumber() << ")-\n");
+ DEBUG(dbgs() << "-Store Live Outs Begin (" << printMBBReference(*MBB)
+ << ")-\n");
for (auto &II : *MBB) {
for (auto &RI : II.defs()) {
storeLiveOutReg(MBB, RI.getReg(), RI.getParent(), MRI, TRI, PHIInfo);
@@ -773,8 +774,8 @@ void LinearizedRegion::storeLiveOuts(MachineBasicBlock *MBB,
for (int i = 0; i < numPreds; ++i) {
if (getPHIPred(PHI, i) == MBB) {
unsigned PHIReg = getPHISourceReg(PHI, i);
- DEBUG(dbgs() << "Add LiveOut (PhiSource BB#" << MBB->getNumber()
- << " -> BB#" << (*SI)->getNumber()
+ DEBUG(dbgs() << "Add LiveOut (PhiSource " << printMBBReference(*MBB)
+ << " -> " << printMBBReference(*(*SI))
<< "): " << printReg(PHIReg, TRI) << "\n");
addLiveOut(PHIReg);
}
@@ -1480,8 +1481,8 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
if (SourceMBB) {
MIB.addReg(CombinedSourceReg);
MIB.addMBB(SourceMBB);
- DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", BB#"
- << SourceMBB->getNumber());
+ DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
+ << printMBBReference(*SourceMBB));
}
for (unsigned i = 0; i < NumInputs; ++i) {
@@ -1492,8 +1493,8 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
MIB.addReg(SourceReg);
MIB.addMBB(SourcePred);
- DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#"
- << SourcePred->getNumber());
+ DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
+ << printMBBReference(*SourcePred));
}
DEBUG(dbgs() << ")\n");
}
@@ -1524,8 +1525,8 @@ void AMDGPUMachineCFGStructurizer::replacePHI(
getPHIDestReg(PHI));
MIB.addReg(CombinedSourceReg);
MIB.addMBB(LastMerge);
- DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", BB#"
- << LastMerge->getNumber());
+ DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
+ << printMBBReference(*LastMerge));
for (unsigned i = 0; i < NumInputs; ++i) {
if (isPHIRegionIndex(PHIRegionIndices, i)) {
continue;
@@ -1534,8 +1535,8 @@ void AMDGPUMachineCFGStructurizer::replacePHI(
MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
MIB.addReg(SourceReg);
MIB.addMBB(SourcePred);
- DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#"
- << SourcePred->getNumber());
+ DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
+ << printMBBReference(*SourcePred));
}
DEBUG(dbgs() << ")\n");
} else {
@@ -1572,8 +1573,8 @@ void AMDGPUMachineCFGStructurizer::replaceEntryPHI(
getPHIDestReg(PHI));
MIB.addReg(CombinedSourceReg);
MIB.addMBB(IfMBB);
- DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", BB#"
- << IfMBB->getNumber());
+ DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
+ << printMBBReference(*IfMBB));
unsigned NumInputs = getPHINumInputs(PHI);
for (unsigned i = 0; i < NumInputs; ++i) {
if (isPHIRegionIndex(PHIRegionIndices, i)) {
@@ -1583,8 +1584,8 @@ void AMDGPUMachineCFGStructurizer::replaceEntryPHI(
MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
MIB.addReg(SourceReg);
MIB.addMBB(SourcePred);
- DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#"
- << SourcePred->getNumber());
+ DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
+ << printMBBReference(*SourcePred));
}
DEBUG(dbgs() << ")\n");
PHI.eraseFromParent();
@@ -1749,11 +1750,11 @@ void AMDGPUMachineCFGStructurizer::insertMergePHI(MachineBasicBlock *IfBB,
if (MergeBB->succ_begin() == MergeBB->succ_end()) {
return;
}
- DEBUG(dbgs() << "Merge PHI (BB#" << MergeBB->getNumber()
+ DEBUG(dbgs() << "Merge PHI (" << printMBBReference(*MergeBB)
<< "): " << printReg(DestRegister, TRI) << "<def> = PHI("
- << printReg(IfSourceRegister, TRI) << ", BB#"
- << IfBB->getNumber() << printReg(CodeSourceRegister, TRI)
- << ", BB#" << CodeBB->getNumber() << ")\n");
+ << printReg(IfSourceRegister, TRI) << ", "
+ << printMBBReference(*IfBB) << printReg(CodeSourceRegister, TRI)
+ << ", " << printMBBReference(*CodeBB) << ")\n");
const DebugLoc &DL = MergeBB->findDebugLoc(MergeBB->begin());
MachineInstrBuilder MIB = BuildMI(*MergeBB, MergeBB->instr_begin(), DL,
TII->get(TargetOpcode::PHI), DestRegister);
@@ -1811,8 +1812,8 @@ static void removeExternalCFGEdges(MachineBasicBlock *StartMBB,
for (auto SI : Succs) {
std::pair<MachineBasicBlock *, MachineBasicBlock *> Edge = SI;
- DEBUG(dbgs() << "Removing edge: BB#" << Edge.first->getNumber() << " -> BB#"
- << Edge.second->getNumber() << "\n");
+ DEBUG(dbgs() << "Removing edge: " << printMBBReference(*Edge.first)
+ << " -> " << printMBBReference(*Edge.second) << "\n");
Edge.first->removeSuccessor(Edge.second);
}
}
@@ -1850,8 +1851,8 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfBlock(
if (!CodeBBEnd->isSuccessor(MergeBB))
CodeBBEnd->addSuccessor(MergeBB);
- DEBUG(dbgs() << "Moved MBB#" << CodeBBStart->getNumber() << " through MBB#"
- << CodeBBEnd->getNumber() << "\n");
+ DEBUG(dbgs() << "Moved " << printMBBReference(*CodeBBStart) << " through "
+ << printMBBReference(*CodeBBEnd) << "\n");
// If we have a single predecessor we can find a reasonable debug location
MachineBasicBlock *SinglePred =
@@ -2064,7 +2065,7 @@ void AMDGPUMachineCFGStructurizer::rewriteLiveOutRegs(MachineBasicBlock *IfBB,
// is a source block for a definition.
SmallVector<unsigned, 4> Sources;
if (PHIInfo.findSourcesFromMBB(CodeBB, Sources)) {
- DEBUG(dbgs() << "Inserting PHI Live Out from BB#" << CodeBB->getNumber()
+ DEBUG(dbgs() << "Inserting PHI Live Out from " << printMBBReference(*CodeBB)
<< "\n");
for (auto SI : Sources) {
unsigned DestReg;
@@ -2172,16 +2173,17 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
CurrentBackedgeReg = NewBackedgeReg;
DEBUG(dbgs() << "Inserting backedge PHI: "
<< printReg(NewBackedgeReg, TRI) << "<def> = PHI("
- << printReg(CurrentBackedgeReg, TRI) << ", BB#"
- << getPHIPred(*PHIDefInstr, 0)->getNumber() << ", "
+ << printReg(CurrentBackedgeReg, TRI) << ", "
+ << printMBBReference(*getPHIPred(*PHIDefInstr, 0))
+ << ", "
<< printReg(getPHISourceReg(*PHIDefInstr, 1), TRI)
- << ", BB#" << (*SRI).second->getNumber());
+ << ", " << printMBBReference(*(*SRI).second));
}
} else {
MIB.addReg(SourceReg);
MIB.addMBB((*SRI).second);
- DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#"
- << (*SRI).second->getNumber() << ", ");
+ DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
+ << printMBBReference(*(*SRI).second) << ", ");
}
}
@@ -2189,8 +2191,8 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
if (CurrentBackedgeReg != 0) {
MIB.addReg(CurrentBackedgeReg);
MIB.addMBB(Exit);
- DEBUG(dbgs() << printReg(CurrentBackedgeReg, TRI) << ", BB#"
- << Exit->getNumber() << ")\n");
+ DEBUG(dbgs() << printReg(CurrentBackedgeReg, TRI) << ", "
+ << printMBBReference(*Exit) << ")\n");
} else {
DEBUG(dbgs() << ")\n");
}
@@ -2443,11 +2445,12 @@ void AMDGPUMachineCFGStructurizer::splitLoopPHI(MachineInstr &PHI,
<< "<def> = PHI(");
MIB.addReg(PHISource);
MIB.addMBB(Entry);
- DEBUG(dbgs() << printReg(PHISource, TRI) << ", BB#" << Entry->getNumber());
+ DEBUG(dbgs() << printReg(PHISource, TRI) << ", "
+ << printMBBReference(*Entry));
MIB.addReg(RegionSourceReg);
MIB.addMBB(RegionSourceMBB);
- DEBUG(dbgs() << " ," << printReg(RegionSourceReg, TRI) << ", BB#"
- << RegionSourceMBB->getNumber() << ")\n");
+ DEBUG(dbgs() << " ," << printReg(RegionSourceReg, TRI) << ", "
+ << printMBBReference(*RegionSourceMBB) << ")\n");
}
void AMDGPUMachineCFGStructurizer::splitLoopPHIs(MachineBasicBlock *Entry,
@@ -2528,9 +2531,9 @@ AMDGPUMachineCFGStructurizer::splitEntry(LinearizedRegion *LRegion) {
MachineBasicBlock *EntrySucc = split(Entry->getFirstNonPHI());
MachineBasicBlock *Exit = LRegion->getExit();
- DEBUG(dbgs() << "Split BB#" << Entry->getNumber() << " to BB#"
- << Entry->getNumber() << " -> BB#" << EntrySucc->getNumber()
- << "\n");
+ DEBUG(dbgs() << "Split " << printMBBReference(*Entry) << " to "
+ << printMBBReference(*Entry) << " -> "
+ << printMBBReference(*EntrySucc) << "\n");
LRegion->addMBB(EntrySucc);
// Make the backedge go to Entry Succ
diff --git a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index 942063d5f93..56d639aca52 100644
--- a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -63,8 +63,8 @@ static void printRegion(raw_ostream &OS,
unsigned MaxInstNum =
std::numeric_limits<unsigned>::max()) {
auto BB = Begin->getParent();
- OS << BB->getParent()->getName() << ":BB#" << BB->getNumber()
- << ' ' << BB->getName() << ":\n";
+ OS << BB->getParent()->getName() << ":" << printMBBReference(*BB) << ' '
+ << BB->getName() << ":\n";
auto I = Begin;
MaxInstNum = std::max(MaxInstNum, 1u);
for (; I != End && MaxInstNum; ++I, --MaxInstNum) {
diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 155b400ba02..38803204d6e 100644
--- a/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -531,9 +531,8 @@ void GCNScheduleDAGMILive::finalizeSchedule() {
}
DEBUG(dbgs() << "********** MI Scheduling **********\n");
- DEBUG(dbgs() << MF.getName()
- << ":BB#" << MBB->getNumber() << " " << MBB->getName()
- << "\n From: " << *begin() << " To: ";
+ DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
+ << MBB->getName() << "\n From: " << *begin() << " To: ";
if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
else dbgs() << "End";
dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
diff --git a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index d6b99966760..8b155c2d278 100644
--- a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -22,7 +22,7 @@
/// %2 <vgpr> = VECTOR_INST
/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %4 <vsrc> = PHI %1 <vsrc>, <BB#0>, %3 <vrsc>, <BB#1>
+/// %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vrsc>, <%bb.1>
/// %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
///
@@ -37,7 +37,7 @@
/// %2 <vgpr> = VECTOR_INST
/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <vsrc>, <BB#1>
+/// %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
@@ -52,7 +52,7 @@
/// %2 <vgpr> = VECTOR_INST
/// %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
-/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <sgpr>, <BB#1>
+/// %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
@@ -515,8 +515,9 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
if (MDT.dominates(MI1, MI2)) {
if (!intereferes(MI2, MI1)) {
- DEBUG(dbgs() << "Erasing from BB#" << MI2->getParent()->getNumber()
- << " " << *MI2);
+ DEBUG(dbgs() << "Erasing from "
+ << printMBBReference(*MI2->getParent()) << " "
+ << *MI2);
MI2->eraseFromParent();
Defs.erase(I2++);
Changed = true;
@@ -524,8 +525,9 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
}
} else if (MDT.dominates(MI2, MI1)) {
if (!intereferes(MI1, MI2)) {
- DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber()
- << " " << *MI1);
+ DEBUG(dbgs() << "Erasing from "
+ << printMBBReference(*MI1->getParent()) << " "
+ << *MI1);
MI1->eraseFromParent();
Defs.erase(I1++);
Changed = true;
@@ -541,10 +543,11 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
if (!intereferes(MI1, I) && !intereferes(MI2, I)) {
- DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber()
- << " " << *MI1 << "and moving from BB#"
- << MI2->getParent()->getNumber() << " to BB#"
- << I->getParent()->getNumber() << " " << *MI2);
+ DEBUG(dbgs() << "Erasing from "
+ << printMBBReference(*MI1->getParent()) << " " << *MI1
+ << "and moving from "
+ << printMBBReference(*MI2->getParent()) << " to "
+ << printMBBReference(*I->getParent()) << " " << *MI2);
I->getParent()->splice(I, MI2->getParent(), MI2);
MI1->eraseFromParent();
Defs.erase(I1++);
diff --git a/lib/Target/AMDGPU/SIMachineScheduler.cpp b/lib/Target/AMDGPU/SIMachineScheduler.cpp
index c13148bf0a2..3fb39998ff7 100644
--- a/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -2050,9 +2050,9 @@ void SIScheduleDAGMI::schedule()
placeDebugValues();
DEBUG({
- unsigned BBNum = begin()->getParent()->getNumber();
- dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
- dumpSchedule();
- dbgs() << '\n';
- });
+ dbgs() << "*** Final schedule for "
+ << printMBBReference(*begin()->getParent()) << " ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
}
diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index 18649733103..c46fd287106 100644
--- a/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -224,7 +224,8 @@ FunctionPass *llvm::createSIWholeQuadModePass() {
#ifndef NDEBUG
LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() {
for (const auto &BII : Blocks) {
- dbgs() << "\nBB#" << BII.first->getNumber() << ":\n"
+ dbgs() << "\n"
+ << printMBBReference(*BII.first) << ":\n"
<< " InNeeds = " << PrintState(BII.second.InNeeds)
<< ", Needs = " << PrintState(BII.second.Needs)
<< ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n";
@@ -680,7 +681,7 @@ void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
if (!isEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact)
return;
- DEBUG(dbgs() << "\nProcessing block BB#" << MBB.getNumber() << ":\n");
+ DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB) << ":\n");
unsigned SavedWQMReg = 0;
unsigned SavedNonWWMReg = 0;
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index bc781b26b2c..8baee1ce281 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -326,7 +326,7 @@ LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
DEBUG({
for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
const BasicBlockInfo &BBI = BBInfo[J];
- dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
+ dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
<< " kb=" << unsigned(BBI.KnownBits)
<< " ua=" << unsigned(BBI.Unalign)
<< " pa=" << unsigned(BBI.PostAlign)
@@ -1071,11 +1071,11 @@ bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
const BasicBlockInfo &BBI = BBInfo[Block];
dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
<< " max delta=" << MaxDisp
- << format(" insn address=%#x", UserOffset)
- << " in BB#" << Block << ": "
+ << format(" insn address=%#x", UserOffset) << " in "
+ << printMBBReference(*MI->getParent()) << ": "
<< format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
<< format("CPE address=%#x offset=%+d: ", CPEOffset,
- int(CPEOffset-UserOffset));
+ int(CPEOffset - UserOffset));
});
}
@@ -1261,7 +1261,7 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
// This is the least amount of required padding seen so far.
BestGrowth = Growth;
WaterIter = IP;
- DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
+ DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
<< " Growth=" << Growth << '\n');
if (CloserWater && WaterBB == U.MI->getParent())
@@ -1305,8 +1305,8 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
- DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
- << format(", expected CPE offset %#x\n", CPEOffset));
+ DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
+ << format(", expected CPE offset %#x\n", CPEOffset));
NewMBB = &*++UserMBB->getIterator();
// Add an unconditional branch from UserMBB to fallthrough block. Record
// it for branch lengthening; this new branch will not get out of range,
@@ -1578,11 +1578,11 @@ bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
unsigned BrOffset = getOffsetOf(MI) + PCAdj;
unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
- DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
- << " from BB#" << MI->getParent()->getNumber()
- << " max delta=" << MaxDisp
- << " from " << getOffsetOf(MI) << " to " << DestOffset
- << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
+ DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
+ << " from " << printMBBReference(*MI->getParent())
+ << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
+ << " to " << DestOffset << " offset "
+ << int(DestOffset - BrOffset) << "\t" << *MI);
if (BrOffset <= DestOffset) {
// Branch before the Dest.
@@ -1700,9 +1700,9 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
}
MachineBasicBlock *NextBB = &*++MBB->getIterator();
- DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
- << " also invert condition and change dest. to BB#"
- << NextBB->getNumber() << "\n");
+ DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
+ << " also invert condition and change dest. to "
+ << printMBBReference(*NextBB) << "\n");
// Insert a new conditional branch and a new unconditional branch.
// Also update the ImmBranch as well as adding a new entry for the new branch.
@@ -2212,7 +2212,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
.addReg(IdxReg, getKillRegState(IdxRegKill))
.addJumpTableIndex(JTI, JTOP.getTargetFlags())
.addImm(CPEMI->getOperand(0).getImm());
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);
+ DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);
unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
CPEMI->setDesc(TII->get(JTOpc));
diff --git a/lib/Target/ARM/ARMConstantPoolValue.cpp b/lib/Target/ARM/ARMConstantPoolValue.cpp
index 38ea835fbe2..39ae02af513 100644
--- a/lib/Target/ARM/ARMConstantPoolValue.cpp
+++ b/lib/Target/ARM/ARMConstantPoolValue.cpp
@@ -292,6 +292,6 @@ void ARMConstantPoolMBB::addSelectionDAGCSEId(FoldingSetNodeID &ID) {
}
void ARMConstantPoolMBB::print(raw_ostream &O) const {
- O << "BB#" << MBB->getNumber();
+ O << printMBBReference(*MBB);
ARMConstantPoolValue::print(O);
}
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp
index 98cd0f165a6..283359c8b23 100644
--- a/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -573,10 +573,10 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
return;
} else {
// The PHI node looks like:
- // %2<def> = PHI %0, <BB#1>, %1, <BB#3>
- // Trace each incoming definition, e.g., (%0, BB#1) and (%1, BB#3)
- // The AND operation can be removed if both %0 in BB#1 and %1 in
- // BB#3 are defined with with a load matching the MaskN.
+ // %2<def> = PHI %0, <%bb.1>, %1, <%bb.3>
+ // Trace each incoming definition, e.g., (%0, %bb.1) and (%1, %bb.3)
+ // The AND operation can be removed if both %0 in %bb.1 and %1 in
+      // %bb.3 are defined with a load matching the MaskN.
DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n');
unsigned PrevReg = -1;
for (unsigned i = 0; i < MII->getNumOperands(); ++i) {
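
The tracing described in the comment walks the PHI's operands, which come in (value, source block) pairs after the destination operand; the TailDuplicator hunk earlier uses the same pairing. A generic sketch of that traversal (forEachPHIInput is a hypothetical name):

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"

    // Operand 0 is the PHI destination; operands (1,2), (3,4), ... pair each
    // incoming value with the predecessor block it flows in from.
    void forEachPHIInput(const llvm::MachineInstr &PHI) {
      for (unsigned i = 1, e = PHI.getNumOperands(); i < e; i += 2) {
        unsigned SrcReg = PHI.getOperand(i).getReg();
        llvm::MachineBasicBlock *Pred = PHI.getOperand(i + 1).getMBB();
        (void)SrcReg; (void)Pred; // inspect the (value, block) pair here
      }
    }
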
diff --git a/lib/Target/Hexagon/BitTracker.cpp b/lib/Target/Hexagon/BitTracker.cpp
index 4a10408d8c7..a8b89990277 100644
--- a/lib/Target/Hexagon/BitTracker.cpp
+++ b/lib/Target/Hexagon/BitTracker.cpp
@@ -767,7 +767,7 @@ bool BT::MachineEvaluator::evaluate(const MachineInstr &MI,
void BT::visitPHI(const MachineInstr &PI) {
int ThisN = PI.getParent()->getNumber();
if (Trace)
- dbgs() << "Visit FI(BB#" << ThisN << "): " << PI;
+ dbgs() << "Visit FI(" << printMBBReference(*PI.getParent()) << "): " << PI;
const MachineOperand &MD = PI.getOperand(0);
assert(MD.getSubReg() == 0 && "Unexpected sub-register in definition");
@@ -784,7 +784,8 @@ void BT::visitPHI(const MachineInstr &PI) {
const MachineBasicBlock *PB = PI.getOperand(i + 1).getMBB();
int PredN = PB->getNumber();
if (Trace)
- dbgs() << " edge BB#" << PredN << "->BB#" << ThisN;
+ dbgs() << " edge " << printMBBReference(*PB) << "->"
+ << printMBBReference(*PI.getParent());
if (!EdgeExec.count(CFGEdge(PredN, ThisN))) {
if (Trace)
dbgs() << " not executable\n";
@@ -809,10 +810,8 @@ void BT::visitPHI(const MachineInstr &PI) {
}
void BT::visitNonBranch(const MachineInstr &MI) {
- if (Trace) {
- int ThisN = MI.getParent()->getNumber();
- dbgs() << "Visit MI(BB#" << ThisN << "): " << MI;
- }
+ if (Trace)
+ dbgs() << "Visit MI(" << printMBBReference(*MI.getParent()) << "): " << MI;
if (MI.isDebugValue())
return;
assert(!MI.isBranch() && "Unexpected branch instruction");
@@ -897,7 +896,7 @@ void BT::visitBranchesFrom(const MachineInstr &BI) {
BTs.clear();
const MachineInstr &MI = *It;
if (Trace)
- dbgs() << "Visit BR(BB#" << ThisN << "): " << MI;
+ dbgs() << "Visit BR(" << printMBBReference(B) << "): " << MI;
assert(MI.isBranch() && "Expecting branch instruction");
InstrExec.insert(&MI);
bool Eval = ME.evaluate(MI, Map, BTs, FallsThrough);
@@ -913,7 +912,7 @@ void BT::visitBranchesFrom(const MachineInstr &BI) {
if (Trace) {
dbgs() << " adding targets:";
for (unsigned i = 0, n = BTs.size(); i < n; ++i)
- dbgs() << " BB#" << BTs[i]->getNumber();
+ dbgs() << " " << printMBBReference(*BTs[i]);
if (FallsThrough)
dbgs() << "\n falls through\n";
else
diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp
index d3cb53e3594..f14beaad339 100644
--- a/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -2977,7 +2977,7 @@ void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
}
bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
- DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n");
+ DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB) << "\n");
std::vector<PhiInfo> Phis;
for (auto &I : *C.LB) {
if (!I.isPHI())
diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp
index 9a8762a48fd..80db36071db 100644
--- a/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -617,7 +617,7 @@ void MachineConstPropagator::CellMap::print(raw_ostream &os,
void MachineConstPropagator::visitPHI(const MachineInstr &PN) {
const MachineBasicBlock *MB = PN.getParent();
unsigned MBN = MB->getNumber();
- DEBUG(dbgs() << "Visiting FI(BB#" << MBN << "): " << PN);
+ DEBUG(dbgs() << "Visiting FI(" << printMBBReference(*MB) << "): " << PN);
const MachineOperand &MD = PN.getOperand(0);
Register DefR(MD);
@@ -642,8 +642,8 @@ Bottomize:
const MachineBasicBlock *PB = PN.getOperand(i+1).getMBB();
unsigned PBN = PB->getNumber();
if (!EdgeExec.count(CFGEdge(PBN, MBN))) {
- DEBUG(dbgs() << " edge BB#" << PBN << "->BB#" << MBN
- << " not executable\n");
+ DEBUG(dbgs() << " edge " << printMBBReference(*PB) << "->"
+ << printMBBReference(*MB) << " not executable\n");
continue;
}
const MachineOperand &SO = PN.getOperand(i);
@@ -658,9 +658,8 @@ Bottomize:
LatticeCell SrcC;
bool Eval = MCE.evaluate(UseR, Cells.get(UseR.Reg), SrcC);
- DEBUG(dbgs() << " edge from BB#" << PBN << ": "
- << printReg(UseR.Reg, &MCE.TRI, UseR.SubReg)
- << SrcC << '\n');
+ DEBUG(dbgs() << " edge from " << printMBBReference(*PB) << ": "
+ << printReg(UseR.Reg, &MCE.TRI, UseR.SubReg) << SrcC << '\n');
Changed |= Eval ? DefC.meet(SrcC)
: DefC.setBottom();
Cells.update(DefR.Reg, DefC);
@@ -672,7 +671,7 @@ Bottomize:
}
void MachineConstPropagator::visitNonBranch(const MachineInstr &MI) {
- DEBUG(dbgs() << "Visiting MI(BB#" << MI.getParent()->getNumber()
+ DEBUG(dbgs() << "Visiting MI(" << printMBBReference(*MI.getParent())
<< "): " << MI);
CellMap Outputs;
bool Eval = MCE.evaluate(MI, Cells, Outputs);
@@ -729,8 +728,8 @@ void MachineConstPropagator::visitBranchesFrom(const MachineInstr &BrI) {
while (It != End) {
const MachineInstr &MI = *It;
InstrExec.insert(&MI);
- DEBUG(dbgs() << "Visiting " << (EvalOk ? "BR" : "br") << "(BB#"
- << MBN << "): " << MI);
+ DEBUG(dbgs() << "Visiting " << (EvalOk ? "BR" : "br") << "("
+ << printMBBReference(B) << "): " << MI);
// Do not evaluate subsequent branches if the evaluation of any of the
// previous branches failed. Keep iterating over the branches only
// to mark them as executable.
@@ -772,7 +771,8 @@ void MachineConstPropagator::visitBranchesFrom(const MachineInstr &BrI) {
for (const MachineBasicBlock *TB : Targets) {
unsigned TBN = TB->getNumber();
- DEBUG(dbgs() << " pushing edge BB#" << MBN << " -> BB#" << TBN << "\n");
+ DEBUG(dbgs() << " pushing edge " << printMBBReference(B) << " -> "
+ << printMBBReference(*TB) << "\n");
FlowQ.push(CFGEdge(MBN, TBN));
}
}
@@ -870,8 +870,10 @@ void MachineConstPropagator::propagate(MachineFunction &MF) {
CFGEdge Edge = FlowQ.front();
FlowQ.pop();
- DEBUG(dbgs() << "Picked edge BB#" << Edge.first << "->BB#"
- << Edge.second << '\n');
+ DEBUG(dbgs() << "Picked edge "
+ << printMBBReference(*MF.getBlockNumbered(Edge.first)) << "->"
+ << printMBBReference(*MF.getBlockNumbered(Edge.second))
+ << '\n');
if (Edge.first != EntryNum)
if (EdgeExec.count(Edge))
continue;
@@ -934,7 +936,8 @@ void MachineConstPropagator::propagate(MachineFunction &MF) {
for (const MachineBasicBlock *SB : B.successors()) {
unsigned SN = SB->getNumber();
if (!EdgeExec.count(CFGEdge(BN, SN)))
- dbgs() << " BB#" << BN << " -> BB#" << SN << '\n';
+ dbgs() << " " << printMBBReference(B) << " -> "
+ << printMBBReference(*SB) << '\n';
}
}
});
@@ -3126,7 +3129,7 @@ bool HexagonConstEvaluator::rewriteHexBranch(MachineInstr &BrI,
if (BrI.getOpcode() == Hexagon::J2_jump)
return false;
- DEBUG(dbgs() << "Rewrite(BB#" << B.getNumber() << "):" << BrI);
+ DEBUG(dbgs() << "Rewrite(" << printMBBReference(B) << "):" << BrI);
bool Rewritten = false;
if (NumTargets > 0) {
assert(!FallsThru && "This should have been checked before");
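
The propagate() hunk above shows the reverse mapping: a CFG edge stored as a pair of block numbers is converted back to blocks before printing. MachineFunction::getBlockNumbered is the standard round trip; a short sketch under the same assumptions (printEdge is a hypothetical name):

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/Support/raw_ostream.h"
    #include <utility>

    // Render a (from, to) pair of block numbers as MIR-style references.
    void printEdge(llvm::raw_ostream &OS, const llvm::MachineFunction &MF,
                   std::pair<unsigned, unsigned> Edge) {
      OS << llvm::printMBBReference(*MF.getBlockNumbered(Edge.first)) << " -> "
         << llvm::printMBBReference(*MF.getBlockNumbered(Edge.second)) << '\n';
    }
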
diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index 4a6100d02fc..652ea13c414 100644
--- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -27,24 +27,24 @@
//
// %40<def> = L2_loadrub_io %39<kill>, 1
// %41<def> = S2_tstbit_i %40<kill>, 0
-// J2_jumpt %41<kill>, <BB#5>, %pc<imp-def,dead>
-// J2_jump <BB#4>, %pc<imp-def,dead>
-// Successors according to CFG: BB#4(62) BB#5(62)
+// J2_jumpt %41<kill>, <%bb.5>, %pc<imp-def,dead>
+// J2_jump <%bb.4>, %pc<imp-def,dead>
+// Successors according to CFG: %bb.4(62) %bb.5(62)
//
-// BB#4: derived from LLVM BB %if.then
-// Predecessors according to CFG: BB#3
+// %bb.4: derived from LLVM BB %if.then
+// Predecessors according to CFG: %bb.3
// %11<def> = A2_addp %6, %10
// S2_storerd_io %32, 16, %11
-// Successors according to CFG: BB#5
+// Successors according to CFG: %bb.5
//
-// BB#5: derived from LLVM BB %if.end
-// Predecessors according to CFG: BB#3 BB#4
-// %12<def> = PHI %6, <BB#3>, %11, <BB#4>
+// %bb.5: derived from LLVM BB %if.end
+// Predecessors according to CFG: %bb.3 %bb.4
+// %12<def> = PHI %6, <%bb.3>, %11, <%bb.4>
// %13<def> = A2_addp %7, %12
// %42<def> = C2_cmpeqi %9, 10
-// J2_jumpf %42<kill>, <BB#3>, %pc<imp-def,dead>
-// J2_jump <BB#6>, %pc<imp-def,dead>
-// Successors according to CFG: BB#6(4) BB#3(124)
+// J2_jumpf %42<kill>, <%bb.3>, %pc<imp-def,dead>
+// J2_jump <%bb.6>, %pc<imp-def,dead>
+// Successors according to CFG: %bb.6(4) %bb.3(124)
//
// would become:
//
@@ -55,9 +55,9 @@
// %46<def> = PS_pselect %41, %6, %11
// %13<def> = A2_addp %7, %46
// %42<def> = C2_cmpeqi %9, 10
-// J2_jumpf %42<kill>, <BB#3>, %pc<imp-def,dead>
-// J2_jump <BB#6>, %pc<imp-def,dead>
-// Successors according to CFG: BB#6 BB#3
+// J2_jumpf %42<kill>, <%bb.3>, %pc<imp-def,dead>
+// J2_jump <%bb.6>, %pc<imp-def,dead>
+// Successors according to CFG: %bb.6 %bb.3
#include "Hexagon.h"
#include "HexagonInstrInfo.h"
@@ -238,7 +238,7 @@ bool HexagonEarlyIfConversion::isPreheader(const MachineBasicBlock *B) const {
bool HexagonEarlyIfConversion::matchFlowPattern(MachineBasicBlock *B,
MachineLoop *L, FlowPattern &FP) {
- DEBUG(dbgs() << "Checking flow pattern at BB#" << B->getNumber() << "\n");
+ DEBUG(dbgs() << "Checking flow pattern at " << printMBBReference(*B) << "\n");
// Interested only in conditional branches, no .new, no new-value, etc.
// Check the terminators directly, it's easier than handling all responses
diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 86645ddf913..78c7c102e7d 100644
--- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -654,7 +654,7 @@ bool HexagonExpandCondsets::split(MachineInstr &MI,
return false;
TfrCounter++;
}
- DEBUG(dbgs() << "\nsplitting BB#" << MI.getParent()->getNumber() << ": "
+ DEBUG(dbgs() << "\nsplitting " << printMBBReference(*MI.getParent()) << ": "
<< MI);
MachineOperand &MD = MI.getOperand(0); // Definition
MachineOperand &MP = MI.getOperand(1); // Predicate register
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index ebb7add82e1..a6a950ea045 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -443,7 +443,7 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
DEBUG({
dbgs() << "Blocks needing SF: {";
for (auto &B : SFBlocks)
- dbgs() << " BB#" << B->getNumber();
+ dbgs() << " " << printMBBReference(*B);
dbgs() << " }\n";
});
// No frame needed?
@@ -464,12 +464,16 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
break;
}
DEBUG({
- dbgs() << "Computed dom block: BB#";
- if (DomB) dbgs() << DomB->getNumber();
- else dbgs() << "<null>";
- dbgs() << ", computed pdom block: BB#";
- if (PDomB) dbgs() << PDomB->getNumber();
- else dbgs() << "<null>";
+ dbgs() << "Computed dom block: ";
+ if (DomB)
+ dbgs() << printMBBReference(*DomB);
+ else
+ dbgs() << "<null>";
+ dbgs() << ", computed pdom block: ";
+ if (PDomB)
+ dbgs() << printMBBReference(*PDomB);
+ else
+ dbgs() << "<null>";
dbgs() << "\n";
});
if (!DomB || !PDomB)
@@ -2010,7 +2014,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
auto P = BlockIndexes.insert(
std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B)));
auto &IndexMap = P.first->second;
- DEBUG(dbgs() << "Index map for BB#" << B.getNumber() << "\n"
+ DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n"
<< IndexMap << '\n');
for (auto &In : B) {
@@ -2129,7 +2133,8 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
else
dbgs() << "<null>\n";
for (auto &R : P.second.Map)
- dbgs() << " BB#" << R.first->getNumber() << " { " << R.second << "}\n";
+ dbgs() << " " << printMBBReference(*R.first) << " { " << R.second
+ << "}\n";
}
});
@@ -2162,7 +2167,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
auto &FIs = P.second;
if (FIs.empty())
continue;
- dbgs() << " BB#" << P.first->getNumber() << ": {";
+ dbgs() << " " << printMBBReference(*P.first) << ": {";
for (auto I : FIs) {
dbgs() << " fi#" << I;
if (LoxFIs.count(I))
@@ -2183,7 +2188,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
HexagonBlockRanges::InstrIndexMap &IM = F->second;
HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM);
HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM);
- DEBUG(dbgs() << "BB#" << B.getNumber() << " dead map\n"
+ DEBUG(dbgs() << printMBBReference(B) << " dead map\n"
<< HexagonBlockRanges::PrintRangeMap(DM, HRI));
for (auto FI : BlockFIMap[&B]) {
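A note on the DEBUG({ ... }) form that several hunks above restructure: DEBUG
also accepts a braced statement block, so multi-statement dumps (loops,
conditionals) stay out of release builds entirely. Distilled from the first
hunk in this file (a sketch, assuming SFBlocks holds MachineBasicBlock
pointers):

    DEBUG({
      dbgs() << "Blocks needing SF: {";
      for (const MachineBasicBlock *B : SFBlocks)
        dbgs() << ' ' << printMBBReference(*B);
      dbgs() << " }\n";
    });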
diff --git a/lib/Target/Hexagon/HexagonGenInsert.cpp b/lib/Target/Hexagon/HexagonGenInsert.cpp
index d1f63699292..99f3a2e9e88 100644
--- a/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -915,7 +915,7 @@ bool HexagonGenInsert::findRecordInsertForms(unsigned VR,
void HexagonGenInsert::collectInBlock(MachineBasicBlock *B,
OrderedRegisterList &AVs) {
if (isDebug())
- dbgs() << "visiting block BB#" << B->getNumber() << "\n";
+ dbgs() << "visiting block " << printMBBReference(*B) << "\n";
// First, check if this block is reachable at all. If not, the bit tracker
// will not have any information about registers in it.
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 5c18cc8732d..b5fa0689d04 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1011,7 +1011,7 @@ bool HexagonHardwareLoops::isInvalidLoopOperation(const MachineInstr *MI,
bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L,
bool IsInnerHWLoop) const {
const std::vector<MachineBasicBlock *> &Blocks = L->getBlocks();
- DEBUG(dbgs() << "\nhw_loop head, BB#" << Blocks[0]->getNumber(););
+ DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0]));
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
MachineBasicBlock *MBB = Blocks[i];
for (MachineBasicBlock::iterator
@@ -1367,7 +1367,7 @@ bool HexagonHardwareLoops::isLoopFeeder(MachineLoop *L, MachineBasicBlock *A,
LoopFeederMap &LoopFeederPhi) const {
if (LoopFeederPhi.find(MO->getReg()) == LoopFeederPhi.end()) {
const std::vector<MachineBasicBlock *> &Blocks = L->getBlocks();
- DEBUG(dbgs() << "\nhw_loop head, BB#" << Blocks[0]->getNumber(););
+ DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0]));
// Ignore all BBs that form Loop.
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
MachineBasicBlock *MBB = Blocks[i];
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 4cdfd09c095..cb00bc770c0 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -463,7 +463,7 @@ bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
Cond.push_back(LastInst->getOperand(1));
return false;
}
- DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber()
+ DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
<< " with one jump\n";);
// Otherwise, don't know what this is.
return true;
@@ -511,7 +511,7 @@ bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
FBB = LastInst->getOperand(0).getMBB();
return false;
}
- DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber()
+ DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
<< " with two jumps";);
// Otherwise, can't handle this.
return true;
@@ -521,7 +521,7 @@ unsigned HexagonInstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
assert(!BytesRemoved && "code size not handled");
- DEBUG(dbgs() << "\nRemoving branches out of BB#" << MBB.getNumber());
+ DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB));
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
while (I != MBB.begin()) {
@@ -593,7 +593,7 @@ unsigned HexagonInstrInfo::insertBranch(MachineBasicBlock &MBB,
// (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
// (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
- DEBUG(dbgs() << "\nInserting NVJump for BB#" << MBB.getNumber(););
+ DEBUG(dbgs() << "\nInserting NVJump for " << printMBBReference(MBB););
if (Cond[2].isReg()) {
unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
@@ -829,9 +829,8 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
#ifndef NDEBUG
// Show the invalid registers to ease debugging.
- dbgs() << "Invalid registers for copy in BB#" << MBB.getNumber()
- << ": " << printReg(DestReg, &HRI)
- << " = " << printReg(SrcReg, &HRI) << '\n';
+ dbgs() << "Invalid registers for copy in " << printMBBReference(MBB) << ": "
+ << printReg(DestReg, &HRI) << " = " << printReg(SrcReg, &HRI) << '\n';
#endif
llvm_unreachable("Unimplemented");
}
@@ -4032,8 +4031,9 @@ void HexagonInstrInfo::immediateExtend(MachineInstr &MI) const {
bool HexagonInstrInfo::invertAndChangeJumpTarget(
MachineInstr &MI, MachineBasicBlock *NewTarget) const {
- DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to BB#"
- << NewTarget->getNumber(); MI.dump(););
+ DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to "
+ << printMBBReference(*NewTarget);
+ MI.dump(););
assert(MI.isBranch());
unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
int TargetPos = MI.getNumOperands() - 1;
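The invertAndChangeJumpTarget hunk above touches only the debug print; the
retargeting it precedes looks roughly like this (a simplified reconstruction
from the surrounding function, not part of the patch):

    // Flip the branch to its inverted predicated opcode, then point the
    // final operand (the jump target) at the new block.
    MI.setDesc(get(getInvertedPredicatedOpcode(MI.getOpcode())));
    int TargetPos = MI.getNumOperands() - 1;
    MI.getOperand(TargetPos).setMBB(NewTarget);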
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 5daceac6496..8765fc98448 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -186,12 +186,10 @@ bool VLIWResourceModel::reserveResources(SUnit *SU) {
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
void VLIWMachineScheduler::schedule() {
- DEBUG(dbgs()
- << "********** MI Converging Scheduling VLIW BB#" << BB->getNumber()
- << " " << BB->getName()
- << " in_func " << BB->getParent()->getFunction()->getName()
- << " at loop depth " << MLI->getLoopDepth(BB)
- << " \n");
+ DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "
+ << printMBBReference(*BB) << " " << BB->getName() << " in_func "
+ << BB->getParent()->getFunction()->getName() << " at loop depth "
+ << MLI->getLoopDepth(BB) << " \n");
buildDAGWithRegPressure();
@@ -237,8 +235,8 @@ void VLIWMachineScheduler::schedule() {
placeDebugValues();
DEBUG({
- unsigned BBNum = begin()->getParent()->getNumber();
- dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
+ dbgs() << "*** Final schedule for "
+ << printMBBReference(*begin()->getParent()) << " ***\n";
dumpSchedule();
dbgs() << '\n';
});
diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp
index f42b6ed9935..d97ed4812f2 100644
--- a/lib/Target/Hexagon/HexagonOptAddrMode.cpp
+++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp
@@ -461,7 +461,7 @@ bool HexagonOptAddrMode::changeAddAsl(NodeAddr<UseNode *> AddAslUN,
DEBUG(dbgs() << "[InstrNode]: " << Print<NodeAddr<InstrNode *>>(UseIA, *DFG)
<< "\n");
MachineInstr *UseMI = UseIA.Addr->getCode();
- DEBUG(dbgs() << "[MI <BB#" << UseMI->getParent()->getNumber()
+ DEBUG(dbgs() << "[MI <" << printMBBReference(*UseMI->getParent())
<< ">]: " << *UseMI << "\n");
const MCInstrDesc &UseMID = UseMI->getDesc();
assert(HII->getAddrMode(*UseMI) == HexagonII::BaseImmOffset);
@@ -570,7 +570,7 @@ bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> BA) {
NodeAddr<StmtNode *> OwnerN = UseN.Addr->getOwner(*DFG);
MachineInstr *UseMI = OwnerN.Addr->getCode();
- DEBUG(dbgs() << "\t\t[MI <BB#" << UseMI->getParent()->getNumber()
+ DEBUG(dbgs() << "\t\t[MI <" << printMBBReference(*UseMI->getParent())
<< ">]: " << *UseMI << "\n");
int UseMOnum = -1;
diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp
index 354bb95e448..7f82a5c4c4d 100644
--- a/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -20,19 +20,18 @@
// ...
// %16<def> = NOT_p %15<kill>
// ...
-// JMP_c %16<kill>, <BB#1>, %pc<imp-def,dead>
+// JMP_c %16<kill>, <%bb.1>, %pc<imp-def,dead>
//
// Into
// %15<def> = CMPGTrr %6, %2;
// ...
-// JMP_cNot %15<kill>, <BB#1>, %pc<imp-def,dead>;
+// JMP_cNot %15<kill>, <%bb.1>, %pc<imp-def,dead>;
//
// Note: The peephole pass makes instructions like
// %170<def> = SXTW %166 or %16<def> = NOT_p %15<kill>
// redundant and relies on some form of dead-code removal, such as DCE or
// DIE, to actually eliminate them.
-
//===----------------------------------------------------------------------===//
#include "Hexagon.h"
diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp
index 75d6750322b..68b5ddd4438 100644
--- a/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -536,7 +536,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L,
Rs.insert(CmpR2);
DEBUG({
- dbgs() << "For loop at BB#" << HB->getNumber() << " ind regs: ";
+ dbgs() << "For loop at " << printMBBReference(*HB) << " ind regs: ";
dump_partition(dbgs(), Rs, *TRI);
dbgs() << '\n';
});
diff --git a/lib/Target/Hexagon/RDFGraph.cpp b/lib/Target/Hexagon/RDFGraph.cpp
index 50ebcd5302c..8513ebd1c76 100644
--- a/lib/Target/Hexagon/RDFGraph.cpp
+++ b/lib/Target/Hexagon/RDFGraph.cpp
@@ -247,7 +247,7 @@ raw_ostream &operator<< (raw_ostream &OS,
if (T != MI.operands_end()) {
OS << ' ';
if (T->isMBB())
- OS << "BB#" << T->getMBB()->getNumber();
+ OS << printMBBReference(*T->getMBB());
else if (T->isGlobal())
OS << T->getGlobal()->getName();
else if (T->isSymbol())
@@ -284,13 +284,13 @@ raw_ostream &operator<< (raw_ostream &OS,
auto PrintBBs = [&OS] (std::vector<int> Ns) -> void {
unsigned N = Ns.size();
for (int I : Ns) {
- OS << "BB#" << I;
+ OS << "%bb." << I;
if (--N)
OS << ", ";
}
};
- OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- BB#" << BB->getNumber()
+ OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB)
<< " --- preds(" << NP << "): ";
for (MachineBasicBlock *B : BB->predecessors())
Ns.push_back(B->getNumber());
@@ -1123,8 +1123,8 @@ void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
if (!Defined.insert(RR.Reg).second) {
MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
dbgs() << "Multiple definitions of register: "
- << Print<RegisterRef>(RR, *this) << " in\n " << *MI
- << "in BB#" << MI->getParent()->getNumber() << '\n';
+ << Print<RegisterRef>(RR, *this) << " in\n " << *MI << "in "
+ << printMBBReference(*MI->getParent()) << '\n';
llvm_unreachable(nullptr);
}
#endif
diff --git a/lib/Target/Hexagon/RDFGraph.h b/lib/Target/Hexagon/RDFGraph.h
index 399b401c5ff..25c4b67230a 100644
--- a/lib/Target/Hexagon/RDFGraph.h
+++ b/lib/Target/Hexagon/RDFGraph.h
@@ -111,7 +111,7 @@
//
// DFG dump:[
// f1: Function foo
-// b2: === BB#0 === preds(0), succs(0):
+// b2: === %bb.0 === preds(0), succs(0):
// p3: phi [d4<r0>(,d12,u9):]
// p5: phi [d6<r1>(,,u10):]
// s7: add [d8<r2>(,,u13):, u9<r0>(d4):, u10<r1>(d6):]
diff --git a/lib/Target/Hexagon/RDFLiveness.cpp b/lib/Target/Hexagon/RDFLiveness.cpp
index 740cd11136b..13d9a174197 100644
--- a/lib/Target/Hexagon/RDFLiveness.cpp
+++ b/lib/Target/Hexagon/RDFLiveness.cpp
@@ -814,7 +814,7 @@ void Liveness::computeLiveIns() {
for (auto I = B.livein_begin(), E = B.livein_end(); I != E; ++I)
LV.push_back(RegisterRef(I->PhysReg, I->LaneMask));
std::sort(LV.begin(), LV.end());
- dbgs() << "BB#" << B.getNumber() << "\t rec = {";
+ dbgs() << printMBBReference(B) << "\t rec = {";
for (auto I : LV)
dbgs() << ' ' << Print<RegisterRef>(I, DFG);
dbgs() << " }\n";
@@ -963,7 +963,7 @@ void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
}
if (Trace) {
- dbgs() << "\n-- BB#" << B->getNumber() << ": " << __func__
+ dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__
<< " after recursion into: {";
for (auto I : *N)
dbgs() << ' ' << I->getBlock()->getNumber();
diff --git a/lib/Target/MSP430/MSP430BranchSelector.cpp b/lib/Target/MSP430/MSP430BranchSelector.cpp
index 424b5ae418f..87c320aa76a 100644
--- a/lib/Target/MSP430/MSP430BranchSelector.cpp
+++ b/lib/Target/MSP430/MSP430BranchSelector.cpp
@@ -138,15 +138,15 @@ bool MSP430BSel::expandBranches(OffsetVector &BlockOffsets) {
continue;
}
- DEBUG(dbgs() << " Found a branch that needs expanding, BB#"
- << DestBB->getNumber() << ", Distance " << BranchDistance
- << "\n");
+ DEBUG(dbgs() << " Found a branch that needs expanding, "
+ << printMBBReference(*DestBB) << ", Distance "
+ << BranchDistance << "\n");
// If JCC is not the last instruction we need to split the MBB.
if (MI->getOpcode() == MSP430::JCC && std::next(MI) != EE) {
- DEBUG(dbgs() << " Found a basic block that needs to be split, BB#"
- << MBB->getNumber() << "\n");
+ DEBUG(dbgs() << " Found a basic block that needs to be split, "
+ << printMBBReference(*MBB) << "\n");
// Create a new basic block.
MachineBasicBlock *NewBB =
diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp
index 257e8f45a70..4dad98b80ed 100644
--- a/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -430,7 +430,7 @@ bool MipsConstantIslands::isOffsetInRange
LLVM_DUMP_METHOD void MipsConstantIslands::dumpBBs() {
for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
const BasicBlockInfo &BBI = BBInfo[J];
- dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
+ dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
<< format(" size=%#x\n", BBInfo[J].Size);
}
}
@@ -991,11 +991,11 @@ bool MipsConstantIslands::isCPEntryInRange
const BasicBlockInfo &BBI = BBInfo[Block];
dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
<< " max delta=" << MaxDisp
- << format(" insn address=%#x", UserOffset)
- << " in BB#" << Block << ": "
+ << format(" insn address=%#x", UserOffset) << " in "
+ << printMBBReference(*MI->getParent()) << ": "
<< format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
<< format("CPE address=%#x offset=%+d: ", CPEOffset,
- int(CPEOffset-UserOffset));
+ int(CPEOffset - UserOffset));
});
}
@@ -1197,7 +1197,7 @@ bool MipsConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
// This is the least amount of required padding seen so far.
BestGrowth = Growth;
WaterIter = IP;
- DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
+ DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
<< " Growth=" << Growth << '\n');
// Keep looking unless it is perfect.
@@ -1236,8 +1236,8 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
- DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
- << format(", expected CPE offset %#x\n", CPEOffset));
+ DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
+ << format(", expected CPE offset %#x\n", CPEOffset));
NewMBB = &*++UserMBB->getIterator();
// Add an unconditional branch from UserMBB to fallthrough block. Record
// it for branch lengthening; this new branch will not get out of range,
@@ -1470,11 +1470,11 @@ bool MipsConstantIslands::isBBInRange
unsigned BrOffset = getOffsetOf(MI) + PCAdj;
unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
- DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
- << " from BB#" << MI->getParent()->getNumber()
- << " max delta=" << MaxDisp
- << " from " << getOffsetOf(MI) << " to " << DestOffset
- << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
+ DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
+ << " from " << printMBBReference(*MI->getParent())
+ << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
+ << " to " << DestOffset << " offset "
+ << int(DestOffset - BrOffset) << "\t" << *MI);
if (BrOffset <= DestOffset) {
// Branch before the Dest.
@@ -1615,9 +1615,9 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
}
MachineBasicBlock *NextBB = &*++MBB->getIterator();
- DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
- << " also invert condition and change dest. to BB#"
- << NextBB->getNumber() << "\n");
+ DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
+ << " also invert condition and change dest. to "
+ << printMBBReference(*NextBB) << "\n");
// Insert a new conditional branch and a new unconditional branch.
// Also update the ImmBranch as well as adding a new entry for the new branch.
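For orientation, the displacement arithmetic printed by the isBBInRange hunk
above reduces to a symmetric range check; a sketch using the offsets shown in
the hunk:

    // A branch is in range when the distance to its destination, in either
    // direction, does not exceed the maximum encodable displacement.
    bool InRange = (BrOffset <= DestOffset)
                       ? (DestOffset - BrOffset <= MaxDisp)
                       : (BrOffset - DestOffset <= MaxDisp);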
diff --git a/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index 4c101f58601..cd078972307 100644
--- a/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -59,45 +59,45 @@ namespace llvm {
///
/// expands to the following machine code:
///
-/// BB#0: derived from LLVM BB %entry
+/// %bb.0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
/// %0<def> = COPY %f1; F8RC:%0
/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
-/// BCC 76, %5, <BB#2>; CRRC:%5
-/// Successors according to CFG: BB#1(?%) BB#2(?%)
+/// BCC 76, %5, <%bb.2>; CRRC:%5
+/// Successors according to CFG: %bb.1(?%) %bb.2(?%)
///
-/// BB#1: derived from LLVM BB %entry
-/// Predecessors according to CFG: BB#0
-/// Successors according to CFG: BB#2(?%)
+/// %bb.1: derived from LLVM BB %entry
+/// Predecessors according to CFG: %bb.0
+/// Successors according to CFG: %bb.2(?%)
///
-/// BB#2: derived from LLVM BB %entry
-/// Predecessors according to CFG: BB#0 BB#1
-/// %9<def> = PHI %8, <BB#1>, %0, <BB#0>;
+/// %bb.2: derived from LLVM BB %entry
+/// Predecessors according to CFG: %bb.0 %bb.1
+/// %9<def> = PHI %8, <%bb.1>, %0, <%bb.0>;
/// F8RC:%9,%8,%0
/// <SNIP2>
-/// BCC 76, %5, <BB#4>; CRRC:%5
-/// Successors according to CFG: BB#3(?%) BB#4(?%)
+/// BCC 76, %5, <%bb.4>; CRRC:%5
+/// Successors according to CFG: %bb.3(?%) %bb.4(?%)
///
-/// BB#3: derived from LLVM BB %entry
-/// Predecessors according to CFG: BB#2
-/// Successors according to CFG: BB#4(?%)
+/// %bb.3: derived from LLVM BB %entry
+/// Predecessors according to CFG: %bb.2
+/// Successors according to CFG: %bb.4(?%)
///
-/// BB#4: derived from LLVM BB %entry
-/// Predecessors according to CFG: BB#2 BB#3
-/// %13<def> = PHI %12, <BB#3>, %2, <BB#2>;
+/// %bb.4: derived from LLVM BB %entry
+/// Predecessors according to CFG: %bb.2 %bb.3
+/// %13<def> = PHI %12, <%bb.3>, %2, <%bb.2>;
/// F8RC:%13,%12,%2
/// <SNIP3>
/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
///
/// When this pattern is detected, branch coalescing will try to collapse
-/// it by moving code in BB#2 to BB#0 and/or BB#4 and removing BB#3.
+/// it by moving code in %bb.2 to %bb.0 and/or %bb.4 and removing %bb.3.
///
/// If all conditions are met, the IR should collapse to:
///
-/// BB#0: derived from LLVM BB %entry
+/// %bb.0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
/// %0<def> = COPY %f1; F8RC:%0
@@ -105,19 +105,19 @@ namespace llvm {
/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
/// <SNIP2>
-/// BCC 76, %5, <BB#4>; CRRC:%5
-/// Successors according to CFG: BB#1(0x2aaaaaaa / 0x80000000 = 33.33%)
-/// BB#4(0x55555554 / 0x80000000 = 66.67%)
+/// BCC 76, %5, <%bb.4>; CRRC:%5
+/// Successors according to CFG: %bb.1(0x2aaaaaaa / 0x80000000 = 33.33%)
+/// %bb.4(0x55555554 / 0x80000000 = 66.67%)
///
-/// BB#1: derived from LLVM BB %entry
-/// Predecessors according to CFG: BB#0
-/// Successors according to CFG: BB#4(0x40000000 / 0x80000000 = 50.00%)
+/// %bb.1: derived from LLVM BB %entry
+/// Predecessors according to CFG: %bb.0
+/// Successors according to CFG: %bb.4(0x40000000 / 0x80000000 = 50.00%)
///
-/// BB#4: derived from LLVM BB %entry
-/// Predecessors according to CFG: BB#0 BB#1
-/// %9<def> = PHI %8, <BB#1>, %0, <BB#0>;
+/// %bb.4: derived from LLVM BB %entry
+/// Predecessors according to CFG: %bb.0 %bb.1
+/// %9<def> = PHI %8, <%bb.1>, %0, <%bb.0>;
/// F8RC:%9,%8,%0
-/// %13<def> = PHI %12, <BB#1>, %2, <BB#0>;
+/// %13<def> = PHI %12, <%bb.1>, %2, <%bb.0>;
/// F8RC:%13,%12,%2
/// <SNIP3>
/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
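The before/after machine code in the comment above amounts to two back-to-back
diamonds guarded by the same compare (%5), which is what makes them
coalescable. A purely illustrative source-level analogue; the function names
are hypothetical:

    double k1(); // stands in for the LXSDX constant-pool load
    double k2();

    double g(double f1, double f3, bool c) {
      double x = c ? f1 : k1(); // first diamond: %bb.0 -> %bb.1 / %bb.2
      double y = c ? f3 : k2(); // second diamond: %bb.2 -> %bb.3 / %bb.4
      return x + y;             // after coalescing, both PHIs live in %bb.4
    }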
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index 8784a831902..fc638829378 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -690,12 +690,11 @@ check_block:
}
if (I != BI && clobbersCTR(*I)) {
- DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
- MBB->getFullName() << ") instruction " << *I <<
- " clobbers CTR, invalidating " << "BB#" <<
- BI->getParent()->getNumber() << " (" <<
- BI->getParent()->getFullName() << ") instruction " <<
- *BI << "\n");
+ DEBUG(dbgs() << printMBBReference(*MBB) << " (" << MBB->getFullName()
+ << ") instruction " << *I << " clobbers CTR, invalidating "
+ << printMBBReference(*BI->getParent()) << " ("
+ << BI->getParent()->getFullName() << ") instruction " << *BI
+ << "\n");
return false;
}
@@ -709,10 +708,10 @@ check_block:
if (CheckPreds) {
queue_preds:
if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
- DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
- BI->getParent()->getNumber() << " (" <<
- BI->getParent()->getFullName() << ") instruction " <<
- *BI << "\n");
+ DEBUG(dbgs() << "Unable to find a MTCTR instruction for "
+ << printMBBReference(*BI->getParent()) << " ("
+ << BI->getParent()->getFullName() << ") instruction " << *BI
+ << "\n");
return false;
}
diff --git a/lib/Target/PowerPC/PPCExpandISEL.cpp b/lib/Target/PowerPC/PPCExpandISEL.cpp
index 41e3190c3ee..dfd2b9bfd05 100644
--- a/lib/Target/PowerPC/PPCExpandISEL.cpp
+++ b/lib/Target/PowerPC/PPCExpandISEL.cpp
@@ -171,7 +171,7 @@ bool PPCExpandISEL::collectISELInstructions() {
#ifndef NDEBUG
void PPCExpandISEL::DumpISELInstructions() const {
for (const auto &I : ISELInstructions) {
- DEBUG(dbgs() << "BB#" << I.first << ":\n");
+ DEBUG(dbgs() << printMBBReference(*MF->getBlockNumbered(I.first)) << ":\n");
for (const auto &VI : I.second)
DEBUG(dbgs() << " "; VI->print(dbgs()));
}
@@ -191,7 +191,11 @@ bool PPCExpandISEL::canMerge(MachineInstr *PrevPushedMI, MachineInstr *MI) {
void PPCExpandISEL::expandAndMergeISELs() {
for (auto &BlockList : ISELInstructions) {
- DEBUG(dbgs() << "Expanding ISEL instructions in BB#" << BlockList.first
+
+ DEBUG(dbgs() << printMBBReference(*MF->getBlockNumbered(BlockList.first))
+ << ":\n");
+ DEBUG(dbgs() << "Expanding ISEL instructions in "
+ << printMBBReference(*MF->getBlockNumbered(BlockList.first))
<< "\n");
BlockISELList &CurrentISELList = BlockList.second;
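Both PPCExpandISEL hunks above hinge on the same step: ISELInstructions is
keyed by block number, so the MachineBasicBlock must be recovered before
printMBBReference can apply. The core of that pattern, as used in the hunks:

    // Map a stored block number back to its block for printing.
    const MachineBasicBlock *MBB = MF->getBlockNumbered(BlockList.first);
    DEBUG(dbgs() << "Expanding ISEL instructions in "
                 << printMBBReference(*MBB) << "\n");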
diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp
index 1ac7afe2cdc..c6fcea7c956 100644
--- a/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -686,7 +686,7 @@ bool PPCMIPeephole::simplifyCode(void) {
DEBUG(LiMI->dump());
// There could be repeated registers in the PHI, e.g: %1<def> =
- // PHI %6, <BB#2>, %8, <BB#3>, %8, <BB#6>; So if we've
+ // PHI %6, <%bb.2>, %8, <%bb.3>, %8, <%bb.6>; So if we've
// already replaced the def instruction, skip.
if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8)
continue;
@@ -1209,8 +1209,9 @@ bool PPCMIPeephole::eliminateRedundantCompare(void) {
DEBUG(BI1->dump());
DEBUG(BI2->dump());
if (IsPartiallyRedundant) {
- DEBUG(dbgs() << "The following compare is moved into BB#" <<
- MBBtoMoveCmp->getNumber() << " to handle partial redundancy.\n");
+ DEBUG(dbgs() << "The following compare is moved into "
+ << printMBBReference(*MBBtoMoveCmp)
+ << " to handle partial redundancy.\n");
DEBUG(CMPI2->dump());
}
diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index c51368d6d2a..0320ecaf853 100644
--- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -966,7 +966,7 @@ LLVM_DUMP_METHOD void PPCVSXSwapRemoval::dumpSwapVector() {
dbgs() << format("%6d", ID);
dbgs() << format("%6d", EC->getLeaderValue(ID));
- dbgs() << format(" BB#%3d", MI->getParent()->getNumber());
+ dbgs() << format(" %bb.%3d", MI->getParent()->getNumber());
dbgs() << format(" %14s ", TII->getName(MI->getOpcode()).str().c_str());
if (SwapVector[EntryIdx].IsLoad)
diff --git a/lib/Target/PowerPC/README.txt b/lib/Target/PowerPC/README.txt
index bc09d5f8a7e..b4bf635dc2c 100644
--- a/lib/Target/PowerPC/README.txt
+++ b/lib/Target/PowerPC/README.txt
@@ -256,7 +256,7 @@ _clamp0g:
cmpwi cr0, r3, 0
li r2, 0
blt cr0, LBB1_2
-; BB#1: ; %entry
+; %bb.1: ; %entry
mr r2, r3
LBB1_2: ; %entry
mr r3, r2
diff --git a/lib/Target/PowerPC/README_ALTIVEC.txt b/lib/Target/PowerPC/README_ALTIVEC.txt
index f70ebd82bd5..c38e0192316 100644
--- a/lib/Target/PowerPC/README_ALTIVEC.txt
+++ b/lib/Target/PowerPC/README_ALTIVEC.txt
@@ -233,7 +233,7 @@ declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1
Produces the following code with -mtriple=powerpc64-unknown-linux-gnu:
-# BB#0: # %entry
+# %bb.0: # %entry
addis 3, 2, .LCPI0_0@toc@ha
addis 4, 2, .LCPI0_1@toc@ha
addi 3, 3, .LCPI0_0@toc@l
diff --git a/lib/Target/README.txt b/lib/Target/README.txt
index f0fd323bb58..563aee9e1a7 100644
--- a/lib/Target/README.txt
+++ b/lib/Target/README.txt
@@ -1778,7 +1778,7 @@ We do get this at the codegen level, so something knows about it, but
instcombine should catch it earlier:
_foo: ## @foo
-## BB#0: ## %entry
+## %bb.0: ## %entry
movl %edi, %eax
sarl $4, %eax
ret
@@ -2234,13 +2234,13 @@ void foo(funcs f, int which) {
which we compile to:
foo: # @foo
-# BB#0: # %entry
+# %bb.0: # %entry
pushq %rbp
movq %rsp, %rbp
testl %esi, %esi
movq %rdi, %rax
je .LBB0_2
-# BB#1: # %if.then
+# %bb.1: # %if.then
movl $5, %edi
callq *%rax
popq %rbp
diff --git a/lib/Target/SystemZ/SystemZMachineScheduler.cpp b/lib/Target/SystemZ/SystemZMachineScheduler.cpp
index 4b0f9256763..08eb73fc362 100644
--- a/lib/Target/SystemZ/SystemZMachineScheduler.cpp
+++ b/lib/Target/SystemZ/SystemZMachineScheduler.cpp
@@ -74,7 +74,7 @@ advanceTo(MachineBasicBlock::iterator NextBegin) {
void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) {
assert ((SchedStates.find(NextMBB) == SchedStates.end()) &&
"Entering MBB twice?");
- DEBUG (dbgs() << "+++ Entering MBB#" << NextMBB->getNumber());
+ DEBUG(dbgs() << "+++ Entering " << printMBBReference(*NextMBB));
MBB = NextMBB;
/// Create a HazardRec for MBB, save it in SchedStates and set HazardRec to
@@ -93,8 +93,8 @@ void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) {
SchedStates.find(SinglePredMBB) == SchedStates.end())
return;
- DEBUG (dbgs() << "+++ Continued scheduling from MBB#"
- << SinglePredMBB->getNumber() << "\n";);
+ DEBUG(dbgs() << "+++ Continued scheduling from "
+ << printMBBReference(*SinglePredMBB) << "\n";);
HazardRec->copyState(SchedStates[SinglePredMBB]);
@@ -113,7 +113,7 @@ void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) {
}
void SystemZPostRASchedStrategy::leaveMBB() {
- DEBUG (dbgs() << "+++ Leaving MBB#" << MBB->getNumber() << "\n";);
+ DEBUG(dbgs() << "+++ Leaving " << printMBBReference(*MBB) << "\n";);
// Advance to first terminator. The successor block will handle terminators
// dependent on CFG layout (T/NT branch etc).
diff --git a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
index 41f315c2825..88daea7e368 100644
--- a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
@@ -205,8 +205,7 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF,
continue;
unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1;
- DEBUG(dbgs() << "MBB#" << MBB->getNumber() << " has index " << Index
- << "\n");
+ DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index << "\n");
Pair.first->second = Index;
for (auto Pred : MBB->predecessors())
diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt
index 799157c926e..11652af9f1f 100644
--- a/lib/Target/X86/README.txt
+++ b/lib/Target/X86/README.txt
@@ -987,11 +987,11 @@ bb7: ; preds = %entry
to:
foo: # @foo
-# BB#0: # %entry
+# %bb.0: # %entry
movl 4(%esp), %ecx
cmpb $0, 16(%esp)
je .LBB0_2
-# BB#1: # %bb
+# %bb.1: # %bb
movl 8(%esp), %eax
addl %ecx, %eax
ret
@@ -1073,7 +1073,7 @@ declare void @exit(i32) noreturn nounwind
This compiles into:
_abort_gzip: ## @abort_gzip
-## BB#0: ## %entry
+## %bb.0: ## %entry
subl $12, %esp
movb _in_exit.4870.b, %al
cmpb $1, %al
@@ -1396,7 +1396,7 @@ define i32 @bar(%struct.B* nocapture %a) nounwind readonly optsize {
}
bar: # @bar
-# BB#0:
+# %bb.0:
movb (%rdi), %al
andb $1, %al
movzbl %al, %eax
@@ -1633,7 +1633,7 @@ In the real code, we get a lot more wrong than this. However, even in this
code we generate:
_foo: ## @foo
-## BB#0: ## %entry
+## %bb.0: ## %entry
movb (%rsi), %al
movb (%rdi), %cl
cmpb %al, %cl
@@ -1646,12 +1646,12 @@ LBB0_2: ## %if.end
movb 1(%rdi), %cl
cmpb %al, %cl
jne LBB0_1
-## BB#3: ## %if.end38
+## %bb.3: ## %if.end38
movb 2(%rsi), %al
movb 2(%rdi), %cl
cmpb %al, %cl
jne LBB0_1
-## BB#4: ## %if.end60
+## %bb.4: ## %if.end60
movb 3(%rdi), %al
cmpb 3(%rsi), %al
LBB0_5: ## %if.end60
diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp
index ce559323efc..2e39cb0d797 100644
--- a/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/lib/Target/X86/X86FixupBWInsts.cpp
@@ -188,16 +188,17 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
/// necessary (e.g. due to register coalescing with a "truncate" copy).
/// So, it handles pattern like this:
///
-/// BB#2: derived from LLVM BB %if.then
+/// %bb.2: derived from LLVM BB %if.then
/// Live Ins: %rdi
-/// Predecessors according to CFG: BB#0
-/// %ax<def> = MOV16rm %rdi<kill>, 1, %noreg, 0, %noreg, %eax<imp-def>; mem:LD2[%p]
+/// Predecessors according to CFG: %bb.0
+/// %ax<def> = MOV16rm %rdi<kill>, 1, %noreg, 0, %noreg, %eax<imp-def>;
+/// mem:LD2[%p]
/// No %eax<imp-use>
-/// Successors according to CFG: BB#3(?%)
+/// Successors according to CFG: %bb.3(?%)
///
-/// BB#3: derived from LLVM BB %if.end
+/// %bb.3: derived from LLVM BB %if.end
/// Live Ins: %eax Only %ax is actually live
-/// Predecessors according to CFG: BB#2 BB#1
+/// Predecessors according to CFG: %bb.2 %bb.1
/// %ax<def> = KILL %ax, %eax<imp-use,kill>
/// RET 0, %ax
static bool isLive(const MachineInstr &MI,
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 6db02f0bd05..b73a08846e9 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -499,7 +499,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
/// setupBlockStack - Use the live bundles to set up our model of the stack
/// to match predecessors' live out stack.
void FPS::setupBlockStack() {
- DEBUG(dbgs() << "\nSetting up live-ins for BB#" << MBB->getNumber()
+ DEBUG(dbgs() << "\nSetting up live-ins for " << printMBBReference(*MBB)
<< " derived from " << MBB->getName() << ".\n");
StackTop = 0;
// Get the live-in bundle for MBB.
@@ -538,7 +538,7 @@ void FPS::finishBlockStack() {
if (MBB->succ_empty())
return;
- DEBUG(dbgs() << "Setting up live-outs for BB#" << MBB->getNumber()
+ DEBUG(dbgs() << "Setting up live-outs for " << printMBBReference(*MBB)
<< " derived from " << MBB->getName() << ".\n");
// Get MBB's live-out bundle.
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 7362bd817cc..e7868327975 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -58,19 +58,19 @@ define void @allocai64() {
; CHECK: body:
;
; ABI/constant lowering and IR-level entry basic block.
-; CHECK: {{bb.[0-9]+}}.entry:
+; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
;
; Make sure we have one successor and only one.
-; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+.bb2]](0x80000000)
+; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+]](0x80000000)
;
; Check that we emit the correct branch.
; CHECK: G_BR %[[BB2]]
;
; Check that end contains the return instruction.
-; CHECK: [[END:bb.[0-9]+.end]]:
+; CHECK: [[END:bb.[0-9]+]].{{[a-zA-Z0-9.]+}}:
; CHECK-NEXT: RET_ReallyLR
;
-; CHECK: {{bb.[0-9]+}}.bb2:
+; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
; CHECK-NEXT: successors: %[[END]](0x80000000)
; CHECK: G_BR %[[END]]
define void @uncondbr() {
@@ -84,11 +84,11 @@ bb2:
; CHECK-LABEL: name: uncondbr_fallthrough
; CHECK: body:
-; CHECK: {{bb.[0-9]+}}.entry:
-; CHECK-NEXT: successors: %[[END:bb.[0-9]+.end]](0x80000000)
+; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %[[END:bb.[0-9]+]](0x80000000)
; We don't emit a branch here, as we can fall through to the successor.
; CHECK-NOT: G_BR
-; CHECK: [[END]]:
+; CHECK: [[END]].{{[a-zA-Z0-9.]+}}:
; CHECK-NEXT: RET_ReallyLR
define void @uncondbr_fallthrough() {
entry:
@@ -102,10 +102,10 @@ end:
; CHECK: body:
;
; ABI/constant lowering and IR-level entry basic block.
-; CHECK: {{bb.[0-9]+}} (%ir-block.{{[0-9]+}}):
+; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}):
; Make sure we have two successors
-; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+.true]](0x40000000),
-; CHECK: %[[FALSE:bb.[0-9]+.false]](0x40000000)
+; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+]](0x40000000),
+; CHECK: %[[FALSE:bb.[0-9]+]](0x40000000)
;
; CHECK: [[ADDR:%.*]]:_(p0) = COPY %x0
;
@@ -115,9 +115,9 @@ end:
; CHECK: G_BR %[[FALSE]]
;
; Check that each successor contains the return instruction.
-; CHECK: [[TRUE]]:
+; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}:
; CHECK-NEXT: RET_ReallyLR
-; CHECK: [[FALSE]]:
+; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}:
; CHECK-NEXT: RET_ReallyLR
define void @condbr(i1* %tstaddr) {
%tst = load i1, i1* %tstaddr
@@ -133,8 +133,8 @@ false:
; CHECK-LABEL: name: switch
; CHECK: body:
;
-; CHECK: {{bb.[0-9]+.entry}}:
-; CHECK-NEXT: successors: %[[BB_CASE100:bb.[0-9]+.case100]](0x40000000), %[[BB_NOTCASE100_CHECKNEXT:bb.[0-9]+.entry]](0x40000000)
+; CHECK: bb.{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %[[BB_CASE100:bb.[0-9]+]](0x40000000), %[[BB_NOTCASE100_CHECKNEXT:bb.[0-9]+]](0x40000000)
; CHECK: %0:_(s32) = COPY %w0
; CHECK: %[[reg100:[0-9]+]]:_(s32) = G_CONSTANT i32 100
; CHECK: %[[reg200:[0-9]+]]:_(s32) = G_CONSTANT i32 200
@@ -145,31 +145,31 @@ false:
; CHECK: G_BRCOND %[[regicmp100]](s1), %[[BB_CASE100]]
; CHECK: G_BR %[[BB_NOTCASE100_CHECKNEXT]]
;
-; CHECK: [[BB_NOTCASE100_CHECKNEXT]]:
-; CHECK-NEXT: successors: %[[BB_CASE200:bb.[0-9]+.case200]](0x40000000), %[[BB_NOTCASE200_CHECKNEXT:bb.[0-9]+.entry]](0x40000000)
+; CHECK: [[BB_NOTCASE100_CHECKNEXT]].{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %[[BB_CASE200:bb.[0-9]+]](0x40000000), %[[BB_NOTCASE200_CHECKNEXT:bb.[0-9]+]](0x40000000)
; CHECK: %[[regicmp200:[0-9]+]]:_(s1) = G_ICMP intpred(eq), %[[reg200]](s32), %0
; CHECK: G_BRCOND %[[regicmp200]](s1), %[[BB_CASE200]]
; CHECK: G_BR %[[BB_NOTCASE200_CHECKNEXT]]
;
-; CHECK: [[BB_NOTCASE200_CHECKNEXT]]:
-; CHECK-NEXT: successors: %[[BB_DEFAULT:bb.[0-9]+.default]](0x80000000)
+; CHECK: [[BB_NOTCASE200_CHECKNEXT]].{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %[[BB_DEFAULT:bb.[0-9]+]](0x80000000)
; CHECK: G_BR %[[BB_DEFAULT]]
;
-; CHECK: [[BB_DEFAULT]]:
-; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000)
+; CHECK: [[BB_DEFAULT]].{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+]](0x80000000)
; CHECK: %[[regretdefault:[0-9]+]]:_(s32) = G_ADD %0, %[[reg0]]
; CHECK: G_BR %[[BB_RET]]
;
-; CHECK: [[BB_CASE100]]:
-; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000)
+; CHECK: [[BB_CASE100]].{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+]](0x80000000)
; CHECK: %[[regretc100:[0-9]+]]:_(s32) = G_ADD %0, %[[reg1]]
; CHECK: G_BR %[[BB_RET]]
;
-; CHECK: [[BB_CASE200]]:
+; CHECK: [[BB_CASE200]].{{[a-zA-Z0-9.]+}}:
; CHECK-NEXT: successors: %[[BB_RET]](0x80000000)
; CHECK: %[[regretc200:[0-9]+]]:_(s32) = G_ADD %0, %[[reg2]]
;
-; CHECK: [[BB_RET]]:
+; CHECK: [[BB_RET]].{{[a-zA-Z0-9.]+}}:
; CHECK-NEXT: %[[regret:[0-9]+]]:_(s32) = G_PHI %[[regretdefault]](s32), %[[BB_DEFAULT]], %[[regretc100]](s32), %[[BB_CASE100]]
; CHECK: %w0 = COPY %[[regret]](s32)
; CHECK: RET_ReallyLR implicit %w0
@@ -202,16 +202,16 @@ return:
; %entry block is no longer a predecessor for the phi instruction. We need to
; use the correct lowered MachineBasicBlock instead.
; CHECK-LABEL: name: test_cfg_remap
-; CHECK: {{bb.[0-9]+.entry}}:
-; CHECK-NEXT: successors: %{{bb.[0-9]+.next}}(0x40000000), %[[NOTCASE1_BLOCK:bb.[0-9]+.entry]](0x40000000)
-; CHECK: [[NOTCASE1_BLOCK]]:
-; CHECK-NEXT: successors: %{{bb.[0-9]+.other}}(0x40000000), %[[NOTCASE57_BLOCK:bb.[0-9]+.entry]](0x40000000)
-; CHECK: [[NOTCASE57_BLOCK]]:
-; CHECK-NEXT: successors: %[[PHI_BLOCK:bb.[0-9]+.phi.block]](0x80000000)
+; CHECK: bb.{{[0-9]+.[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %{{bb.[0-9]+}}(0x40000000), %[[NOTCASE1_BLOCK:bb.[0-9]+]](0x40000000)
+; CHECK: [[NOTCASE1_BLOCK]].{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %{{bb.[0-9]+}}(0x40000000), %[[NOTCASE57_BLOCK:bb.[0-9]+]](0x40000000)
+; CHECK: [[NOTCASE57_BLOCK]].{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: successors: %[[PHI_BLOCK:bb.[0-9]+]](0x80000000)
; CHECK: G_BR %[[PHI_BLOCK]]
;
-; CHECK: [[PHI_BLOCK]]:
-; CHECK-NEXT: G_PHI %{{.*}}(s32), %[[NOTCASE57_BLOCK:bb.[0-9]+.entry]], %{{.*}}(s32),
+; CHECK: [[PHI_BLOCK]].{{[a-zA-Z0-9.]+}}:
+; CHECK-NEXT: G_PHI %{{.*}}(s32), %[[NOTCASE57_BLOCK:bb.[0-9]+]], %{{.*}}(s32),
;
define i32 @test_cfg_remap(i32 %in) {
entry:
@@ -230,7 +230,7 @@ phi.block:
}
; CHECK-LABEL: name: test_cfg_remap_multiple_preds
-; CHECK: G_PHI [[ENTRY:%.*]](s32), %bb.{{[0-9]+}}.entry, [[ENTRY]](s32), %bb.{{[0-9]+}}.entry
+; CHECK: G_PHI [[ENTRY:%.*]](s32), %bb.{{[0-9]+}}, [[ENTRY]](s32), %bb.{{[0-9]+}}
define i32 @test_cfg_remap_multiple_preds(i32 %in) {
entry:
switch i32 %in, label %odd [i32 1, label %next
@@ -256,19 +256,19 @@ phi.block:
; CHECK: body:
;
; ABI/constant lowering and IR-level entry basic block.
-; CHECK: {{bb.[0-9]+.entry}}:
+; CHECK: bb.{{[0-9]+.[a-zA-Z0-9.]+}}:
; Make sure we have one successor
-; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+.L1]](0x80000000)
+; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+]](0x80000000)
; CHECK-NOT: G_BR
;
; Check basic block L1 has 2 successors: BBL1 and BBL2
-; CHECK: [[BB_L1]] (address-taken):
+; CHECK: [[BB_L1]].{{[a-zA-Z0-9.]+}} (address-taken):
; CHECK-NEXT: successors: %[[BB_L1]](0x40000000),
-; CHECK: %[[BB_L2:bb.[0-9]+.L2]](0x40000000)
+; CHECK: %[[BB_L2:bb.[0-9]+]](0x40000000)
; CHECK: G_BRINDIRECT %{{[0-9]+}}(p0)
;
; Check basic block L2 is the return basic block
-; CHECK: [[BB_L2]] (address-taken):
+; CHECK: [[BB_L2]].{{[a-zA-Z0-9.]+}} (address-taken):
; CHECK-NEXT: RET_ReallyLR
@indirectbr.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@indirectbr, %L1), i8* blockaddress(@indirectbr, %L2), i8* null], align 8
@@ -410,11 +410,11 @@ define i64* @trivial_bitcast(i8* %a) {
; CHECK-LABEL: name: trivial_bitcast_with_copy
; CHECK: [[A:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: G_BR %[[CAST:bb\.[0-9]+.cast]]
+; CHECK: G_BR %[[CAST:bb\.[0-9]+]]
-; CHECK: [[END:bb\.[0-9]+.end]]:
+; CHECK: [[END:bb\.[0-9]+]].{{[a-zA-Z0-9.]+}}:
-; CHECK: [[CAST]]:
+; CHECK: [[CAST]].{{[a-zA-Z0-9.]+}}:
; CHECK: {{%[0-9]+}}:_(p0) = COPY [[A]]
; CHECK: G_BR %[[END]]
define i64* @trivial_bitcast_with_copy(i8* %a) {
@@ -512,13 +512,13 @@ define void @intrinsics(i32 %cur, i32 %bits) {
}
; CHECK-LABEL: name: test_phi
-; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+.true]]
-; CHECK: G_BR %[[FALSE:bb\.[0-9]+.false]]
+; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+]]
+; CHECK: G_BR %[[FALSE:bb\.[0-9]+]]
-; CHECK: [[TRUE]]:
+; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}:
; CHECK: [[RES1:%[0-9]+]]:_(s32) = G_LOAD
-; CHECK: [[FALSE]]:
+; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}:
; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_LOAD
; CHECK: [[RES:%[0-9]+]]:_(s32) = G_PHI [[RES1]](s32), %[[TRUE]], [[RES2]](s32), %[[FALSE]]
@@ -554,7 +554,7 @@ define void @unreachable(i32 %a) {
; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-; CHECK: {{bb.[0-9]+}}.next:
+; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
; CHECK: [[SUM1:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
; CHECK: [[SUM2:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ADD [[SUM1]], [[SUM2]]
@@ -1226,7 +1226,7 @@ define i8* @test_const_placement() {
; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}):
; CHECK: [[VAL_INT:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
; CHECK: [[VAL:%[0-9]+]]:_(p0) = G_INTTOPTR [[VAL_INT]](s32)
-; CHECK: {{bb.[0-9]+}}.next:
+; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
br label %next
next:
diff --git a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
index 0e7fbd32c6f..827fdd26108 100644
--- a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
+++ b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.eh.typeid.for(i8*)
; CHECK-LABEL: name: bar
; CHECK: body:
; CHECK-NEXT: bb.1 (%ir-block.0):
-; CHECK: successors: %[[GOOD:bb.[0-9]+.continue]]{{.*}}%[[BAD:bb.[0-9]+.broken]]
+; CHECK: successors: %[[GOOD:bb.[0-9]+]]{{.*}}%[[BAD:bb.[0-9]+]]
; CHECK: EH_LABEL
; CHECK: %w0 = COPY
; CHECK: BL @foo, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit-def %w0
@@ -17,7 +17,7 @@ declare i32 @llvm.eh.typeid.for(i8*)
; CHECK: EH_LABEL
; CHECK: G_BR %[[GOOD]]
-; CHECK: [[BAD]] (landing-pad):
+; CHECK: [[BAD]].{{[a-z]+}} (landing-pad):
; CHECK: EH_LABEL
; CHECK: [[UNDEF:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY %x0
@@ -30,7 +30,7 @@ declare i32 @llvm.eh.typeid.for(i8*)
; CHECK: %x0 = COPY [[PTR_RET]]
; CHECK: %w1 = COPY [[SEL_RET]]
-; CHECK: [[GOOD]]:
+; CHECK: [[GOOD]].{{[a-z]+}}:
; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: {{%[0-9]+}}:_(s128) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 64
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
index da40b274aa6..01f955bc1d1 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
@@ -10,9 +10,9 @@ declare void @_Unwind_Resume(i8*)
; CHECK: name: bar
; CHECK: body:
; CHECK-NEXT: bb.1 (%ir-block.0):
-; CHECK: successors: %{{bb.[0-9]+.continue.*}}%[[LP:bb.[0-9]+.cleanup]]
+; CHECK: successors: %{{bb.[0-9]+.*}}%[[LP:bb.[0-9]+]]
-; CHECK: [[LP]] (landing-pad):
+; CHECK: [[LP]].{{[a-z]+}} (landing-pad):
; CHECK: EH_LABEL
; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY %x0
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir b/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
index 9c028eb9d95..a7329916ea8 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
@@ -43,16 +43,16 @@ registers:
- { id: 16, class: _ }
body: |
; CHECK-LABEL: name: test_simple
- ; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1.next(0x80000000)
+ ; CHECK: bb.0.{{[a-zA-Z0-9]+}}:
+ ; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[COPY]](s64)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[INTTOPTR]](p0)
; CHECK: %x0 = COPY [[PTRTOINT]](s64)
- ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1.next
- ; CHECK: bb.1.next:
+ ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1
+ ; CHECK: bb.1.{{[a-zA-Z0-9]+}}:
; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC2]], [[TRUNC3]]
@@ -95,7 +95,7 @@ body: |
%6(s64) = G_PTRTOINT %5
%x0 = COPY %6
- G_BRCOND %1, %bb.1.next
+ G_BRCOND %1, %bb.1
bb.1.next:
diff --git a/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir b/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir
index 997205bc0ef..d4ed70fa531 100644
--- a/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir
+++ b/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir
@@ -59,19 +59,19 @@ registers:
# CHECK: %5:fpr(s32) = G_FCONSTANT float 2.000000e+00
# Second block will get the constant 1.0 when the localizer is enabled.
-# CHECK: bb.1.true:
+# CHECK: bb.1.{{[a-zA-Z0-9]+}}:
# OPT-NOT: G_FCONSTANT
# OPTNONE: [[FONE:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
-# CHECK: G_BR %bb.3.end
+# CHECK: G_BR %bb.3
# Third block will get the constant 2.0 when the localizer is enabled.
-# CHECK: bb.2.false:
+# CHECK: bb.2.{{[a-zA-Z0-9]+}}:
# OPT-NOT: G_FCONSTANT
# OPTNONE: [[FTWO:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 2.000000e+00
# CHECK: bb.3.end
-# OPTNONE: %2:fpr(s32) = PHI [[FONE]](s32), %bb.1.true, [[FTWO]](s32), %bb.2.false
-# OPT: %2:fpr(s32) = PHI %4(s32), %bb.1.true, %5(s32), %bb.2.false
+# OPTNONE: %2:fpr(s32) = PHI [[FONE]](s32), %bb.1, [[FTWO]](s32), %bb.2
+# OPT: %2:fpr(s32) = PHI %4(s32), %bb.1, %5(s32), %bb.2
# CHECK-NEXT: G_FADD %0, %2
body: |
bb.0 (%ir-block.0):
@@ -82,16 +82,16 @@ body: |
%1(s1) = G_TRUNC %6
%4(s32) = G_FCONSTANT float 1.000000e+00
%5(s32) = G_FCONSTANT float 2.000000e+00
- G_BRCOND %1(s1), %bb.1.true
- G_BR %bb.2.false
+ G_BRCOND %1(s1), %bb.1
+ G_BR %bb.2
bb.1.true:
- G_BR %bb.3.end
+ G_BR %bb.3
bb.2.false:
bb.3.end:
- %2(s32) = PHI %4(s32), %bb.1.true, %5(s32), %bb.2.false
+ %2(s32) = PHI %4(s32), %bb.1, %5(s32), %bb.2
%3(s32) = G_FADD %0, %2
%s0 = COPY %3(s32)
RET_ReallyLR implicit %s0
diff --git a/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
index 51c32b409db..eafb4126807 100644
--- a/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
+++ b/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
@@ -508,12 +508,12 @@ block1:
; CHECK: ldr
; CHECK-NEXT: nop
; CHECK-NEXT: .Ltmp
-; CHECK-NEXT: BB
+; CHECK-NEXT: %bb.
; CHECK-NEXT: madd
; CHECK-NOWORKAROUND-LABEL: fall_through
; CHECK-NOWORKAROUND: ldr
; CHECK-NOWORKAROUND-NEXT: .Ltmp
-; CHECK-NOWORKAROUND-NEXT: BB
+; CHECK-NOWORKAROUND-NEXT: %bb.
; CHECK-NOWORKAROUND-NEXT: madd
; No checks for this, just check it doesn't crash
diff --git a/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/test/CodeGen/AArch64/aarch64-stp-cluster.ll
index 0ee32f79a35..c6bdbe4f032 100644
--- a/test/CodeGen/AArch64/aarch64-stp-cluster.ll
+++ b/test/CodeGen/AArch64/aarch64-stp-cluster.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=machine-scheduler -aarch64-enable-stp-suppress=false -o - 2>&1 > /dev/null | FileCheck %s
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: stp_i64_scale:BB#0
+; CHECK-LABEL: stp_i64_scale:%bb.0
; CHECK:Cluster ld/st SU(4) - SU(3)
; CHECK:Cluster ld/st SU(2) - SU(5)
; CHECK:SU(4): STRXui %1, %0, 1
@@ -23,7 +23,7 @@ entry:
}
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: stp_i32_scale:BB#0
+; CHECK-LABEL: stp_i32_scale:%bb.0
; CHECK:Cluster ld/st SU(4) - SU(3)
; CHECK:Cluster ld/st SU(2) - SU(5)
; CHECK:SU(4): STRWui %1, %0, 1
@@ -44,7 +44,7 @@ entry:
}
; CHECK:********** MI Scheduling **********
-; CHECK-LABEL:stp_i64_unscale:BB#0 entry
+; CHECK-LABEL:stp_i64_unscale:%bb.0 entry
; CHECK:Cluster ld/st SU(5) - SU(2)
; CHECK:Cluster ld/st SU(4) - SU(3)
; CHECK:SU(5): STURXi %1, %0, -32
@@ -65,7 +65,7 @@ entry:
}
; CHECK:********** MI Scheduling **********
-; CHECK-LABEL:stp_i32_unscale:BB#0 entry
+; CHECK-LABEL:stp_i32_unscale:%bb.0 entry
; CHECK:Cluster ld/st SU(5) - SU(2)
; CHECK:Cluster ld/st SU(4) - SU(3)
; CHECK:SU(5): STURWi %1, %0, -16
@@ -86,7 +86,7 @@ entry:
}
; CHECK:********** MI Scheduling **********
-; CHECK-LABEL:stp_double:BB#0
+; CHECK-LABEL:stp_double:%bb.0
; CHECK:Cluster ld/st SU(3) - SU(4)
; CHECK:Cluster ld/st SU(2) - SU(5)
; CHECK:SU(3): STRDui %1, %0, 1
@@ -107,7 +107,7 @@ entry:
}
; CHECK:********** MI Scheduling **********
-; CHECK-LABEL:stp_float:BB#0
+; CHECK-LABEL:stp_float:%bb.0
; CHECK:Cluster ld/st SU(3) - SU(4)
; CHECK:Cluster ld/st SU(2) - SU(5)
; CHECK:SU(3): STRSui %1, %0, 1
@@ -128,7 +128,7 @@ entry:
}
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: stp_volatile:BB#0
+; CHECK-LABEL: stp_volatile:%bb.0
; CHECK-NOT: Cluster ld/st
; CHECK:SU(2): STRXui %1, %0, 3; mem:Volatile
; CHECK:SU(3): STRXui %1, %0, 2; mem:Volatile
diff --git a/test/CodeGen/AArch64/analyze-branch.ll b/test/CodeGen/AArch64/analyze-branch.ll
index 932cd75052c..4f902ef4fc8 100644
--- a/test/CodeGen/AArch64/analyze-branch.ll
+++ b/test/CodeGen/AArch64/analyze-branch.ll
@@ -18,7 +18,7 @@ define void @test_Bcc_fallthrough_taken(i32 %in) nounwind {
; CHECK: cmp {{w[0-9]+}}, #42
; CHECK: b.ne [[FALSE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_true
; CHECK: [[FALSE]]:
@@ -41,7 +41,7 @@ define void @test_Bcc_fallthrough_nottaken(i32 %in) nounwind {
; CHECK: cmp {{w[0-9]+}}, #42
; CHECK: b.eq [[TRUE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_false
; CHECK: [[TRUE]]:
@@ -62,7 +62,7 @@ define void @test_CBZ_fallthrough_taken(i32 %in) nounwind {
br i1 %tst, label %true, label %false, !prof !0
; CHECK: cbnz {{w[0-9]+}}, [[FALSE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_true
; CHECK: [[FALSE]]:
@@ -83,7 +83,7 @@ define void @test_CBZ_fallthrough_nottaken(i64 %in) nounwind {
br i1 %tst, label %true, label %false, !prof !1
; CHECK: cbz {{x[0-9]+}}, [[TRUE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_false
; CHECK: [[TRUE]]:
@@ -104,7 +104,7 @@ define void @test_CBNZ_fallthrough_taken(i32 %in) nounwind {
br i1 %tst, label %true, label %false, !prof !0
; CHECK: cbz {{w[0-9]+}}, [[FALSE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_true
; CHECK: [[FALSE]]:
@@ -125,7 +125,7 @@ define void @test_CBNZ_fallthrough_nottaken(i64 %in) nounwind {
br i1 %tst, label %true, label %false, !prof !1
; CHECK: cbnz {{x[0-9]+}}, [[TRUE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_false
; CHECK: [[TRUE]]:
@@ -147,7 +147,7 @@ define void @test_TBZ_fallthrough_taken(i32 %in) nounwind {
br i1 %tst, label %true, label %false, !prof !0
; CHECK: tbnz {{w[0-9]+}}, #15, [[FALSE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_true
; CHECK: [[FALSE]]:
@@ -169,7 +169,7 @@ define void @test_TBZ_fallthrough_nottaken(i64 %in) nounwind {
br i1 %tst, label %true, label %false, !prof !1
; CHECK: tbz {{[wx][0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_false
; CHECK: [[TRUE]]:
@@ -192,7 +192,7 @@ define void @test_TBNZ_fallthrough_taken(i32 %in) nounwind {
br i1 %tst, label %true, label %false, !prof !0
; CHECK: tbz {{w[0-9]+}}, #15, [[FALSE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_true
; CHECK: [[FALSE]]:
@@ -214,7 +214,7 @@ define void @test_TBNZ_fallthrough_nottaken(i64 %in) nounwind {
br i1 %tst, label %true, label %false, !prof !1
; CHECK: tbnz {{[wx][0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: // BB#
+; CHECK-NEXT: // %bb.
; CHECK-NEXT: bl test_false
; CHECK: [[TRUE]]:
diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll
index a910585e7f5..b18e638a3a9 100644
--- a/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -132,6 +132,7 @@ if.end:
; Floating point compare.
; CHECK: single_fcmp
+; CHECK: ; %bb.
; CHECK: cmp
; CHECK-NOT: b.
; CHECK: fccmp {{.*}}, #8, ge
@@ -448,7 +449,7 @@ define i32 @select_noccmp3(i32 %v0, i32 %v1, i32 %v2) {
; Test the IR CCs that expand to two cond codes.
; CHECK-LABEL: select_and_olt_one:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d2, d3, #4, mi
; CHECK-NEXT: fccmp d2, d3, #1, ne
@@ -463,7 +464,7 @@ define i32 @select_and_olt_one(double %v0, double %v1, double %v2, double %v3, i
}
; CHECK-LABEL: select_and_one_olt:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d0, d1, #1, ne
; CHECK-NEXT: fccmp d2, d3, #0, vc
@@ -478,7 +479,7 @@ define i32 @select_and_one_olt(double %v0, double %v1, double %v2, double %v3, i
}
; CHECK-LABEL: select_and_olt_ueq:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d2, d3, #0, mi
; CHECK-NEXT: fccmp d2, d3, #8, le
@@ -493,7 +494,7 @@ define i32 @select_and_olt_ueq(double %v0, double %v1, double %v2, double %v3, i
}
; CHECK-LABEL: select_and_ueq_olt:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d0, d1, #8, le
; CHECK-NEXT: fccmp d2, d3, #0, pl
@@ -508,7 +509,7 @@ define i32 @select_and_ueq_olt(double %v0, double %v1, double %v2, double %v3, i
}
; CHECK-LABEL: select_or_olt_one:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d2, d3, #0, pl
; CHECK-NEXT: fccmp d2, d3, #8, le
@@ -523,7 +524,7 @@ define i32 @select_or_olt_one(double %v0, double %v1, double %v2, double %v3, i3
}
; CHECK-LABEL: select_or_one_olt:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d0, d1, #1, ne
; CHECK-NEXT: fccmp d2, d3, #8, vs
@@ -538,7 +539,7 @@ define i32 @select_or_one_olt(double %v0, double %v1, double %v2, double %v3, i3
}
; CHECK-LABEL: select_or_olt_ueq:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d2, d3, #4, pl
; CHECK-NEXT: fccmp d2, d3, #1, ne
@@ -553,7 +554,7 @@ define i32 @select_or_olt_ueq(double %v0, double %v1, double %v2, double %v3, i3
}
; CHECK-LABEL: select_or_ueq_olt:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d0, d1, #8, le
; CHECK-NEXT: fccmp d2, d3, #8, mi
@@ -568,7 +569,7 @@ define i32 @select_or_ueq_olt(double %v0, double %v1, double %v2, double %v3, i3
}
; CHECK-LABEL: select_or_olt_ogt_ueq:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d2, d3, #0, pl
; CHECK-NEXT: fccmp d4, d5, #4, le
@@ -586,7 +587,7 @@ define i32 @select_or_olt_ogt_ueq(double %v0, double %v1, double %v2, double %v3
}
; CHECK-LABEL: select_or_olt_ueq_ogt:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-NEXT: fcmp d0, d1
; CHECK-NEXT: fccmp d2, d3, #4, pl
; CHECK-NEXT: fccmp d2, d3, #1, ne
@@ -606,7 +607,7 @@ define i32 @select_or_olt_ueq_ogt(double %v0, double %v1, double %v2, double %v3
; Verify that we correctly promote f16.
; CHECK-LABEL: half_select_and_olt_oge:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-DAG: fcvt [[S0:s[0-9]+]], h0
; CHECK-DAG: fcvt [[S1:s[0-9]+]], h1
; CHECK-NEXT: fcmp [[S0]], [[S1]]
@@ -624,7 +625,7 @@ define i32 @half_select_and_olt_oge(half %v0, half %v1, half %v2, half %v3, i32
}
; CHECK-LABEL: half_select_and_olt_one:
-; CHECK-LABEL: ; BB#0:
+; CHECK-LABEL: ; %bb.0:
; CHECK-DAG: fcvt [[S0:s[0-9]+]], h0
; CHECK-DAG: fcvt [[S1:s[0-9]+]], h1
; CHECK-NEXT: fcmp [[S0]], [[S1]]
diff --git a/test/CodeGen/AArch64/arm64-fp128.ll b/test/CodeGen/AArch64/arm64-fp128.ll
index 2ae0da2d89d..3561d8fcdff 100644
--- a/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/test/CodeGen/AArch64/arm64-fp128.ll
@@ -195,7 +195,7 @@ define i32 @test_br_cc() {
iftrue:
ret i32 42
-; CHECK-NEXT: BB#
+; CHECK-NEXT: %bb.
; CHECK-NEXT: mov w0, #42
; CHECK: ret
iffalse:
@@ -211,7 +211,7 @@ define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
store fp128 %val, fp128* @lhs, align 16
; CHECK: tst w0, #0x1
; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: BB#
+; CHECK-NEXT: %bb.
; CHECK-NEXT: mov v[[VAL:[0-9]+]].16b, v0.16b
; CHECK-NEXT: [[IFFALSE]]:
; CHECK: str q[[VAL]], [{{x[0-9]+}}, :lo12:lhs]
diff --git a/test/CodeGen/AArch64/arm64-icmp-opt.ll b/test/CodeGen/AArch64/arm64-icmp-opt.ll
index 12eae0e88fb..1ed5c5ee135 100644
--- a/test/CodeGen/AArch64/arm64-icmp-opt.ll
+++ b/test/CodeGen/AArch64/arm64-icmp-opt.ll
@@ -7,7 +7,7 @@
define i32 @t1(i64 %a) {
; CHECK-LABEL: t1:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: lsr x8, x0, #63
; CHECK-NEXT: eor w0, w8, #0x1
; CHECK-NEXT: ret
diff --git a/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll b/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
index cdbadfe51f0..b63e739f577 100644
--- a/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
+++ b/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
@@ -6176,7 +6176,7 @@ define <2 x double> @test_v2f64_post_reg_ld1lane(double* %bar, double** %ptr, i6
; Check for dependencies between the vector and the scalar load.
define <4 x float> @test_v4f32_post_reg_ld1lane_dep_vec_on_load(float* %bar, float** %ptr, i64 %inc, <4 x float>* %dep_ptr_1, <4 x float>* %dep_ptr_2, <4 x float> %vec) {
; CHECK-LABEL: test_v4f32_post_reg_ld1lane_dep_vec_on_load:
-; CHECK: BB#0:
+; CHECK: %bb.0:
; CHECK-NEXT: ldr s[[LD:[0-9]+]], [x0]
; CHECK-NEXT: str q0, [x3]
; CHECK-NEXT: ldr q0, [x4]
diff --git a/test/CodeGen/AArch64/arm64-ldp-cluster.ll b/test/CodeGen/AArch64/arm64-ldp-cluster.ll
index ca50e110a88..370db233fcb 100644
--- a/test/CodeGen/AArch64/arm64-ldp-cluster.ll
+++ b/test/CodeGen/AArch64/arm64-ldp-cluster.ll
@@ -4,12 +4,12 @@
; Test ldr clustering.
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: ldr_int:BB#0
+; CHECK-LABEL: ldr_int:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(2)
; CHECK: SU(1): %{{[0-9]+}}<def> = LDRWui
; CHECK: SU(2): %{{[0-9]+}}<def> = LDRWui
; EXYNOS: ********** MI Scheduling **********
-; EXYNOS-LABEL: ldr_int:BB#0
+; EXYNOS-LABEL: ldr_int:%bb.0
; EXYNOS: Cluster ld/st SU(1) - SU(2)
; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRWui
; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRWui
@@ -24,12 +24,12 @@ define i32 @ldr_int(i32* %a) nounwind {
; Test ldpsw clustering.
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: ldp_sext_int:BB#0
+; CHECK-LABEL: ldp_sext_int:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(2)
; CHECK: SU(1): %{{[0-9]+}}<def> = LDRSWui
; CHECK: SU(2): %{{[0-9]+}}<def> = LDRSWui
; EXYNOS: ********** MI Scheduling **********
-; EXYNOS-LABEL: ldp_sext_int:BB#0
+; EXYNOS-LABEL: ldp_sext_int:%bb.0
; EXYNOS: Cluster ld/st SU(1) - SU(2)
; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRSWui
; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRSWui
@@ -45,12 +45,12 @@ define i64 @ldp_sext_int(i32* %p) nounwind {
; Test ldur clustering.
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: ldur_int:BB#0
+; CHECK-LABEL: ldur_int:%bb.0
; CHECK: Cluster ld/st SU(2) - SU(1)
; CHECK: SU(1): %{{[0-9]+}}<def> = LDURWi
; CHECK: SU(2): %{{[0-9]+}}<def> = LDURWi
; EXYNOS: ********** MI Scheduling **********
-; EXYNOS-LABEL: ldur_int:BB#0
+; EXYNOS-LABEL: ldur_int:%bb.0
; EXYNOS: Cluster ld/st SU(2) - SU(1)
; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDURWi
; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDURWi
@@ -65,12 +65,12 @@ define i32 @ldur_int(i32* %a) nounwind {
; Test sext + zext clustering.
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: ldp_half_sext_zext_int:BB#0
+; CHECK-LABEL: ldp_half_sext_zext_int:%bb.0
; CHECK: Cluster ld/st SU(3) - SU(4)
; CHECK: SU(3): %{{[0-9]+}}<def> = LDRSWui
; CHECK: SU(4): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
; EXYNOS: ********** MI Scheduling **********
-; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0
+; EXYNOS-LABEL: ldp_half_sext_zext_int:%bb.0
; EXYNOS: Cluster ld/st SU(3) - SU(4)
; EXYNOS: SU(3): %{{[0-9]+}}<def> = LDRSWui
; EXYNOS: SU(4): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
@@ -88,12 +88,12 @@ define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
; Test zext + sext clustering.
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: ldp_half_zext_sext_int:BB#0
+; CHECK-LABEL: ldp_half_zext_sext_int:%bb.0
; CHECK: Cluster ld/st SU(3) - SU(4)
; CHECK: SU(3): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
; CHECK: SU(4): %{{[0-9]+}}<def> = LDRSWui
; EXYNOS: ********** MI Scheduling **********
-; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0
+; EXYNOS-LABEL: ldp_half_zext_sext_int:%bb.0
; EXYNOS: Cluster ld/st SU(3) - SU(4)
; EXYNOS: SU(3): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
; EXYNOS: SU(4): %{{[0-9]+}}<def> = LDRSWui
@@ -111,12 +111,12 @@ define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
; Verify we don't cluster volatile loads.
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: ldr_int_volatile:BB#0
+; CHECK-LABEL: ldr_int_volatile:%bb.0
; CHECK-NOT: Cluster ld/st
; CHECK: SU(1): %{{[0-9]+}}<def> = LDRWui
; CHECK: SU(2): %{{[0-9]+}}<def> = LDRWui
; EXYNOS: ********** MI Scheduling **********
-; EXYNOS-LABEL: ldr_int_volatile:BB#0
+; EXYNOS-LABEL: ldr_int_volatile:%bb.0
; EXYNOS-NOT: Cluster ld/st
; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRWui
; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRWui
@@ -131,12 +131,12 @@ define i32 @ldr_int_volatile(i32* %a) nounwind {
; Test ldq clustering (no clustering for Exynos).
; CHECK: ********** MI Scheduling **********
-; CHECK-LABEL: ldq_cluster:BB#0
+; CHECK-LABEL: ldq_cluster:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(3)
; CHECK: SU(1): %{{[0-9]+}}<def> = LDRQui
; CHECK: SU(3): %{{[0-9]+}}<def> = LDRQui
; EXYNOS: ********** MI Scheduling **********
-; EXYNOS-LABEL: ldq_cluster:BB#0
+; EXYNOS-LABEL: ldq_cluster:%bb.0
; EXYNOS-NOT: Cluster ld/st
define <2 x i64> @ldq_cluster(i64* %p) {
%a1 = bitcast i64* %p to <2 x i64>*
diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
index 307d1ec1aa8..07df9cb32db 100644
--- a/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
+++ b/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
@@ -8,7 +8,7 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: main
-; CHECK: *** Final schedule for BB#2 ***
+; CHECK: *** Final schedule for %bb.2 ***
; CHECK: MADDWrrr
; CHECK: ADDWri
; CHECK: ********** INTERVALS **********
@@ -83,8 +83,8 @@ for.end: ; preds = %for.cond
; after it, this test checks to make sure there are more than one.
;
; CHECK: ********** MI Scheduling **********
-; CHECK: neon4xfloat:BB#0
-; CHECK: *** Final schedule for BB#0 ***
+; CHECK: neon4xfloat:%bb.0
+; CHECK: *** Final schedule for %bb.0 ***
; CHECK: FDIVv4f32
; CHECK: FADDv4f32
; CHECK: FADDv4f32
@@ -130,7 +130,7 @@ declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
; are otherwise ready are jammed in the pending queue.
; CHECK: ********** MI Scheduling **********
; CHECK: testResourceConflict
-; CHECK: *** Final schedule for BB#0 ***
+; CHECK: *** Final schedule for %bb.0 ***
; CHECK: BRK
; CHECK: ********** INTERVALS **********
define void @testResourceConflict(float* %ptr) {
@@ -178,7 +178,7 @@ declare void @llvm.trap()
; Resource contention on LDST.
; CHECK: ********** MI Scheduling **********
; CHECK: testLdStConflict
-; CHECK: *** Final schedule for BB#1 ***
+; CHECK: *** Final schedule for %bb.1 ***
; CHECK: LD4Fourv2d
; CHECK: STRQui
; CHECK: ********** INTERVALS **********
diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
index 82ba18ce72c..711d2f7397b 100644
--- a/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
+++ b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
@@ -8,10 +8,10 @@
;
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
; CHECK: ********** MI Scheduling **********
-; CHECK: main:BB#2
+; CHECK: main:%bb.2
; CHECK: LDR
; CHECK: Latency : 4
-; CHECK: *** Final schedule for BB#2 ***
+; CHECK: *** Final schedule for %bb.2 ***
; CHECK: LDR
; CHECK: LDR
; CHECK-NOT: LDR
diff --git a/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
index b2bfc13967a..8c81cf43e68 100644
--- a/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ b/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -4,7 +4,7 @@
; Test for bug in misched memory dependency calculation.
;
; CHECK: ********** MI Scheduling **********
-; CHECK: misched_bug:BB#0 entry
+; CHECK: misched_bug:%bb.0 entry
; CHECK: SU(2): %2<def> = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0
; CHECK: Successors:
; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2
diff --git a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
index 375877c5179..c6c7a65e2c1 100644
--- a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
+++ b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
@@ -113,7 +113,7 @@ declare void @llvm.va_end(i8*)
define void @test_va_end() nounwind {
; CHECK-LABEL: test_va_end:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_end(i8* %addr)
diff --git a/test/CodeGen/AArch64/bics.ll b/test/CodeGen/AArch64/bics.ll
index 53aa28ad913..244aacbc0df 100644
--- a/test/CodeGen/AArch64/bics.ll
+++ b/test/CodeGen/AArch64/bics.ll
@@ -2,7 +2,7 @@
define i1 @andn_cmp(i32 %x, i32 %y) {
; CHECK-LABEL: andn_cmp:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: bics wzr, w1, w0
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
@@ -15,7 +15,7 @@ define i1 @andn_cmp(i32 %x, i32 %y) {
define i1 @and_cmp(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: bics wzr, w1, w0
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
@@ -27,7 +27,7 @@ define i1 @and_cmp(i32 %x, i32 %y) {
define i1 @and_cmp_const(i32 %x) {
; CHECK-LABEL: and_cmp_const:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #43
; CHECK-NEXT: bics wzr, w8, w0
; CHECK-NEXT: cset w0, eq
diff --git a/test/CodeGen/AArch64/branch-relax-cbz.ll b/test/CodeGen/AArch64/branch-relax-cbz.ll
index d13c0f677bc..cddecbd9bab 100644
--- a/test/CodeGen/AArch64/branch-relax-cbz.ll
+++ b/test/CodeGen/AArch64/branch-relax-cbz.ll
@@ -4,7 +4,7 @@
; CHECK: cmn x{{[0-9]+}}, #5
; CHECK-NEXT: b.le [[B2:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#1: ; %b3
+; CHECK-NEXT: ; %bb.1: ; %b3
; CHECK: ldr [[LOAD:w[0-9]+]]
; CHECK: cbnz [[LOAD]], [[B8:LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: b [[B7:LBB[0-9]+_[0-9]+]]
diff --git a/test/CodeGen/AArch64/fast-isel-assume.ll b/test/CodeGen/AArch64/fast-isel-assume.ll
index d39a907407d..50f510a09b6 100644
--- a/test/CodeGen/AArch64/fast-isel-assume.ll
+++ b/test/CodeGen/AArch64/fast-isel-assume.ll
@@ -3,7 +3,7 @@
; Check that we ignore the assume intrinsic.
; CHECK-LABEL: test:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: ret
define void @test(i32 %a) {
%tmp0 = icmp slt i32 %a, 0
diff --git a/test/CodeGen/AArch64/fast-isel-atomic.ll b/test/CodeGen/AArch64/fast-isel-atomic.ll
index 195b8befc8e..ec612616ae2 100644
--- a/test/CodeGen/AArch64/fast-isel-atomic.ll
+++ b/test/CodeGen/AArch64/fast-isel-atomic.ll
@@ -5,7 +5,7 @@
; currently match, so we might as well check both! Feel free to remove SDAG.
; CHECK-LABEL: atomic_store_monotonic_8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: strb w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_monotonic_8(i8* %p, i8 %val) #0 {
@@ -14,7 +14,7 @@ define void @atomic_store_monotonic_8(i8* %p, i8 %val) #0 {
}
; CHECK-LABEL: atomic_store_monotonic_8_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: strb w1, [x0, #1]
; CHECK-NEXT: ret
define void @atomic_store_monotonic_8_off(i8* %p, i8 %val) #0 {
@@ -24,7 +24,7 @@ define void @atomic_store_monotonic_8_off(i8* %p, i8 %val) #0 {
}
; CHECK-LABEL: atomic_store_monotonic_16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: strh w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_monotonic_16(i16* %p, i16 %val) #0 {
@@ -33,7 +33,7 @@ define void @atomic_store_monotonic_16(i16* %p, i16 %val) #0 {
}
; CHECK-LABEL: atomic_store_monotonic_16_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: strh w1, [x0, #2]
; CHECK-NEXT: ret
define void @atomic_store_monotonic_16_off(i16* %p, i16 %val) #0 {
@@ -43,7 +43,7 @@ define void @atomic_store_monotonic_16_off(i16* %p, i16 %val) #0 {
}
; CHECK-LABEL: atomic_store_monotonic_32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: str w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_monotonic_32(i32* %p, i32 %val) #0 {
@@ -52,7 +52,7 @@ define void @atomic_store_monotonic_32(i32* %p, i32 %val) #0 {
}
; CHECK-LABEL: atomic_store_monotonic_32_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: str w1, [x0, #4]
; CHECK-NEXT: ret
define void @atomic_store_monotonic_32_off(i32* %p, i32 %val) #0 {
@@ -62,7 +62,7 @@ define void @atomic_store_monotonic_32_off(i32* %p, i32 %val) #0 {
}
; CHECK-LABEL: atomic_store_monotonic_64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: str x1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_monotonic_64(i64* %p, i64 %val) #0 {
@@ -71,7 +71,7 @@ define void @atomic_store_monotonic_64(i64* %p, i64 %val) #0 {
}
; CHECK-LABEL: atomic_store_monotonic_64_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: str x1, [x0, #8]
; CHECK-NEXT: ret
define void @atomic_store_monotonic_64_off(i64* %p, i64 %val) #0 {
@@ -81,7 +81,7 @@ define void @atomic_store_monotonic_64_off(i64* %p, i64 %val) #0 {
}
; CHECK-LABEL: atomic_store_release_8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: stlrb w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_release_8(i8* %p, i8 %val) #0 {
@@ -90,7 +90,7 @@ define void @atomic_store_release_8(i8* %p, i8 %val) #0 {
}
; CHECK-LABEL: atomic_store_release_8_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: add x0, x0, #1
; CHECK-NEXT: stlrb w1, [x0]
; CHECK-NEXT: ret
@@ -101,7 +101,7 @@ define void @atomic_store_release_8_off(i8* %p, i8 %val) #0 {
}
; CHECK-LABEL: atomic_store_release_16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: stlrh w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_release_16(i16* %p, i16 %val) #0 {
@@ -110,7 +110,7 @@ define void @atomic_store_release_16(i16* %p, i16 %val) #0 {
}
; CHECK-LABEL: atomic_store_release_16_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: add x0, x0, #2
; CHECK-NEXT: stlrh w1, [x0]
; CHECK-NEXT: ret
@@ -121,7 +121,7 @@ define void @atomic_store_release_16_off(i16* %p, i16 %val) #0 {
}
; CHECK-LABEL: atomic_store_release_32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: stlr w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_release_32(i32* %p, i32 %val) #0 {
@@ -130,7 +130,7 @@ define void @atomic_store_release_32(i32* %p, i32 %val) #0 {
}
; CHECK-LABEL: atomic_store_release_32_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: add x0, x0, #4
; CHECK-NEXT: stlr w1, [x0]
; CHECK-NEXT: ret
@@ -141,7 +141,7 @@ define void @atomic_store_release_32_off(i32* %p, i32 %val) #0 {
}
; CHECK-LABEL: atomic_store_release_64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: stlr x1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_release_64(i64* %p, i64 %val) #0 {
@@ -150,7 +150,7 @@ define void @atomic_store_release_64(i64* %p, i64 %val) #0 {
}
; CHECK-LABEL: atomic_store_release_64_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: add x0, x0, #8
; CHECK-NEXT: stlr x1, [x0]
; CHECK-NEXT: ret
@@ -162,7 +162,7 @@ define void @atomic_store_release_64_off(i64* %p, i64 %val) #0 {
; CHECK-LABEL: atomic_store_seq_cst_8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: stlrb w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_seq_cst_8(i8* %p, i8 %val) #0 {
@@ -171,7 +171,7 @@ define void @atomic_store_seq_cst_8(i8* %p, i8 %val) #0 {
}
; CHECK-LABEL: atomic_store_seq_cst_8_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: add x0, x0, #1
; CHECK-NEXT: stlrb w1, [x0]
; CHECK-NEXT: ret
@@ -182,7 +182,7 @@ define void @atomic_store_seq_cst_8_off(i8* %p, i8 %val) #0 {
}
; CHECK-LABEL: atomic_store_seq_cst_16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: stlrh w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_seq_cst_16(i16* %p, i16 %val) #0 {
@@ -191,7 +191,7 @@ define void @atomic_store_seq_cst_16(i16* %p, i16 %val) #0 {
}
; CHECK-LABEL: atomic_store_seq_cst_16_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: add x0, x0, #2
; CHECK-NEXT: stlrh w1, [x0]
; CHECK-NEXT: ret
@@ -202,7 +202,7 @@ define void @atomic_store_seq_cst_16_off(i16* %p, i16 %val) #0 {
}
; CHECK-LABEL: atomic_store_seq_cst_32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: stlr w1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_seq_cst_32(i32* %p, i32 %val) #0 {
@@ -211,7 +211,7 @@ define void @atomic_store_seq_cst_32(i32* %p, i32 %val) #0 {
}
; CHECK-LABEL: atomic_store_seq_cst_32_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: add x0, x0, #4
; CHECK-NEXT: stlr w1, [x0]
; CHECK-NEXT: ret
@@ -222,7 +222,7 @@ define void @atomic_store_seq_cst_32_off(i32* %p, i32 %val) #0 {
}
; CHECK-LABEL: atomic_store_seq_cst_64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: stlr x1, [x0]
; CHECK-NEXT: ret
define void @atomic_store_seq_cst_64(i64* %p, i64 %val) #0 {
@@ -231,7 +231,7 @@ define void @atomic_store_seq_cst_64(i64* %p, i64 %val) #0 {
}
; CHECK-LABEL: atomic_store_seq_cst_64_off:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: add x0, x0, #8
; CHECK-NEXT: stlr x1, [x0]
; CHECK-NEXT: ret
diff --git a/test/CodeGen/AArch64/fast-isel-cmp-vec.ll b/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
index 89b368fa19b..d5b64c5363e 100644
--- a/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
+++ b/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
@@ -8,9 +8,9 @@
define <2 x i32> @icmp_v2i32(<2 x i32> %a) {
; CHECK-LABEL: icmp_v2i32:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: cmeq.2s [[CMP:v[0-9]+]], v0, #0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #1
; CHECK-NEXT: and.8b v0, [[CMP]], [[MASK]]
; CHECK-NEXT: ret
@@ -23,9 +23,9 @@ bb2:
define <2 x i32> @icmp_constfold_v2i32(<2 x i32> %a) {
; CHECK-LABEL: icmp_constfold_v2i32:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: movi d[[CMP:[0-9]+]], #0xffffffffffffffff
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #1
; CHECK-NEXT: and.8b v0, v[[CMP]], [[MASK]]
; CHECK-NEXT: ret
@@ -38,10 +38,10 @@ bb2:
define <4 x i32> @icmp_v4i32(<4 x i32> %a) {
; CHECK-LABEL: icmp_v4i32:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: cmeq.4s [[CMP:v[0-9]+]], v0, #0
; CHECK-NEXT: xtn.4h [[CMPV4I16:v[0-9]+]], [[CMP]]
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #1
; CHECK-NEXT: and.8b [[ZEXT:v[0-9]+]], [[CMPV4I16]], [[MASK]]
; CHECK-NEXT: ushll.4s v0, [[ZEXT]], #0
@@ -55,9 +55,9 @@ bb2:
define <4 x i32> @icmp_constfold_v4i32(<4 x i32> %a) {
; CHECK-LABEL: icmp_constfold_v4i32:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: movi d[[CMP:[0-9]+]], #0xffffffffffffffff
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #1
; CHECK-NEXT: and.8b [[ZEXT:v[0-9]+]], v[[CMP]], [[MASK]]
; CHECK-NEXT: ushll.4s v0, [[ZEXT]], #0
@@ -71,9 +71,9 @@ bb2:
define <16 x i8> @icmp_v16i8(<16 x i8> %a) {
; CHECK-LABEL: icmp_v16i8:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: cmeq.16b [[CMP:v[0-9]+]], v0, #0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #1
; CHECK-NEXT: and.16b v0, [[CMP]], [[MASK]]
; CHECK-NEXT: ret
@@ -86,9 +86,9 @@ bb2:
define <16 x i8> @icmp_constfold_v16i8(<16 x i8> %a) {
; CHECK-LABEL: icmp_constfold_v16i8:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: movi.2d [[CMP:v[0-9]+]], #0xffffffffffffffff
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #1
; CHECK-NEXT: and.16b v0, [[CMP]], [[MASK]]
; CHECK-NEXT: ret
diff --git a/test/CodeGen/AArch64/fast-isel-cmpxchg.ll b/test/CodeGen/AArch64/fast-isel-cmpxchg.ll
index 7ef625abab2..f03955c4dcd 100644
--- a/test/CodeGen/AArch64/fast-isel-cmpxchg.ll
+++ b/test/CodeGen/AArch64/fast-isel-cmpxchg.ll
@@ -6,7 +6,7 @@
; CHECK-NEXT: ldaxr [[OLD:w[0-9]+]], [x0]
; CHECK-NEXT: cmp [[OLD]], w1
; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]]
-; CHECK-NEXT: // BB#2:
+; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: stlxr [[STATUS]], w2, [x0]
; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]]
; CHECK-NEXT: [[DONE]]:
@@ -25,14 +25,14 @@ define i32 @cmpxchg_monotonic_32(i32* %p, i32 %cmp, i32 %new, i32* %ps) #0 {
}
; CHECK-LABEL: cmpxchg_acq_rel_32_load:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK: ldr [[NEW:w[0-9]+]], [x2]
; CHECK-NEXT: [[RETRY:.LBB[0-9_]+]]:
; CHECK-NEXT: mov [[STATUS:w[0-9]+]], #0
; CHECK-NEXT: ldaxr [[OLD:w[0-9]+]], [x0]
; CHECK-NEXT: cmp [[OLD]], w1
; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]]
-; CHECK-NEXT: // BB#2:
+; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: stlxr [[STATUS]], [[NEW]], [x0]
; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]]
; CHECK-NEXT: [[DONE]]:
@@ -57,7 +57,7 @@ define i32 @cmpxchg_acq_rel_32_load(i32* %p, i32 %cmp, i32* %pnew, i32* %ps) #0
; CHECK-NEXT: ldaxr [[OLD:x[0-9]+]], [x0]
; CHECK-NEXT: cmp [[OLD]], x1
; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]]
-; CHECK-NEXT: // BB#2:
+; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: stlxr [[STATUS]], x2, [x0]
; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]]
; CHECK-NEXT: [[DONE]]:
diff --git a/test/CodeGen/AArch64/fcvt-int.ll b/test/CodeGen/AArch64/fcvt-int.ll
index e52b601b145..aeafc127494 100644
--- a/test/CodeGen/AArch64/fcvt-int.ll
+++ b/test/CodeGen/AArch64/fcvt-int.ll
@@ -152,7 +152,7 @@ define double @test_bitcasti64todouble(i64 %in) {
define double @bitcast_fabs(double %x) {
; CHECK-LABEL: bitcast_fabs:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: fabs d0, d0
; CHECK-NEXT: ret
;
@@ -164,7 +164,7 @@ define double @bitcast_fabs(double %x) {
define float @bitcast_fneg(float %x) {
; CHECK-LABEL: bitcast_fneg:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: fneg s0, s0
; CHECK-NEXT: ret
;
diff --git a/test/CodeGen/AArch64/local_vars.ll b/test/CodeGen/AArch64/local_vars.ll
index 6e33ab2d0be..a479572d2a3 100644
--- a/test/CodeGen/AArch64/local_vars.ll
+++ b/test/CodeGen/AArch64/local_vars.ll
@@ -17,7 +17,7 @@ declare void @foo()
define void @trivial_func() nounwind {
; CHECK-LABEL: trivial_func: // @trivial_func
-; CHECK-NEXT: // BB#0
+; CHECK-NEXT: // %bb.0
; CHECK-NEXT: ret
ret void
diff --git a/test/CodeGen/AArch64/max-jump-table.ll b/test/CodeGen/AArch64/max-jump-table.ll
index 070502052ff..1a7a418b31f 100644
--- a/test/CodeGen/AArch64/max-jump-table.ll
+++ b/test/CodeGen/AArch64/max-jump-table.ll
@@ -77,10 +77,10 @@ entry:
]
; CHECK-LABEL: function jt2:
; CHECK-NEXT: Jump Tables:
-; CHECK0-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#5 BB#6{{$}}
-; CHECK4-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}}
-; CHECK8-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}}
-; CHECKM1-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}}
+; CHECK0-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.5 %bb.6{{$}}
+; CHECK4-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}}
+; CHECK8-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}}
+; CHECKM1-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}}
; CHECK-NEXT: Function Live Ins:
bb1: tail call void @ext(i32 1) br label %return
diff --git a/test/CodeGen/AArch64/neon-bitcast.ll b/test/CodeGen/AArch64/neon-bitcast.ll
index 61099d48fdd..8f67ff83ae1 100644
--- a/test/CodeGen/AArch64/neon-bitcast.ll
+++ b/test/CodeGen/AArch64/neon-bitcast.ll
@@ -4,7 +4,7 @@
define <1 x i64> @test_v8i8_to_v1i64(<8 x i8> %in) nounwind {
; CHECK: test_v8i8_to_v1i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i8> %in to <1 x i64>
@@ -13,7 +13,7 @@ define <1 x i64> @test_v8i8_to_v1i64(<8 x i8> %in) nounwind {
define <2 x i32> @test_v8i8_to_v2i32(<8 x i8> %in) nounwind {
; CHECK: test_v8i8_to_v2i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i8> %in to <2 x i32>
@@ -22,7 +22,7 @@ define <2 x i32> @test_v8i8_to_v2i32(<8 x i8> %in) nounwind {
define <2 x float> @test_v8i8_to_v2f32(<8 x i8> %in) nounwind{
; CHECK: test_v8i8_to_v2f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i8> %in to <2 x float>
@@ -31,7 +31,7 @@ define <2 x float> @test_v8i8_to_v2f32(<8 x i8> %in) nounwind{
define <4 x i16> @test_v8i8_to_v4i16(<8 x i8> %in) nounwind{
; CHECK: test_v8i8_to_v4i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i8> %in to <4 x i16>
@@ -40,7 +40,7 @@ define <4 x i16> @test_v8i8_to_v4i16(<8 x i8> %in) nounwind{
define <8 x i8> @test_v8i8_to_v8i8(<8 x i8> %in) nounwind{
; CHECK: test_v8i8_to_v8i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i8> %in to <8 x i8>
@@ -51,7 +51,7 @@ define <8 x i8> @test_v8i8_to_v8i8(<8 x i8> %in) nounwind{
define <1 x i64> @test_v4i16_to_v1i64(<4 x i16> %in) nounwind {
; CHECK: test_v4i16_to_v1i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i16> %in to <1 x i64>
@@ -60,7 +60,7 @@ define <1 x i64> @test_v4i16_to_v1i64(<4 x i16> %in) nounwind {
define <2 x i32> @test_v4i16_to_v2i32(<4 x i16> %in) nounwind {
; CHECK: test_v4i16_to_v2i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i16> %in to <2 x i32>
@@ -69,7 +69,7 @@ define <2 x i32> @test_v4i16_to_v2i32(<4 x i16> %in) nounwind {
define <2 x float> @test_v4i16_to_v2f32(<4 x i16> %in) nounwind{
; CHECK: test_v4i16_to_v2f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i16> %in to <2 x float>
@@ -78,7 +78,7 @@ define <2 x float> @test_v4i16_to_v2f32(<4 x i16> %in) nounwind{
define <4 x i16> @test_v4i16_to_v4i16(<4 x i16> %in) nounwind{
; CHECK: test_v4i16_to_v4i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i16> %in to <4 x i16>
@@ -87,7 +87,7 @@ define <4 x i16> @test_v4i16_to_v4i16(<4 x i16> %in) nounwind{
define <8 x i8> @test_v4i16_to_v8i8(<4 x i16> %in) nounwind{
; CHECK: test_v4i16_to_v8i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i16> %in to <8 x i8>
@@ -98,7 +98,7 @@ define <8 x i8> @test_v4i16_to_v8i8(<4 x i16> %in) nounwind{
define <1 x i64> @test_v2i32_to_v1i64(<2 x i32> %in) nounwind {
; CHECK: test_v2i32_to_v1i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i32> %in to <1 x i64>
@@ -107,7 +107,7 @@ define <1 x i64> @test_v2i32_to_v1i64(<2 x i32> %in) nounwind {
define <2 x i32> @test_v2i32_to_v2i32(<2 x i32> %in) nounwind {
; CHECK: test_v2i32_to_v2i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i32> %in to <2 x i32>
@@ -116,7 +116,7 @@ define <2 x i32> @test_v2i32_to_v2i32(<2 x i32> %in) nounwind {
define <2 x float> @test_v2i32_to_v2f32(<2 x i32> %in) nounwind{
; CHECK: test_v2i32_to_v2f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i32> %in to <2 x float>
@@ -125,7 +125,7 @@ define <2 x float> @test_v2i32_to_v2f32(<2 x i32> %in) nounwind{
define <4 x i16> @test_v2i32_to_v4i16(<2 x i32> %in) nounwind{
; CHECK: test_v2i32_to_v4i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i32> %in to <4 x i16>
@@ -134,7 +134,7 @@ define <4 x i16> @test_v2i32_to_v4i16(<2 x i32> %in) nounwind{
define <8 x i8> @test_v2i32_to_v8i8(<2 x i32> %in) nounwind{
; CHECK: test_v2i32_to_v8i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i32> %in to <8 x i8>
@@ -145,7 +145,7 @@ define <8 x i8> @test_v2i32_to_v8i8(<2 x i32> %in) nounwind{
define <1 x i64> @test_v2f32_to_v1i64(<2 x float> %in) nounwind {
; CHECK: test_v2f32_to_v1i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x float> %in to <1 x i64>
@@ -154,7 +154,7 @@ define <1 x i64> @test_v2f32_to_v1i64(<2 x float> %in) nounwind {
define <2 x i32> @test_v2f32_to_v2i32(<2 x float> %in) nounwind {
; CHECK: test_v2f32_to_v2i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x float> %in to <2 x i32>
@@ -163,7 +163,7 @@ define <2 x i32> @test_v2f32_to_v2i32(<2 x float> %in) nounwind {
define <2 x float> @test_v2f32_to_v2f32(<2 x float> %in) nounwind{
; CHECK: test_v2f32_to_v2f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x float> %in to <2 x float>
@@ -172,7 +172,7 @@ define <2 x float> @test_v2f32_to_v2f32(<2 x float> %in) nounwind{
define <4 x i16> @test_v2f32_to_v4i16(<2 x float> %in) nounwind{
; CHECK: test_v2f32_to_v4i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x float> %in to <4 x i16>
@@ -181,7 +181,7 @@ define <4 x i16> @test_v2f32_to_v4i16(<2 x float> %in) nounwind{
define <8 x i8> @test_v2f32_to_v8i8(<2 x float> %in) nounwind{
; CHECK: test_v2f32_to_v8i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x float> %in to <8 x i8>
@@ -192,7 +192,7 @@ define <8 x i8> @test_v2f32_to_v8i8(<2 x float> %in) nounwind{
define <1 x i64> @test_v1i64_to_v1i64(<1 x i64> %in) nounwind {
; CHECK: test_v1i64_to_v1i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <1 x i64> %in to <1 x i64>
@@ -201,7 +201,7 @@ define <1 x i64> @test_v1i64_to_v1i64(<1 x i64> %in) nounwind {
define <2 x i32> @test_v1i64_to_v2i32(<1 x i64> %in) nounwind {
; CHECK: test_v1i64_to_v2i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <1 x i64> %in to <2 x i32>
@@ -210,7 +210,7 @@ define <2 x i32> @test_v1i64_to_v2i32(<1 x i64> %in) nounwind {
define <2 x float> @test_v1i64_to_v2f32(<1 x i64> %in) nounwind{
; CHECK: test_v1i64_to_v2f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <1 x i64> %in to <2 x float>
@@ -219,7 +219,7 @@ define <2 x float> @test_v1i64_to_v2f32(<1 x i64> %in) nounwind{
define <4 x i16> @test_v1i64_to_v4i16(<1 x i64> %in) nounwind{
; CHECK: test_v1i64_to_v4i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <1 x i64> %in to <4 x i16>
@@ -228,7 +228,7 @@ define <4 x i16> @test_v1i64_to_v4i16(<1 x i64> %in) nounwind{
define <8 x i8> @test_v1i64_to_v8i8(<1 x i64> %in) nounwind{
; CHECK: test_v1i64_to_v8i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <1 x i64> %in to <8 x i8>
@@ -240,7 +240,7 @@ define <8 x i8> @test_v1i64_to_v8i8(<1 x i64> %in) nounwind{
define <2 x double> @test_v16i8_to_v2f64(<16 x i8> %in) nounwind {
; CHECK: test_v16i8_to_v2f64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <16 x i8> %in to <2 x double>
@@ -249,7 +249,7 @@ define <2 x double> @test_v16i8_to_v2f64(<16 x i8> %in) nounwind {
define <2 x i64> @test_v16i8_to_v2i64(<16 x i8> %in) nounwind {
; CHECK: test_v16i8_to_v2i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <16 x i8> %in to <2 x i64>
@@ -258,7 +258,7 @@ define <2 x i64> @test_v16i8_to_v2i64(<16 x i8> %in) nounwind {
define <4 x i32> @test_v16i8_to_v4i32(<16 x i8> %in) nounwind {
; CHECK: test_v16i8_to_v4i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <16 x i8> %in to <4 x i32>
@@ -267,7 +267,7 @@ define <4 x i32> @test_v16i8_to_v4i32(<16 x i8> %in) nounwind {
define <4 x float> @test_v16i8_to_v2f32(<16 x i8> %in) nounwind{
; CHECK: test_v16i8_to_v2f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <16 x i8> %in to <4 x float>
@@ -276,7 +276,7 @@ define <4 x float> @test_v16i8_to_v2f32(<16 x i8> %in) nounwind{
define <8 x i16> @test_v16i8_to_v8i16(<16 x i8> %in) nounwind{
; CHECK: test_v16i8_to_v8i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <16 x i8> %in to <8 x i16>
@@ -285,7 +285,7 @@ define <8 x i16> @test_v16i8_to_v8i16(<16 x i8> %in) nounwind{
define <16 x i8> @test_v16i8_to_v16i8(<16 x i8> %in) nounwind{
; CHECK: test_v16i8_to_v16i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <16 x i8> %in to <16 x i8>
@@ -296,7 +296,7 @@ define <16 x i8> @test_v16i8_to_v16i8(<16 x i8> %in) nounwind{
define <2 x double> @test_v8i16_to_v2f64(<8 x i16> %in) nounwind {
; CHECK: test_v8i16_to_v2f64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i16> %in to <2 x double>
@@ -305,7 +305,7 @@ define <2 x double> @test_v8i16_to_v2f64(<8 x i16> %in) nounwind {
define <2 x i64> @test_v8i16_to_v2i64(<8 x i16> %in) nounwind {
; CHECK: test_v8i16_to_v2i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i16> %in to <2 x i64>
@@ -314,7 +314,7 @@ define <2 x i64> @test_v8i16_to_v2i64(<8 x i16> %in) nounwind {
define <4 x i32> @test_v8i16_to_v4i32(<8 x i16> %in) nounwind {
; CHECK: test_v8i16_to_v4i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i16> %in to <4 x i32>
@@ -323,7 +323,7 @@ define <4 x i32> @test_v8i16_to_v4i32(<8 x i16> %in) nounwind {
define <4 x float> @test_v8i16_to_v2f32(<8 x i16> %in) nounwind{
; CHECK: test_v8i16_to_v2f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i16> %in to <4 x float>
@@ -332,7 +332,7 @@ define <4 x float> @test_v8i16_to_v2f32(<8 x i16> %in) nounwind{
define <8 x i16> @test_v8i16_to_v8i16(<8 x i16> %in) nounwind{
; CHECK: test_v8i16_to_v8i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i16> %in to <8 x i16>
@@ -341,7 +341,7 @@ define <8 x i16> @test_v8i16_to_v8i16(<8 x i16> %in) nounwind{
define <16 x i8> @test_v8i16_to_v16i8(<8 x i16> %in) nounwind{
; CHECK: test_v8i16_to_v16i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <8 x i16> %in to <16 x i8>
@@ -352,7 +352,7 @@ define <16 x i8> @test_v8i16_to_v16i8(<8 x i16> %in) nounwind{
define <2 x double> @test_v4i32_to_v2f64(<4 x i32> %in) nounwind {
; CHECK: test_v4i32_to_v2f64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i32> %in to <2 x double>
@@ -361,7 +361,7 @@ define <2 x double> @test_v4i32_to_v2f64(<4 x i32> %in) nounwind {
define <2 x i64> @test_v4i32_to_v2i64(<4 x i32> %in) nounwind {
; CHECK: test_v4i32_to_v2i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i32> %in to <2 x i64>
@@ -370,7 +370,7 @@ define <2 x i64> @test_v4i32_to_v2i64(<4 x i32> %in) nounwind {
define <4 x i32> @test_v4i32_to_v4i32(<4 x i32> %in) nounwind {
; CHECK: test_v4i32_to_v4i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i32> %in to <4 x i32>
@@ -379,7 +379,7 @@ define <4 x i32> @test_v4i32_to_v4i32(<4 x i32> %in) nounwind {
define <4 x float> @test_v4i32_to_v2f32(<4 x i32> %in) nounwind{
; CHECK: test_v4i32_to_v2f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i32> %in to <4 x float>
@@ -388,7 +388,7 @@ define <4 x float> @test_v4i32_to_v2f32(<4 x i32> %in) nounwind{
define <8 x i16> @test_v4i32_to_v8i16(<4 x i32> %in) nounwind{
; CHECK: test_v4i32_to_v8i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i32> %in to <8 x i16>
@@ -397,7 +397,7 @@ define <8 x i16> @test_v4i32_to_v8i16(<4 x i32> %in) nounwind{
define <16 x i8> @test_v4i32_to_v16i8(<4 x i32> %in) nounwind{
; CHECK: test_v4i32_to_v16i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x i32> %in to <16 x i8>
@@ -408,7 +408,7 @@ define <16 x i8> @test_v4i32_to_v16i8(<4 x i32> %in) nounwind{
define <2 x double> @test_v4f32_to_v2f64(<4 x float> %in) nounwind {
; CHECK: test_v4f32_to_v2f64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x float> %in to <2 x double>
@@ -417,7 +417,7 @@ define <2 x double> @test_v4f32_to_v2f64(<4 x float> %in) nounwind {
define <2 x i64> @test_v4f32_to_v2i64(<4 x float> %in) nounwind {
; CHECK: test_v4f32_to_v2i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x float> %in to <2 x i64>
@@ -426,7 +426,7 @@ define <2 x i64> @test_v4f32_to_v2i64(<4 x float> %in) nounwind {
define <4 x i32> @test_v4f32_to_v4i32(<4 x float> %in) nounwind {
; CHECK: test_v4f32_to_v4i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x float> %in to <4 x i32>
@@ -435,7 +435,7 @@ define <4 x i32> @test_v4f32_to_v4i32(<4 x float> %in) nounwind {
define <4 x float> @test_v4f32_to_v4f32(<4 x float> %in) nounwind{
; CHECK: test_v4f32_to_v4f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x float> %in to <4 x float>
@@ -444,7 +444,7 @@ define <4 x float> @test_v4f32_to_v4f32(<4 x float> %in) nounwind{
define <8 x i16> @test_v4f32_to_v8i16(<4 x float> %in) nounwind{
; CHECK: test_v4f32_to_v8i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x float> %in to <8 x i16>
@@ -453,7 +453,7 @@ define <8 x i16> @test_v4f32_to_v8i16(<4 x float> %in) nounwind{
define <16 x i8> @test_v4f32_to_v16i8(<4 x float> %in) nounwind{
; CHECK: test_v4f32_to_v16i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <4 x float> %in to <16 x i8>
@@ -464,7 +464,7 @@ define <16 x i8> @test_v4f32_to_v16i8(<4 x float> %in) nounwind{
define <2 x double> @test_v2i64_to_v2f64(<2 x i64> %in) nounwind {
; CHECK: test_v2i64_to_v2f64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i64> %in to <2 x double>
@@ -473,7 +473,7 @@ define <2 x double> @test_v2i64_to_v2f64(<2 x i64> %in) nounwind {
define <2 x i64> @test_v2i64_to_v2i64(<2 x i64> %in) nounwind {
; CHECK: test_v2i64_to_v2i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i64> %in to <2 x i64>
@@ -482,7 +482,7 @@ define <2 x i64> @test_v2i64_to_v2i64(<2 x i64> %in) nounwind {
define <4 x i32> @test_v2i64_to_v4i32(<2 x i64> %in) nounwind {
; CHECK: test_v2i64_to_v4i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i64> %in to <4 x i32>
@@ -491,7 +491,7 @@ define <4 x i32> @test_v2i64_to_v4i32(<2 x i64> %in) nounwind {
define <4 x float> @test_v2i64_to_v4f32(<2 x i64> %in) nounwind{
; CHECK: test_v2i64_to_v4f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i64> %in to <4 x float>
@@ -500,7 +500,7 @@ define <4 x float> @test_v2i64_to_v4f32(<2 x i64> %in) nounwind{
define <8 x i16> @test_v2i64_to_v8i16(<2 x i64> %in) nounwind{
; CHECK: test_v2i64_to_v8i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i64> %in to <8 x i16>
@@ -509,7 +509,7 @@ define <8 x i16> @test_v2i64_to_v8i16(<2 x i64> %in) nounwind{
define <16 x i8> @test_v2i64_to_v16i8(<2 x i64> %in) nounwind{
; CHECK: test_v2i64_to_v16i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x i64> %in to <16 x i8>
@@ -520,7 +520,7 @@ define <16 x i8> @test_v2i64_to_v16i8(<2 x i64> %in) nounwind{
define <2 x double> @test_v2f64_to_v2f64(<2 x double> %in) nounwind {
; CHECK: test_v2f64_to_v2f64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x double> %in to <2 x double>
@@ -529,7 +529,7 @@ define <2 x double> @test_v2f64_to_v2f64(<2 x double> %in) nounwind {
define <2 x i64> @test_v2f64_to_v2i64(<2 x double> %in) nounwind {
; CHECK: test_v2f64_to_v2i64:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x double> %in to <2 x i64>
@@ -538,7 +538,7 @@ define <2 x i64> @test_v2f64_to_v2i64(<2 x double> %in) nounwind {
define <4 x i32> @test_v2f64_to_v4i32(<2 x double> %in) nounwind {
; CHECK: test_v2f64_to_v4i32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x double> %in to <4 x i32>
@@ -547,7 +547,7 @@ define <4 x i32> @test_v2f64_to_v4i32(<2 x double> %in) nounwind {
define <4 x float> @test_v2f64_to_v4f32(<2 x double> %in) nounwind{
; CHECK: test_v2f64_to_v4f32:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x double> %in to <4 x float>
@@ -556,7 +556,7 @@ define <4 x float> @test_v2f64_to_v4f32(<2 x double> %in) nounwind{
define <8 x i16> @test_v2f64_to_v8i16(<2 x double> %in) nounwind{
; CHECK: test_v2f64_to_v8i16:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x double> %in to <8 x i16>
@@ -565,7 +565,7 @@ define <8 x i16> @test_v2f64_to_v8i16(<2 x double> %in) nounwind{
define <16 x i8> @test_v2f64_to_v16i8(<2 x double> %in) nounwind{
; CHECK: test_v2f64_to_v16i8:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ret
%val = bitcast <2 x double> %in to <16 x i8>
diff --git a/test/CodeGen/AArch64/nest-register.ll b/test/CodeGen/AArch64/nest-register.ll
index cc42913e10a..b8651714be3 100644
--- a/test/CodeGen/AArch64/nest-register.ll
+++ b/test/CodeGen/AArch64/nest-register.ll
@@ -5,7 +5,7 @@
define i8* @nest_receiver(i8* nest %arg) nounwind {
; CHECK-LABEL: nest_receiver:
-; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov x0, x18
; CHECK-NEXT: ret
diff --git a/test/CodeGen/AArch64/recp-fastmath.ll b/test/CodeGen/AArch64/recp-fastmath.ll
index 4776931cf06..9f00621eff6 100644
--- a/test/CodeGen/AArch64/recp-fastmath.ll
+++ b/test/CodeGen/AArch64/recp-fastmath.ll
@@ -5,7 +5,7 @@ define float @frecp0(float %x) #0 {
ret float %div
; CHECK-LABEL: frecp0:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: fmov
; CHECK-NEXT: fdiv
}
@@ -15,7 +15,7 @@ define float @frecp1(float %x) #1 {
ret float %div
; CHECK-LABEL: frecp1:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frecpe [[R:s[0-7]]]
; CHECK-NEXT: frecps {{s[0-7](, s[0-7])?}}, [[R]]
; CHECK: frecps {{s[0-7]}}, {{s[0-7]}}, {{s[0-7]}}
@@ -27,7 +27,7 @@ define <2 x float> @f2recp0(<2 x float> %x) #0 {
ret <2 x float> %div
; CHECK-LABEL: f2recp0:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: fmov
; CHECK-NEXT: fdiv
}
@@ -37,7 +37,7 @@ define <2 x float> @f2recp1(<2 x float> %x) #1 {
ret <2 x float> %div
; CHECK-LABEL: f2recp1:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frecpe [[R:v[0-7]\.2s]]
; CHECK-NEXT: frecps {{v[0-7]\.2s(, v[0-7].2s)?}}, [[R]]
; CHECK: frecps {{v[0-7]\.2s}}, {{v[0-7]\.2s}}, {{v[0-7]\.2s}}
@@ -49,7 +49,7 @@ define <4 x float> @f4recp0(<4 x float> %x) #0 {
ret <4 x float> %div
; CHECK-LABEL: f4recp0:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: fmov
; CHECK-NEXT: fdiv
}
@@ -59,7 +59,7 @@ define <4 x float> @f4recp1(<4 x float> %x) #1 {
ret <4 x float> %div
; CHECK-LABEL: f4recp1:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frecpe [[R:v[0-7]\.4s]]
; CHECK-NEXT: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, [[R]]
; CHECK: frecps {{v[0-7]\.4s}}, {{v[0-7]\.4s}}, {{v[0-7]\.4s}}
@@ -71,7 +71,7 @@ define <8 x float> @f8recp0(<8 x float> %x) #0 {
ret <8 x float> %div
; CHECK-LABEL: f8recp0:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: fmov
; CHECK-NEXT: fdiv
; CHECK-NEXT: fdiv
@@ -82,7 +82,7 @@ define <8 x float> @f8recp1(<8 x float> %x) #1 {
ret <8 x float> %div
; CHECK-LABEL: f8recp1:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frecpe [[R:v[0-7]\.4s]]
; CHECK: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, [[R]]
; CHECK: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, {{v[0-7]\.4s}}
@@ -96,7 +96,7 @@ define double @drecp0(double %x) #0 {
ret double %div
; CHECK-LABEL: drecp0:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: fmov
; CHECK-NEXT: fdiv
}
@@ -106,7 +106,7 @@ define double @drecp1(double %x) #1 {
ret double %div
; CHECK-LABEL: drecp1:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frecpe [[R:d[0-7]]]
; CHECK-NEXT: frecps {{d[0-7](, d[0-7])?}}, [[R]]
; CHECK: frecps {{d[0-7]}}, {{d[0-7]}}, {{d[0-7]}}
@@ -119,7 +119,7 @@ define <2 x double> @d2recp0(<2 x double> %x) #0 {
ret <2 x double> %div
; CHECK-LABEL: d2recp0:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: fmov
; CHECK-NEXT: fdiv
}
@@ -129,7 +129,7 @@ define <2 x double> @d2recp1(<2 x double> %x) #1 {
ret <2 x double> %div
; CHECK-LABEL: d2recp1:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frecpe [[R:v[0-7]\.2d]]
; CHECK-NEXT: frecps {{v[0-7]\.2d(, v[0-7].2d)?}}, [[R]]
; CHECK: frecps {{v[0-7]\.2d}}, {{v[0-7]\.2d}}, {{v[0-7]\.2d}}
@@ -142,7 +142,7 @@ define <4 x double> @d4recp0(<4 x double> %x) #0 {
ret <4 x double> %div
; CHECK-LABEL: d4recp0:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: fmov
; CHECK-NEXT: fdiv
; CHECK-NEXT: fdiv
@@ -153,7 +153,7 @@ define <4 x double> @d4recp1(<4 x double> %x) #1 {
ret <4 x double> %div
; CHECK-LABEL: d4recp1:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frecpe [[R:v[0-7]\.2d]]
; CHECK: frecps {{v[0-7]\.2d(, v[0-7].2d)?}}, [[R]]
; CHECK: frecps {{v[0-7]\.2d}}, {{v[0-7]\.2d}}, {{v[0-7]\.2d}}
diff --git a/test/CodeGen/AArch64/selectcc-to-shiftand.ll b/test/CodeGen/AArch64/selectcc-to-shiftand.ll
index 0d89cdedfa8..99190633547 100644
--- a/test/CodeGen/AArch64/selectcc-to-shiftand.ll
+++ b/test/CodeGen/AArch64/selectcc-to-shiftand.ll
@@ -4,7 +4,7 @@
define i32 @neg_sel_constants(i32 %a) {
; CHECK-LABEL: neg_sel_constants:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #5
; CHECK-NEXT: and w0, w8, w0, asr #31
; CHECK-NEXT: ret
@@ -18,7 +18,7 @@ define i32 @neg_sel_constants(i32 %a) {
define i32 @neg_sel_special_constant(i32 %a) {
; CHECK-LABEL: neg_sel_special_constant:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: lsr w8, w0, #22
; CHECK-NEXT: and w0, w8, #0x200
; CHECK-NEXT: ret
@@ -32,7 +32,7 @@ define i32 @neg_sel_special_constant(i32 %a) {
define i32 @neg_sel_variable_and_zero(i32 %a, i32 %b) {
; CHECK-LABEL: neg_sel_variable_and_zero:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: and w0, w1, w0, asr #31
; CHECK-NEXT: ret
;
@@ -45,7 +45,7 @@ define i32 @neg_sel_variable_and_zero(i32 %a, i32 %b) {
define i32 @not_pos_sel_same_variable(i32 %a) {
; CHECK-LABEL: not_pos_sel_same_variable:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: and w0, w0, w0, asr #31
; CHECK-NEXT: ret
;
@@ -60,7 +60,7 @@ define i32 @not_pos_sel_same_variable(i32 %a) {
define i32 @pos_sel_constants(i32 %a) {
; CHECK-LABEL: pos_sel_constants:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #5
; CHECK-NEXT: bic w0, w8, w0, asr #31
; CHECK-NEXT: ret
@@ -74,7 +74,7 @@ define i32 @pos_sel_constants(i32 %a) {
define i32 @pos_sel_special_constant(i32 %a) {
; CHECK-LABEL: pos_sel_special_constant:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, wzr, #0x200
; CHECK-NEXT: bic w0, w8, w0, lsr #22
; CHECK-NEXT: ret
@@ -88,7 +88,7 @@ define i32 @pos_sel_special_constant(i32 %a) {
define i32 @pos_sel_variable_and_zero(i32 %a, i32 %b) {
; CHECK-LABEL: pos_sel_variable_and_zero:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: bic w0, w1, w0, asr #31
; CHECK-NEXT: ret
;
@@ -101,7 +101,7 @@ define i32 @pos_sel_variable_and_zero(i32 %a, i32 %b) {
define i32 @not_neg_sel_same_variable(i32 %a) {
; CHECK-LABEL: not_neg_sel_same_variable:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: bic w0, w0, w0, asr #31
; CHECK-NEXT: ret
;
@@ -115,7 +115,7 @@ define i32 @not_neg_sel_same_variable(i32 %a) {
; ret = (x-y) > 0 ? x-y : 0
define i32 @PR31175(i32 %x, i32 %y) {
; CHECK-LABEL: PR31175:
-; CHECK: // BB#0:
+; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w0, w1
; CHECK-NEXT: bic w0, w8, w8, asr #31
; CHECK-NEXT: ret
diff --git a/test/CodeGen/AArch64/sibling-call.ll b/test/CodeGen/AArch64/sibling-call.ll
index 9a44b43d14e..be59f27fa85 100644
--- a/test/CodeGen/AArch64/sibling-call.ll
+++ b/test/CodeGen/AArch64/sibling-call.ll
@@ -6,7 +6,7 @@ declare void @callee_stack16([8 x i32], i64, i64)
define void @caller_to0_from0() nounwind {
; CHECK-LABEL: caller_to0_from0:
-; CHECK-NEXT: // BB
+; CHECK-NEXT: // %bb.
tail call void @callee_stack0()
ret void
; CHECK-NEXT: b callee_stack0
@@ -14,7 +14,7 @@ define void @caller_to0_from0() nounwind {
define void @caller_to0_from8([8 x i32], i64) nounwind{
; CHECK-LABEL: caller_to0_from8:
-; CHECK-NEXT: // BB
+; CHECK-NEXT: // %bb.
tail call void @callee_stack0()
ret void
diff --git a/test/CodeGen/AArch64/sqrt-fastmath.ll b/test/CodeGen/AArch64/sqrt-fastmath.ll
index 4dd0516faf0..ade9e3d8df3 100644
--- a/test/CodeGen/AArch64/sqrt-fastmath.ll
+++ b/test/CodeGen/AArch64/sqrt-fastmath.ll
@@ -14,11 +14,11 @@ define float @fsqrt(float %a) #0 {
ret float %1
; FAULT-LABEL: fsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: fsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:s[0-7]]]
; CHECK-NEXT: fmul [[RB:s[0-7]]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{s[0-7](, s[0-7])?}}, [[RB]]
@@ -32,11 +32,11 @@ define <2 x float> @f2sqrt(<2 x float> %a) #0 {
ret <2 x float> %1
; FAULT-LABEL: f2sqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: f2sqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2s]]
; CHECK-NEXT: fmul [[RB:v[0-7]\.2s]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{v[0-7]\.2s(, v[0-7]\.2s)?}}, [[RB]]
@@ -50,11 +50,11 @@ define <4 x float> @f4sqrt(<4 x float> %a) #0 {
ret <4 x float> %1
; FAULT-LABEL: f4sqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: f4sqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]]
; CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]]
@@ -68,12 +68,12 @@ define <8 x float> @f8sqrt(<8 x float> %a) #0 {
ret <8 x float> %1
; FAULT-LABEL: f8sqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; FAULT-NEXT: fsqrt
; CHECK-LABEL: f8sqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]]
; CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]]
@@ -92,11 +92,11 @@ define double @dsqrt(double %a) #0 {
ret double %1
; FAULT-LABEL: dsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: dsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:d[0-7]]]
; CHECK-NEXT: fmul [[RB:d[0-7]]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{d[0-7](, d[0-7])?}}, [[RB]]
@@ -111,11 +111,11 @@ define <2 x double> @d2sqrt(<2 x double> %a) #0 {
ret <2 x double> %1
; FAULT-LABEL: d2sqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: d2sqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]]
; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]]
@@ -130,12 +130,12 @@ define <4 x double> @d4sqrt(<4 x double> %a) #0 {
ret <4 x double> %1
; FAULT-LABEL: d4sqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; FAULT-NEXT: fsqrt
; CHECK-LABEL: d4sqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]]
; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]]
@@ -158,11 +158,11 @@ define float @frsqrt(float %a) #0 {
ret float %2
; FAULT-LABEL: frsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: frsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:s[0-7]]]
; CHECK-NEXT: fmul [[RB:s[0-7]]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{s[0-7](, s[0-7])?}}, [[RB]]
@@ -177,11 +177,11 @@ define <2 x float> @f2rsqrt(<2 x float> %a) #0 {
ret <2 x float> %2
; FAULT-LABEL: f2rsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: f2rsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2s]]
; CHECK-NEXT: fmul [[RB:v[0-7]\.2s]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{v[0-7]\.2s(, v[0-7]\.2s)?}}, [[RB]]
@@ -196,11 +196,11 @@ define <4 x float> @f4rsqrt(<4 x float> %a) #0 {
ret <4 x float> %2
; FAULT-LABEL: f4rsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: f4rsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]]
; CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]]
@@ -215,12 +215,12 @@ define <8 x float> @f8rsqrt(<8 x float> %a) #0 {
ret <8 x float> %2
; FAULT-LABEL: f8rsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; FAULT-NEXT: fsqrt
; CHECK-LABEL: f8rsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]]
; CHECK: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]]
; CHECK: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]]
@@ -237,11 +237,11 @@ define double @drsqrt(double %a) #0 {
ret double %2
; FAULT-LABEL: drsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: drsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:d[0-7]]]
; CHECK-NEXT: fmul [[RB:d[0-7]]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{d[0-7](, d[0-7])?}}, [[RB]]
@@ -257,11 +257,11 @@ define <2 x double> @d2rsqrt(<2 x double> %a) #0 {
ret <2 x double> %2
; FAULT-LABEL: d2rsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; CHECK-LABEL: d2rsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]]
; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]]
; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]]
@@ -277,12 +277,12 @@ define <4 x double> @d4rsqrt(<4 x double> %a) #0 {
ret <4 x double> %2
; FAULT-LABEL: d4rsqrt:
-; FAULT-NEXT: BB#0
+; FAULT-NEXT: %bb.0
; FAULT-NEXT: fsqrt
; FAULT-NEXT: fsqrt
; CHECK-LABEL: d4rsqrt:
-; CHECK-NEXT: BB#0
+; CHECK-NEXT: %bb.0
; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]]
; CHECK: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]]
; CHECK: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]]
diff --git a/test/CodeGen/AArch64/tail-call.ll b/test/CodeGen/AArch64/tail-call.ll
index fa5d8b943b6..ab63413bd3f 100644
--- a/test/CodeGen/AArch64/tail-call.ll
+++ b/test/CodeGen/AArch64/tail-call.ll
@@ -7,7 +7,7 @@ declare extern_weak fastcc void @callee_weak()
define fastcc void @caller_to0_from0() nounwind {
; CHECK-LABEL: caller_to0_from0:
-; CHECK-NEXT: // BB
+; CHECK-NEXT: // %bb.
tail call fastcc void @callee_stack0()
ret void
diff --git a/test/CodeGen/AMDGPU/branch-relaxation.ll b/test/CodeGen/AMDGPU/branch-relaxation.ll
index 9edf439b586..023baf1407e 100644
--- a/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -24,7 +24,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; GCN-NEXT: s_cbranch_scc1 [[BB3:BB[0-9]+_[0-9]+]]
-; GCN-NEXT: ; BB#1: ; %bb2
+; GCN-NEXT: ; %bb.1: ; %bb2
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
@@ -275,7 +275,7 @@ bb4:
}
; GCN-LABEL: {{^}}uniform_unconditional_min_long_backward_branch:
-; GCN-NEXT: ; BB#0: ; %entry
+; GCN-NEXT: ; %bb.0: ; %entry
; GCN-NEXT: [[LOOP:BB[0-9]_[0-9]+]]: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -311,7 +311,7 @@ loop:
; branch from %bb0 to %bb2
; GCN-LABEL: {{^}}expand_requires_expand:
-; GCN-NEXT: ; BB#0: ; %bb0
+; GCN-NEXT: ; %bb.0: ; %bb0
; GCN: s_load_dword
; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}}
; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]]
@@ -398,7 +398,7 @@ bb3:
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc1 [[ENDIF]]
-; GCN-NEXT: ; BB#2: ; %if_uniform
+; GCN-NEXT: ; %bb.2: ; %if_uniform
; GCN: buffer_store_dword
; GCN-NEXT: [[ENDIF]]: ; %endif
diff --git a/test/CodeGen/AMDGPU/callee-frame-setup.ll b/test/CodeGen/AMDGPU/callee-frame-setup.ll
index 9e01267150e..88d165144f9 100644
--- a/test/CodeGen/AMDGPU/callee-frame-setup.ll
+++ b/test/CodeGen/AMDGPU/callee-frame-setup.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX9 %s
; GCN-LABEL: {{^}}callee_no_stack:
-; GCN: ; BB#0:
+; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt
; GCN-NEXT: s_setpc_b64
define void @callee_no_stack() #0 {
@@ -10,7 +10,7 @@ define void @callee_no_stack() #0 {
}
; GCN-LABEL: {{^}}callee_no_stack_no_fp_elim:
-; GCN: ; BB#0:
+; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt
; GCN-NEXT: s_setpc_b64
define void @callee_no_stack_no_fp_elim() #1 {
@@ -20,7 +20,7 @@ define void @callee_no_stack_no_fp_elim() #1 {
; Requires frame pointer for access to local regular object.
; GCN-LABEL: {{^}}callee_with_stack:
-; GCN: ; BB#0:
+; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt
; GCN-NEXT: s_mov_b32 s5, s32
; GCN-NEXT: v_mov_b32_e32 v0, 0{{$}}
@@ -34,7 +34,7 @@ define void @callee_with_stack() #0 {
}
; GCN-LABEL: {{^}}callee_with_stack_and_call:
-; GCN: ; BB#0:
+; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt
; GCN: s_mov_b32 s5, s32
; GCN: buffer_store_dword v32, off, s[0:3], s5 offset:8
diff --git a/test/CodeGen/AMDGPU/cf-loop-on-constant.ll b/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
index 697f26b83a4..1e0af2611b0 100644
--- a/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
+++ b/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
@@ -102,7 +102,7 @@ for.body:
; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 4
; GCN: s_cbranch_vccnz [[LOOPBB]]
-; GCN-NEXT: ; BB#2
+; GCN-NEXT: ; %bb.2
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n, i1 %cond) nounwind {
entry:
diff --git a/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll b/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
index 6f9c043f914..071bcbcf81b 100644
--- a/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
+++ b/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
@@ -13,7 +13,7 @@
; VGPR: workitem_private_segment_byte_size = 12{{$}}
-; GCN: {{^}}; BB#0:
+; GCN: {{^}}; %bb.0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]
@@ -91,7 +91,7 @@ endif:
; GCN-LABEL: {{^}}divergent_loop:
; VGPR: workitem_private_segment_byte_size = 12{{$}}
-; GCN: {{^}}; BB#0:
+; GCN: {{^}}; %bb.0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]
@@ -167,7 +167,7 @@ end:
}
; GCN-LABEL: {{^}}divergent_if_else_endif:
-; GCN: {{^}}; BB#0:
+; GCN: {{^}}; %bb.0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/convergent-inlineasm.ll b/test/CodeGen/AMDGPU/convergent-inlineasm.ll
index 0074a41e44c..80907bf1c1b 100644
--- a/test/CodeGen/AMDGPU/convergent-inlineasm.ll
+++ b/test/CodeGen/AMDGPU/convergent-inlineasm.ll
@@ -2,7 +2,7 @@
declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN-LABEL: {{^}}convergent_inlineasm:
-; GCN: BB#0:
+; GCN: %bb.0:
; GCN: v_cmp_ne_u32_e64
; GCN: ; mask branch
; GCN: BB{{[0-9]+_[0-9]+}}:
diff --git a/test/CodeGen/AMDGPU/early-if-convert.ll b/test/CodeGen/AMDGPU/early-if-convert.ll
index 792f0b1eaef..d129ca5c140 100644
--- a/test/CodeGen/AMDGPU/early-if-convert.ll
+++ b/test/CodeGen/AMDGPU/early-if-convert.ll
@@ -382,7 +382,7 @@ done:
}
; GCN-LABEL: {{^}}ifcvt_undef_scc:
-; GCN: {{^}}; BB#0:
+; GCN: {{^}}; %bb.0:
; GCN-NEXT: s_load_dwordx2
; GCN-NEXT: s_cselect_b32 s{{[0-9]+}}, 1, 0
define amdgpu_kernel void @ifcvt_undef_scc(i32 %cond, i32 addrspace(1)* %out) {
diff --git a/test/CodeGen/AMDGPU/else.ll b/test/CodeGen/AMDGPU/else.ll
index 22338e4f50e..c73ea936e8b 100644
--- a/test/CodeGen/AMDGPU/else.ll
+++ b/test/CodeGen/AMDGPU/else.ll
@@ -25,7 +25,7 @@ end:
}
; CHECK-LABEL: {{^}}else_execfix_leave_wqm:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: s_mov_b64 [[INIT_EXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: ; %Flow
; CHECK-NEXT: s_or_saveexec_b64 [[DST:s\[[0-9]+:[0-9]+\]]],
diff --git a/test/CodeGen/AMDGPU/fence-amdgiz.ll b/test/CodeGen/AMDGPU/fence-amdgiz.ll
index 3055f325f3f..0dd2a9241b2 100644
--- a/test/CodeGen/AMDGPU/fence-amdgiz.ll
+++ b/test/CodeGen/AMDGPU/fence-amdgiz.ll
@@ -3,7 +3,7 @@
target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5"
; CHECK-LABEL: atomic_fence
-; CHECK: BB#0:
+; CHECK: %bb.0:
; CHECK-NOT: ATOMIC_FENCE
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_wbinvl1_vol
diff --git a/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll b/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
index f6bf0b09486..37d05c7ac41 100644
--- a/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
+++ b/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
@@ -3,7 +3,7 @@
; SILowerI1Copies was not handling IMPLICIT_DEF
; SI-LABEL: {{^}}br_implicit_def:
-; SI: BB#0:
+; SI: %bb.0:
; SI-NEXT: s_cbranch_scc1
define amdgpu_kernel void @br_implicit_def(i32 addrspace(1)* %out, i32 %arg) #0 {
bb:
diff --git a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
index 67642282f75..61aa39fcc25 100644
--- a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
+++ b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
@@ -26,7 +26,7 @@
...
---
# CHECK-LABEL: name: invert_br_undef_vcc
-# CHECK: S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc
+# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
name: invert_br_undef_vcc
alignment: 0
@@ -58,7 +58,7 @@ body: |
%sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%sgpr7 = S_MOV_B32 61440
%sgpr6 = S_MOV_B32 -1
- S_CBRANCH_VCCNZ %bb.2.if, implicit undef %vcc
+ S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
bb.1.else:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
@@ -66,7 +66,7 @@ body: |
%vgpr0 = V_MOV_B32_e32 100, implicit %exec
BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
%vgpr0 = V_MOV_B32_e32 1, implicit %exec
- S_BRANCH %bb.3.done
+ S_BRANCH %bb.3
bb.2.if:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
index 4f8c6191224..49ca7d40572 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
@@ -127,7 +127,7 @@ entry:
}
;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
;CHECK: s_waitcnt
@@ -151,7 +151,7 @@ main_body:
}
;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged_glc_slc:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4{{$}}
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:12 glc{{$}}
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28 glc slc{{$}}
@@ -176,7 +176,7 @@ main_body:
}
;CHECK-LABEL: {{^}}buffer_load_x2_offen_merged:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
;CHECK: s_waitcnt
define amdgpu_ps void @buffer_load_x2_offen_merged(<4 x i32> inreg %rsrc, i32 %a) {
@@ -194,7 +194,7 @@ main_body:
}
;CHECK-LABEL: {{^}}buffer_load_x1_offset_merged:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:28
;CHECK: s_waitcnt
@@ -212,7 +212,7 @@ main_body:
}
;CHECK-LABEL: {{^}}buffer_load_x2_offset_merged:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4
;CHECK: s_waitcnt
define amdgpu_ps void @buffer_load_x2_offset_merged(<4 x i32> inreg %rsrc) {
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
index 10bea8ea63b..69de9555035 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
@@ -4,7 +4,7 @@
declare void @llvm.amdgcn.buffer.wbinvl1() #0
; GCN-LABEL: {{^}}test_buffer_wbinvl1:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; SI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xc4,0xe1,0x00,0x00,0x00,0x00]
; VI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xf8,0xe0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
index fe60d16d90f..d1c8f37b3d8 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
@@ -3,7 +3,7 @@
declare void @llvm.amdgcn.buffer.wbinvl1.sc() #0
; SI-LABEL: {{^}}test_buffer_wbinvl1_sc:
-; SI-NEXT: ; BB#0:
+; SI-NEXT: ; %bb.0:
; SI-NEXT: buffer_wbinvl1_sc ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00]
; SI-NEXT: s_endpgm
define amdgpu_kernel void @test_buffer_wbinvl1_sc() #0 {
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
index 061c1469ed4..4dc938c9b0a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
@@ -4,7 +4,7 @@
declare void @llvm.amdgcn.buffer.wbinvl1.vol() #0
; GCN-LABEL: {{^}}test_buffer_wbinvl1_vol:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; CI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00]
; VI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xfc,0xe0,0x00,0x00,0x00,0x00]
; GCN: s_endpgm
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
index 7b1cfa18721..16d0c237007 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
@@ -25,7 +25,7 @@ define amdgpu_kernel void @s_cvt_pkrtz_samereg_v2f16_f32(<2 x half> addrspace(1)
; FIXME: Folds to 0 on gfx9
; GCN-LABEL: {{^}}s_cvt_pkrtz_undef_undef:
-; GCN-NEXT: ; BB#0
+; GCN-NEXT: ; %bb.0
; SI-NEXT: s_endpgm
; VI-NEXT: s_endpgm
; GFX9: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
index a1ecb7f750c..d6b0628956a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
@@ -31,8 +31,8 @@ define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) {
}
; SI-LABEL: {{^}}true:
-; SI-NEXT: BB#
-; SI-NEXT: BB#
+; SI-NEXT: %bb.
+; SI-NEXT: %bb.
; SI-NEXT: s_endpgm
define amdgpu_gs void @true() {
call void @llvm.amdgcn.kill(i1 true)
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
index 224b2ed72e3..b7fb96a2d1a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
@@ -5,7 +5,7 @@ declare void @llvm.amdgcn.s.dcache.inv() #0
declare void @llvm.amdgcn.s.waitcnt(i32) #0
; GCN-LABEL: {{^}}test_s_dcache_inv:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; SI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0xc0,0xc7]
; VI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0x80,0xc0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
@@ -15,7 +15,7 @@ define amdgpu_kernel void @test_s_dcache_inv() #0 {
}
; GCN-LABEL: {{^}}test_s_dcache_inv_insert_wait:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; GCN: s_dcache_inv
; GCN: s_waitcnt lgkmcnt(0) ; encoding
define amdgpu_kernel void @test_s_dcache_inv_insert_wait() #0 {
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
index f96d5db5794..e8a363adde7 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
@@ -5,7 +5,7 @@ declare void @llvm.amdgcn.s.dcache.inv.vol() #0
declare void @llvm.amdgcn.s.waitcnt(i32) #0
; GCN-LABEL: {{^}}test_s_dcache_inv_vol:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; CI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x40,0xc7]
; VI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x88,0xc0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
@@ -15,7 +15,7 @@ define amdgpu_kernel void @test_s_dcache_inv_vol() #0 {
}
; GCN-LABEL: {{^}}test_s_dcache_inv_vol_insert_wait:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; GCN-NEXT: s_dcache_inv_vol
; GCN: s_waitcnt lgkmcnt(0) ; encoding
define amdgpu_kernel void @test_s_dcache_inv_vol_insert_wait() #0 {
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
index 99b65135043..254a0fae3c3 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
@@ -4,7 +4,7 @@ declare void @llvm.amdgcn.s.dcache.wb() #0
declare void @llvm.amdgcn.s.waitcnt(i32) #0
; VI-LABEL: {{^}}test_s_dcache_wb:
-; VI-NEXT: ; BB#0:
+; VI-NEXT: ; %bb.0:
; VI-NEXT: s_dcache_wb ; encoding: [0x00,0x00,0x84,0xc0,0x00,0x00,0x00,0x00]
; VI-NEXT: s_endpgm
define amdgpu_kernel void @test_s_dcache_wb() #0 {
@@ -13,7 +13,7 @@ define amdgpu_kernel void @test_s_dcache_wb() #0 {
}
; VI-LABEL: {{^}}test_s_dcache_wb_insert_wait:
-; VI-NEXT: ; BB#0:
+; VI-NEXT: ; %bb.0:
; VI-NEXT: s_dcache_wb
; VI: s_waitcnt lgkmcnt(0) ; encoding
define amdgpu_kernel void @test_s_dcache_wb_insert_wait() #0 {
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
index 844fcecdb48..929cd1c5f0b 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
@@ -4,7 +4,7 @@ declare void @llvm.amdgcn.s.dcache.wb.vol() #0
declare void @llvm.amdgcn.s.waitcnt(i32) #0
; VI-LABEL: {{^}}test_s_dcache_wb_vol:
-; VI-NEXT: ; BB#0:
+; VI-NEXT: ; %bb.0:
; VI-NEXT: s_dcache_wb_vol ; encoding: [0x00,0x00,0x8c,0xc0,0x00,0x00,0x00,0x00]
; VI-NEXT: s_endpgm
define amdgpu_kernel void @test_s_dcache_wb_vol() #0 {
@@ -13,7 +13,7 @@ define amdgpu_kernel void @test_s_dcache_wb_vol() #0 {
}
; VI-LABEL: {{^}}test_s_dcache_wb_vol_insert_wait:
-; VI-NEXT: ; BB#0:
+; VI-NEXT: ; %bb.0:
; VI-NEXT: s_dcache_wb_vol
; VI: s_waitcnt lgkmcnt(0) ; encoding
define amdgpu_kernel void @test_s_dcache_wb_vol_insert_wait() #0 {
diff --git a/test/CodeGen/AMDGPU/loop_break.ll b/test/CodeGen/AMDGPU/loop_break.ll
index 4acd1b24795..b2641cd4d2e 100644
--- a/test/CodeGen/AMDGPU/loop_break.ll
+++ b/test/CodeGen/AMDGPU/loop_break.ll
@@ -31,7 +31,7 @@
; GCN: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz [[FLOW:BB[0-9]+_[0-9]+]]
-; GCN: ; BB#2: ; %bb4
+; GCN: ; %bb.2: ; %bb4
; GCN: buffer_load_dword
; GCN: v_cmp_ge_i32_e32 vcc,
; GCN: s_or_b64 [[MASK]], vcc, [[INITMASK]]
@@ -41,7 +41,7 @@
; GCN: s_andn2_b64 exec, exec, [[MASK]]
; GCN-NEXT: s_cbranch_execnz [[LOOP_ENTRY]]
-; GCN: ; BB#4: ; %bb9
+; GCN: ; %bb.4: ; %bb9
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @break_loop(i32 %arg) #0 {
bb:
diff --git a/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll b/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll
index a563cfd0283..c8e920a1854 100644
--- a/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll
+++ b/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll
@@ -3,7 +3,7 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=GCN -check-prefix=GFX8 %s
; FUNC-LABEL: {{^}}system_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GFX6: s_waitcnt vmcnt(0){{$}}
; GFX6-NEXT: buffer_wbinvl1{{$}}
@@ -17,7 +17,7 @@ entry:
}
; FUNC-LABEL: {{^}}system_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GCN: s_endpgm
@@ -28,7 +28,7 @@ entry:
}
; FUNC-LABEL: {{^}}system_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GFX6: buffer_wbinvl1{{$}}
@@ -41,7 +41,7 @@ entry:
}
; FUNC-LABEL: {{^}}system_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GFX6: buffer_wbinvl1{{$}}
@@ -54,7 +54,7 @@ entry:
}
; FUNC-LABEL: {{^}}singlethread_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @singlethread_acquire() {
@@ -64,7 +64,7 @@ entry:
}
; FUNC-LABEL: {{^}}singlethread_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @singlethread_release() {
@@ -74,7 +74,7 @@ entry:
}
; FUNC-LABEL: {{^}}singlethread_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @singlethread_acq_rel() {
@@ -84,7 +84,7 @@ entry:
}
; FUNC-LABEL: {{^}}singlethread_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @singlethread_seq_cst() {
@@ -94,7 +94,7 @@ entry:
}
; FUNC-LABEL: {{^}}agent_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GFX6: s_waitcnt vmcnt(0){{$}}
; GFX6-NEXT: buffer_wbinvl1{{$}}
@@ -108,7 +108,7 @@ entry:
}
; FUNC-LABEL: {{^}}agent_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GCN: s_endpgm
@@ -119,7 +119,7 @@ entry:
}
; FUNC-LABEL: {{^}}agent_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GFX6: buffer_wbinvl1{{$}}
@@ -132,7 +132,7 @@ entry:
}
; FUNC-LABEL: {{^}}agent_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GFX6: buffer_wbinvl1{{$}}
@@ -145,7 +145,7 @@ entry:
}
; FUNC-LABEL: {{^}}workgroup_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @workgroup_acquire() {
@@ -155,7 +155,7 @@ entry:
}
; FUNC-LABEL: {{^}}workgroup_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @workgroup_release() {
@@ -165,7 +165,7 @@ entry:
}
; FUNC-LABEL: {{^}}workgroup_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @workgroup_acq_rel() {
@@ -175,7 +175,7 @@ entry:
}
; FUNC-LABEL: {{^}}workgroup_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @workgroup_seq_cst() {
@@ -185,7 +185,7 @@ entry:
}
; FUNC-LABEL: {{^}}wavefront_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @wavefront_acquire() {
@@ -195,7 +195,7 @@ entry:
}
; FUNC-LABEL: {{^}}wavefront_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @wavefront_release() {
@@ -205,7 +205,7 @@ entry:
}
; FUNC-LABEL: {{^}}wavefront_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @wavefront_acq_rel() {
@@ -215,7 +215,7 @@ entry:
}
; FUNC-LABEL: {{^}}wavefront_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @wavefront_seq_cst() {
diff --git a/test/CodeGen/AMDGPU/multilevel-break.ll b/test/CodeGen/AMDGPU/multilevel-break.ll
index 15de689b953..8cc02d49709 100644
--- a/test/CodeGen/AMDGPU/multilevel-break.ll
+++ b/test/CodeGen/AMDGPU/multilevel-break.ll
@@ -34,7 +34,7 @@
; GCN-NEXT: s_andn2_b64 exec, exec, [[OR_BREAK]]
; GCN-NEXT: s_cbranch_execnz [[INNER_LOOP]]
-; GCN: ; BB#{{[0-9]+}}: ; %Flow1{{$}}
+; GCN: ; %bb.{{[0-9]+}}: ; %Flow1{{$}}
; GCN-NEXT: ; in Loop: Header=[[OUTER_LOOP]] Depth=1
; Ensure copy is eliminated
diff --git a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index b5dc9d9dac8..24e8ed8e29c 100644
--- a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -184,8 +184,8 @@ body: |
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -241,8 +241,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -298,8 +298,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -359,8 +359,8 @@ body: |
BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -384,7 +384,7 @@ body: |
# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_and_saveexec_xor_wrong_reg
alignment: 0
exposesReturnsTwice: false
@@ -420,8 +420,8 @@ body: |
%sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term %sgpr0_sgpr1
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7
@@ -443,7 +443,7 @@ body: |
# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_and_saveexec_xor_modify_copy_to_exec
alignment: 0
@@ -479,8 +479,8 @@ body: |
%sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -540,8 +540,8 @@ body: |
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1, %sgpr2_sgpr3
@@ -565,7 +565,7 @@ body: |
# CHECK: %sgpr0_sgpr1 = COPY %exec
# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_unknown_saveexec
alignment: 0
@@ -599,8 +599,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -656,8 +656,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -680,7 +680,7 @@ body: |
# CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}
# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_andn2_saveexec_no_commute
alignment: 0
exposesReturnsTwice: false
@@ -713,8 +713,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
diff --git a/test/CodeGen/AMDGPU/ret_jump.ll b/test/CodeGen/AMDGPU/ret_jump.ll
index 7c2e28108df..f87e8cbea4f 100644
--- a/test/CodeGen/AMDGPU/ret_jump.ll
+++ b/test/CodeGen/AMDGPU/ret_jump.ll
@@ -57,7 +57,7 @@ ret.bb: ; preds = %else, %main_body
; GCN-LABEL: {{^}}uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable:
; GCN: s_cbranch_vccnz [[RET_BB:BB[0-9]+_[0-9]+]]
-; GCN: ; BB#{{[0-9]+}}: ; %else
+; GCN: ; %bb.{{[0-9]+}}: ; %else
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[FLOW1:BB[0-9]+_[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index 8e18ab5554e..575938b5a5c 100644
--- a/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -37,7 +37,7 @@ endif:
; SI: s_cmp_lg_u32
; SI: s_cbranch_scc0 [[IF:BB[0-9]+_[0-9]+]]
-; SI: ; BB#1: ; %else
+; SI: ; %bb.1: ; %else
; SI: s_load_dword [[LOAD0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xe
; SI: s_load_dword [[LOAD1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xf
; SI-NOT: add
diff --git a/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll b/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
index 7423a4a2753..ce85a666340 100644
--- a/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
+++ b/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
@@ -55,7 +55,7 @@ unreachable:
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc0 [[UNREACHABLE:BB[0-9]+_[0-9]+]]
-; GCN-NEXT: BB#{{[0-9]+}}: ; %ret
+; GCN-NEXT: %bb.{{[0-9]+}}: ; %ret
; GCN-NEXT: s_endpgm
; GCN: [[UNREACHABLE]]:
diff --git a/test/CodeGen/AMDGPU/skip-if-dead.ll b/test/CodeGen/AMDGPU/skip-if-dead.ll
index ed7e06ee4e2..9ae36b0a06c 100644
--- a/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}test_kill_depth_0_imm_pos:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 {
call void @llvm.AMDGPU.kill(float 0.0)
@@ -9,9 +9,9 @@ define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 {
}
; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 {
call void @llvm.AMDGPU.kill(float -0.0)
@@ -20,11 +20,11 @@ define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 {
; FIXME: Ideally only one would be emitted
; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg_x2:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 {
call void @llvm.AMDGPU.kill(float -0.0)
@@ -33,9 +33,9 @@ define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 {
}
; CHECK-LABEL: {{^}}test_kill_depth_var:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_var(float %x) #0 {
call void @llvm.AMDGPU.kill(float %x)
@@ -44,11 +44,11 @@ define amdgpu_ps void @test_kill_depth_var(float %x) #0 {
; FIXME: Ideally only one would be emitted
; CHECK-LABEL: {{^}}test_kill_depth_var_x2_same:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 {
call void @llvm.AMDGPU.kill(float %x)
@@ -57,11 +57,11 @@ define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 {
}
; CHECK-LABEL: {{^}}test_kill_depth_var_x2:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v1
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 {
call void @llvm.AMDGPU.kill(float %x)
@@ -70,12 +70,12 @@ define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 {
}
; CHECK-LABEL: {{^}}test_kill_depth_var_x2_instructions:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK: v_mov_b32_e64 v7, -1
; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 {
call void @llvm.AMDGPU.kill(float %x)
@@ -90,7 +90,7 @@ define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 {
; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0
; CHECK: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK: v_mov_b32_e64 v7, -1
; CHECK: v_nop_e64
; CHECK: v_nop_e64
@@ -105,7 +105,7 @@ define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 {
; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7
; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: exp null off, off, off, off done vm
; CHECK-NEXT: s_endpgm
@@ -141,7 +141,7 @@ exit:
; CHECK-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 0
; CHECK-NEXT: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#1: ; %bb
+; CHECK-NEXT: ; %bb.1: ; %bb
; CHECK: v_mov_b32_e64 v7, -1
; CHECK: v_nop_e64
; CHECK: v_nop_e64
@@ -157,7 +157,7 @@ exit:
; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7
; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: exp null off, off, off, off done vm
; CHECK-NEXT: s_endpgm
@@ -215,7 +215,7 @@ exit:
; CHECK: v_nop_e64
; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7
-; CHECK-NEXT: ; BB#3:
+; CHECK-NEXT: ; %bb.3:
; CHECK: buffer_load_dword [[LOAD:v[0-9]+]]
; CHECK: v_cmp_eq_u32_e32 vcc, 0, [[LOAD]]
; CHECK-NEXT: s_and_b64 vcc, exec, vcc
@@ -309,7 +309,7 @@ end:
; CHECK: [[SKIPKILL]]:
; CHECK: v_cmp_nge_f32_e32 vcc
-; CHECK-NEXT: BB#3: ; %bb5
+; CHECK-NEXT: %bb.3: ; %bb5
; CHECK-NEXT: .Lfunc_end{{[0-9]+}}
define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 {
bb:
@@ -335,7 +335,7 @@ bb7: ; preds = %bb4
}
; CHECK-LABEL: {{^}}if_after_kill_block:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK: s_and_saveexec_b64
; CHECK: s_xor_b64
; CHECK-NEXT: mask branch [[BB4:BB[0-9]+_[0-9]+]]
diff --git a/test/CodeGen/AMDGPU/smrd.ll b/test/CodeGen/AMDGPU/smrd.ll
index 6f4592cabee..9fd20fd67b8 100644
--- a/test/CodeGen/AMDGPU/smrd.ll
+++ b/test/CodeGen/AMDGPU/smrd.ll
@@ -193,7 +193,7 @@ main_body:
}
; GCN-LABEL: {{^}}smrd_vgpr_offset_imm:
-; GCN-NEXT: BB#
+; GCN-NEXT: %bb.
; SICIVI-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen offset:4095 ;
@@ -207,7 +207,7 @@ main_body:
}
; GCN-LABEL: {{^}}smrd_vgpr_offset_imm_too_large:
-; GCN-NEXT: BB#
+; GCN-NEXT: %bb.
; GCN-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}0x1000, v0
; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ;
define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 {
@@ -218,7 +218,7 @@ main_body:
}
; GCN-LABEL: {{^}}smrd_imm_merged:
-; GCN-NEXT: BB#
+; GCN-NEXT: %bb.
; SICI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1
; SICI-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x7
; VI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x4
@@ -243,7 +243,7 @@ main_body:
}
; GCN-LABEL: {{^}}smrd_vgpr_merged:
-; GCN-NEXT: BB#
+; GCN-NEXT: %bb.
; SICIVI-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
; SICIVI-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
diff --git a/test/CodeGen/AMDGPU/uniform-cfg.ll b/test/CodeGen/AMDGPU/uniform-cfg.ll
index 247b9691aff..a247d7a343f 100644
--- a/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -401,7 +401,7 @@ exit:
; GCN: s_cmp_lt_i32 [[COND]], 1
; GCN: s_cbranch_scc1 BB[[FNNUM:[0-9]+]]_3
-; GCN: BB#1:
+; GCN: %bb.1:
; GCN-NOT: cmp
; GCN: buffer_load_dword
; GCN: buffer_store_dword
diff --git a/test/CodeGen/AMDGPU/valu-i1.ll b/test/CodeGen/AMDGPU/valu-i1.ll
index 3b0f003f52b..4a3937e44f3 100644
--- a/test/CodeGen/AMDGPU/valu-i1.ll
+++ b/test/CodeGen/AMDGPU/valu-i1.ll
@@ -192,7 +192,7 @@ exit:
; Load loop limit from buffer
; Branch to exit if uniformly not taken
-; SI: ; BB#0:
+; SI: ; %bb.0:
; SI: buffer_load_dword [[VBOUND:v[0-9]+]]
; SI: v_cmp_lt_i32_e32 vcc
; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]], vcc
diff --git a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
index 54991d3d953..ff9826baf48 100644
--- a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
+++ b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
@@ -48,7 +48,7 @@
# CHECK-LABEL: name: vccz_corrupt_workaround
# CHECK: %vcc = V_CMP_EQ_F32
# CHECK-NEXT: %vcc = S_MOV_B64 %vcc
-# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit killed %vcc
+# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
name: vccz_corrupt_workaround
alignment: 0
@@ -82,7 +82,7 @@ body: |
%sgpr7 = S_MOV_B32 61440
%sgpr6 = S_MOV_B32 -1
%vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, implicit %exec
- S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc
+ S_CBRANCH_VCCZ %bb.1, implicit killed %vcc
bb.2.if:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
@@ -90,7 +90,7 @@ body: |
%vgpr0 = V_MOV_B32_e32 9, implicit %exec
BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
%vgpr0 = V_MOV_B32_e32 0, implicit %exec
- S_BRANCH %bb.3.done
+ S_BRANCH %bb.3
bb.1.else:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
@@ -111,7 +111,7 @@ body: |
---
# CHECK-LABEL: name: vccz_corrupt_undef_vcc
# CHECK: S_WAITCNT
-# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit undef %vcc
+# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef %vcc
name: vccz_corrupt_undef_vcc
alignment: 0
@@ -143,7 +143,7 @@ body: |
%sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%sgpr7 = S_MOV_B32 61440
%sgpr6 = S_MOV_B32 -1
- S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc
+ S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
bb.2.if:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
@@ -151,7 +151,7 @@ body: |
%vgpr0 = V_MOV_B32_e32 9, implicit %exec
BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
%vgpr0 = V_MOV_B32_e32 0, implicit %exec
- S_BRANCH %bb.3.done
+ S_BRANCH %bb.3
bb.1.else:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
diff --git a/test/CodeGen/ARM/Windows/dbzchk.ll b/test/CodeGen/ARM/Windows/dbzchk.ll
index afe30b28a27..18e6e528057 100644
--- a/test/CodeGen/ARM/Windows/dbzchk.ll
+++ b/test/CodeGen/ARM/Windows/dbzchk.ll
@@ -32,13 +32,13 @@ return:
ret i32 %2
}
-; CHECK-DIV-DAG: BB#0
-; CHECK-DIV-DAG: Successors according to CFG: BB#1({{.*}}) BB#2
-; CHECK-DIV-DAG: BB#1
-; CHECK-DIV-DAG: Successors according to CFG: BB#3
-; CHECK-DIV-DAG: BB#2
-; CHECK-DIV-DAG: Successors according to CFG: BB#3
-; CHECK-DIV-DAG: BB#3
+; CHECK-DIV-DAG: %bb.0
+; CHECK-DIV-DAG: Successors according to CFG: %bb.1({{.*}}) %bb.2
+; CHECK-DIV-DAG: %bb.1
+; CHECK-DIV-DAG: Successors according to CFG: %bb.3
+; CHECK-DIV-DAG: %bb.2
+; CHECK-DIV-DAG: Successors according to CFG: %bb.3
+; CHECK-DIV-DAG: %bb.3
; RUN: llc -mtriple thumbv7--windows-itanium -print-machineinstrs=expand-isel-pseudos -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck %s -check-prefix CHECK-MOD
@@ -66,13 +66,13 @@ return:
ret i32 %retval.0
}
-; CHECK-MOD-DAG: BB#0
-; CHECK-MOD-DAG: Successors according to CFG: BB#2({{.*}}) BB#1
-; CHECK-MOD-DAG: BB#1
-; CHECK-MOD-DAG: Successors according to CFG: BB#3
-; CHECK-MOD-DAG: BB#3
-; CHECK-MOD-DAG: Successors according to CFG: BB#2
-; CHECK-MOD-DAG: BB#2
+; CHECK-MOD-DAG: %bb.0
+; CHECK-MOD-DAG: Successors according to CFG: %bb.2({{.*}}) %bb.1
+; CHECK-MOD-DAG: %bb.1
+; CHECK-MOD-DAG: Successors according to CFG: %bb.3
+; CHECK-MOD-DAG: %bb.3
+; CHECK-MOD-DAG: Successors according to CFG: %bb.2
+; CHECK-MOD-DAG: %bb.2
; RUN: llc -mtriple thumbv7--windows-itanium -print-machineinstrs=expand-isel-pseudos -verify-machineinstrs -filetype asm -o /dev/null %s 2>&1 | FileCheck %s -check-prefix CHECK-CFG
; RUN: llc -mtriple thumbv7--windows-itanium -verify-machineinstrs -filetype asm -o - %s | FileCheck %s -check-prefix CHECK-CFG-ASM
@@ -111,23 +111,23 @@ if.end:
attributes #0 = { optsize }
-; CHECK-CFG-DAG: BB#0
-; CHECK-CFG-DAG: t2Bcc <BB#2>
-; CHECK-CFG-DAG: t2B <BB#1>
+; CHECK-CFG-DAG: %bb.0
+; CHECK-CFG-DAG: t2Bcc %bb.2
+; CHECK-CFG-DAG: t2B %bb.1
-; CHECK-CFG-DAG: BB#1
-; CHECK-CFG-DAG: t2B <BB#3>
+; CHECK-CFG-DAG: %bb.1
+; CHECK-CFG-DAG: t2B %bb.3
-; CHECK-CFG-DAG: BB#2
+; CHECK-CFG-DAG: %bb.2
; CHECK-CFG-DAG: tCMPi8 %{{[0-9]}}, 0
-; CHECK-CFG-DAG: t2Bcc <BB#5>
+; CHECK-CFG-DAG: t2Bcc %bb.5
-; CHECK-CFG-DAG: BB#4
+; CHECK-CFG-DAG: %bb.4
-; CHECK-CFG-DAG: BB#3
+; CHECK-CFG-DAG: %bb.3
; CHECK-CFG-DAG: tBX_RET
-; CHECK-CFG-DAG: BB#5
+; CHECK-CFG-DAG: %bb.5
; CHECK-CFG-DAG: t__brkdiv0
; CHECK-CFG-ASM-LABEL: h:
diff --git a/test/CodeGen/ARM/and-load-combine.ll b/test/CodeGen/ARM/and-load-combine.ll
index f4ea7ebcf62..6f0c1235959 100644
--- a/test/CodeGen/ARM/and-load-combine.ll
+++ b/test/CodeGen/ARM/and-load-combine.ll
@@ -6,7 +6,7 @@
define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a,
; ARM-LABEL: cmp_xor8_short_short:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r0, [r0]
; ARM-NEXT: ldrh r1, [r1]
; ARM-NEXT: eor r1, r1, r0
@@ -16,7 +16,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_xor8_short_short:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldrh r0, [r0]
; ARMEB-NEXT: ldrh r1, [r1]
; ARMEB-NEXT: eor r1, r1, r0
@@ -26,7 +26,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_xor8_short_short:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldrh r0, [r0]
; THUMB1-NEXT: ldrh r2, [r1]
; THUMB1-NEXT: eors r2, r0
@@ -34,13 +34,13 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB0_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB0_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_xor8_short_short:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldrh r0, [r0]
; THUMB2-NEXT: ldrh r1, [r1]
; THUMB2-NEXT: eors r0, r1
@@ -61,7 +61,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a,
; ARM-LABEL: cmp_xor8_short_int:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r0, [r0]
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: eor r1, r1, r0
@@ -71,7 +71,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_xor8_short_int:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldrh r0, [r0]
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: eor r1, r1, r0
@@ -81,7 +81,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_xor8_short_int:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldrh r0, [r0]
; THUMB1-NEXT: ldr r2, [r1]
; THUMB1-NEXT: eors r2, r0
@@ -89,13 +89,13 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB1_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB1_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_xor8_short_int:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldrh r0, [r0]
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: eors r0, r1
@@ -117,7 +117,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a,
; ARM-LABEL: cmp_xor8_int_int:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: eor r1, r1, r0
@@ -127,7 +127,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_xor8_int_int:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: eor r1, r1, r0
@@ -137,7 +137,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_xor8_int_int:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r0, [r0]
; THUMB1-NEXT: ldr r2, [r1]
; THUMB1-NEXT: eors r2, r0
@@ -145,13 +145,13 @@ define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB2_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB2_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_xor8_int_int:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: eors r0, r1
@@ -172,7 +172,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a,
; ARM-LABEL: cmp_xor16:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: movw r2, #65535
; ARM-NEXT: ldr r1, [r1]
@@ -183,7 +183,7 @@ define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_xor16:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: movw r2, #65535
; ARMEB-NEXT: ldr r1, [r1]
@@ -194,7 +194,7 @@ define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_xor16:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r0, [r0]
; THUMB1-NEXT: ldr r2, [r1]
; THUMB1-NEXT: eors r2, r0
@@ -202,13 +202,13 @@ define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #16
; THUMB1-NEXT: beq .LBB3_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB3_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_xor16:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: eors r0, r1
@@ -229,7 +229,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a,
; ARM-LABEL: cmp_or8_short_short:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r0, [r0]
; ARM-NEXT: ldrh r1, [r1]
; ARM-NEXT: orr r1, r1, r0
@@ -239,7 +239,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_or8_short_short:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldrh r0, [r0]
; ARMEB-NEXT: ldrh r1, [r1]
; ARMEB-NEXT: orr r1, r1, r0
@@ -249,7 +249,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_or8_short_short:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldrh r0, [r0]
; THUMB1-NEXT: ldrh r2, [r1]
; THUMB1-NEXT: orrs r2, r0
@@ -257,13 +257,13 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB4_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB4_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_or8_short_short:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldrh r0, [r0]
; THUMB2-NEXT: ldrh r1, [r1]
; THUMB2-NEXT: orrs r0, r1
@@ -284,7 +284,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a,
; ARM-LABEL: cmp_or8_short_int:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r0, [r0]
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: orr r1, r1, r0
@@ -294,7 +294,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_or8_short_int:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldrh r0, [r0]
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: orr r1, r1, r0
@@ -304,7 +304,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_or8_short_int:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldrh r0, [r0]
; THUMB1-NEXT: ldr r2, [r1]
; THUMB1-NEXT: orrs r2, r0
@@ -312,13 +312,13 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB5_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB5_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_or8_short_int:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldrh r0, [r0]
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: orrs r0, r1
@@ -340,7 +340,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a,
; ARM-LABEL: cmp_or8_int_int:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: orr r1, r1, r0
@@ -350,7 +350,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_or8_int_int:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: orr r1, r1, r0
@@ -360,7 +360,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_or8_int_int:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r0, [r0]
; THUMB1-NEXT: ldr r2, [r1]
; THUMB1-NEXT: orrs r2, r0
@@ -368,13 +368,13 @@ define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB6_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB6_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_or8_int_int:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: orrs r0, r1
@@ -395,7 +395,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a,
; ARM-LABEL: cmp_or16:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: movw r2, #65535
; ARM-NEXT: ldr r1, [r1]
@@ -406,7 +406,7 @@ define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_or16:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: movw r2, #65535
; ARMEB-NEXT: ldr r1, [r1]
@@ -417,7 +417,7 @@ define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_or16:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r0, [r0]
; THUMB1-NEXT: ldr r2, [r1]
; THUMB1-NEXT: orrs r2, r0
@@ -425,13 +425,13 @@ define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #16
; THUMB1-NEXT: beq .LBB7_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB7_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_or16:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: orrs r0, r1
@@ -452,7 +452,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a,
; ARM-LABEL: cmp_and8_short_short:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r1, [r1]
; ARM-NEXT: ldrh r0, [r0]
; ARM-NEXT: and r1, r0, r1
@@ -462,7 +462,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_and8_short_short:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldrh r1, [r1]
; ARMEB-NEXT: ldrh r0, [r0]
; ARMEB-NEXT: and r1, r0, r1
@@ -472,7 +472,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_and8_short_short:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldrh r1, [r1]
; THUMB1-NEXT: ldrh r2, [r0]
; THUMB1-NEXT: ands r2, r1
@@ -480,13 +480,13 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB8_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB8_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_and8_short_short:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldrh r1, [r1]
; THUMB2-NEXT: ldrh r0, [r0]
; THUMB2-NEXT: ands r0, r1
@@ -507,7 +507,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a,
; ARM-LABEL: cmp_and8_short_int:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r0, [r0]
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: and r1, r1, r0
@@ -517,7 +517,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_and8_short_int:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldrh r0, [r0]
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: and r1, r1, r0
@@ -527,7 +527,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_and8_short_int:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldrh r0, [r0]
; THUMB1-NEXT: ldr r2, [r1]
; THUMB1-NEXT: ands r2, r0
@@ -535,13 +535,13 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB9_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB9_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_and8_short_int:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldrh r0, [r0]
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: ands r0, r1
@@ -563,7 +563,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a,
; ARM-LABEL: cmp_and8_int_int:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: and r1, r0, r1
@@ -573,7 +573,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_and8_int_int:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: and r1, r0, r1
@@ -583,7 +583,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_and8_int_int:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r1, [r1]
; THUMB1-NEXT: ldr r2, [r0]
; THUMB1-NEXT: ands r2, r1
@@ -591,13 +591,13 @@ define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #24
; THUMB1-NEXT: beq .LBB10_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB10_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_and8_int_int:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: ands r0, r1
@@ -618,7 +618,7 @@ entry:
define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a,
; ARM-LABEL: cmp_and16:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: movw r2, #65535
; ARM-NEXT: ldr r0, [r0]
@@ -629,7 +629,7 @@ define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a,
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: cmp_and16:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: movw r2, #65535
; ARMEB-NEXT: ldr r0, [r0]
@@ -640,7 +640,7 @@ define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a,
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: cmp_and16:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r1, [r1]
; THUMB1-NEXT: ldr r2, [r0]
; THUMB1-NEXT: ands r2, r1
@@ -648,13 +648,13 @@ define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a,
; THUMB1-NEXT: movs r1, #0
; THUMB1-NEXT: lsls r2, r2, #16
; THUMB1-NEXT: beq .LBB11_2
-; THUMB1-NEXT: @ BB#1: @ %entry
+; THUMB1-NEXT: @ %bb.1: @ %entry
; THUMB1-NEXT: mov r0, r1
; THUMB1-NEXT: .LBB11_2: @ %entry
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: cmp_and16:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: ands r0, r1
@@ -675,7 +675,7 @@ entry:
define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) {
; ARM-LABEL: add_and16:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: add r1, r1, r2
; ARM-NEXT: orr r0, r0, r1
@@ -683,7 +683,7 @@ define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) {
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: add_and16:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: add r1, r1, r2
; ARMEB-NEXT: orr r0, r0, r1
@@ -691,7 +691,7 @@ define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) {
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: add_and16:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: adds r1, r1, r2
; THUMB1-NEXT: ldr r0, [r0]
; THUMB1-NEXT: orrs r0, r1
@@ -699,7 +699,7 @@ define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) {
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: add_and16:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: add r1, r2
; THUMB2-NEXT: orrs r0, r1
@@ -715,7 +715,7 @@ entry:
define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-LABEL: test1:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: mul r2, r2, r3
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: ldr r0, [r0]
@@ -725,7 +725,7 @@ define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: test1:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: mul r2, r2, r3
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: ldr r0, [r0]
@@ -735,7 +735,7 @@ define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: test1:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: muls r2, r3, r2
; THUMB1-NEXT: ldr r1, [r1]
; THUMB1-NEXT: ldr r0, [r0]
@@ -745,7 +745,7 @@ define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) {
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: test1:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: muls r2, r3, r2
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: ldr r0, [r0]
@@ -765,7 +765,7 @@ entry:
define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-LABEL: test2:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: mul r1, r2, r1
@@ -775,7 +775,7 @@ define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: test2:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: mul r1, r2, r1
@@ -785,7 +785,7 @@ define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: test2:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r1, [r1]
; THUMB1-NEXT: muls r1, r2, r1
; THUMB1-NEXT: ldr r0, [r0]
@@ -795,7 +795,7 @@ define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) {
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: test2:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: muls r1, r2, r1
@@ -815,7 +815,7 @@ entry:
define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) {
; ARM-LABEL: test3:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: mul r1, r2, r0
; ARM-NEXT: ldrh r2, [r3]
@@ -825,7 +825,7 @@ define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) {
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: test3:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: mul r1, r2, r0
; ARMEB-NEXT: ldrh r2, [r3]
@@ -835,7 +835,7 @@ define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) {
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: test3:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r0, [r0]
; THUMB1-NEXT: muls r2, r0, r2
; THUMB1-NEXT: ldrh r1, [r3]
@@ -845,7 +845,7 @@ define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) {
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: test3:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: mul r1, r2, r0
; THUMB2-NEXT: ldrh r2, [r3]
@@ -866,7 +866,7 @@ entry:
define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-LABEL: test4:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: mul r2, r2, r3
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: ldr r0, [r0]
@@ -876,7 +876,7 @@ define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: test4:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: mul r2, r2, r3
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: ldr r0, [r0]
@@ -886,7 +886,7 @@ define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: test4:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: muls r2, r3, r2
; THUMB1-NEXT: ldr r1, [r1]
; THUMB1-NEXT: ldr r0, [r0]
@@ -896,7 +896,7 @@ define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) {
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: test4:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: muls r2, r3, r2
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: ldr r0, [r0]
@@ -916,7 +916,7 @@ entry:
define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) {
; ARM-LABEL: test5:
-; ARM: @ BB#0: @ %entry
+; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldr r1, [r1]
; ARM-NEXT: ldr r0, [r0]
; ARM-NEXT: mul r1, r2, r1
@@ -926,7 +926,7 @@ define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) {
; ARM-NEXT: bx lr
;
; ARMEB-LABEL: test5:
-; ARMEB: @ BB#0: @ %entry
+; ARMEB: @ %bb.0: @ %entry
; ARMEB-NEXT: ldr r1, [r1]
; ARMEB-NEXT: ldr r0, [r0]
; ARMEB-NEXT: mul r1, r2, r1
@@ -936,7 +936,7 @@ define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) {
; ARMEB-NEXT: bx lr
;
; THUMB1-LABEL: test5:
-; THUMB1: @ BB#0: @ %entry
+; THUMB1: @ %bb.0: @ %entry
; THUMB1-NEXT: ldr r1, [r1]
; THUMB1-NEXT: muls r1, r2, r1
; THUMB1-NEXT: ldr r0, [r0]
@@ -946,7 +946,7 @@ define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) {
; THUMB1-NEXT: bx lr
;
; THUMB2-LABEL: test5:
-; THUMB2: @ BB#0: @ %entry
+; THUMB2: @ %bb.0: @ %entry
; THUMB2-NEXT: ldr r1, [r1]
; THUMB2-NEXT: ldr r0, [r0]
; THUMB2-NEXT: muls r1, r2, r1
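The hunks on either side of this point are all the same mechanical edit: every block-start comment in the expected assembly moves from the old BB#<n> spelling to the new %bb.<n> one, keeping the trailing IR block name when there is one. As a rough illustration only (a hypothetical helper, not code from this patch), the new comment body could be assembled like this:

    #include <string>

    // Builds the body of a block-start comment in the new style, e.g.
    // "%bb.0" or "%bb.0: @ %entry"; "@" is the ARM assembly comment
    // marker, and the printer prepends its own "@ " before this string.
    std::string blockStartComment(unsigned Num, const std::string &IRName) {
      std::string S = "%bb." + std::to_string(Num);
      if (!IRName.empty())
        S += ": @ %" + IRName;
      return S;
    }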
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index a24808004ef..c6ca6a624b1 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -142,27 +142,27 @@ return: ; preds = %bb2, %bb, %entry
define i32 @test_tst_assessment(i32 %a, i32 %b) {
; ARM-LABEL: test_tst_assessment:
-; ARM: @ BB#0:
+; ARM: @ %bb.0:
; ARM-NEXT: and r0, r0, #1
; ARM-NEXT: tst r1, #1
; ARM-NEXT: subne r0, r0, #1
; ARM-NEXT: mov pc, lr
;
; THUMB-LABEL: test_tst_assessment:
-; THUMB: @ BB#0:
+; THUMB: @ %bb.0:
; THUMB-NEXT: movs r2, r0
; THUMB-NEXT: movs r0, #1
; THUMB-NEXT: ands r0, r2
; THUMB-NEXT: subs r2, r0, #1
; THUMB-NEXT: lsls r1, r1, #31
; THUMB-NEXT: beq .LBB2_2
-; THUMB-NEXT: @ BB#1:
+; THUMB-NEXT: @ %bb.1:
; THUMB-NEXT: movs r0, r2
; THUMB-NEXT: .LBB2_2:
; THUMB-NEXT: bx lr
;
; T2-LABEL: test_tst_assessment:
-; T2: @ BB#0:
+; T2: @ %bb.0:
; T2-NEXT: lsls r1, r1, #31
; T2-NEXT: and r0, r0, #1
; T2-NEXT: it ne
@@ -170,7 +170,7 @@ define i32 @test_tst_assessment(i32 %a, i32 %b) {
; T2-NEXT: bx lr
;
; V8-LABEL: test_tst_assessment:
-; V8: @ BB#0:
+; V8: @ %bb.0:
; V8-NEXT: and r0, r0, #1
; V8-NEXT: lsls r1, r1, #31
; V8-NEXT: it ne
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
index d1575ed12e4..192ed8f8db7 100644
--- a/test/CodeGen/ARM/atomic-ops-v8.ll
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -1046,7 +1046,7 @@ define i8 @test_atomic_cmpxchg_i8(i8 zeroext %wanted, i8 zeroext %new) nounwind
; CHECK-ARM-NEXT: cmp r[[OLD]], r0
; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]]
; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
-; CHECK-NEXT: BB#2:
+; CHECK-NEXT: %bb.2:
; As above, r1 is a reasonable guess.
; CHECK: strexb [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
@@ -1080,7 +1080,7 @@ define i16 @test_atomic_cmpxchg_i16(i16 zeroext %wanted, i16 zeroext %new) nounw
; CHECK-ARM-NEXT: cmp r[[OLD]], r0
; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]]
; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
-; CHECK-NEXT: BB#2:
+; CHECK-NEXT: %bb.2:
; As above, r1 is a reasonable guess.
; CHECK: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
@@ -1113,7 +1113,7 @@ define void @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; function there.
; CHECK-NEXT: cmp r[[OLD]], r0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
-; CHECK-NEXT: BB#2:
+; CHECK-NEXT: %bb.2:
; As above, r1 is a reasonable guess.
; CHECK: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
@@ -1152,7 +1152,7 @@ define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-ARM-BE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_HI]], [[MISMATCH_LO]]
; CHECK-THUMB-BE: orrs{{(\.w)?}} {{(r[0-9]+, )?}}[[MISMATCH_LO]], [[MISMATCH_HI]]
; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
-; CHECK-NEXT: BB#2:
+; CHECK-NEXT: %bb.2:
; As above, r2, r3 is a reasonable guess.
; CHECK: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
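These cmpxchg CHECK lines exercise the strong compare-and-swap lowering: the exclusive store sits in a fall-through block (now printed %bb.2) inside a retry loop, because a strong exchange must not fail spuriously. In C++ terms the operation corresponds roughly to the sketch below (the memory orders are placeholders, and this is an approximation, not the tests' IR):

    #include <atomic>

    // Strong form: no spurious failure allowed, so the backend keeps the
    // exclusive load/store pair inside a loop and retries on a failed strex.
    bool update_strong(std::atomic<int> &addr, int expected, int desired) {
      return addr.compare_exchange_strong(expected, desired,
                                          std::memory_order_seq_cst,
                                          std::memory_order_relaxed);
    }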
diff --git a/test/CodeGen/ARM/bool-ext-inc.ll b/test/CodeGen/ARM/bool-ext-inc.ll
index ca9c9ab079d..00a7fcdee3c 100644
--- a/test/CodeGen/ARM/bool-ext-inc.ll
+++ b/test/CodeGen/ARM/bool-ext-inc.ll
@@ -3,7 +3,7 @@
define i32 @sext_inc(i1 zeroext %x) {
; CHECK-LABEL: sext_inc:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: eor r0, r0, #1
; CHECK-NEXT: mov pc, lr
%ext = sext i1 %x to i32
@@ -13,7 +13,7 @@ define i32 @sext_inc(i1 zeroext %x) {
define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
; CHECK-LABEL: sext_inc_vec:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov.i16 d16, #0x1
; CHECK-NEXT: vmov d17, r0, r1
; CHECK-NEXT: veor d16, d17, d16
@@ -30,7 +30,7 @@ define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpgt_sext_inc_vec:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: mov r0, sp
@@ -49,7 +49,7 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpne_sext_inc_vec:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: mov r12, sp
; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
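The fold bool-ext-inc pins down is the identity sext(i1 x) + 1 == x xor 1: for x in {0, 1}, sext yields 0 or -1, so the add collapses to the single eor seen in the expected output. A quick self-check of that identity (illustrative only, not part of the patch):

    #include <cassert>

    // sext i1 %x to i32 gives 0 or -1; adding 1 flips the low bit,
    // matching the lone "eor r0, r0, #1" the test expects.
    int sext_inc(bool x) { return (x ? -1 : 0) + 1; }

    int main() {
      for (int x : {0, 1})
        assert(sext_inc(x != 0) == (x ^ 1));
      return 0;
    }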
diff --git a/test/CodeGen/ARM/cmpxchg-weak.ll b/test/CodeGen/ARM/cmpxchg-weak.ll
index 29d97fef060..5ee07828526 100644
--- a/test/CodeGen/ARM/cmpxchg-weak.ll
+++ b/test/CodeGen/ARM/cmpxchg-weak.ll
@@ -5,16 +5,16 @@ define void @test_cmpxchg_weak(i32 *%addr, i32 %desired, i32 %new) {
%pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
%oldval = extractvalue { i32, i1 } %pair, 0
-; CHECK-NEXT: BB#0:
+; CHECK-NEXT: %bb.0:
; CHECK-NEXT: ldrex [[LOADED:r[0-9]+]], [r0]
; CHECK-NEXT: cmp [[LOADED]], r1
; CHECK-NEXT: bne [[LDFAILBB:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: BB#1:
+; CHECK-NEXT: %bb.1:
; CHECK-NEXT: dmb ish
; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r2, [r0]
; CHECK-NEXT: cmp [[SUCCESS]], #0
; CHECK-NEXT: beq [[SUCCESSBB:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: BB#2:
+; CHECK-NEXT: %bb.2:
; CHECK-NEXT: str r3, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: [[LDFAILBB]]:
@@ -37,11 +37,11 @@ define i1 @test_cmpxchg_weak_to_bool(i32, i32 *%addr, i32 %desired, i32 %new) {
%pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
%success = extractvalue { i32, i1 } %pair, 1
-; CHECK-NEXT: BB#0:
+; CHECK-NEXT: %bb.0:
; CHECK-NEXT: ldrex [[LOADED:r[0-9]+]], [r1]
; CHECK-NEXT: cmp [[LOADED]], r2
; CHECK-NEXT: bne [[LDFAILBB:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: BB#1:
+; CHECK-NEXT: %bb.1:
; CHECK-NEXT: dmb ish
; CHECK-NEXT: mov r0, #0
; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r3, [r1]
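By contrast with the strong form above, cmpxchg-weak checks a straight-line lowering: one ldrex/strex attempt across fall-through blocks (%bb.0, %bb.1, %bb.2) and no retry loop, since a weak exchange may fail spuriously. A rough C++ equivalent of the operation being compiled (the seq_cst/relaxed pair mirrors the test's "seq_cst monotonic" orders; a sketch, not the test's IR):

    #include <atomic>

    // One attempt only: compare_exchange_weak is allowed to fail spuriously,
    // which is why a single exclusive load/store pair suffices here.
    bool try_update(std::atomic<int> &addr, int expected, int desired) {
      return addr.compare_exchange_weak(expected, desired,
                                        std::memory_order_seq_cst,
                                        std::memory_order_relaxed);
    }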
diff --git a/test/CodeGen/ARM/cortex-a57-misched-alu.ll b/test/CodeGen/ARM/cortex-a57-misched-alu.ll
index 2ced60fbf0d..7d50a2023ed 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-alu.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-alu.ll
@@ -5,7 +5,7 @@
; Check the latency for ALU shifted operand variants.
;
; CHECK: ********** MI Scheduling **********
-; CHECK: foo:BB#0 entry
+; CHECK: foo:%bb.0 entry
; ALU, basic - 1 cyc I0/I1
; CHECK: EORrr
diff --git a/test/CodeGen/ARM/cortex-a57-misched-basic.ll b/test/CodeGen/ARM/cortex-a57-misched-basic.ll
index cfbef7bd429..ad729c2ff2a 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-basic.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-basic.ll
@@ -6,7 +6,7 @@
; SDIV should be scheduled at the block's begin (20 cyc of independent M unit).
;
; CHECK: ********** MI Scheduling **********
-; CHECK: foo:BB#0 entry
+; CHECK: foo:%bb.0 entry
; GENERIC: LDRi12
; GENERIC: Latency : 1
@@ -30,7 +30,7 @@
; A57_SCHED: SUBrr
; A57_SCHED: Latency : 1
-; CHECK: ** Final schedule for BB#0 ***
+; CHECK: ** Final schedule for %bb.0 ***
; GENERIC: LDRi12
; GENERIC: SDIV
; A57_SCHED: SDIV
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vadd.ll b/test/CodeGen/ARM/cortex-a57-misched-vadd.ll
index eb8d1c85523..cb7490856ab 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vadd.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vadd.ll
@@ -1,7 +1,7 @@
; REQUIRES: asserts
; RUN: llc < %s -mtriple=armv8r-eabi -mcpu=cortex-a57 -misched-postra -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
-; CHECK-LABEL: addv_i32:BB#0
+; CHECK-LABEL: addv_i32:%bb.0
; CHECK: SU(8): {{.*}} VADDv4i32
; CHECK-NEXT: # preds left
; CHECK-NEXT: # succs left
@@ -13,7 +13,7 @@ define <4 x i32> @addv_i32(<4 x i32>, <4 x i32>) {
ret <4 x i32> %3
}
-; CHECK-LABEL: addv_f32:BB#0
+; CHECK-LABEL: addv_f32:%bb.0
; CHECK: SU(8): {{.*}} VADDfq
; CHECK-NEXT: # preds left
; CHECK-NEXT: # succs left
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vfma.ll b/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
index 372b2e2f5dc..a3e07ba17b9 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
@@ -5,7 +5,7 @@
define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) {
; CHECK: ********** MI Scheduling **********
-; CHECK: Test1:BB#0
+; CHECK: Test1:%bb.0
; CHECK: VMULS
; > VMULS common latency = 5
@@ -44,7 +44,7 @@ define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float
; ASIMD form
define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) {
; CHECK: ********** MI Scheduling **********
-; CHECK: Test2:BB#0
+; CHECK: Test2:%bb.0
; CHECK: VMULfd
; > VMULfd common latency = 5
@@ -82,7 +82,7 @@ define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) {
; CHECK: ********** MI Scheduling **********
-; CHECK: Test3:BB#0
+; CHECK: Test3:%bb.0
; CHECK: VMULS
; > VMULS common latency = 5
@@ -121,7 +121,7 @@ define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float
; ASIMD form
define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) {
; CHECK: ********** MI Scheduling **********
-; CHECK: Test4:BB#0
+; CHECK: Test4:%bb.0
; CHECK: VMULfd
; > VMULfd common latency = 5
@@ -159,7 +159,7 @@ define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
define float @Test5(float %f1, float %f2, float %f3) {
; CHECK: ********** MI Scheduling **********
-; CHECK: Test5:BB#0
+; CHECK: Test5:%bb.0
; CHECK-DEFAULT: VNMLS
; CHECK-FAST: VFNMS
@@ -178,7 +178,7 @@ define float @Test5(float %f1, float %f2, float %f3) {
define float @Test6(float %f1, float %f2, float %f3) {
; CHECK: ********** MI Scheduling **********
-; CHECK: Test6:BB#0
+; CHECK: Test6:%bb.0
; CHECK-DEFAULT: VNMLA
; CHECK-FAST: VFNMA
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vsub.ll b/test/CodeGen/ARM/cortex-a57-misched-vsub.ll
index c3c445d3f0e..fe14c861f8e 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vsub.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vsub.ll
@@ -1,7 +1,7 @@
; REQUIRES: asserts
; RUN: llc < %s -mtriple=armv8r-eabi -mcpu=cortex-a57 -misched-postra -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
-; CHECK-LABEL: subv_i32:BB#0
+; CHECK-LABEL: subv_i32:%bb.0
; CHECK: SU(8): {{.*}} VSUBv4i32
; CHECK-NEXT: # preds left
; CHECK-NEXT: # succs left
@@ -13,7 +13,7 @@ define <4 x i32> @subv_i32(<4 x i32>, <4 x i32>) {
ret <4 x i32> %3
}
-; CHECK-LABEL: subv_f32:BB#0
+; CHECK-LABEL: subv_f32:%bb.0
; CHECK: SU(8): {{.*}} VSUBfq
; CHECK-NEXT: # preds left
; CHECK-NEXT: # succs left
diff --git a/test/CodeGen/ARM/cortexr52-misched-basic.ll b/test/CodeGen/ARM/cortexr52-misched-basic.ll
index 614157eb0e1..0edc6653a03 100644
--- a/test/CodeGen/ARM/cortexr52-misched-basic.ll
+++ b/test/CodeGen/ARM/cortexr52-misched-basic.ll
@@ -7,7 +7,7 @@
; as div takes more cycles to compute than eor.
;
; CHECK: ********** MI Scheduling **********
-; CHECK: foo:BB#0 entry
+; CHECK: foo:%bb.0 entry
; CHECK: EORrr
; GENERIC: Latency : 1
; R52_SCHED: Latency : 3
@@ -17,7 +17,7 @@
; CHECK: SDIV
; GENERIC: Latency : 0
; R52_SCHED: Latency : 8
-; CHECK: ** Final schedule for BB#0 ***
+; CHECK: ** Final schedule for %bb.0 ***
; GENERIC: EORrr
; GENERIC: SDIV
; R52_SCHED: SDIV
diff --git a/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll b/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll
index 8395674e880..4f6055dee62 100644
--- a/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll
+++ b/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll
@@ -6,7 +6,7 @@
define i32 @foo(%struct.desc* %descs, i32 %num, i32 %cw) local_unnamed_addr #0 {
; CHECK-LABEL: foo:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: mov r1, #32
; CHECK-NEXT: vld1.32 {d16, d17}, [r0], r1
; CHECK-NEXT: vld1.32 {d18, d19}, [r0]
diff --git a/test/CodeGen/ARM/deprecated-asm.s b/test/CodeGen/ARM/deprecated-asm.s
index 7318e6a68c5..465da40c1c1 100644
--- a/test/CodeGen/ARM/deprecated-asm.s
+++ b/test/CodeGen/ARM/deprecated-asm.s
@@ -25,7 +25,7 @@
.type foo,%function
foo: @ @foo
.fnstart
-@ BB#0: @ %entry
+@ %bb.0: @ %entry
mov r0, #0
bx lr
stmia r4!, {r12-r14}
diff --git a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll
index 1c8142e5ddd..b69f121d10c 100644
--- a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll
+++ b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll
@@ -21,8 +21,8 @@ entry:
; After if conversion, we have
; for.body -> for.cond.backedge (100%)
; -> cond.false.i (0%)
-; CHECK: BB#1: derived from LLVM BB %for.body
-; CHECK: Successors according to CFG: BB#2(0x80000000 / 0x80000000 = 100.00%) BB#4(0x00000001 / 0x80000000 = 0.00%)
+; CHECK: %bb.1: derived from LLVM BB %for.body
+; CHECK: Successors according to CFG: %bb.2(0x80000000 / 0x80000000 = 100.00%) %bb.4(0x00000001 / 0x80000000 = 0.00%)
for.body:
br i1 undef, label %for.cond.backedge, label %lor.lhs.false.i, !prof !1
diff --git a/test/CodeGen/ARM/ifcvt-branch-weight.ll b/test/CodeGen/ARM/ifcvt-branch-weight.ll
index 5c39d63fda1..6f6f8bc1834 100644
--- a/test/CodeGen/ARM/ifcvt-branch-weight.ll
+++ b/test/CodeGen/ARM/ifcvt-branch-weight.ll
@@ -18,8 +18,8 @@ bb:
%9 = icmp eq i32 %8, 0
br i1 %9, label %return, label %bb2
-; CHECK: BB#2: derived from LLVM BB %bb2
-; CHECK: Successors according to CFG: BB#4({{[0-9a-fx/= ]+}}50.00%) BB#3({{[0-9a-fx/= ]+}}50.00%)
+; CHECK: %bb.2: derived from LLVM BB %bb2
+; CHECK: Successors according to CFG: %bb.4({{[0-9a-fx/= ]+}}50.00%) %bb.3({{[0-9a-fx/= ]+}}50.00%)
bb2:
%v10 = icmp eq i32 %3, 16
diff --git a/test/CodeGen/ARM/ifcvt-iter-indbr.ll b/test/CodeGen/ARM/ifcvt-iter-indbr.ll
index 73496257306..ccc6ded49f1 100644
--- a/test/CodeGen/ARM/ifcvt-iter-indbr.ll
+++ b/test/CodeGen/ARM/ifcvt-iter-indbr.ll
@@ -30,10 +30,10 @@ declare i8* @bar(i32, i8*, i8*)
; CHECK-NEXT: [[FOOCALL]]:
; CHECK-NEXT: bl _foo
;
-; CHECK-PROB: BB#0:
-; CHECK-PROB: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}50.00%) BB#3({{[0-9a-fx/= ]+}}25.00%) BB#5({{[0-9a-fx/= ]+}}25.00%)
-; CHECK-PROB: BB#2:
-; CHECK-PROB: Successors according to CFG: BB#3({{[0-9a-fx/= ]+}}50.00%) BB#5({{[0-9a-fx/= ]+}}50.00%)
+; CHECK-PROB: %bb.0:
+; CHECK-PROB: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}50.00%) %bb.3({{[0-9a-fx/= ]+}}25.00%) %bb.5({{[0-9a-fx/= ]+}}25.00%)
+; CHECK-PROB: %bb.2:
+; CHECK-PROB: Successors according to CFG: %bb.3({{[0-9a-fx/= ]+}}50.00%) %bb.5({{[0-9a-fx/= ]+}}50.00%)
define i32 @test(i32 %a, i32 %a2, i32* %p, i32* %p2) "no-frame-pointer-elim"="true" {
entry:
diff --git a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
index 6d62fd31f97..6f1e18ffdfc 100644
--- a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
+++ b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
@@ -4,14 +4,14 @@
define void @i24_or(i24* %a) {
; LE-LABEL: i24_or:
-; LE: @ BB#0:
+; LE: @ %bb.0:
; LE-NEXT: ldrh r1, [r0]
; LE-NEXT: orr r1, r1, #384
; LE-NEXT: strh r1, [r0]
; LE-NEXT: mov pc, lr
;
; BE-LABEL: i24_or:
-; BE: @ BB#0:
+; BE: @ %bb.0:
; BE-NEXT: ldrh r1, [r0]
; BE-NEXT: ldrb r2, [r0, #2]
; BE-NEXT: orr r1, r2, r1, lsl #8
@@ -28,7 +28,7 @@ define void @i24_or(i24* %a) {
define void @i24_and_or(i24* %a) {
; LE-LABEL: i24_and_or:
-; LE: @ BB#0:
+; LE: @ %bb.0:
; LE-NEXT: ldrh r1, [r0]
; LE-NEXT: mov r2, #16256
; LE-NEXT: orr r2, r2, #49152
@@ -38,7 +38,7 @@ define void @i24_and_or(i24* %a) {
; LE-NEXT: mov pc, lr
;
; BE-LABEL: i24_and_or:
-; BE: @ BB#0:
+; BE: @ %bb.0:
; BE-NEXT: mov r1, #128
; BE-NEXT: strb r1, [r0, #2]
; BE-NEXT: ldrh r1, [r0]
@@ -54,7 +54,7 @@ define void @i24_and_or(i24* %a) {
define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
; LE-LABEL: i24_insert_bit:
-; LE: @ BB#0:
+; LE: @ %bb.0:
; LE-NEXT: mov r3, #255
; LE-NEXT: ldrh r2, [r0]
; LE-NEXT: orr r3, r3, #57088
@@ -64,7 +64,7 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
; LE-NEXT: mov pc, lr
;
; BE-LABEL: i24_insert_bit:
-; BE: @ BB#0:
+; BE: @ %bb.0:
; BE-NEXT: ldrh r2, [r0]
; BE-NEXT: mov r3, #57088
; BE-NEXT: orr r3, r3, #16711680
@@ -84,14 +84,14 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
define void @i56_or(i56* %a) {
; LE-LABEL: i56_or:
-; LE: @ BB#0:
+; LE: @ %bb.0:
; LE-NEXT: ldr r1, [r0]
; LE-NEXT: orr r1, r1, #384
; LE-NEXT: str r1, [r0]
; LE-NEXT: mov pc, lr
;
; BE-LABEL: i56_or:
-; BE: @ BB#0:
+; BE: @ %bb.0:
; BE-NEXT: mov r1, r0
; BE-NEXT: ldr r12, [r0]
; BE-NEXT: ldrh r2, [r1, #4]!
@@ -114,7 +114,7 @@ define void @i56_or(i56* %a) {
define void @i56_and_or(i56* %a) {
; LE-LABEL: i56_and_or:
-; LE: @ BB#0:
+; LE: @ %bb.0:
; LE-NEXT: ldr r1, [r0]
; LE-NEXT: orr r1, r1, #384
; LE-NEXT: bic r1, r1, #127
@@ -122,7 +122,7 @@ define void @i56_and_or(i56* %a) {
; LE-NEXT: mov pc, lr
;
; BE-LABEL: i56_and_or:
-; BE: @ BB#0:
+; BE: @ %bb.0:
; BE-NEXT: mov r1, r0
; BE-NEXT: ldr r12, [r0]
; BE-NEXT: ldrh r2, [r1, #4]!
@@ -147,7 +147,7 @@ define void @i56_and_or(i56* %a) {
define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
; LE-LABEL: i56_insert_bit:
-; LE: @ BB#0:
+; LE: @ %bb.0:
; LE-NEXT: ldr r2, [r0]
; LE-NEXT: bic r2, r2, #8192
; LE-NEXT: orr r1, r2, r1, lsl #13
@@ -155,7 +155,7 @@ define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
; LE-NEXT: mov pc, lr
;
; BE-LABEL: i56_insert_bit:
-; BE: @ BB#0:
+; BE: @ %bb.0:
; BE-NEXT: .save {r11, lr}
; BE-NEXT: push {r11, lr}
; BE-NEXT: mov r2, r0
diff --git a/test/CodeGen/ARM/jump-table-tbh.ll b/test/CodeGen/ARM/jump-table-tbh.ll
index b3ee68ea075..ab2c579e514 100644
--- a/test/CodeGen/ARM/jump-table-tbh.ll
+++ b/test/CodeGen/ARM/jump-table-tbh.ll
@@ -10,7 +10,7 @@ define i32 @test_tbh(i1 %tst, i32 %sw, i32 %l) {
; T2-LABEL: test_tbh:
; T2: [[ANCHOR:.LCPI[0-9_]+]]:
; T2: tbh [pc, r{{[0-9]+}}, lsl #1]
-; T2-NEXT: @ BB#{{[0-9]+}}
+; T2-NEXT: @ %bb.{{[0-9]+}}
; T2-NEXT: LJTI
; T2-NEXT: .short (.LBB0_[[x:[0-9]+]]-([[ANCHOR]]+4))/2
; T2-NEXT: .short (.LBB0_{{[0-9]+}}-([[ANCHOR]]+4))/2
@@ -24,7 +24,7 @@ define i32 @test_tbh(i1 %tst, i32 %sw, i32 %l) {
; T1: lsls [[x]], [[x]], #1
; T1: [[ANCHOR:.LCPI[0-9_]+]]:
; T1: add pc, [[x]]
-; T1-NEXT: @ BB#2
+; T1-NEXT: @ %bb.2
; T1-NEXT: .p2align 2
; T1-NEXT: LJTI
; T1-NEXT: .short (.LBB0_[[x:[0-9]+]]-([[ANCHOR]]+4))/2
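The .short entries checked above encode each jump-table target as (target - (anchor + 4)) / 2: TBH scales its table entries by two, since Thumb instructions are halfword-aligned, and the base is the anchor label plus the 4-byte Thumb PC read-ahead. A worked computation under made-up addresses (purely illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical layout: anchor label at 0x8000, branch target at 0x8010.
      uint32_t anchor = 0x8000, target = 0x8010;
      uint16_t entry = static_cast<uint16_t>((target - (anchor + 4)) / 2);
      // TBH then branches to (anchor + 4) + 2 * entry, landing on the target.
      assert((anchor + 4) + 2u * entry == target);
      return 0;
    }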
diff --git a/test/CodeGen/ARM/machine-licm.ll b/test/CodeGen/ARM/machine-licm.ll
index a1eec78e453..9ed1a57616c 100644
--- a/test/CodeGen/ARM/machine-licm.ll
+++ b/test/CodeGen/ARM/machine-licm.ll
@@ -31,7 +31,7 @@ bb.nph: ; preds = %entry
; ARM-NOT: LCPI0_1:
; ARM: .section
-; THUMB: BB#1
+; THUMB: %bb.1
; THUMB: ldr r2, LCPI0_0
; THUMB: add r2, pc
; THUMB: ldr r{{[0-9]+}}, [r2]
diff --git a/test/CodeGen/ARM/misched-copy-arm.ll b/test/CodeGen/ARM/misched-copy-arm.ll
index bc20939d0f7..ae0b127a6f8 100644
--- a/test/CodeGen/ARM/misched-copy-arm.ll
+++ b/test/CodeGen/ARM/misched-copy-arm.ll
@@ -4,7 +4,7 @@
; Loop counter copies should be eliminated.
; There is also a MUL here, but we don't care where it is scheduled.
; CHECK: postinc
-; CHECK: *** Final schedule for BB#2 ***
+; CHECK: *** Final schedule for %bb.2 ***
; CHECK: t2LDRs
; CHECK: t2ADDrr
; CHECK: t2CMPrr
@@ -32,7 +32,7 @@ for.end: ; preds = %for.body, %entry
; This case was a crasher in constrainLocalCopy.
; The problem was the t2LDR_PRE defining both the global and local lrg.
-; CHECK-LABEL: *** Final schedule for BB#5 ***
+; CHECK-LABEL: *** Final schedule for %bb.5 ***
; CHECK: %[[R4:[0-9]+]]<def>, %[[R1:[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
; CHECK: %{{[0-9]+}}<def> = COPY %[[R1]]
; CHECK: %{{[0-9]+}}<def> = COPY %[[R4]]
diff --git a/test/CodeGen/ARM/negate-i1.ll b/test/CodeGen/ARM/negate-i1.ll
index 0503763e674..493b26a5a84 100644
--- a/test/CodeGen/ARM/negate-i1.ll
+++ b/test/CodeGen/ARM/negate-i1.ll
@@ -4,7 +4,7 @@
define i32 @select_i32_neg1_or_0(i1 %a) {
; CHECK-LABEL: select_i32_neg1_or_0:
-; CHECK-NEXT: @ BB#0:
+; CHECK-NEXT: @ %bb.0:
; CHECK-NEXT: and r0, r0, #1
; CHECK-NEXT: rsb r0, r0, #0
; CHECK-NEXT: mov pc, lr
@@ -15,7 +15,7 @@ define i32 @select_i32_neg1_or_0(i1 %a) {
define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
; CHECK-LABEL: select_i32_neg1_or_0_zeroext:
-; CHECK-NEXT: @ BB#0:
+; CHECK-NEXT: @ %bb.0:
; CHECK-NEXT: rsb r0, r0, #0
; CHECK-NEXT: mov pc, lr
;
diff --git a/test/CodeGen/ARM/neon_vabs.ll b/test/CodeGen/ARM/neon_vabs.ll
index 109d09582af..4064aae65f6 100644
--- a/test/CodeGen/ARM/neon_vabs.ll
+++ b/test/CodeGen/ARM/neon_vabs.ll
@@ -3,7 +3,7 @@
define <4 x i32> @test1(<4 x i32> %a) nounwind {
; CHECK-LABEL: test1:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s32 q8, q8
@@ -18,7 +18,7 @@ define <4 x i32> @test1(<4 x i32> %a) nounwind {
define <4 x i32> @test2(<4 x i32> %a) nounwind {
; CHECK-LABEL: test2:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s32 q8, q8
@@ -33,7 +33,7 @@ define <4 x i32> @test2(<4 x i32> %a) nounwind {
define <8 x i16> @test3(<8 x i16> %a) nounwind {
; CHECK-LABEL: test3:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s16 q8, q8
@@ -48,7 +48,7 @@ define <8 x i16> @test3(<8 x i16> %a) nounwind {
define <16 x i8> @test4(<16 x i8> %a) nounwind {
; CHECK-LABEL: test4:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s8 q8, q8
@@ -63,7 +63,7 @@ define <16 x i8> @test4(<16 x i8> %a) nounwind {
define <4 x i32> @test5(<4 x i32> %a) nounwind {
; CHECK-LABEL: test5:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s32 q8, q8
@@ -78,7 +78,7 @@ define <4 x i32> @test5(<4 x i32> %a) nounwind {
define <2 x i32> @test6(<2 x i32> %a) nounwind {
; CHECK-LABEL: test6:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -91,7 +91,7 @@ define <2 x i32> @test6(<2 x i32> %a) nounwind {
define <2 x i32> @test7(<2 x i32> %a) nounwind {
; CHECK-LABEL: test7:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -104,7 +104,7 @@ define <2 x i32> @test7(<2 x i32> %a) nounwind {
define <4 x i16> @test8(<4 x i16> %a) nounwind {
; CHECK-LABEL: test8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s16 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -117,7 +117,7 @@ define <4 x i16> @test8(<4 x i16> %a) nounwind {
define <8 x i8> @test9(<8 x i8> %a) nounwind {
; CHECK-LABEL: test9:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s8 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -130,7 +130,7 @@ define <8 x i8> @test9(<8 x i8> %a) nounwind {
define <2 x i32> @test10(<2 x i32> %a) nounwind {
; CHECK-LABEL: test10:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vabs.s32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -146,7 +146,7 @@ define <2 x i32> @test10(<2 x i32> %a) nounwind {
define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind {
; CHECK-LABEL: test11:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r2, r3
; CHECK-NEXT: vmov d17, r0, r1
; CHECK-NEXT: vabdl.u16 q8, d17, d16
@@ -163,7 +163,7 @@ define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind {
}
define <8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind {
; CHECK-LABEL: test12:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r2, r3
; CHECK-NEXT: vmov d17, r0, r1
; CHECK-NEXT: vabdl.u8 q8, d17, d16
@@ -181,7 +181,7 @@ define <8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind {
define <2 x i64> @test13(<2 x i32> %a, <2 x i32> %b) nounwind {
; CHECK-LABEL: test13:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r2, r3
; CHECK-NEXT: vmov d17, r0, r1
; CHECK-NEXT: vabdl.u32 q8, d17, d16
diff --git a/test/CodeGen/ARM/nest-register.ll b/test/CodeGen/ARM/nest-register.ll
index 6b8c3dc47db..ac7afe0007c 100644
--- a/test/CodeGen/ARM/nest-register.ll
+++ b/test/CodeGen/ARM/nest-register.ll
@@ -5,7 +5,7 @@
define i8* @nest_receiver(i8* nest %arg) nounwind {
; CHECK-LABEL: nest_receiver:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: mov pc, lr
ret i8* %arg
diff --git a/test/CodeGen/ARM/noopt-dmb-v7.ll b/test/CodeGen/ARM/noopt-dmb-v7.ll
index 56a29c8a17e..86b27600eb4 100644
--- a/test/CodeGen/ARM/noopt-dmb-v7.ll
+++ b/test/CodeGen/ARM/noopt-dmb-v7.ll
@@ -9,7 +9,7 @@ entry:
ret i32 0
}
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: dmb ish
; CHECK-NEXT: dmb ish
; CHECK-NEXT: dmb ish
diff --git a/test/CodeGen/ARM/select_const.ll b/test/CodeGen/ARM/select_const.ll
index 23de9c35a5b..7cce0b08203 100644
--- a/test/CodeGen/ARM/select_const.ll
+++ b/test/CodeGen/ARM/select_const.ll
@@ -8,7 +8,7 @@
define i32 @select_0_or_1(i1 %cond) {
; CHECK-LABEL: select_0_or_1:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #1
; CHECK-NEXT: bic r0, r1, r0
; CHECK-NEXT: mov pc, lr
@@ -18,7 +18,7 @@ define i32 @select_0_or_1(i1 %cond) {
define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_1_zeroext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: eor r0, r0, #1
; CHECK-NEXT: mov pc, lr
%sel = select i1 %cond, i32 0, i32 1
@@ -27,7 +27,7 @@ define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
define i32 @select_0_or_1_signext(i1 signext %cond) {
; CHECK-LABEL: select_0_or_1_signext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #1
; CHECK-NEXT: bic r0, r1, r0
; CHECK-NEXT: mov pc, lr
@@ -39,7 +39,7 @@ define i32 @select_0_or_1_signext(i1 signext %cond) {
define i32 @select_1_or_0(i1 %cond) {
; CHECK-LABEL: select_1_or_0:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: and r0, r0, #1
; CHECK-NEXT: mov pc, lr
%sel = select i1 %cond, i32 1, i32 0
@@ -48,7 +48,7 @@ define i32 @select_1_or_0(i1 %cond) {
define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_1_or_0_zeroext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov pc, lr
%sel = select i1 %cond, i32 1, i32 0
ret i32 %sel
@@ -56,7 +56,7 @@ define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
define i32 @select_1_or_0_signext(i1 signext %cond) {
; CHECK-LABEL: select_1_or_0_signext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: and r0, r0, #1
; CHECK-NEXT: mov pc, lr
%sel = select i1 %cond, i32 1, i32 0
@@ -67,7 +67,7 @@ define i32 @select_1_or_0_signext(i1 signext %cond) {
define i32 @select_0_or_neg1(i1 %cond) {
; CHECK-LABEL: select_0_or_neg1:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #1
; CHECK-NEXT: bic r0, r1, r0
; CHECK-NEXT: rsb r0, r0, #0
@@ -78,7 +78,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_neg1_zeroext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: eor r0, r0, #1
; CHECK-NEXT: rsb r0, r0, #0
; CHECK-NEXT: mov pc, lr
@@ -88,7 +88,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
define i32 @select_0_or_neg1_signext(i1 signext %cond) {
; CHECK-LABEL: select_0_or_neg1_signext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mvn r0, r0
; CHECK-NEXT: mov pc, lr
%sel = select i1 %cond, i32 0, i32 -1
@@ -97,7 +97,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) {
define i32 @select_0_or_neg1_alt(i1 %cond) {
; CHECK-LABEL: select_0_or_neg1_alt:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: and r0, r0, #1
; CHECK-NEXT: sub r0, r0, #1
; CHECK-NEXT: mov pc, lr
@@ -108,7 +108,7 @@ define i32 @select_0_or_neg1_alt(i1 %cond) {
define i32 @select_0_or_neg1_alt_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_neg1_alt_zeroext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: sub r0, r0, #1
; CHECK-NEXT: mov pc, lr
%z = zext i1 %cond to i32
@@ -118,7 +118,7 @@ define i32 @select_0_or_neg1_alt_zeroext(i1 zeroext %cond) {
define i32 @select_0_or_neg1_alt_signext(i1 signext %cond) {
; CHECK-LABEL: select_0_or_neg1_alt_signext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mvn r0, r0
; CHECK-NEXT: mov pc, lr
%z = zext i1 %cond to i32
@@ -130,7 +130,7 @@ define i32 @select_0_or_neg1_alt_signext(i1 signext %cond) {
define i32 @select_neg1_or_0(i1 %cond) {
; CHECK-LABEL: select_neg1_or_0:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: and r0, r0, #1
; CHECK-NEXT: rsb r0, r0, #0
; CHECK-NEXT: mov pc, lr
@@ -140,7 +140,7 @@ define i32 @select_neg1_or_0(i1 %cond) {
define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_neg1_or_0_zeroext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: rsb r0, r0, #0
; CHECK-NEXT: mov pc, lr
%sel = select i1 %cond, i32 -1, i32 0
@@ -149,7 +149,7 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
define i32 @select_neg1_or_0_signext(i1 signext %cond) {
; CHECK-LABEL: select_neg1_or_0_signext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov pc, lr
%sel = select i1 %cond, i32 -1, i32 0
ret i32 %sel
@@ -159,7 +159,7 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) {
define i32 @select_Cplus1_C(i1 %cond) {
; CHECK-LABEL: select_Cplus1_C:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #41
; CHECK-NEXT: tst r0, #1
; CHECK-NEXT: movne r1, #42
@@ -171,7 +171,7 @@ define i32 @select_Cplus1_C(i1 %cond) {
define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_Cplus1_C_zeroext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #41
; CHECK-NEXT: cmp r0, #0
; CHECK-NEXT: movne r1, #42
@@ -183,7 +183,7 @@ define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
define i32 @select_Cplus1_C_signext(i1 signext %cond) {
; CHECK-LABEL: select_Cplus1_C_signext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #41
; CHECK-NEXT: tst r0, #1
; CHECK-NEXT: movne r1, #42
@@ -197,7 +197,7 @@ define i32 @select_Cplus1_C_signext(i1 signext %cond) {
define i32 @select_C_Cplus1(i1 %cond) {
; CHECK-LABEL: select_C_Cplus1:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #42
; CHECK-NEXT: tst r0, #1
; CHECK-NEXT: movne r1, #41
@@ -209,7 +209,7 @@ define i32 @select_C_Cplus1(i1 %cond) {
define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_C_Cplus1_zeroext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #42
; CHECK-NEXT: cmp r0, #0
; CHECK-NEXT: movne r1, #41
@@ -221,7 +221,7 @@ define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
define i32 @select_C_Cplus1_signext(i1 signext %cond) {
; CHECK-LABEL: select_C_Cplus1_signext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #42
; CHECK-NEXT: tst r0, #1
; CHECK-NEXT: movne r1, #41
@@ -236,7 +236,7 @@ define i32 @select_C_Cplus1_signext(i1 signext %cond) {
define i32 @select_C1_C2(i1 %cond) {
; CHECK-LABEL: select_C1_C2:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #165
; CHECK-NEXT: tst r0, #1
; CHECK-NEXT: orr r1, r1, #256
@@ -249,7 +249,7 @@ define i32 @select_C1_C2(i1 %cond) {
define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_C1_C2_zeroext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #165
; CHECK-NEXT: cmp r0, #0
; CHECK-NEXT: orr r1, r1, #256
@@ -262,7 +262,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
define i32 @select_C1_C2_signext(i1 signext %cond) {
; CHECK-LABEL: select_C1_C2_signext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #165
; CHECK-NEXT: tst r0, #1
; CHECK-NEXT: orr r1, r1, #256
@@ -278,7 +278,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) {
define i64 @opaque_constant1(i1 %cond, i64 %x) {
; CHECK-LABEL: opaque_constant1:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: mov lr, #1
@@ -310,7 +310,7 @@ define i64 @opaque_constant1(i1 %cond, i64 %x) {
define i64 @opaque_constant2(i1 %cond, i64 %x) {
; CHECK-LABEL: opaque_constant2:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, #1
; CHECK-NEXT: tst r0, #1
; CHECK-NEXT: orr r1, r1, #65536
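Most of the folds in select_const reduce a select between constants to bit arithmetic on the i1 condition, e.g. select(c, 1, 0) == c & 1 and select(c, -1, 0) == -(c & 1), which is exactly the and/rsb pairing in the expected assembly. A quick check of those two identities (illustrative only):

    #include <cassert>

    int main() {
      for (int c : {0, 1}) {
        assert((c ? 1 : 0) == (c & 1));    // "and r0, r0, #1"
        assert((c ? -1 : 0) == -(c & 1));  // "and ..." then "rsb r0, r0, #0"
      }
      return 0;
    }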
diff --git a/test/CodeGen/ARM/setcc-logic.ll b/test/CodeGen/ARM/setcc-logic.ll
index 79bae1facb3..c48636dffa7 100644
--- a/test/CodeGen/ARM/setcc-logic.ll
+++ b/test/CodeGen/ARM/setcc-logic.ll
@@ -3,7 +3,7 @@
define zeroext i1 @ne_neg1_and_ne_zero(i32 %x) nounwind {
; CHECK-LABEL: ne_neg1_and_ne_zero:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: add r1, r0, #1
; CHECK-NEXT: mov r0, #0
; CHECK-NEXT: cmp r1, #1
@@ -19,7 +19,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i32 %x) nounwind {
define zeroext i1 @and_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; CHECK-LABEL: and_eq:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: eor r2, r2, r3
; CHECK-NEXT: eor r0, r0, r1
; CHECK-NEXT: orrs r0, r0, r2
@@ -34,7 +34,7 @@ define zeroext i1 @and_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; CHECK-LABEL: or_ne:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: eor r2, r2, r3
; CHECK-NEXT: eor r0, r0, r1
; CHECK-NEXT: orrs r0, r0, r2
@@ -48,7 +48,7 @@ define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind {
; CHECK-LABEL: and_eq_vec:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r11, lr}
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: vmov d19, r2, r3
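The and_eq/or_ne patterns in setcc-logic fold two equality tests into one: (a == b) && (c == d) holds exactly when ((a ^ b) | (c ^ d)) == 0, which is the eor/eor/orrs sequence feeding a single conditional move in the expected output. A brute-force check of the identity (illustrative only):

    #include <cassert>

    int main() {
      int vals[] = {0, 1, 7, -3};
      for (int a : vals)
        for (int b : vals)
          for (int c : vals)
            for (int d : vals)
              assert(((a == b) && (c == d)) ==
                     (((a ^ b) | (c ^ d)) == 0));
      return 0;
    }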
diff --git a/test/CodeGen/ARM/tail-merge-branch-weight.ll b/test/CodeGen/ARM/tail-merge-branch-weight.ll
index f83f2881579..f03906b6bf5 100644
--- a/test/CodeGen/ARM/tail-merge-branch-weight.ll
+++ b/test/CodeGen/ARM/tail-merge-branch-weight.ll
@@ -9,9 +9,9 @@
; = 0.2 * 0.4 + 0.8 * 0.7 = 0.64
; CHECK: # Machine code for function test0:
-; CHECK: Successors according to CFG: BB#{{[0-9]+}}({{[0-9a-fx/= ]+}}20.00%) BB#{{[0-9]+}}({{[0-9a-fx/= ]+}}80.00%)
-; CHECK: BB#{{[0-9]+}}:
-; CHECK: BB#{{[0-9]+}}:
+; CHECK: Successors according to CFG: %bb.{{[0-9]+}}({{[0-9a-fx/= ]+}}20.00%) %bb.{{[0-9]+}}({{[0-9a-fx/= ]+}}80.00%)
+; CHECK: %bb.{{[0-9]+}}:
+; CHECK: %bb.{{[0-9]+}}:
; CHECK: # End machine code for function test0.
define i32 @test0(i32 %n, i32 %m, i32* nocapture %a, i32* nocapture %b) {
diff --git a/test/CodeGen/ARM/taildup-branch-weight.ll b/test/CodeGen/ARM/taildup-branch-weight.ll
index 6f8d245e74a..5b7ba0ae51b 100644
--- a/test/CodeGen/ARM/taildup-branch-weight.ll
+++ b/test/CodeGen/ARM/taildup-branch-weight.ll
@@ -3,7 +3,7 @@
; RUN: | FileCheck %s
; CHECK: Machine code for function test0:
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}3.12%) BB#2({{[0-9a-fx/= ]+}}96.88%)
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}3.12%) %bb.2({{[0-9a-fx/= ]+}}96.88%)
define void @test0(i32 %a, i32 %b, i32* %c, i32* %d) {
entry:
@@ -30,7 +30,7 @@ B4:
!0 = !{!"branch_weights", i32 4, i32 124}
; CHECK: Machine code for function test1:
-; CHECK: Successors according to CFG: BB#2(0x7c000000 / 0x80000000 = 96.88%) BB#1(0x04000000 / 0x80000000 = 3.12%)
+; CHECK: Successors according to CFG: %bb.2(0x7c000000 / 0x80000000 = 96.88%) %bb.1(0x04000000 / 0x80000000 = 3.12%)
@g0 = common global i32 0, align 4
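The successor lists in these last two branch-weight files print probabilities in fixed-point form, numerator over 0x80000000, so 0x7c000000 / 0x80000000 is the 96.88% shown, and the tail-merge comment's 0.2 * 0.4 + 0.8 * 0.7 = 0.64 combines the two paths into the merged block. Both computations spelled out (illustrative only):

    #include <cassert>
    #include <cmath>

    int main() {
      // Fixed-point probability: numerator over a 2^31 denominator.
      const double den = 0x80000000u;
      assert(std::fabs(0x7c000000u / den - 0.9688) < 1e-4);
      // Probability of reaching the merged tail block along either path.
      assert(std::fabs(0.2 * 0.4 + 0.8 * 0.7 - 0.64) < 1e-12);
      return 0;
    }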
diff --git a/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll b/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll
index 673e04687a1..73189fe69db 100644
--- a/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll
+++ b/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll
@@ -30,7 +30,7 @@ for.cond7.preheader.i.us.i.i: ; preds = %for.cond7.preheader
unreachable
for.cond14.preheader.us.i.i.i: ; preds = %for.inc459.us.i.i.i, %for.cond7.preheader.i.i.preheader.i
-; CHECK: @ BB#4
+; CHECK: @ %bb.4
; CHECK-NEXT: .p2align 2
switch i4 undef, label %func_1.exit.loopexit [
i4 0, label %for.inc459.us.i.i.i
diff --git a/test/CodeGen/ARM/vbits.ll b/test/CodeGen/ARM/vbits.ll
index 0a7f7698fa8..2997750ccb1 100644
--- a/test/CodeGen/ARM/vbits.ll
+++ b/test/CodeGen/ARM/vbits.ll
@@ -3,7 +3,7 @@
define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: v_andi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vand d16, d17, d16
@@ -17,7 +17,7 @@ define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: v_andi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vand d16, d17, d16
@@ -31,7 +31,7 @@ define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: v_andi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vand d16, d17, d16
@@ -45,7 +45,7 @@ define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
; CHECK-LABEL: v_andi64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vand d16, d17, d16
@@ -59,7 +59,7 @@ define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: v_andQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vand q8, q9, q8
@@ -74,7 +74,7 @@ define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: v_andQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vand q8, q9, q8
@@ -89,7 +89,7 @@ define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: v_andQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vand q8, q9, q8
@@ -104,7 +104,7 @@ define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
; CHECK-LABEL: v_andQi64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vand q8, q9, q8
@@ -119,7 +119,7 @@ define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: v_bici8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vbic d16, d17, d16
@@ -134,7 +134,7 @@ define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: v_bici16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vbic d16, d17, d16
@@ -149,7 +149,7 @@ define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: v_bici32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vbic d16, d17, d16
@@ -164,7 +164,7 @@ define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
; CHECK-LABEL: v_bici64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vbic d16, d17, d16
@@ -179,7 +179,7 @@ define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: v_bicQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vbic q8, q9, q8
@@ -195,7 +195,7 @@ define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: v_bicQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vbic q8, q9, q8
@@ -211,7 +211,7 @@ define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: v_bicQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vbic q8, q9, q8
@@ -227,7 +227,7 @@ define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
; CHECK-LABEL: v_bicQi64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vbic q8, q9, q8
@@ -243,7 +243,7 @@ define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: v_eori8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: veor d16, d17, d16
@@ -257,7 +257,7 @@ define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: v_eori16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: veor d16, d17, d16
@@ -271,7 +271,7 @@ define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: v_eori32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: veor d16, d17, d16
@@ -285,7 +285,7 @@ define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
; CHECK-LABEL: v_eori64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: veor d16, d17, d16
@@ -299,7 +299,7 @@ define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: v_eorQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: veor q8, q9, q8
@@ -314,7 +314,7 @@ define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: v_eorQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: veor q8, q9, q8
@@ -329,7 +329,7 @@ define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: v_eorQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: veor q8, q9, q8
@@ -344,7 +344,7 @@ define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
; CHECK-LABEL: v_eorQi64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: veor q8, q9, q8
@@ -359,7 +359,7 @@ define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind {
; CHECK-LABEL: v_mvni8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vmvn d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -371,7 +371,7 @@ define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind {
define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind {
; CHECK-LABEL: v_mvni16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vmvn d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -383,7 +383,7 @@ define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind {
define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: v_mvni32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vmvn d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -395,7 +395,7 @@ define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind {
define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind {
; CHECK-LABEL: v_mvni64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vmvn d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -407,7 +407,7 @@ define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind {
define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind {
; CHECK-LABEL: v_mvnQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vmvn q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -420,7 +420,7 @@ define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind {
define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind {
; CHECK-LABEL: v_mvnQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vmvn q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -433,7 +433,7 @@ define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind {
define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: v_mvnQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vmvn q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -446,7 +446,7 @@ define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind {
define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind {
; CHECK-LABEL: v_mvnQi64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vmvn q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -459,7 +459,7 @@ define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind {
define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: v_orri8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vorr d16, d17, d16
@@ -473,7 +473,7 @@ define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: v_orri16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vorr d16, d17, d16
@@ -487,7 +487,7 @@ define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: v_orri32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vorr d16, d17, d16
@@ -501,7 +501,7 @@ define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
; CHECK-LABEL: v_orri64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vorr d16, d17, d16
@@ -515,7 +515,7 @@ define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: v_orrQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vorr q8, q9, q8
@@ -530,7 +530,7 @@ define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: v_orrQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vorr q8, q9, q8
@@ -545,7 +545,7 @@ define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: v_orrQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vorr q8, q9, q8
@@ -560,7 +560,7 @@ define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
; CHECK-LABEL: v_orrQi64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vorr q8, q9, q8
@@ -575,7 +575,7 @@ define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: v_orni8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vorn d16, d17, d16
@@ -590,7 +590,7 @@ define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: v_orni16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vorn d16, d17, d16
@@ -605,7 +605,7 @@ define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: v_orni32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vorn d16, d17, d16
@@ -620,7 +620,7 @@ define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
; CHECK-LABEL: v_orni64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vorn d16, d17, d16
@@ -635,7 +635,7 @@ define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: v_ornQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vorn q8, q9, q8
@@ -651,7 +651,7 @@ define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: v_ornQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vorn q8, q9, q8
@@ -667,7 +667,7 @@ define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: v_ornQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vorn q8, q9, q8
@@ -683,7 +683,7 @@ define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
; CHECK-LABEL: v_ornQi64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vorn q8, q9, q8
@@ -699,7 +699,7 @@ define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vtsti8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vtst.8 d16, d17, d16
@@ -715,7 +715,7 @@ define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vtsti16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vtst.16 d16, d17, d16
@@ -731,7 +731,7 @@ define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: vtsti32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vtst.32 d16, d17, d16
@@ -747,7 +747,7 @@ define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vtstQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vtst.8 q8, q9, q8
@@ -764,7 +764,7 @@ define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vtstQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vtst.16 q8, q9, q8
@@ -781,7 +781,7 @@ define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: vtstQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vtst.32 q8, q9, q8
@@ -798,7 +798,7 @@ define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind {
; CHECK-LABEL: v_orrimm:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vorr.i32 d16, #0x1000000
; CHECK-NEXT: vmov r0, r1, d16
@@ -810,7 +810,7 @@ define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind {
define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind {
; CHECK-LABEL: v_orrimmQ:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vorr.i32 q8, #0x1000000
; CHECK-NEXT: vmov r0, r1, d16
@@ -823,7 +823,7 @@ define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind {
define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind {
; CHECK-LABEL: v_bicimm:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vbic.i32 d16, #0xff000000
; CHECK-NEXT: vmov r0, r1, d16
@@ -835,7 +835,7 @@ define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind {
define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind {
; CHECK-LABEL: v_bicimmQ:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vbic.i32 q8, #0xff000000
; CHECK-NEXT: vmov r0, r1, d16
@@ -848,7 +848,7 @@ define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind {
define <4 x i32> @hidden_not_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: hidden_not_v4i32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d19, r2, r3
; CHECK-NEXT: vmov.i32 q8, #0x6
; CHECK-NEXT: vmov d18, r0, r1
diff --git a/test/CodeGen/ARM/vcvt.ll b/test/CodeGen/ARM/vcvt.ll
index 5f470d60707..7052607bf80 100644
--- a/test/CodeGen/ARM/vcvt.ll
+++ b/test/CodeGen/ARM/vcvt.ll
@@ -3,7 +3,7 @@
define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
; CHECK-LABEL: vcvt_f32tos32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.s32.f32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -15,7 +15,7 @@ define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind {
; CHECK-LABEL: vcvt_f32tou32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.u32.f32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -27,7 +27,7 @@ define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind {
define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vcvt_s32tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.f32.s32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -39,7 +39,7 @@ define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind {
define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vcvt_u32tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.f32.u32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -51,7 +51,7 @@ define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind {
define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind {
; CHECK-LABEL: vcvtQ_f32tos32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.s32.f32 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -64,7 +64,7 @@ define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind {
define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind {
; CHECK-LABEL: vcvtQ_f32tou32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.u32.f32 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -77,7 +77,7 @@ define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind {
define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vcvtQ_s32tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.f32.s32 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -90,7 +90,7 @@ define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind {
define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vcvtQ_u32tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.f32.u32 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -103,7 +103,7 @@ define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind {
define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind {
; CHECK-LABEL: vcvt_n_f32tos32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.s32.f32 d16, d16, #1
; CHECK-NEXT: vmov r0, r1, d16
@@ -115,7 +115,7 @@ define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind {
define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind {
; CHECK-LABEL: vcvt_n_f32tou32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.u32.f32 d16, d16, #1
; CHECK-NEXT: vmov r0, r1, d16
@@ -127,7 +127,7 @@ define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind {
define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vcvt_n_s32tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.f32.s32 d16, d16, #1
; CHECK-NEXT: vmov r0, r1, d16
@@ -139,7 +139,7 @@ define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind {
define <2 x float> @vcvt_n_u32tof32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vcvt_n_u32tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.f32.u32 d16, d16, #1
; CHECK-NEXT: vmov r0, r1, d16
@@ -156,7 +156,7 @@ declare <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32) nounwi
define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind {
; CHECK-LABEL: vcvtQ_n_f32tos32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.s32.f32 q8, q8, #1
; CHECK-NEXT: vmov r0, r1, d16
@@ -169,7 +169,7 @@ define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind {
define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind {
; CHECK-LABEL: vcvtQ_n_f32tou32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.u32.f32 q8, q8, #1
; CHECK-NEXT: vmov r0, r1, d16
@@ -182,7 +182,7 @@ define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind {
define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vcvtQ_n_s32tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.f32.s32 q8, q8, #1
; CHECK-NEXT: vmov r0, r1, d16
@@ -195,7 +195,7 @@ define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind {
define <4 x float> @vcvtQ_n_u32tof32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vcvtQ_n_u32tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.f32.u32 q8, q8, #1
; CHECK-NEXT: vmov r0, r1, d16
@@ -213,7 +213,7 @@ declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwi
define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind {
; CHECK-LABEL: vcvt_f16tof32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcvt.f32.f16 q8, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -226,7 +226,7 @@ define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind {
define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind {
; CHECK-LABEL: vcvt_f32tof16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcvt.f16.f32 d16, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -242,7 +242,7 @@ declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) nounwind readnone
define <4 x i16> @fix_float_to_i16(<4 x float> %in) {
; CHECK-LABEL: fix_float_to_i16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vcvt.u32.f32 q8, q8, #1
@@ -257,7 +257,7 @@ define <4 x i16> @fix_float_to_i16(<4 x float> %in) {
define <2 x i64> @fix_float_to_i64(<2 x float> %in) {
; CHECK-LABEL: fix_float_to_i64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9}
@@ -287,7 +287,7 @@ define <2 x i64> @fix_float_to_i64(<2 x float> %in) {
define <4 x i16> @fix_double_to_i16(<4 x double> %in) {
; CHECK-LABEL: fix_double_to_i16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d18, r0, r1
; CHECK-NEXT: mov r12, sp
; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
@@ -319,7 +319,7 @@ define <4 x i16> @fix_double_to_i16(<4 x double> %in) {
define <2 x i64> @fix_double_to_i64(<2 x double> %in) {
; CHECK-LABEL: fix_double_to_i64:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9}
@@ -352,7 +352,7 @@ define <2 x i64> @fix_double_to_i64(<2 x double> %in) {
define i32 @multi_sint(double %c, i32* nocapture %p, i32* nocapture %q) {
; CHECK-LABEL: multi_sint:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vcvt.s32.f64 s0, d16
; CHECK-NEXT: vstr s0, [r2]
@@ -369,7 +369,7 @@ define i32 @multi_sint(double %c, i32* nocapture %p, i32* nocapture %q) {
define i32 @multi_uint(double %c, i32* nocapture %p, i32* nocapture %q) {
; CHECK-LABEL: multi_uint:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vcvt.u32.f64 s0, d16
; CHECK-NEXT: vstr s0, [r2]
@@ -386,7 +386,7 @@ define i32 @multi_uint(double %c, i32* nocapture %p, i32* nocapture %q) {
define void @double_to_sint_store(double %c, i32* nocapture %p) {
; CHECK-LABEL: double_to_sint_store:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vcvt.s32.f64 s0, d16
; CHECK-NEXT: vstr s0, [r2]
@@ -398,7 +398,7 @@ define void @double_to_sint_store(double %c, i32* nocapture %p) {
define void @double_to_uint_store(double %c, i32* nocapture %p) {
; CHECK-LABEL: double_to_uint_store:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vcvt.u32.f64 s0, d16
; CHECK-NEXT: vstr s0, [r2]
@@ -410,7 +410,7 @@ define void @double_to_uint_store(double %c, i32* nocapture %p) {
define void @float_to_sint_store(float %c, i32* nocapture %p) {
; CHECK-LABEL: float_to_sint_store:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvt.s32.f32 s0, s0
; CHECK-NEXT: vstr s0, [r1]
@@ -422,7 +422,7 @@ define void @float_to_sint_store(float %c, i32* nocapture %p) {
define void @float_to_uint_store(float %c, i32* nocapture %p) {
; CHECK-LABEL: float_to_uint_store:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvt.u32.f32 s0, s0
; CHECK-NEXT: vstr s0, [r1]
diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll
index 5b524145be7..397680c5b0c 100644
--- a/test/CodeGen/ARM/vext.ll
+++ b/test/CodeGen/ARM/vext.ll
@@ -3,7 +3,7 @@
define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextd:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vext.8 d16, d17, d16, #3
@@ -17,7 +17,7 @@ define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextRd:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vext.8 d16, d17, d16, #5
@@ -31,7 +31,7 @@ define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextq:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vext.8 q8, q9, q8, #3
@@ -46,7 +46,7 @@ define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextRq:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vext.8 q8, q9, q8, #7
@@ -61,7 +61,7 @@ define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: test_vextd16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vext.16 d16, d17, d16, #3
@@ -75,7 +75,7 @@ define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: test_vextq32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vext.32 q8, q9, q8, #3
@@ -92,7 +92,7 @@ define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextd_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vext.8 d16, d17, d16, #3
@@ -106,7 +106,7 @@ define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextRq_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vext.8 q8, q9, q8, #7
@@ -121,7 +121,7 @@ define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <16 x i8> @test_vextq_undef_op2(<16 x i8> %a) nounwind {
; CHECK-LABEL: test_vextq_undef_op2:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vext.8 q8, q8, q8, #2
@@ -135,7 +135,7 @@ entry:
define <8 x i8> @test_vextd_undef_op2(<8 x i8> %a) nounwind {
; CHECK-LABEL: test_vextd_undef_op2:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vext.8 d16, d16, d16, #2
; CHECK-NEXT: vmov r0, r1, d16
@@ -148,7 +148,7 @@ entry:
define <16 x i8> @test_vextq_undef_op2_undef(<16 x i8> %a) nounwind {
; CHECK-LABEL: test_vextq_undef_op2_undef:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vext.8 q8, q8, q8, #2
@@ -162,7 +162,7 @@ entry:
define <8 x i8> @test_vextd_undef_op2_undef(<8 x i8> %a) nounwind {
; CHECK-LABEL: test_vextd_undef_op2_undef:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vext.8 d16, d16, d16, #2
; CHECK-NEXT: vmov r0, r1, d16
@@ -180,7 +180,7 @@ entry:
; Essence: a vext is used on %A and something saner than stack load/store for final result.
define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_interleaved:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vext.16 d16, d16, d17, #3
; CHECK-NEXT: vorr d17, d16, d16
@@ -198,7 +198,7 @@ define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; An undef in the shuffle list should still be optimizable
define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0, #8]
; CHECK-NEXT: vzip.16 d17, d16
@@ -215,7 +215,7 @@ define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; Try to look for fallback to by-element inserts.
define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
; CHECK-LABEL: test_multisource:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r1, r0
; CHECK-NEXT: add r2, r0, #48
; CHECK-NEXT: add r0, r0, #32
@@ -240,7 +240,7 @@ define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
; Again, test for fallback to by-element inserts.
define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
; CHECK-LABEL: test_largespan:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vorr d18, d16, d16
; CHECK-NEXT: vuzp.16 d18, d17
@@ -258,7 +258,7 @@ define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
; really important.)
define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_illegal:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vorr d22, d16, d16
; CHECK-NEXT: vmov.u16 r0, d16[0]
@@ -287,7 +287,7 @@ define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; Make sure this doesn't crash
define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind {
; CHECK-LABEL: test_elem_mismatch:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: vmov.32 r0, d16[0]
; CHECK-NEXT: vmov.32 r2, d17[0]
@@ -309,7 +309,7 @@ define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>
define <4 x i32> @test_reverse_and_extract(<2 x i32>* %A) {
; CHECK-LABEL: test_reverse_and_extract:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vrev64.32 q9, q8
; CHECK-NEXT: vext.32 q8, q8, q9, #2
@@ -324,7 +324,7 @@ entry:
define <4 x i32> @test_dup_and_extract(<2 x i32>* %A) {
; CHECK-LABEL: test_dup_and_extract:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vdup.32 q9, d16[0]
; CHECK-NEXT: vext.32 q8, q9, q8, #2
@@ -339,7 +339,7 @@ entry:
define <4 x i32> @test_zip_and_extract(<2 x i32>* %A) {
; CHECK-LABEL: test_zip_and_extract:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vorr q9, q8, q8
; CHECK-NEXT: vorr q10, q8, q8
diff --git a/test/CodeGen/ARM/vpadd.ll b/test/CodeGen/ARM/vpadd.ll
index 3fa93bb43f0..731bc373aaa 100644
--- a/test/CodeGen/ARM/vpadd.ll
+++ b/test/CodeGen/ARM/vpadd.ll
@@ -3,7 +3,7 @@
define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vpaddi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vpadd.i8 d16, d17, d16
@@ -17,7 +17,7 @@ define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vpaddi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vpadd.i16 d16, d17, d16
@@ -31,7 +31,7 @@ define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: vpaddi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vpadd.i32 d16, d17, d16
@@ -45,7 +45,7 @@ define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <2 x float> @vpaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-LABEL: vpaddf32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vpadd.f32 d16, d17, d16
@@ -65,7 +65,7 @@ declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwin
define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind {
; CHECK-LABEL: vpaddls8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vpaddl.s8 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -77,7 +77,7 @@ define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind {
define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind {
; CHECK-LABEL: vpaddls16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vpaddl.s16 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -89,7 +89,7 @@ define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind {
define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vpaddls32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vpaddl.s32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -101,7 +101,7 @@ define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind {
define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind {
; CHECK-LABEL: vpaddlu8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vpaddl.u8 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -113,7 +113,7 @@ define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind {
define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind {
; CHECK-LABEL: vpaddlu16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vpaddl.u16 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -125,7 +125,7 @@ define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind {
define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vpaddlu32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vpaddl.u32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -137,7 +137,7 @@ define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind {
define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind {
; CHECK-LABEL: vpaddlQs8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.s8 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -150,7 +150,7 @@ define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind {
define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vpaddlQs16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.s16 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -163,7 +163,7 @@ define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind {
define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vpaddlQs32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.s32 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -176,7 +176,7 @@ define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind {
define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind {
; CHECK-LABEL: vpaddlQu8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.u8 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -189,7 +189,7 @@ define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind {
define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vpaddlQu16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.u16 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -202,7 +202,7 @@ define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind {
define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vpaddlQu32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.u32 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
@@ -216,7 +216,7 @@ define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind {
; Combine vuzp+vadd->vpadd.
define void @addCombineToVPADD_i8(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADD_i8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpadd.i8 d16, d16, d17
; CHECK-NEXT: vstr d16, [r1]
@@ -233,7 +233,7 @@ define void @addCombineToVPADD_i8(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp {
; Combine vuzp+vadd->vpadd.
define void @addCombineToVPADD_i16(<8 x i16> *%cbcr, <4 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADD_i16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpadd.i16 d16, d16, d17
; CHECK-NEXT: vstr d16, [r1]
@@ -249,7 +249,7 @@ define void @addCombineToVPADD_i16(<8 x i16> *%cbcr, <4 x i16> *%X) nounwind ssp
; Combine vtrn+vadd->vpadd.
define void @addCombineToVPADD_i32(<4 x i32> *%cbcr, <2 x i32> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADD_i32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpadd.i32 d16, d16, d17
; CHECK-NEXT: vstr d16, [r1]
@@ -265,7 +265,7 @@ define void @addCombineToVPADD_i32(<4 x i32> *%cbcr, <2 x i32> *%X) nounwind ssp
; Combine vuzp+vaddl->vpaddl
define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDLq_s8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.s8 q8, q8
; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
@@ -284,7 +284,7 @@ define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ss
; FIXME: Legalization butchers the shuffles.
define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDL_s8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov.i16 d16, #0x8
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vext.8 d17, d18, d16, #1
@@ -309,7 +309,7 @@ define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp
; Combine vuzp+vaddl->vpaddl
define void @addCombineToVPADDLq_u8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDLq_u8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.u8 q8, q8
; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
@@ -328,7 +328,7 @@ define void @addCombineToVPADDLq_u8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ss
; shuffle is awkward, so this doesn't match at the moment.
define void @addCombineToVPADDLq_u8_early_zext(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDLq_u8_early_zext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vmovl.u8 q9, d17
; CHECK-NEXT: vmovl.u8 q8, d16
@@ -349,7 +349,7 @@ define void @addCombineToVPADDLq_u8_early_zext(<16 x i8> *%cbcr, <8 x i16> *%X)
; FIXME: Legalization butchers the shuffle.
define void @addCombineToVPADDL_u8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDL_u8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vext.8 d18, d16, d16, #1
; CHECK-NEXT: vbic.i16 d16, #0xff00
@@ -370,7 +370,7 @@ define void @addCombineToVPADDL_u8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp
; Matching to vpaddl.8 requires matching shuffle(zext()).
define void @addCombineToVPADDL_u8_early_zext(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDL_u8_early_zext:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vmovl.u8 q8, d16
; CHECK-NEXT: vpadd.i16 d16, d16, d17
@@ -388,7 +388,7 @@ define void @addCombineToVPADDL_u8_early_zext(<16 x i8> *%cbcr, <4 x i16> *%X) n
; Combine vuzp+vaddl->vpaddl
define void @addCombineToVPADDLq_s16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDLq_s16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.s16 q8, q8
; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
@@ -406,7 +406,7 @@ define void @addCombineToVPADDLq_s16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind s
; Combine vuzp+vaddl->vpaddl
define void @addCombineToVPADDLq_u16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDLq_u16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.u16 q8, q8
; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
@@ -424,7 +424,7 @@ define void @addCombineToVPADDLq_u16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind s
; Combine vtrn+vaddl->vpaddl
define void @addCombineToVPADDLq_s32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDLq_s32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.s32 q8, q8
; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
@@ -442,7 +442,7 @@ define void @addCombineToVPADDLq_s32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind s
; Combine vtrn+vaddl->vpaddl
define void @addCombineToVPADDLq_u32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDLq_u32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vpaddl.u32 q8, q8
; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
@@ -460,7 +460,7 @@ define void @addCombineToVPADDLq_u32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind s
; Legalization promotes the <4 x i8> to <4 x i16>.
define <4 x i8> @fromExtendingExtractVectorElt_i8(<8 x i8> %in) {
; CHECK-LABEL: fromExtendingExtractVectorElt_i8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vpaddl.s8 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
@@ -474,7 +474,7 @@ define <4 x i8> @fromExtendingExtractVectorElt_i8(<8 x i8> %in) {
; Legalization promotes the <2 x i16> to <2 x i32>.
define <2 x i16> @fromExtendingExtractVectorElt_i16(<4 x i16> %in) {
; CHECK-LABEL: fromExtendingExtractVectorElt_i16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vpaddl.s16 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
diff --git a/test/CodeGen/ARM/vtrn.ll b/test/CodeGen/ARM/vtrn.ll
index df6336043fd..12cb504eda7 100644
--- a/test/CodeGen/ARM/vtrn.ll
+++ b/test/CodeGen/ARM/vtrn.ll
@@ -2,7 +2,7 @@
define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vtrni8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vtrn.8 d17, d16
@@ -19,7 +19,7 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vtrni8_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
; CHECK-NEXT: vtrn.8 [[LDR0]], [[LDR1]]
@@ -34,7 +34,7 @@ define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vtrni16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vtrn.16 d17, d16
@@ -51,7 +51,7 @@ define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vtrni16_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
; CHECK-NEXT: vtrn.16 [[LDR0]], [[LDR1]]
@@ -66,7 +66,7 @@ define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: vtrni32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vtrn.32 d17, d16
@@ -83,7 +83,7 @@ define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: vtrni32_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
; CHECK-NEXT: vtrn.32 [[LDR0]], [[LDR1]]
@@ -98,7 +98,7 @@ define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-LABEL: vtrnf:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vtrn.32 d17, d16
@@ -115,7 +115,7 @@ define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-LABEL: vtrnf_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
; CHECK-NEXT: vtrn.32 [[LDR0]], [[LDR1]]
@@ -130,7 +130,7 @@ define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind {
define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vtrnQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vtrn.8 q9, q8
@@ -148,7 +148,7 @@ define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vtrnQi8_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vtrn.8 q9, q8
@@ -163,7 +163,7 @@ define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vtrnQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vtrn.16 q9, q8
@@ -181,7 +181,7 @@ define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vtrnQi16_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vtrn.16 q9, q8
@@ -196,7 +196,7 @@ define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: vtrnQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vtrn.32 q9, q8
@@ -214,7 +214,7 @@ define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: vtrnQi32_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vtrn.32 q9, q8
@@ -229,7 +229,7 @@ define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: vtrnQf:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vtrn.32 q9, q8
@@ -247,7 +247,7 @@ define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: vtrnQf_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vtrn.32 q9, q8
@@ -263,7 +263,7 @@ define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vtrni8_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vtrn.8 d17, d16
@@ -280,7 +280,7 @@ define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vtrni8_undef_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
; CHECK-NEXT: vtrn.8 [[LDR0]], [[LDR1]]
@@ -295,7 +295,7 @@ define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vtrnQi16_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vtrn.16 q9, q8
@@ -313,7 +313,7 @@ define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <16 x i16> @vtrnQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vtrnQi16_undef_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vtrn.16 q9, q8
@@ -375,7 +375,7 @@ define <8 x i8> @vtrn_mismatched_builvector1(<8 x i8> %tr0, <8 x i8> %tr1,
define void @lower_twice_no_vtrn(<4 x i16>* %A, <4 x i16>* %B, <8 x i16>* %C) {
entry:
; CHECK-LABEL: lower_twice_no_vtrn:
- ; CHECK: @ BB#0:
+ ; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d18, [r0]
; CHECK-NEXT: vtrn.16 d18, d16
@@ -394,7 +394,7 @@ entry:
define void @upper_twice_no_vtrn(<4 x i16>* %A, <4 x i16>* %B, <8 x i16>* %C) {
entry:
; CHECK-LABEL: upper_twice_no_vtrn:
- ; CHECK: @ BB#0:
+ ; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d18, [r0]
; CHECK-NEXT: vtrn.16 d18, d16
diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll
index 24090cfd6c6..0ac366be3fe 100644
--- a/test/CodeGen/ARM/vuzp.ll
+++ b/test/CodeGen/ARM/vuzp.ll
@@ -3,7 +3,7 @@
define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vuzpi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vuzp.8 d17, d16
@@ -20,7 +20,7 @@ define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @vuzpi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vuzpi8_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vuzp.8 d16, d17
@@ -35,7 +35,7 @@ define <16 x i8> @vuzpi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vuzpi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vuzp.16 d17, d16
@@ -52,7 +52,7 @@ define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <8 x i16> @vuzpi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vuzpi16_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vuzp.16 d16, d17
@@ -69,7 +69,7 @@ define <8 x i16> @vuzpi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vuzpQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vuzp.8 q9, q8
@@ -87,7 +87,7 @@ define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <32 x i8> @vuzpQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vuzpQi8_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vuzp.8 q9, q8
@@ -102,7 +102,7 @@ define <32 x i8> @vuzpQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vuzpQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vuzp.16 q9, q8
@@ -120,7 +120,7 @@ define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <16 x i16> @vuzpQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vuzpQi16_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vuzp.16 q9, q8
@@ -135,7 +135,7 @@ define <16 x i16> @vuzpQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: vuzpQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vuzp.32 q9, q8
@@ -153,7 +153,7 @@ define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <8 x i32> @vuzpQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: vuzpQi32_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vuzp.32 q9, q8
@@ -168,7 +168,7 @@ define <8 x i32> @vuzpQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: vuzpQf:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vuzp.32 q9, q8
@@ -186,7 +186,7 @@ define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind {
define <8 x float> @vuzpQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: vuzpQf_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vuzp.32 q9, q8
@@ -203,7 +203,7 @@ define <8 x float> @vuzpQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vuzpi8_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vuzp.8 d17, d16
@@ -220,7 +220,7 @@ define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @vuzpi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vuzpi8_undef_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vuzp.8 d16, d17
@@ -235,7 +235,7 @@ define <16 x i8> @vuzpi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vuzpQi16_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vuzp.16 q9, q8
@@ -253,7 +253,7 @@ define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <16 x i16> @vuzpQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vuzpQi16_undef_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vuzp.16 q9, q8
@@ -268,7 +268,7 @@ define <16 x i16> @vuzpQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <8 x i16> @vuzp_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) {
; CHECK-LABEL: vuzp_lower_shufflemask_undef:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vorr q9, q8, q8
@@ -285,7 +285,7 @@ entry:
define <4 x i32> @vuzp_lower_shufflemask_zeroed(<2 x i32>* %A, <2 x i32>* %B) {
; CHECK-LABEL: vuzp_lower_shufflemask_zeroed:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vdup.32 q9, d16[0]
@@ -303,7 +303,7 @@ entry:
define void @vuzp_rev_shufflemask_vtrn(<2 x i32>* %A, <2 x i32>* %B, <4 x i32>* %C) {
; CHECK-LABEL: vuzp_rev_shufflemask_vtrn:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vrev64.32 q9, q8
@@ -323,7 +323,7 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8
; This results in a build_vector with mismatched types. We will generate two vmovn.i32 instructions to
; truncate from i32 to i16 and one vmovn.i16 to perform the final truncation for i8.
; CHECK-LABEL: cmpsel_trunc:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: add r12, sp, #16
; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
; CHECK-NEXT: mov r12, sp
@@ -352,7 +352,7 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8
; to perform the vuzp and get the vbsl mask.
define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r11, lr}
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: add r12, sp, #8
@@ -388,7 +388,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
; This produces a build_vector with some of the operands undefs.
define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_right:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r12, sp
; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
; CHECK-NEXT: add r12, sp, #16
@@ -416,7 +416,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1
define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_left:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: mov r12, sp
; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
; CHECK-NEXT: add r12, sp, #16
@@ -435,7 +435,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 3
-; CHECK-NEXT: @ BB#1:
+; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI22_0:
; CHECK-NEXT: .byte 255 @ 0xff
; CHECK-NEXT: .byte 255 @ 0xff
@@ -458,7 +458,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
; get some vector size that we can represent.
define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1,
; CHECK-LABEL: vuzp_wide_type:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r10, r11, lr}
; CHECK-NEXT: push {r4, r10, r11, lr}
; CHECK-NEXT: .setfp r11, sp, #8
@@ -517,7 +517,7 @@ define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1,
; CHECK-NEXT: pop {r4, r10, r11, lr}
; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 3
-; CHECK-NEXT: @ BB#1:
+; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI23_0:
; CHECK-NEXT: .byte 0 @ 0x0
; CHECK-NEXT: .byte 1 @ 0x1
@@ -539,7 +539,7 @@ define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1,
%struct.uint8x8x2_t = type { [2 x <8 x i8>] }
define %struct.uint8x8x2_t @vuzp_extract_subvector(<16 x i8> %t) #0 {
; CHECK-LABEL: vuzp_extract_subvector:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vorr d18, d17, d17
diff --git a/test/CodeGen/ARM/vzip.ll b/test/CodeGen/ARM/vzip.ll
index 06b49ab9405..5047b3e087a 100644
--- a/test/CodeGen/ARM/vzip.ll
+++ b/test/CodeGen/ARM/vzip.ll
@@ -3,7 +3,7 @@
define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vzipi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vzip.8 d17, d16
@@ -20,7 +20,7 @@ define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @vzipi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vzipi8_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vzip.8 d16, d17
@@ -35,7 +35,7 @@ define <16 x i8> @vzipi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vzipi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vzip.16 d17, d16
@@ -52,7 +52,7 @@ define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <8 x i16> @vzipi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: vzipi16_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vzip.16 d16, d17
@@ -69,7 +69,7 @@ define <8 x i16> @vzipi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vzipQi8:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vzip.8 q9, q8
@@ -87,7 +87,7 @@ define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <32 x i8> @vzipQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vzipQi8_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vzip.8 q9, q8
@@ -102,7 +102,7 @@ define <32 x i8> @vzipQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vzipQi16:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vzip.16 q9, q8
@@ -120,7 +120,7 @@ define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <16 x i16> @vzipQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: vzipQi16_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vzip.16 q9, q8
@@ -135,7 +135,7 @@ define <16 x i16> @vzipQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: vzipQi32:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vzip.32 q9, q8
@@ -153,7 +153,7 @@ define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <8 x i32> @vzipQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: vzipQi32_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vzip.32 q9, q8
@@ -168,7 +168,7 @@ define <8 x i32> @vzipQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: vzipQf:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vzip.32 q9, q8
@@ -186,7 +186,7 @@ define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
define <8 x float> @vzipQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: vzipQf_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vzip.32 q9, q8
@@ -203,7 +203,7 @@ define <8 x float> @vzipQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vzipi8_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d17, [r0]
; CHECK-NEXT: vzip.8 d17, d16
@@ -220,7 +220,7 @@ define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @vzipi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: vzipi8_undef_Qres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vzip.8 d16, d17
@@ -235,7 +235,7 @@ define <16 x i8> @vzipi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vzipQi8_undef:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
; CHECK-NEXT: vzip.8 q9, q8
@@ -253,7 +253,7 @@ define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <32 x i8> @vzipQi8_undef_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: vzipQi8_undef_QQres:
-; CHECK: @ BB#0:
+; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vzip.8 q9, q8
@@ -268,7 +268,7 @@ define <32 x i8> @vzipQi8_undef_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
define <8 x i16> @vzip_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) {
; CHECK-LABEL: vzip_lower_shufflemask_undef:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d17, [r1]
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vzip.16 d16, d17
@@ -287,7 +287,7 @@ entry:
; as a vtrn.
define <8 x i16> @vzip_lower_shufflemask_undef_rev(<4 x i16>* %A, <4 x i16>* %B) {
; CHECK-LABEL: vzip_lower_shufflemask_undef_rev:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d16, [r1]
; CHECK-NEXT: vldr d19, [r0]
; CHECK-NEXT: vtrn.16 d19, d16
@@ -303,7 +303,7 @@ entry:
define <4 x i32> @vzip_lower_shufflemask_zeroed(<2 x i32>* %A) {
; CHECK-LABEL: vzip_lower_shufflemask_zeroed:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vdup.32 q9, d16[0]
; CHECK-NEXT: vzip.32 q8, q9
@@ -318,7 +318,7 @@ entry:
define <4 x i32> @vzip_lower_shufflemask_vuzp(<2 x i32>* %A) {
; CHECK-LABEL: vzip_lower_shufflemask_vuzp:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vdup.32 q9, d16[0]
; CHECK-NEXT: vzip.32 q8, q9
@@ -333,7 +333,7 @@ entry:
define void @vzip_undef_rev_shufflemask_vtrn(<2 x i32>* %A, <4 x i32>* %B) {
; CHECK-LABEL: vzip_undef_rev_shufflemask_vtrn:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vorr q9, q8, q8
; CHECK-NEXT: vzip.32 q8, q9
@@ -349,7 +349,7 @@ entry:
define void @vzip_vext_factor(<8 x i16>* %A, <4 x i16>* %B) {
; CHECK-LABEL: vzip_vext_factor:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vext.16 d18, d16, d17, #1
; CHECK-NEXT: vext.16 d16, d18, d17, #2
@@ -365,7 +365,7 @@ entry:
define <8 x i8> @vdup_zip(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; CHECK-LABEL: vdup_zip:
-; CHECK: @ BB#0: @ %entry
+; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vld1.8 {d16[]}, [r1]
; CHECK-NEXT: vld1.8 {d17[]}, [r0]
; CHECK-NEXT: vzip.8 d17, d16
diff --git a/test/CodeGen/AVR/atomics/fence.ll b/test/CodeGen/AVR/atomics/fence.ll
index 6ea49bc7e3f..b4cd215f3a2 100644
--- a/test/CodeGen/AVR/atomics/fence.ll
+++ b/test/CodeGen/AVR/atomics/fence.ll
@@ -4,7 +4,7 @@
; AVR is always single-threaded, so fences do nothing.
; CHECK-LABEL: atomic_fence8
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: ret
define void @atomic_fence8() {
fence acquire
diff --git a/test/CodeGen/AVR/select-must-add-unconditional-jump.ll b/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
index 64faff70a33..5c247f6e8e6 100644
--- a/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
+++ b/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
@@ -9,18 +9,18 @@
;
; This issue manifests in a CFG that looks something like this:
;
-; BB#2: derived from LLVM BB %finish
-; Predecessors according to CFG: BB#0 BB#1
-; %0<def> = PHI %3, <BB#0>, %5, <BB#1>
+; %bb.2: derived from LLVM BB %finish
+; Predecessors according to CFG: %bb.0 %bb.1
+; %0<def> = PHI %3, <%bb.0>, %5, <%bb.1>
; %7<def> = LDIRdK 2
; %8<def> = LDIRdK 1
; CPRdRr %2, %0, %SREG<imp-def>
-; BREQk <BB#6>, %SREG<imp-use>
-; Successors according to CFG: BB#5(?%) BB#6(?%)
+; BREQk <%bb.6>, %SREG<imp-use>
+; Successors according to CFG: %bb.5(?%) %bb.6(?%)
;
-; The code assumes it the fallthrough block after this is BB#5, but
-; it's actually BB#3! To be proper, there should be an unconditional
-; jump tying this block to BB#5.
+; The code assumes the fallthrough block after this is %bb.5, but
+; it's actually %bb.3! To be proper, there should be an unconditional
+; jump tying this block to %bb.5.
define i8 @select_must_add_unconditional_jump(i8 %arg0, i8 %arg1) unnamed_addr {
entry-block:
@@ -49,10 +49,10 @@ dead:
; basic block containing `select` needs to contain explicit jumps to
; both successors.
-; CHECK: BB#2: derived from LLVM BB %finish
-; CHECK: BREQk <[[BRANCHED:BB#[0-9]+]]>
-; CHECK: RJMPk <[[DIRECT:BB#[0-9]+]]>
+; CHECK: %bb.2: derived from LLVM BB %finish
+; CHECK: BREQk <[[BRANCHED:%bb.[0-9]+]]>
+; CHECK: RJMPk <[[DIRECT:%bb.[0-9]+]]>
; CHECK: Successors according to CFG
; CHECK-SAME-DAG: {{.*}}[[BRANCHED]]
; CHECK-SAME-DAG: {{.*}}[[DIRECT]]
-; CHECK: BB#3: derived from LLVM BB
+; CHECK: %bb.3: derived from LLVM BB
diff --git a/test/CodeGen/Generic/MachineBranchProb.ll b/test/CodeGen/Generic/MachineBranchProb.ll
index 75e9a191e3d..dc4a52ab711 100644
--- a/test/CodeGen/Generic/MachineBranchProb.ll
+++ b/test/CodeGen/Generic/MachineBranchProb.ll
@@ -21,14 +21,14 @@ entry:
i64 5, label %sw.bb1
i64 15, label %sw.bb
], !prof !0
-; CHECK: BB#0: derived from LLVM BB %entry
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}92.17%) BB#4({{[0-9a-fx/= ]+}}7.83%)
-; CHECK: BB#4: derived from LLVM BB %entry
-; CHECK: Successors according to CFG: BB#2({{[0-9a-fx/= ]+}}75.29%) BB#5({{[0-9a-fx/= ]+}}24.71%)
-; CHECK: BB#5: derived from LLVM BB %entry
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}47.62%) BB#6({{[0-9a-fx/= ]+}}52.38%)
-; CHECK: BB#6: derived from LLVM BB %entry
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}36.36%) BB#3({{[0-9a-fx/= ]+}}63.64%)
+; CHECK: %bb.0: derived from LLVM BB %entry
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}92.17%) %bb.4({{[0-9a-fx/= ]+}}7.83%)
+; CHECK: %bb.4: derived from LLVM BB %entry
+; CHECK: Successors according to CFG: %bb.2({{[0-9a-fx/= ]+}}75.29%) %bb.5({{[0-9a-fx/= ]+}}24.71%)
+; CHECK: %bb.5: derived from LLVM BB %entry
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}47.62%) %bb.6({{[0-9a-fx/= ]+}}52.38%)
+; CHECK: %bb.6: derived from LLVM BB %entry
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}36.36%) %bb.3({{[0-9a-fx/= ]+}}63.64%)
sw.bb:
; this call will prevent simplifyCFG from optimizing the block away in ARM/AArch64.
@@ -70,9 +70,9 @@ return: ret void
; right with weight 20.
;
; CHECK-LABEL: Machine code for function left_leaning_weight_balanced_tree:
-; CHECK: BB#0: derived from LLVM BB %entry
+; CHECK: %bb.0: derived from LLVM BB %entry
; CHECK-NOT: Successors
-; CHECK: Successors according to CFG: BB#8({{[0-9a-fx/= ]+}}39.71%) BB#9({{[0-9a-fx/= ]+}}60.29%)
+; CHECK: Successors according to CFG: %bb.8({{[0-9a-fx/= ]+}}39.71%) %bb.9({{[0-9a-fx/= ]+}}60.29%)
}
!1 = !{!"branch_weights",
diff --git a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
index 47da85b2308..c685a0c2740 100644
--- a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
+++ b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
@@ -11,7 +11,7 @@
# then created code, where the first predicated instruction has incorrect
# implicit use of r0:
#
-# BB#0:
+# %bb.0:
# Live Ins: %R0
# %R1<def> = A2_sxth %R0<kill> ; hoisted, kills r0
# A2_nop %P0<imp-def>
diff --git a/test/CodeGen/Hexagon/hwloop-redef-imm.mir b/test/CodeGen/Hexagon/hwloop-redef-imm.mir
index 014908e20a7..7b6044c9a50 100644
--- a/test/CodeGen/Hexagon/hwloop-redef-imm.mir
+++ b/test/CodeGen/Hexagon/hwloop-redef-imm.mir
@@ -8,10 +8,10 @@
# loop setup in the preheader).
# CHECK: [[R0:%[0-9]+]]:intregs = A2_tfrsi 1920
-# CHECK: J2_loop0r %bb.1.b1, [[R0]]
+# CHECK: J2_loop0r %bb.1, [[R0]]
#
# CHECK: bb.1.b1 (address-taken):
-# CHECK: ENDLOOP0 %bb.1.b1
+# CHECK: ENDLOOP0 %bb.1
--- |
diff --git a/test/CodeGen/Hexagon/ifcvt-edge-weight.ll b/test/CodeGen/Hexagon/ifcvt-edge-weight.ll
index 341567e1d02..250a81938bd 100644
--- a/test/CodeGen/Hexagon/ifcvt-edge-weight.ll
+++ b/test/CodeGen/Hexagon/ifcvt-edge-weight.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -hexagon-eif=0 -print-machineinstrs=if-converter %s -o /dev/null 2>&1 | FileCheck %s
; Check that the edge weights are updated correctly after if-conversion.
-; CHECK: BB#3:
-; CHECK: Successors according to CFG: BB#2({{[0-9a-fx/= ]+}}10.00%) BB#1({{[0-9a-fx/= ]+}}90.00%)
+; CHECK: %bb.3:
+; CHECK: Successors according to CFG: %bb.2({{[0-9a-fx/= ]+}}10.00%) %bb.1({{[0-9a-fx/= ]+}}90.00%)
@a = external global i32
@d = external global i32
diff --git a/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir b/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
index d9b117bd9c2..f5d63287aff 100644
--- a/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
+++ b/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
@@ -30,33 +30,33 @@ liveins:
- { reg: '%edi' }
- { reg: '%esi' }
# CHECK: frameInfo:
-# CHECK: savePoint: '%bb.2.true'
-# CHECK-NEXT: restorePoint: '%bb.2.true'
+# CHECK: savePoint: '%bb.2'
+# CHECK-NEXT: restorePoint: '%bb.2'
# CHECK: stack
frameInfo:
maxAlignment: 4
hasCalls: true
- savePoint: '%bb.2.true'
- restorePoint: '%bb.2.true'
+ savePoint: '%bb.2'
+ restorePoint: '%bb.2'
stack:
- { id: 0, name: tmp, offset: 0, size: 4, alignment: 4 }
body: |
bb.0:
- successors: %bb.2.true, %bb.1
+ successors: %bb.2, %bb.1
liveins: %edi, %esi
%eax = COPY %edi
CMP32rr %eax, killed %esi, implicit-def %eflags
- JL_1 %bb.2.true, implicit killed %eflags
+ JL_1 %bb.2, implicit killed %eflags
bb.1:
- successors: %bb.3.false
+ successors: %bb.3
liveins: %eax
- JMP_1 %bb.3.false
+ JMP_1 %bb.3
bb.2.true:
- successors: %bb.3.false
+ successors: %bb.3
liveins: %eax
MOV32mr %stack.0.tmp, 1, _, 0, _, killed %eax
diff --git a/test/CodeGen/MIR/X86/implicit-register-flag.mir b/test/CodeGen/MIR/X86/implicit-register-flag.mir
index 70b1cc50094..dddbfc90cf6 100644
--- a/test/CodeGen/MIR/X86/implicit-register-flag.mir
+++ b/test/CodeGen/MIR/X86/implicit-register-flag.mir
@@ -31,11 +31,11 @@
name: foo
body: |
bb.0.entry:
- successors: %bb.1.less, %bb.2.exit
+ successors: %bb.1, %bb.2
; CHECK: CMP32ri8 %edi, 10, implicit-def %eflags
- ; CHECK-NEXT: JG_1 %bb.2.exit, implicit %eflags
+ ; CHECK-NEXT: JG_1 %bb.2, implicit %eflags
CMP32ri8 %edi, 10, implicit-def %eflags
- JG_1 %bb.2.exit, implicit %eflags
+ JG_1 %bb.2, implicit %eflags
bb.1.less:
; CHECK: %eax = MOV32r0 implicit-def %eflags
diff --git a/test/CodeGen/MIR/X86/jump-table-info.mir b/test/CodeGen/MIR/X86/jump-table-info.mir
index 52d562c8212..71dd46b8218 100644
--- a/test/CodeGen/MIR/X86/jump-table-info.mir
+++ b/test/CodeGen/MIR/X86/jump-table-info.mir
@@ -61,23 +61,23 @@ name: test_jumptable
# CHECK-NEXT: kind: label-difference32
# CHECK-NEXT: entries:
# CHECK-NEXT: - id: 0
-# CHECK-NEXT: blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ]
+# CHECK-NEXT: blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ]
# CHECK-NEXT: body:
jumpTable:
kind: label-difference32
entries:
- id: 0
- blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ]
+ blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ]
body: |
bb.0.entry:
- successors: %bb.2.def, %bb.1.entry
+ successors: %bb.2, %bb.1
%eax = MOV32rr %edi, implicit-def %rax
CMP32ri8 %edi, 3, implicit-def %eflags
- JA_1 %bb.2.def, implicit %eflags
+ JA_1 %bb.2, implicit %eflags
bb.1.entry:
- successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4
+ successors: %bb.3, %bb.4, %bb.5, %bb.6
; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg
%rcx = LEA64r %rip, 1, _, %jump-table.0, _
%rax = MOVSX64rm32 %rcx, 4, %rax, 0, _
@@ -110,17 +110,17 @@ jumpTable:
kind: label-difference32
entries:
- id: 1
- blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ]
+ blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ]
body: |
bb.0.entry:
- successors: %bb.2.def, %bb.1.entry
+ successors: %bb.2, %bb.1
%eax = MOV32rr %edi, implicit-def %rax
CMP32ri8 %edi, 3, implicit-def %eflags
- JA_1 %bb.2.def, implicit %eflags
+ JA_1 %bb.2, implicit %eflags
bb.1.entry:
- successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4
+ successors: %bb.3, %bb.4, %bb.5, %bb.6
; Verify that the printer will use an id of 0 for this jump table:
; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg
%rcx = LEA64r %rip, 1, _, %jump-table.1, _
diff --git a/test/CodeGen/MIR/X86/machine-basic-block-operands.mir b/test/CodeGen/MIR/X86/machine-basic-block-operands.mir
index f5915738679..a7866f239be 100644
--- a/test/CodeGen/MIR/X86/machine-basic-block-operands.mir
+++ b/test/CodeGen/MIR/X86/machine-basic-block-operands.mir
@@ -36,13 +36,13 @@ name: foo
body: |
; CHECK: bb.0.entry
bb.0.entry:
- successors: %bb.1.less, %bb.2.exit
+ successors: %bb.1, %bb.2
%eax = MOV32rm %rdi, 1, _, 0, _
; CHECK: CMP32ri8 %eax, 10
- ; CHECK-NEXT: JG_1 %bb.2.exit
+ ; CHECK-NEXT: JG_1 %bb.2
CMP32ri8 %eax, 10, implicit-def %eflags
- JG_1 %bb.2.exit, implicit %eflags
+ JG_1 %bb.2, implicit %eflags
; CHECK: bb.1.less:
bb.1.less:
diff --git a/test/CodeGen/MIR/X86/newline-handling.mir b/test/CodeGen/MIR/X86/newline-handling.mir
index ce53e49eddb..1a93c1a6425 100644
--- a/test/CodeGen/MIR/X86/newline-handling.mir
+++ b/test/CodeGen/MIR/X86/newline-handling.mir
@@ -35,10 +35,10 @@ liveins:
# CHECK-LABEL: name: foo
# CHECK: body: |
# CHECK-NEXT: bb.0.entry:
-# CHECK-NEXT: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000)
+# CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
# CHECK-NEXT: liveins: %edi
# CHECK: CMP32ri8 %edi, 10, implicit-def %eflags
-# CHECK-NEXT: JG_1 %bb.2.exit, implicit killed %eflags
+# CHECK-NEXT: JG_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.less:
# CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags
@@ -50,13 +50,13 @@ liveins:
# CHECK-NEXT: RETQ killed %eax
body: |
bb.0.entry:
- successors: %bb.1.less, %bb.2.exit
+ successors: %bb.1, %bb.2
liveins: %edi
CMP32ri8 %edi, 10, implicit-def %eflags
- JG_1 %bb.2.exit, implicit killed %eflags
+ JG_1 %bb.2, implicit killed %eflags
bb.1.less:
@@ -79,10 +79,10 @@ liveins:
# CHECK-LABEL: name: bar
# CHECK: body: |
# CHECK-NEXT: bb.0.entry:
-# CHECK-NEXT: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000)
+# CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
# CHECK-NEXT: liveins: %edi
# CHECK: CMP32ri8 %edi, 10, implicit-def %eflags
-# CHECK-NEXT: JG_1 %bb.2.exit, implicit killed %eflags
+# CHECK-NEXT: JG_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.less:
# CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags
@@ -95,10 +95,10 @@ liveins:
body: |
bb.0.entry:
- successors: %bb.1.less, %bb.2.exit
+ successors: %bb.1, %bb.2
liveins: %edi
CMP32ri8 %edi, 10, implicit-def %eflags
- JG_1 %bb.2.exit, implicit killed %eflags
+ JG_1 %bb.2, implicit killed %eflags
bb.1.less: %eax = MOV32r0 implicit-def dead %eflags
RETQ killed %eax
diff --git a/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir b/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir
index 512ba4e41aa..5a22557f324 100644
--- a/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir
+++ b/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir
@@ -21,14 +21,14 @@
name: foo
body: |
; CHECK-LABEL: bb.0.entry:
- ; CHECK: successors: %bb.1.less(0x2a3d70a4), %bb.2.exit(0x55c28f5c)
+ ; CHECK: successors: %bb.1(0x2a3d70a4), %bb.2(0x55c28f5c)
; CHECK-LABEL: bb.1.less:
bb.0.entry:
- successors: %bb.1.less (33), %bb.2.exit(67)
+ successors: %bb.1 (33), %bb.2(67)
liveins: %edi
CMP32ri8 %edi, 10, implicit-def %eflags
- JG_1 %bb.2.exit, implicit killed %eflags
+ JG_1 %bb.2, implicit killed %eflags
bb.1.less:
%eax = MOV32r0 implicit-def dead %eflags
diff --git a/test/CodeGen/MSP430/BranchSelector.ll b/test/CodeGen/MSP430/BranchSelector.ll
index 4dfd95bf41a..a36da626234 100644
--- a/test/CodeGen/MSP430/BranchSelector.ll
+++ b/test/CodeGen/MSP430/BranchSelector.ll
@@ -579,7 +579,7 @@ begin:
; This branch should not be expanded
; CHECK-LABEL: .LBB1_1:
; CHECK: jeq .LBB1_1
-; CHECK: BB#2:
+; CHECK: %bb.2:
; CHECK: ret
br i1 %lnot, label %begin, label %end
diff --git a/test/CodeGen/Mips/compactbranches/empty-block.mir b/test/CodeGen/Mips/compactbranches/empty-block.mir
index 7fb1afae912..5bfaef0cb69 100644
--- a/test/CodeGen/Mips/compactbranches/empty-block.mir
+++ b/test/CodeGen/Mips/compactbranches/empty-block.mir
@@ -5,11 +5,11 @@
# CHECK: blezc
# CHECK: nop
-# CHECK: # BB#1:
+# CHECK: # %bb.1:
# CHECK: .insn
-# CHECK: # BB#2:
+# CHECK: # %bb.2:
# CHECK: .insn
-# CHECK: # BB#3:
+# CHECK: # %bb.3:
# CHECK: jal
--- |
diff --git a/test/CodeGen/Mips/lcb4a.ll b/test/CodeGen/Mips/lcb4a.ll
index 4a99ef26efc..016e895d12e 100644
--- a/test/CodeGen/Mips/lcb4a.ll
+++ b/test/CodeGen/Mips/lcb4a.ll
@@ -26,7 +26,7 @@ if.end: ; preds = %if.else, %if.then
}
; ci: beqz $3, $BB0_2
-; ci: # BB#1: # %if.else
+; ci: # %bb.1: # %if.else
; Function Attrs: nounwind optsize
diff --git a/test/CodeGen/Mips/prevent-hoisting.ll b/test/CodeGen/Mips/prevent-hoisting.ll
index ca71bf7d1af..1fc7462811c 100644
--- a/test/CodeGen/Mips/prevent-hoisting.ll
+++ b/test/CodeGen/Mips/prevent-hoisting.ll
@@ -16,7 +16,7 @@
; CHECK: sll
; Check that at the start of a fallthrough block there is an instruction that writes to $1.
-; CHECK: {{BB[0-9_#]+}}:
+; CHECK: {{%bb.[0-9]+}}:
; CHECK: sll $1, $[[R0:[0-9]+]], 4
; CHECK: lw $[[R1:[0-9]+]], %got(assignSE2partition)($[[R2:[0-9]+]])
diff --git a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
index 56f4a4173ef..5b8b8147cce 100644
--- a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
+++ b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
@@ -6,7 +6,7 @@
define i32 @test(i32 %i) {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addis 4, 2, .LC0@toc@ha
; CHECK-NEXT: extsw 3, 3
; CHECK-NEXT: addis 5, 2, .LC1@toc@ha
diff --git a/test/CodeGen/PowerPC/addegluecrash.ll b/test/CodeGen/PowerPC/addegluecrash.ll
index f17b6dce9a9..642960f8490 100644
--- a/test/CodeGen/PowerPC/addegluecrash.ll
+++ b/test/CodeGen/PowerPC/addegluecrash.ll
@@ -5,7 +5,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
define void @bn_mul_comba8(i64* nocapture %r, i64* nocapture readonly %a, i64* nocapture readonly %b) {
; CHECK-LABEL: bn_mul_comba8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: ld 6, 0(4)
; CHECK-NEXT: ld 7, 0(5)
; CHECK-NEXT: mulhdu 8, 7, 6
diff --git a/test/CodeGen/PowerPC/andc.ll b/test/CodeGen/PowerPC/andc.ll
index df47bfc1e38..9bfbda2bbd7 100644
--- a/test/CodeGen/PowerPC/andc.ll
+++ b/test/CodeGen/PowerPC/andc.ll
@@ -3,7 +3,7 @@
define i1 @and_cmp1(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andc 3, 4, 3
; CHECK-NEXT: cntlzw 3, 3
; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
@@ -15,7 +15,7 @@ define i1 @and_cmp1(i32 %x, i32 %y) {
define i1 @and_cmp_const(i32 %x) {
; CHECK-LABEL: and_cmp_const:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: li 4, 43
; CHECK-NEXT: andc 3, 4, 3
; CHECK-NEXT: cntlzw 3, 3
@@ -28,7 +28,7 @@ define i1 @and_cmp_const(i32 %x) {
define i1 @foo(i32 %i) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lis 4, 4660
; CHECK-NEXT: ori 4, 4, 22136
; CHECK-NEXT: andc 3, 4, 3
@@ -42,7 +42,7 @@ define i1 @foo(i32 %i) {
define <4 x i32> @hidden_not_v4i32(<4 x i32> %x) {
; CHECK-LABEL: hidden_not_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, 6
; CHECK-NEXT: xxlandc 34, 35, 34
; CHECK-NEXT: blr
diff --git a/test/CodeGen/PowerPC/atomics-constant.ll b/test/CodeGen/PowerPC/atomics-constant.ll
index 77825c608a3..559cd9eb656 100644
--- a/test/CodeGen/PowerPC/atomics-constant.ll
+++ b/test/CodeGen/PowerPC/atomics-constant.ll
@@ -7,7 +7,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
define i64 @foo() {
; CHECK-LABEL: foo:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis 3, 2, .LC0@toc@ha
; CHECK-NEXT: li 4, 0
; CHECK-NEXT: ld 3, .LC0@toc@l(3)
diff --git a/test/CodeGen/PowerPC/atomics-regression.ll b/test/CodeGen/PowerPC/atomics-regression.ll
index c8fb1e74e73..7079f6dd52e 100644
--- a/test/CodeGen/PowerPC/atomics-regression.ll
+++ b/test/CodeGen/PowerPC/atomics-regression.ll
@@ -3,7 +3,7 @@
define i8 @test0(i8* %ptr) {
; PPC64LE-LABEL: test0:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lbz 3, 0(3)
; PPC64LE-NEXT: blr
%val = load atomic i8, i8* %ptr unordered, align 1
@@ -12,7 +12,7 @@ define i8 @test0(i8* %ptr) {
define i8 @test1(i8* %ptr) {
; PPC64LE-LABEL: test1:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lbz 3, 0(3)
; PPC64LE-NEXT: blr
%val = load atomic i8, i8* %ptr monotonic, align 1
@@ -21,7 +21,7 @@ define i8 @test1(i8* %ptr) {
define i8 @test2(i8* %ptr) {
; PPC64LE-LABEL: test2:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lbz 3, 0(3)
; PPC64LE-NEXT: cmpd 7, 3, 3
; PPC64LE-NEXT: bne- 7, .+4
@@ -33,7 +33,7 @@ define i8 @test2(i8* %ptr) {
define i8 @test3(i8* %ptr) {
; PPC64LE-LABEL: test3:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: ori 2, 2, 0
; PPC64LE-NEXT: lbz 3, 0(3)
@@ -47,7 +47,7 @@ define i8 @test3(i8* %ptr) {
define i16 @test4(i16* %ptr) {
; PPC64LE-LABEL: test4:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lhz 3, 0(3)
; PPC64LE-NEXT: blr
%val = load atomic i16, i16* %ptr unordered, align 2
@@ -56,7 +56,7 @@ define i16 @test4(i16* %ptr) {
define i16 @test5(i16* %ptr) {
; PPC64LE-LABEL: test5:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lhz 3, 0(3)
; PPC64LE-NEXT: blr
%val = load atomic i16, i16* %ptr monotonic, align 2
@@ -65,7 +65,7 @@ define i16 @test5(i16* %ptr) {
define i16 @test6(i16* %ptr) {
; PPC64LE-LABEL: test6:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lhz 3, 0(3)
; PPC64LE-NEXT: cmpd 7, 3, 3
; PPC64LE-NEXT: bne- 7, .+4
@@ -77,7 +77,7 @@ define i16 @test6(i16* %ptr) {
define i16 @test7(i16* %ptr) {
; PPC64LE-LABEL: test7:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: ori 2, 2, 0
; PPC64LE-NEXT: lhz 3, 0(3)
@@ -91,7 +91,7 @@ define i16 @test7(i16* %ptr) {
define i32 @test8(i32* %ptr) {
; PPC64LE-LABEL: test8:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: blr
%val = load atomic i32, i32* %ptr unordered, align 4
@@ -100,7 +100,7 @@ define i32 @test8(i32* %ptr) {
define i32 @test9(i32* %ptr) {
; PPC64LE-LABEL: test9:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: blr
%val = load atomic i32, i32* %ptr monotonic, align 4
@@ -109,7 +109,7 @@ define i32 @test9(i32* %ptr) {
define i32 @test10(i32* %ptr) {
; PPC64LE-LABEL: test10:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: cmpd 7, 3, 3
; PPC64LE-NEXT: bne- 7, .+4
@@ -121,7 +121,7 @@ define i32 @test10(i32* %ptr) {
define i32 @test11(i32* %ptr) {
; PPC64LE-LABEL: test11:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: ori 2, 2, 0
; PPC64LE-NEXT: lwz 3, 0(3)
@@ -135,7 +135,7 @@ define i32 @test11(i32* %ptr) {
define i64 @test12(i64* %ptr) {
; PPC64LE-LABEL: test12:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: ld 3, 0(3)
; PPC64LE-NEXT: blr
%val = load atomic i64, i64* %ptr unordered, align 8
@@ -144,7 +144,7 @@ define i64 @test12(i64* %ptr) {
define i64 @test13(i64* %ptr) {
; PPC64LE-LABEL: test13:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: ld 3, 0(3)
; PPC64LE-NEXT: blr
%val = load atomic i64, i64* %ptr monotonic, align 8
@@ -153,7 +153,7 @@ define i64 @test13(i64* %ptr) {
define i64 @test14(i64* %ptr) {
; PPC64LE-LABEL: test14:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: ld 3, 0(3)
; PPC64LE-NEXT: cmpd 7, 3, 3
; PPC64LE-NEXT: bne- 7, .+4
@@ -165,7 +165,7 @@ define i64 @test14(i64* %ptr) {
define i64 @test15(i64* %ptr) {
; PPC64LE-LABEL: test15:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: ori 2, 2, 0
; PPC64LE-NEXT: ld 3, 0(3)
@@ -179,7 +179,7 @@ define i64 @test15(i64* %ptr) {
define void @test16(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test16:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: stb 4, 0(3)
; PPC64LE-NEXT: blr
store atomic i8 %val, i8* %ptr unordered, align 1
@@ -188,7 +188,7 @@ define void @test16(i8* %ptr, i8 %val) {
define void @test17(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test17:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: stb 4, 0(3)
; PPC64LE-NEXT: blr
store atomic i8 %val, i8* %ptr monotonic, align 1
@@ -197,7 +197,7 @@ define void @test17(i8* %ptr, i8 %val) {
define void @test18(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test18:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: stb 4, 0(3)
; PPC64LE-NEXT: blr
@@ -207,7 +207,7 @@ define void @test18(i8* %ptr, i8 %val) {
define void @test19(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test19:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: stb 4, 0(3)
; PPC64LE-NEXT: blr
@@ -217,7 +217,7 @@ define void @test19(i8* %ptr, i8 %val) {
define void @test20(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test20:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sth 4, 0(3)
; PPC64LE-NEXT: blr
store atomic i16 %val, i16* %ptr unordered, align 2
@@ -226,7 +226,7 @@ define void @test20(i16* %ptr, i16 %val) {
define void @test21(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test21:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sth 4, 0(3)
; PPC64LE-NEXT: blr
store atomic i16 %val, i16* %ptr monotonic, align 2
@@ -235,7 +235,7 @@ define void @test21(i16* %ptr, i16 %val) {
define void @test22(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test22:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: sth 4, 0(3)
; PPC64LE-NEXT: blr
@@ -245,7 +245,7 @@ define void @test22(i16* %ptr, i16 %val) {
define void @test23(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test23:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: sth 4, 0(3)
; PPC64LE-NEXT: blr
@@ -255,7 +255,7 @@ define void @test23(i16* %ptr, i16 %val) {
define void @test24(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test24:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: stw 4, 0(3)
; PPC64LE-NEXT: blr
store atomic i32 %val, i32* %ptr unordered, align 4
@@ -264,7 +264,7 @@ define void @test24(i32* %ptr, i32 %val) {
define void @test25(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test25:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: stw 4, 0(3)
; PPC64LE-NEXT: blr
store atomic i32 %val, i32* %ptr monotonic, align 4
@@ -273,7 +273,7 @@ define void @test25(i32* %ptr, i32 %val) {
define void @test26(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test26:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: stw 4, 0(3)
; PPC64LE-NEXT: blr
@@ -283,7 +283,7 @@ define void @test26(i32* %ptr, i32 %val) {
define void @test27(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test27:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: stw 4, 0(3)
; PPC64LE-NEXT: blr
@@ -293,7 +293,7 @@ define void @test27(i32* %ptr, i32 %val) {
define void @test28(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test28:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: blr
store atomic i64 %val, i64* %ptr unordered, align 8
@@ -302,7 +302,7 @@ define void @test28(i64* %ptr, i64 %val) {
define void @test29(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test29:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: blr
store atomic i64 %val, i64* %ptr monotonic, align 8
@@ -311,7 +311,7 @@ define void @test29(i64* %ptr, i64 %val) {
define void @test30(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test30:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: blr
@@ -321,7 +321,7 @@ define void @test30(i64* %ptr, i64 %val) {
define void @test31(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test31:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: blr
@@ -331,7 +331,7 @@ define void @test31(i64* %ptr, i64 %val) {
define void @test32() {
; PPC64LE-LABEL: test32:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
fence acquire
@@ -340,7 +340,7 @@ define void @test32() {
define void @test33() {
; PPC64LE-LABEL: test33:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
fence release
@@ -349,7 +349,7 @@ define void @test33() {
define void @test34() {
; PPC64LE-LABEL: test34:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
fence acq_rel
@@ -358,7 +358,7 @@ define void @test34() {
define void @test35() {
; PPC64LE-LABEL: test35:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: blr
fence seq_cst
@@ -367,7 +367,7 @@ define void @test35() {
define void @test36() {
; PPC64LE-LABEL: test36:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
fence syncscope("singlethread") acquire
@@ -376,7 +376,7 @@ define void @test36() {
define void @test37() {
; PPC64LE-LABEL: test37:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
fence syncscope("singlethread") release
@@ -385,7 +385,7 @@ define void @test37() {
define void @test38() {
; PPC64LE-LABEL: test38:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
fence syncscope("singlethread") acq_rel
@@ -394,7 +394,7 @@ define void @test38() {
define void @test39() {
; PPC64LE-LABEL: test39:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: blr
fence syncscope("singlethread") seq_cst
@@ -403,7 +403,7 @@ define void @test39() {
define void @test40(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test40:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: b .LBB40_2
; PPC64LE-NEXT: .p2align 5
; PPC64LE-NEXT: .LBB40_1:
@@ -413,7 +413,7 @@ define void @test40(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB40_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
@@ -422,15 +422,15 @@ define void @test40(i8* %ptr, i8 %cmp, i8 %val) {
define void @test41(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test41:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB41_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB41_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB41_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB41_4:
@@ -443,15 +443,15 @@ define void @test41(i8* %ptr, i8 %cmp, i8 %val) {
define void @test42(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test42:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB42_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB42_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB42_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB42_4:
@@ -464,7 +464,7 @@ define void @test42(i8* %ptr, i8 %cmp, i8 %val) {
define void @test43(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test43:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB43_2
; PPC64LE-NEXT: .p2align 5
@@ -475,7 +475,7 @@ define void @test43(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB43_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
@@ -484,7 +484,7 @@ define void @test43(i8* %ptr, i8 %cmp, i8 %val) {
define void @test44(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test44:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB44_2
; PPC64LE-NEXT: .p2align 5
@@ -495,7 +495,7 @@ define void @test44(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB44_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
@@ -504,16 +504,16 @@ define void @test44(i8* %ptr, i8 %cmp, i8 %val) {
define void @test45(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test45:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB45_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB45_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB45_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB45_4:
@@ -526,16 +526,16 @@ define void @test45(i8* %ptr, i8 %cmp, i8 %val) {
define void @test46(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test46:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB46_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB46_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB46_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB46_4:
@@ -548,16 +548,16 @@ define void @test46(i8* %ptr, i8 %cmp, i8 %val) {
define void @test47(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test47:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB47_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB47_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB47_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB47_4:
@@ -570,16 +570,16 @@ define void @test47(i8* %ptr, i8 %cmp, i8 %val) {
define void @test48(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test48:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB48_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB48_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB48_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB48_4:
@@ -592,16 +592,16 @@ define void @test48(i8* %ptr, i8 %cmp, i8 %val) {
define void @test49(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test49:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB49_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB49_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB49_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB49_4:
@@ -614,7 +614,7 @@ define void @test49(i8* %ptr, i8 %cmp, i8 %val) {
define void @test50(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test50:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: b .LBB50_2
; PPC64LE-NEXT: .p2align 5
; PPC64LE-NEXT: .LBB50_1:
@@ -624,7 +624,7 @@ define void @test50(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB50_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
@@ -633,15 +633,15 @@ define void @test50(i16* %ptr, i16 %cmp, i16 %val) {
define void @test51(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test51:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB51_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB51_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB51_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB51_4:
@@ -654,15 +654,15 @@ define void @test51(i16* %ptr, i16 %cmp, i16 %val) {
define void @test52(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test52:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB52_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB52_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB52_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB52_4:
@@ -675,7 +675,7 @@ define void @test52(i16* %ptr, i16 %cmp, i16 %val) {
define void @test53(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test53:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB53_2
; PPC64LE-NEXT: .p2align 5
@@ -686,7 +686,7 @@ define void @test53(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB53_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
@@ -695,7 +695,7 @@ define void @test53(i16* %ptr, i16 %cmp, i16 %val) {
define void @test54(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test54:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB54_2
; PPC64LE-NEXT: .p2align 5
@@ -706,7 +706,7 @@ define void @test54(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB54_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
@@ -715,16 +715,16 @@ define void @test54(i16* %ptr, i16 %cmp, i16 %val) {
define void @test55(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test55:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB55_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB55_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB55_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB55_4:
@@ -737,16 +737,16 @@ define void @test55(i16* %ptr, i16 %cmp, i16 %val) {
define void @test56(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test56:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB56_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB56_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB56_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB56_4:
@@ -759,16 +759,16 @@ define void @test56(i16* %ptr, i16 %cmp, i16 %val) {
define void @test57(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test57:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB57_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB57_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB57_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB57_4:
@@ -781,16 +781,16 @@ define void @test57(i16* %ptr, i16 %cmp, i16 %val) {
define void @test58(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test58:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB58_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB58_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB58_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB58_4:
@@ -803,16 +803,16 @@ define void @test58(i16* %ptr, i16 %cmp, i16 %val) {
define void @test59(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test59:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB59_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB59_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB59_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB59_4:
@@ -825,7 +825,7 @@ define void @test59(i16* %ptr, i16 %cmp, i16 %val) {
define void @test60(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test60:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: b .LBB60_2
; PPC64LE-NEXT: .p2align 5
; PPC64LE-NEXT: .LBB60_1:
@@ -835,7 +835,7 @@ define void @test60(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB60_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
@@ -844,15 +844,15 @@ define void @test60(i32* %ptr, i32 %cmp, i32 %val) {
define void @test61(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test61:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB61_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB61_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB61_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB61_4:
@@ -865,15 +865,15 @@ define void @test61(i32* %ptr, i32 %cmp, i32 %val) {
define void @test62(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test62:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB62_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB62_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB62_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB62_4:
@@ -886,7 +886,7 @@ define void @test62(i32* %ptr, i32 %cmp, i32 %val) {
define void @test63(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test63:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB63_2
; PPC64LE-NEXT: .p2align 5
@@ -897,7 +897,7 @@ define void @test63(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB63_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
@@ -906,7 +906,7 @@ define void @test63(i32* %ptr, i32 %cmp, i32 %val) {
define void @test64(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test64:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB64_2
; PPC64LE-NEXT: .p2align 5
@@ -917,7 +917,7 @@ define void @test64(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB64_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
@@ -926,16 +926,16 @@ define void @test64(i32* %ptr, i32 %cmp, i32 %val) {
define void @test65(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test65:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB65_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB65_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB65_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB65_4:
@@ -948,16 +948,16 @@ define void @test65(i32* %ptr, i32 %cmp, i32 %val) {
define void @test66(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test66:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB66_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB66_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB66_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB66_4:
@@ -970,16 +970,16 @@ define void @test66(i32* %ptr, i32 %cmp, i32 %val) {
define void @test67(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test67:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB67_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB67_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB67_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB67_4:
@@ -992,16 +992,16 @@ define void @test67(i32* %ptr, i32 %cmp, i32 %val) {
define void @test68(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test68:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB68_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB68_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB68_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB68_4:
@@ -1014,16 +1014,16 @@ define void @test68(i32* %ptr, i32 %cmp, i32 %val) {
define void @test69(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test69:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB69_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB69_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB69_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB69_4:
@@ -1036,7 +1036,7 @@ define void @test69(i32* %ptr, i32 %cmp, i32 %val) {
define void @test70(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test70:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: b .LBB70_2
; PPC64LE-NEXT: .p2align 5
; PPC64LE-NEXT: .LBB70_1:
@@ -1046,7 +1046,7 @@ define void @test70(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: beq 0, .LBB70_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
@@ -1055,15 +1055,15 @@ define void @test70(i64* %ptr, i64 %cmp, i64 %val) {
define void @test71(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test71:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB71_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB71_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB71_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB71_4:
@@ -1076,15 +1076,15 @@ define void @test71(i64* %ptr, i64 %cmp, i64 %val) {
define void @test72(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test72:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB72_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB72_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB72_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB72_4:
@@ -1097,7 +1097,7 @@ define void @test72(i64* %ptr, i64 %cmp, i64 %val) {
define void @test73(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test73:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB73_2
; PPC64LE-NEXT: .p2align 5
@@ -1108,7 +1108,7 @@ define void @test73(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: beq 0, .LBB73_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
@@ -1117,7 +1117,7 @@ define void @test73(i64* %ptr, i64 %cmp, i64 %val) {
define void @test74(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test74:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB74_2
; PPC64LE-NEXT: .p2align 5
@@ -1128,7 +1128,7 @@ define void @test74(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: beq 0, .LBB74_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
@@ -1137,16 +1137,16 @@ define void @test74(i64* %ptr, i64 %cmp, i64 %val) {
define void @test75(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test75:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB75_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB75_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB75_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB75_4:
@@ -1159,16 +1159,16 @@ define void @test75(i64* %ptr, i64 %cmp, i64 %val) {
define void @test76(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test76:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB76_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB76_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB76_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB76_4:
@@ -1181,16 +1181,16 @@ define void @test76(i64* %ptr, i64 %cmp, i64 %val) {
define void @test77(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test77:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB77_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB77_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB77_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB77_4:
@@ -1203,16 +1203,16 @@ define void @test77(i64* %ptr, i64 %cmp, i64 %val) {
define void @test78(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test78:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB78_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB78_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB78_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB78_4:
@@ -1225,16 +1225,16 @@ define void @test78(i64* %ptr, i64 %cmp, i64 %val) {
define void @test79(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test79:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB79_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB79_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB79_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB79_4:
@@ -1247,7 +1247,7 @@ define void @test79(i64* %ptr, i64 %cmp, i64 %val) {
define void @test80(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test80:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: b .LBB80_2
; PPC64LE-NEXT: .p2align 5
; PPC64LE-NEXT: .LBB80_1:
@@ -1257,7 +1257,7 @@ define void @test80(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB80_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") monotonic monotonic
@@ -1266,15 +1266,15 @@ define void @test80(i8* %ptr, i8 %cmp, i8 %val) {
define void @test81(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test81:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB81_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB81_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB81_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB81_4:
@@ -1287,15 +1287,15 @@ define void @test81(i8* %ptr, i8 %cmp, i8 %val) {
define void @test82(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test82:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB82_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB82_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB82_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB82_4:
@@ -1308,7 +1308,7 @@ define void @test82(i8* %ptr, i8 %cmp, i8 %val) {
define void @test83(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test83:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB83_2
; PPC64LE-NEXT: .p2align 5
@@ -1319,7 +1319,7 @@ define void @test83(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB83_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release monotonic
@@ -1328,7 +1328,7 @@ define void @test83(i8* %ptr, i8 %cmp, i8 %val) {
define void @test84(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test84:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB84_2
; PPC64LE-NEXT: .p2align 5
@@ -1339,7 +1339,7 @@ define void @test84(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB84_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release acquire
@@ -1348,16 +1348,16 @@ define void @test84(i8* %ptr, i8 %cmp, i8 %val) {
define void @test85(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test85:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB85_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB85_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB85_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB85_4:
@@ -1370,16 +1370,16 @@ define void @test85(i8* %ptr, i8 %cmp, i8 %val) {
define void @test86(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test86:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB86_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB86_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB86_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB86_4:
@@ -1392,16 +1392,16 @@ define void @test86(i8* %ptr, i8 %cmp, i8 %val) {
define void @test87(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test87:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB87_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB87_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB87_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB87_4:
@@ -1414,16 +1414,16 @@ define void @test87(i8* %ptr, i8 %cmp, i8 %val) {
define void @test88(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test88:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB88_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB88_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB88_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB88_4:
@@ -1436,16 +1436,16 @@ define void @test88(i8* %ptr, i8 %cmp, i8 %val) {
define void @test89(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test89:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB89_1:
; PPC64LE-NEXT: lbarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB89_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB89_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB89_4:
@@ -1458,7 +1458,7 @@ define void @test89(i8* %ptr, i8 %cmp, i8 %val) {
define void @test90(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test90:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: b .LBB90_2
; PPC64LE-NEXT: .p2align 5
; PPC64LE-NEXT: .LBB90_1:
@@ -1468,7 +1468,7 @@ define void @test90(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB90_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") monotonic monotonic
@@ -1477,15 +1477,15 @@ define void @test90(i16* %ptr, i16 %cmp, i16 %val) {
define void @test91(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test91:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB91_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB91_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB91_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB91_4:
@@ -1498,15 +1498,15 @@ define void @test91(i16* %ptr, i16 %cmp, i16 %val) {
define void @test92(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test92:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB92_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB92_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB92_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB92_4:
@@ -1519,7 +1519,7 @@ define void @test92(i16* %ptr, i16 %cmp, i16 %val) {
define void @test93(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test93:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB93_2
; PPC64LE-NEXT: .p2align 5
@@ -1530,7 +1530,7 @@ define void @test93(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB93_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release monotonic
@@ -1539,7 +1539,7 @@ define void @test93(i16* %ptr, i16 %cmp, i16 %val) {
define void @test94(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test94:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB94_2
; PPC64LE-NEXT: .p2align 5
@@ -1550,7 +1550,7 @@ define void @test94(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB94_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release acquire
@@ -1559,16 +1559,16 @@ define void @test94(i16* %ptr, i16 %cmp, i16 %val) {
define void @test95(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test95:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB95_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB95_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB95_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB95_4:
@@ -1581,16 +1581,16 @@ define void @test95(i16* %ptr, i16 %cmp, i16 %val) {
define void @test96(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test96:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB96_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB96_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB96_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB96_4:
@@ -1603,16 +1603,16 @@ define void @test96(i16* %ptr, i16 %cmp, i16 %val) {
define void @test97(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test97:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB97_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB97_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB97_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB97_4:
@@ -1625,16 +1625,16 @@ define void @test97(i16* %ptr, i16 %cmp, i16 %val) {
define void @test98(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test98:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB98_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB98_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB98_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB98_4:
@@ -1647,16 +1647,16 @@ define void @test98(i16* %ptr, i16 %cmp, i16 %val) {
define void @test99(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test99:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB99_1:
; PPC64LE-NEXT: lharx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB99_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB99_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB99_4:
@@ -1669,7 +1669,7 @@ define void @test99(i16* %ptr, i16 %cmp, i16 %val) {
define void @test100(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test100:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: b .LBB100_2
; PPC64LE-NEXT: .p2align 5
; PPC64LE-NEXT: .LBB100_1:
@@ -1679,7 +1679,7 @@ define void @test100(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB100_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") monotonic monotonic
@@ -1688,15 +1688,15 @@ define void @test100(i32* %ptr, i32 %cmp, i32 %val) {
define void @test101(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test101:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB101_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB101_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB101_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB101_4:
@@ -1709,15 +1709,15 @@ define void @test101(i32* %ptr, i32 %cmp, i32 %val) {
define void @test102(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test102:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB102_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB102_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB102_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB102_4:
@@ -1730,7 +1730,7 @@ define void @test102(i32* %ptr, i32 %cmp, i32 %val) {
define void @test103(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test103:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB103_2
; PPC64LE-NEXT: .p2align 5
@@ -1741,7 +1741,7 @@ define void @test103(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB103_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release monotonic
@@ -1750,7 +1750,7 @@ define void @test103(i32* %ptr, i32 %cmp, i32 %val) {
define void @test104(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test104:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB104_2
; PPC64LE-NEXT: .p2align 5
@@ -1761,7 +1761,7 @@ define void @test104(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: beq 0, .LBB104_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release acquire
@@ -1770,16 +1770,16 @@ define void @test104(i32* %ptr, i32 %cmp, i32 %val) {
define void @test105(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test105:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB105_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB105_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB105_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB105_4:
@@ -1792,16 +1792,16 @@ define void @test105(i32* %ptr, i32 %cmp, i32 %val) {
define void @test106(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test106:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB106_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB106_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB106_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB106_4:
@@ -1814,16 +1814,16 @@ define void @test106(i32* %ptr, i32 %cmp, i32 %val) {
define void @test107(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test107:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB107_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB107_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB107_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB107_4:
@@ -1836,16 +1836,16 @@ define void @test107(i32* %ptr, i32 %cmp, i32 %val) {
define void @test108(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test108:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB108_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB108_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB108_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB108_4:
@@ -1858,16 +1858,16 @@ define void @test108(i32* %ptr, i32 %cmp, i32 %val) {
define void @test109(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test109:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB109_1:
; PPC64LE-NEXT: lwarx 6, 0, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bne 0, .LBB109_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB109_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB109_4:
@@ -1880,7 +1880,7 @@ define void @test109(i32* %ptr, i32 %cmp, i32 %val) {
define void @test110(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test110:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: b .LBB110_2
; PPC64LE-NEXT: .p2align 5
; PPC64LE-NEXT: .LBB110_1:
@@ -1890,7 +1890,7 @@ define void @test110(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: beq 0, .LBB110_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") monotonic monotonic
@@ -1899,15 +1899,15 @@ define void @test110(i64* %ptr, i64 %cmp, i64 %val) {
define void @test111(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test111:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB111_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB111_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB111_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB111_4:
@@ -1920,15 +1920,15 @@ define void @test111(i64* %ptr, i64 %cmp, i64 %val) {
define void @test112(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test112:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB112_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB112_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB112_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB112_4:
@@ -1941,7 +1941,7 @@ define void @test112(i64* %ptr, i64 %cmp, i64 %val) {
define void @test113(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test113:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB113_2
; PPC64LE-NEXT: .p2align 5
@@ -1952,7 +1952,7 @@ define void @test113(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: beq 0, .LBB113_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release monotonic
@@ -1961,7 +1961,7 @@ define void @test113(i64* %ptr, i64 %cmp, i64 %val) {
define void @test114(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test114:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: b .LBB114_2
; PPC64LE-NEXT: .p2align 5
@@ -1972,7 +1972,7 @@ define void @test114(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: beq 0, .LBB114_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release acquire
@@ -1981,16 +1981,16 @@ define void @test114(i64* %ptr, i64 %cmp, i64 %val) {
define void @test115(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test115:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB115_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB115_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB115_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB115_4:
@@ -2003,16 +2003,16 @@ define void @test115(i64* %ptr, i64 %cmp, i64 %val) {
define void @test116(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test116:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB116_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB116_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB116_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB116_4:
@@ -2025,16 +2025,16 @@ define void @test116(i64* %ptr, i64 %cmp, i64 %val) {
define void @test117(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test117:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB117_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB117_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB117_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB117_4:
@@ -2047,16 +2047,16 @@ define void @test117(i64* %ptr, i64 %cmp, i64 %val) {
define void @test118(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test118:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB118_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB118_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB118_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB118_4:
@@ -2069,16 +2069,16 @@ define void @test118(i64* %ptr, i64 %cmp, i64 %val) {
define void @test119(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test119:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB119_1:
; PPC64LE-NEXT: ldarx 6, 0, 3
; PPC64LE-NEXT: cmpd 4, 6
; PPC64LE-NEXT: bne 0, .LBB119_4
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 5, 0, 3
; PPC64LE-NEXT: bne 0, .LBB119_1
-; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
; PPC64LE-NEXT: .LBB119_4:
@@ -2091,12 +2091,12 @@ define void @test119(i64* %ptr, i64 %cmp, i64 %val) {
define i8 @test120(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test120:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB120_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB120_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i8* %ptr, i8 %val monotonic
@@ -2105,13 +2105,13 @@ define i8 @test120(i8* %ptr, i8 %val) {
define i8 @test121(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test121:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB121_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB121_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i8* %ptr, i8 %val acquire
@@ -2120,13 +2120,13 @@ define i8 @test121(i8* %ptr, i8 %val) {
define i8 @test122(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test122:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB122_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB122_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i8* %ptr, i8 %val release
@@ -2135,13 +2135,13 @@ define i8 @test122(i8* %ptr, i8 %val) {
define i8 @test123(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test123:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB123_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB123_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2151,13 +2151,13 @@ define i8 @test123(i8* %ptr, i8 %val) {
define i8 @test124(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test124:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB124_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB124_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2167,12 +2167,12 @@ define i8 @test124(i8* %ptr, i8 %val) {
define i16 @test125(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test125:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB125_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB125_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i16* %ptr, i16 %val monotonic
@@ -2181,13 +2181,13 @@ define i16 @test125(i16* %ptr, i16 %val) {
define i16 @test126(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test126:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB126_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB126_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i16* %ptr, i16 %val acquire
@@ -2196,13 +2196,13 @@ define i16 @test126(i16* %ptr, i16 %val) {
define i16 @test127(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test127:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB127_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB127_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i16* %ptr, i16 %val release
@@ -2211,13 +2211,13 @@ define i16 @test127(i16* %ptr, i16 %val) {
define i16 @test128(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test128:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB128_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB128_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2227,13 +2227,13 @@ define i16 @test128(i16* %ptr, i16 %val) {
define i16 @test129(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test129:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB129_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB129_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2243,12 +2243,12 @@ define i16 @test129(i16* %ptr, i16 %val) {
define i32 @test130(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test130:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB130_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB130_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i32* %ptr, i32 %val monotonic
@@ -2257,13 +2257,13 @@ define i32 @test130(i32* %ptr, i32 %val) {
define i32 @test131(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test131:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB131_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB131_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i32* %ptr, i32 %val acquire
@@ -2272,13 +2272,13 @@ define i32 @test131(i32* %ptr, i32 %val) {
define i32 @test132(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test132:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB132_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB132_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i32* %ptr, i32 %val release
@@ -2287,13 +2287,13 @@ define i32 @test132(i32* %ptr, i32 %val) {
define i32 @test133(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test133:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB133_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB133_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2303,13 +2303,13 @@ define i32 @test133(i32* %ptr, i32 %val) {
define i32 @test134(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test134:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB134_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB134_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2319,12 +2319,12 @@ define i32 @test134(i32* %ptr, i32 %val) {
define i64 @test135(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test135:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB135_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB135_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i64* %ptr, i64 %val monotonic
@@ -2333,13 +2333,13 @@ define i64 @test135(i64* %ptr, i64 %val) {
define i64 @test136(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test136:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB136_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB136_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i64* %ptr, i64 %val acquire
@@ -2348,13 +2348,13 @@ define i64 @test136(i64* %ptr, i64 %val) {
define i64 @test137(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test137:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB137_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB137_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i64* %ptr, i64 %val release
@@ -2363,13 +2363,13 @@ define i64 @test137(i64* %ptr, i64 %val) {
define i64 @test138(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test138:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB138_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB138_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2379,13 +2379,13 @@ define i64 @test138(i64* %ptr, i64 %val) {
define i64 @test139(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test139:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB139_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB139_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2395,13 +2395,13 @@ define i64 @test139(i64* %ptr, i64 %val) {
define i8 @test140(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test140:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB140_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB140_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i8* %ptr, i8 %val monotonic
@@ -2410,14 +2410,14 @@ define i8 @test140(i8* %ptr, i8 %val) {
define i8 @test141(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test141:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB141_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB141_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw add i8* %ptr, i8 %val acquire
@@ -2426,14 +2426,14 @@ define i8 @test141(i8* %ptr, i8 %val) {
define i8 @test142(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test142:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB142_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB142_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i8* %ptr, i8 %val release
@@ -2442,14 +2442,14 @@ define i8 @test142(i8* %ptr, i8 %val) {
define i8 @test143(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test143:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB143_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB143_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2459,14 +2459,14 @@ define i8 @test143(i8* %ptr, i8 %val) {
define i8 @test144(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test144:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB144_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB144_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2476,13 +2476,13 @@ define i8 @test144(i8* %ptr, i8 %val) {
define i16 @test145(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test145:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB145_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB145_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i16* %ptr, i16 %val monotonic
@@ -2491,14 +2491,14 @@ define i16 @test145(i16* %ptr, i16 %val) {
define i16 @test146(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test146:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB146_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB146_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw add i16* %ptr, i16 %val acquire
@@ -2507,14 +2507,14 @@ define i16 @test146(i16* %ptr, i16 %val) {
define i16 @test147(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test147:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB147_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB147_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i16* %ptr, i16 %val release
@@ -2523,14 +2523,14 @@ define i16 @test147(i16* %ptr, i16 %val) {
define i16 @test148(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test148:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB148_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB148_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2540,14 +2540,14 @@ define i16 @test148(i16* %ptr, i16 %val) {
define i16 @test149(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test149:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB149_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB149_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2557,13 +2557,13 @@ define i16 @test149(i16* %ptr, i16 %val) {
define i32 @test150(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test150:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB150_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB150_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i32* %ptr, i32 %val monotonic
@@ -2572,14 +2572,14 @@ define i32 @test150(i32* %ptr, i32 %val) {
define i32 @test151(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test151:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB151_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB151_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw add i32* %ptr, i32 %val acquire
@@ -2588,14 +2588,14 @@ define i32 @test151(i32* %ptr, i32 %val) {
define i32 @test152(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test152:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB152_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB152_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i32* %ptr, i32 %val release
@@ -2604,14 +2604,14 @@ define i32 @test152(i32* %ptr, i32 %val) {
define i32 @test153(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test153:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB153_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB153_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2621,14 +2621,14 @@ define i32 @test153(i32* %ptr, i32 %val) {
define i32 @test154(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test154:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB154_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB154_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2638,13 +2638,13 @@ define i32 @test154(i32* %ptr, i32 %val) {
define i64 @test155(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test155:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB155_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB155_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i64* %ptr, i64 %val monotonic
@@ -2653,14 +2653,14 @@ define i64 @test155(i64* %ptr, i64 %val) {
define i64 @test156(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test156:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB156_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB156_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw add i64* %ptr, i64 %val acquire
@@ -2669,14 +2669,14 @@ define i64 @test156(i64* %ptr, i64 %val) {
define i64 @test157(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test157:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB157_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB157_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i64* %ptr, i64 %val release
@@ -2685,14 +2685,14 @@ define i64 @test157(i64* %ptr, i64 %val) {
define i64 @test158(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test158:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB158_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB158_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2702,14 +2702,14 @@ define i64 @test158(i64* %ptr, i64 %val) {
define i64 @test159(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test159:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB159_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB159_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2719,13 +2719,13 @@ define i64 @test159(i64* %ptr, i64 %val) {
define i8 @test160(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test160:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB160_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB160_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i8* %ptr, i8 %val monotonic
@@ -2734,14 +2734,14 @@ define i8 @test160(i8* %ptr, i8 %val) {
define i8 @test161(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test161:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB161_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: subf 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB161_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i8* %ptr, i8 %val acquire
@@ -2750,14 +2750,14 @@ define i8 @test161(i8* %ptr, i8 %val) {
define i8 @test162(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test162:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB162_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB162_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i8* %ptr, i8 %val release
@@ -2766,14 +2766,14 @@ define i8 @test162(i8* %ptr, i8 %val) {
define i8 @test163(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test163:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB163_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB163_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2783,14 +2783,14 @@ define i8 @test163(i8* %ptr, i8 %val) {
define i8 @test164(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test164:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB164_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB164_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2800,13 +2800,13 @@ define i8 @test164(i8* %ptr, i8 %val) {
define i16 @test165(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test165:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB165_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB165_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i16* %ptr, i16 %val monotonic
@@ -2815,14 +2815,14 @@ define i16 @test165(i16* %ptr, i16 %val) {
define i16 @test166(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test166:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB166_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: subf 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB166_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i16* %ptr, i16 %val acquire
@@ -2831,14 +2831,14 @@ define i16 @test166(i16* %ptr, i16 %val) {
define i16 @test167(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test167:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB167_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB167_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i16* %ptr, i16 %val release
@@ -2847,14 +2847,14 @@ define i16 @test167(i16* %ptr, i16 %val) {
define i16 @test168(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test168:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB168_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB168_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2864,14 +2864,14 @@ define i16 @test168(i16* %ptr, i16 %val) {
define i16 @test169(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test169:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB169_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB169_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2881,13 +2881,13 @@ define i16 @test169(i16* %ptr, i16 %val) {
define i32 @test170(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test170:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB170_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB170_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i32* %ptr, i32 %val monotonic
@@ -2896,14 +2896,14 @@ define i32 @test170(i32* %ptr, i32 %val) {
define i32 @test171(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test171:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB171_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: subf 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB171_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i32* %ptr, i32 %val acquire
@@ -2912,14 +2912,14 @@ define i32 @test171(i32* %ptr, i32 %val) {
define i32 @test172(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test172:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB172_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB172_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i32* %ptr, i32 %val release
@@ -2928,14 +2928,14 @@ define i32 @test172(i32* %ptr, i32 %val) {
define i32 @test173(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test173:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB173_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB173_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2945,14 +2945,14 @@ define i32 @test173(i32* %ptr, i32 %val) {
define i32 @test174(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test174:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB174_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB174_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2962,13 +2962,13 @@ define i32 @test174(i32* %ptr, i32 %val) {
define i64 @test175(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test175:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB175_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB175_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i64* %ptr, i64 %val monotonic
@@ -2977,14 +2977,14 @@ define i64 @test175(i64* %ptr, i64 %val) {
define i64 @test176(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test176:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB176_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB176_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i64* %ptr, i64 %val acquire
@@ -2993,14 +2993,14 @@ define i64 @test176(i64* %ptr, i64 %val) {
define i64 @test177(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test177:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB177_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB177_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i64* %ptr, i64 %val release
@@ -3009,14 +3009,14 @@ define i64 @test177(i64* %ptr, i64 %val) {
define i64 @test178(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test178:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB178_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB178_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3026,14 +3026,14 @@ define i64 @test178(i64* %ptr, i64 %val) {
define i64 @test179(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test179:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB179_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB179_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3043,13 +3043,13 @@ define i64 @test179(i64* %ptr, i64 %val) {
define i8 @test180(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test180:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB180_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB180_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i8* %ptr, i8 %val monotonic
@@ -3058,14 +3058,14 @@ define i8 @test180(i8* %ptr, i8 %val) {
define i8 @test181(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test181:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB181_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB181_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw and i8* %ptr, i8 %val acquire
@@ -3074,14 +3074,14 @@ define i8 @test181(i8* %ptr, i8 %val) {
define i8 @test182(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test182:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB182_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB182_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i8* %ptr, i8 %val release
@@ -3090,14 +3090,14 @@ define i8 @test182(i8* %ptr, i8 %val) {
define i8 @test183(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test183:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB183_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB183_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3107,14 +3107,14 @@ define i8 @test183(i8* %ptr, i8 %val) {
define i8 @test184(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test184:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB184_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB184_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3124,13 +3124,13 @@ define i8 @test184(i8* %ptr, i8 %val) {
define i16 @test185(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test185:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB185_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB185_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i16* %ptr, i16 %val monotonic
@@ -3139,14 +3139,14 @@ define i16 @test185(i16* %ptr, i16 %val) {
define i16 @test186(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test186:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB186_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB186_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw and i16* %ptr, i16 %val acquire
@@ -3155,14 +3155,14 @@ define i16 @test186(i16* %ptr, i16 %val) {
define i16 @test187(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test187:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB187_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB187_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i16* %ptr, i16 %val release
@@ -3171,14 +3171,14 @@ define i16 @test187(i16* %ptr, i16 %val) {
define i16 @test188(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test188:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB188_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB188_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3188,14 +3188,14 @@ define i16 @test188(i16* %ptr, i16 %val) {
define i16 @test189(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test189:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB189_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB189_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3205,13 +3205,13 @@ define i16 @test189(i16* %ptr, i16 %val) {
define i32 @test190(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test190:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB190_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB190_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i32* %ptr, i32 %val monotonic
@@ -3220,14 +3220,14 @@ define i32 @test190(i32* %ptr, i32 %val) {
define i32 @test191(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test191:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB191_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB191_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw and i32* %ptr, i32 %val acquire
@@ -3236,14 +3236,14 @@ define i32 @test191(i32* %ptr, i32 %val) {
define i32 @test192(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test192:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB192_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB192_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i32* %ptr, i32 %val release
@@ -3252,14 +3252,14 @@ define i32 @test192(i32* %ptr, i32 %val) {
define i32 @test193(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test193:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB193_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB193_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3269,14 +3269,14 @@ define i32 @test193(i32* %ptr, i32 %val) {
define i32 @test194(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test194:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB194_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB194_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3286,13 +3286,13 @@ define i32 @test194(i32* %ptr, i32 %val) {
define i64 @test195(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test195:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB195_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB195_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i64* %ptr, i64 %val monotonic
@@ -3301,14 +3301,14 @@ define i64 @test195(i64* %ptr, i64 %val) {
define i64 @test196(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test196:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB196_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB196_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw and i64* %ptr, i64 %val acquire
@@ -3317,14 +3317,14 @@ define i64 @test196(i64* %ptr, i64 %val) {
define i64 @test197(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test197:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB197_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB197_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i64* %ptr, i64 %val release
@@ -3333,14 +3333,14 @@ define i64 @test197(i64* %ptr, i64 %val) {
define i64 @test198(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test198:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB198_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB198_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3350,14 +3350,14 @@ define i64 @test198(i64* %ptr, i64 %val) {
define i64 @test199(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test199:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB199_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB199_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3367,13 +3367,13 @@ define i64 @test199(i64* %ptr, i64 %val) {
define i8 @test200(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test200:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB200_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB200_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i8* %ptr, i8 %val monotonic
@@ -3382,14 +3382,14 @@ define i8 @test200(i8* %ptr, i8 %val) {
define i8 @test201(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test201:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB201_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB201_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i8* %ptr, i8 %val acquire
@@ -3398,14 +3398,14 @@ define i8 @test201(i8* %ptr, i8 %val) {
define i8 @test202(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test202:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB202_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB202_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i8* %ptr, i8 %val release
@@ -3414,14 +3414,14 @@ define i8 @test202(i8* %ptr, i8 %val) {
define i8 @test203(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test203:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB203_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB203_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3431,14 +3431,14 @@ define i8 @test203(i8* %ptr, i8 %val) {
define i8 @test204(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test204:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB204_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB204_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3448,13 +3448,13 @@ define i8 @test204(i8* %ptr, i8 %val) {
define i16 @test205(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test205:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB205_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB205_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i16* %ptr, i16 %val monotonic
@@ -3463,14 +3463,14 @@ define i16 @test205(i16* %ptr, i16 %val) {
define i16 @test206(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test206:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB206_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB206_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i16* %ptr, i16 %val acquire
@@ -3479,14 +3479,14 @@ define i16 @test206(i16* %ptr, i16 %val) {
define i16 @test207(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test207:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB207_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB207_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i16* %ptr, i16 %val release
@@ -3495,14 +3495,14 @@ define i16 @test207(i16* %ptr, i16 %val) {
define i16 @test208(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test208:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB208_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB208_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3512,14 +3512,14 @@ define i16 @test208(i16* %ptr, i16 %val) {
define i16 @test209(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test209:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB209_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB209_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3529,13 +3529,13 @@ define i16 @test209(i16* %ptr, i16 %val) {
define i32 @test210(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test210:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB210_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB210_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i32* %ptr, i32 %val monotonic
@@ -3544,14 +3544,14 @@ define i32 @test210(i32* %ptr, i32 %val) {
define i32 @test211(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test211:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB211_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB211_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i32* %ptr, i32 %val acquire
@@ -3560,14 +3560,14 @@ define i32 @test211(i32* %ptr, i32 %val) {
define i32 @test212(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test212:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB212_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB212_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i32* %ptr, i32 %val release
@@ -3576,14 +3576,14 @@ define i32 @test212(i32* %ptr, i32 %val) {
define i32 @test213(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test213:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB213_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB213_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3593,14 +3593,14 @@ define i32 @test213(i32* %ptr, i32 %val) {
define i32 @test214(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test214:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB214_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB214_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3610,13 +3610,13 @@ define i32 @test214(i32* %ptr, i32 %val) {
define i64 @test215(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test215:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB215_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB215_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i64* %ptr, i64 %val monotonic
@@ -3625,14 +3625,14 @@ define i64 @test215(i64* %ptr, i64 %val) {
define i64 @test216(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test216:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB216_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB216_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i64* %ptr, i64 %val acquire
@@ -3641,14 +3641,14 @@ define i64 @test216(i64* %ptr, i64 %val) {
define i64 @test217(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test217:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB217_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB217_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i64* %ptr, i64 %val release
@@ -3657,14 +3657,14 @@ define i64 @test217(i64* %ptr, i64 %val) {
define i64 @test218(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test218:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB218_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB218_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3674,14 +3674,14 @@ define i64 @test218(i64* %ptr, i64 %val) {
define i64 @test219(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test219:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB219_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB219_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3691,13 +3691,13 @@ define i64 @test219(i64* %ptr, i64 %val) {
define i8 @test220(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test220:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB220_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB220_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i8* %ptr, i8 %val monotonic
@@ -3706,14 +3706,14 @@ define i8 @test220(i8* %ptr, i8 %val) {
define i8 @test221(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test221:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB221_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB221_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw or i8* %ptr, i8 %val acquire
@@ -3722,14 +3722,14 @@ define i8 @test221(i8* %ptr, i8 %val) {
define i8 @test222(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test222:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB222_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB222_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i8* %ptr, i8 %val release
@@ -3738,14 +3738,14 @@ define i8 @test222(i8* %ptr, i8 %val) {
define i8 @test223(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test223:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB223_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB223_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3755,14 +3755,14 @@ define i8 @test223(i8* %ptr, i8 %val) {
define i8 @test224(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test224:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB224_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB224_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3772,13 +3772,13 @@ define i8 @test224(i8* %ptr, i8 %val) {
define i16 @test225(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test225:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB225_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB225_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i16* %ptr, i16 %val monotonic
@@ -3787,14 +3787,14 @@ define i16 @test225(i16* %ptr, i16 %val) {
define i16 @test226(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test226:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB226_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB226_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw or i16* %ptr, i16 %val acquire
@@ -3803,14 +3803,14 @@ define i16 @test226(i16* %ptr, i16 %val) {
define i16 @test227(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test227:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB227_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB227_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i16* %ptr, i16 %val release
@@ -3819,14 +3819,14 @@ define i16 @test227(i16* %ptr, i16 %val) {
define i16 @test228(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test228:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB228_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB228_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3836,14 +3836,14 @@ define i16 @test228(i16* %ptr, i16 %val) {
define i16 @test229(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test229:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB229_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB229_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3853,13 +3853,13 @@ define i16 @test229(i16* %ptr, i16 %val) {
define i32 @test230(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test230:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB230_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB230_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i32* %ptr, i32 %val monotonic
@@ -3868,14 +3868,14 @@ define i32 @test230(i32* %ptr, i32 %val) {
define i32 @test231(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test231:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB231_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB231_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw or i32* %ptr, i32 %val acquire
@@ -3884,14 +3884,14 @@ define i32 @test231(i32* %ptr, i32 %val) {
define i32 @test232(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test232:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB232_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB232_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i32* %ptr, i32 %val release
@@ -3900,14 +3900,14 @@ define i32 @test232(i32* %ptr, i32 %val) {
define i32 @test233(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test233:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB233_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB233_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3917,14 +3917,14 @@ define i32 @test233(i32* %ptr, i32 %val) {
define i32 @test234(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test234:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB234_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB234_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3934,13 +3934,13 @@ define i32 @test234(i32* %ptr, i32 %val) {
define i64 @test235(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test235:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB235_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB235_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i64* %ptr, i64 %val monotonic
@@ -3949,14 +3949,14 @@ define i64 @test235(i64* %ptr, i64 %val) {
define i64 @test236(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test236:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB236_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB236_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw or i64* %ptr, i64 %val acquire
@@ -3965,14 +3965,14 @@ define i64 @test236(i64* %ptr, i64 %val) {
define i64 @test237(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test237:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB237_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB237_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i64* %ptr, i64 %val release
@@ -3981,14 +3981,14 @@ define i64 @test237(i64* %ptr, i64 %val) {
define i64 @test238(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test238:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB238_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB238_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3998,14 +3998,14 @@ define i64 @test238(i64* %ptr, i64 %val) {
define i64 @test239(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test239:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB239_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB239_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4015,13 +4015,13 @@ define i64 @test239(i64* %ptr, i64 %val) {
define i8 @test240(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test240:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB240_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB240_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i8* %ptr, i8 %val monotonic
@@ -4030,14 +4030,14 @@ define i8 @test240(i8* %ptr, i8 %val) {
define i8 @test241(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test241:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB241_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB241_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i8* %ptr, i8 %val acquire
@@ -4046,14 +4046,14 @@ define i8 @test241(i8* %ptr, i8 %val) {
define i8 @test242(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test242:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB242_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB242_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i8* %ptr, i8 %val release
@@ -4062,14 +4062,14 @@ define i8 @test242(i8* %ptr, i8 %val) {
define i8 @test243(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test243:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB243_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB243_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4079,14 +4079,14 @@ define i8 @test243(i8* %ptr, i8 %val) {
define i8 @test244(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test244:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB244_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB244_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4096,13 +4096,13 @@ define i8 @test244(i8* %ptr, i8 %val) {
define i16 @test245(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test245:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB245_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB245_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i16* %ptr, i16 %val monotonic
@@ -4111,14 +4111,14 @@ define i16 @test245(i16* %ptr, i16 %val) {
define i16 @test246(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test246:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB246_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB246_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i16* %ptr, i16 %val acquire
@@ -4127,14 +4127,14 @@ define i16 @test246(i16* %ptr, i16 %val) {
define i16 @test247(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test247:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB247_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB247_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i16* %ptr, i16 %val release
@@ -4143,14 +4143,14 @@ define i16 @test247(i16* %ptr, i16 %val) {
define i16 @test248(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test248:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB248_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB248_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4160,14 +4160,14 @@ define i16 @test248(i16* %ptr, i16 %val) {
define i16 @test249(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test249:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB249_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB249_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4177,13 +4177,13 @@ define i16 @test249(i16* %ptr, i16 %val) {
define i32 @test250(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test250:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB250_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB250_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i32* %ptr, i32 %val monotonic
@@ -4192,14 +4192,14 @@ define i32 @test250(i32* %ptr, i32 %val) {
define i32 @test251(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test251:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB251_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB251_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i32* %ptr, i32 %val acquire
@@ -4208,14 +4208,14 @@ define i32 @test251(i32* %ptr, i32 %val) {
define i32 @test252(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test252:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB252_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB252_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i32* %ptr, i32 %val release
@@ -4224,14 +4224,14 @@ define i32 @test252(i32* %ptr, i32 %val) {
define i32 @test253(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test253:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB253_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB253_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4241,14 +4241,14 @@ define i32 @test253(i32* %ptr, i32 %val) {
define i32 @test254(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test254:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB254_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB254_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4258,13 +4258,13 @@ define i32 @test254(i32* %ptr, i32 %val) {
define i64 @test255(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test255:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB255_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB255_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i64* %ptr, i64 %val monotonic
@@ -4273,14 +4273,14 @@ define i64 @test255(i64* %ptr, i64 %val) {
define i64 @test256(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test256:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB256_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB256_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i64* %ptr, i64 %val acquire
@@ -4289,14 +4289,14 @@ define i64 @test256(i64* %ptr, i64 %val) {
define i64 @test257(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test257:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB257_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB257_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i64* %ptr, i64 %val release
@@ -4305,14 +4305,14 @@ define i64 @test257(i64* %ptr, i64 %val) {
define i64 @test258(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test258:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB258_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB258_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4322,14 +4322,14 @@ define i64 @test258(i64* %ptr, i64 %val) {
define i64 @test259(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test259:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB259_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB259_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4339,13 +4339,13 @@ define i64 @test259(i64* %ptr, i64 %val) {
define i8 @test260(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test260:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB260_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB260_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB260_1
; PPC64LE-NEXT: .LBB260_3:
@@ -4357,14 +4357,14 @@ define i8 @test260(i8* %ptr, i8 %val) {
define i8 @test261(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test261:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB261_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: extsb 6, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB261_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB261_1
; PPC64LE-NEXT: .LBB261_3:
@@ -4376,14 +4376,14 @@ define i8 @test261(i8* %ptr, i8 %val) {
define i8 @test262(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test262:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB262_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB262_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB262_1
; PPC64LE-NEXT: .LBB262_3:
@@ -4395,14 +4395,14 @@ define i8 @test262(i8* %ptr, i8 %val) {
define i8 @test263(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test263:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB263_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB263_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB263_1
; PPC64LE-NEXT: .LBB263_3:
@@ -4415,14 +4415,14 @@ define i8 @test263(i8* %ptr, i8 %val) {
define i8 @test264(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test264:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB264_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB264_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB264_1
; PPC64LE-NEXT: .LBB264_3:
@@ -4435,13 +4435,13 @@ define i8 @test264(i8* %ptr, i8 %val) {
define i16 @test265(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test265:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB265_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB265_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB265_1
; PPC64LE-NEXT: .LBB265_3:
@@ -4453,14 +4453,14 @@ define i16 @test265(i16* %ptr, i16 %val) {
define i16 @test266(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test266:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB266_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: extsh 6, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB266_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB266_1
; PPC64LE-NEXT: .LBB266_3:
@@ -4472,14 +4472,14 @@ define i16 @test266(i16* %ptr, i16 %val) {
define i16 @test267(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test267:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB267_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB267_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB267_1
; PPC64LE-NEXT: .LBB267_3:
@@ -4491,14 +4491,14 @@ define i16 @test267(i16* %ptr, i16 %val) {
define i16 @test268(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test268:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB268_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB268_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB268_1
; PPC64LE-NEXT: .LBB268_3:
@@ -4511,14 +4511,14 @@ define i16 @test268(i16* %ptr, i16 %val) {
define i16 @test269(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test269:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB269_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB269_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB269_1
; PPC64LE-NEXT: .LBB269_3:
@@ -4531,12 +4531,12 @@ define i16 @test269(i16* %ptr, i16 %val) {
define i32 @test270(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test270:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB270_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: ble 0, .LBB270_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB270_1
; PPC64LE-NEXT: .LBB270_3:
@@ -4548,13 +4548,13 @@ define i32 @test270(i32* %ptr, i32 %val) {
define i32 @test271(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test271:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB271_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: cmpw 4, 3
; PPC64LE-NEXT: ble 0, .LBB271_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB271_1
; PPC64LE-NEXT: .LBB271_3:
@@ -4566,13 +4566,13 @@ define i32 @test271(i32* %ptr, i32 %val) {
define i32 @test272(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test272:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB272_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: ble 0, .LBB272_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB272_1
; PPC64LE-NEXT: .LBB272_3:
@@ -4584,13 +4584,13 @@ define i32 @test272(i32* %ptr, i32 %val) {
define i32 @test273(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test273:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB273_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: ble 0, .LBB273_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB273_1
; PPC64LE-NEXT: .LBB273_3:
@@ -4603,13 +4603,13 @@ define i32 @test273(i32* %ptr, i32 %val) {
define i32 @test274(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test274:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB274_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: ble 0, .LBB274_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB274_1
; PPC64LE-NEXT: .LBB274_3:
@@ -4622,12 +4622,12 @@ define i32 @test274(i32* %ptr, i32 %val) {
define i64 @test275(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test275:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB275_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: ble 0, .LBB275_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB275_1
; PPC64LE-NEXT: .LBB275_3:
@@ -4639,13 +4639,13 @@ define i64 @test275(i64* %ptr, i64 %val) {
define i64 @test276(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test276:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB276_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: cmpd 4, 3
; PPC64LE-NEXT: ble 0, .LBB276_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB276_1
; PPC64LE-NEXT: .LBB276_3:
@@ -4657,13 +4657,13 @@ define i64 @test276(i64* %ptr, i64 %val) {
define i64 @test277(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test277:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB277_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: ble 0, .LBB277_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB277_1
; PPC64LE-NEXT: .LBB277_3:
@@ -4675,13 +4675,13 @@ define i64 @test277(i64* %ptr, i64 %val) {
define i64 @test278(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test278:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB278_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: ble 0, .LBB278_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB278_1
; PPC64LE-NEXT: .LBB278_3:
@@ -4694,13 +4694,13 @@ define i64 @test278(i64* %ptr, i64 %val) {
define i64 @test279(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test279:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB279_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: ble 0, .LBB279_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB279_1
; PPC64LE-NEXT: .LBB279_3:
@@ -4713,13 +4713,13 @@ define i64 @test279(i64* %ptr, i64 %val) {
define i8 @test280(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test280:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB280_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB280_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB280_1
; PPC64LE-NEXT: .LBB280_3:
@@ -4731,14 +4731,14 @@ define i8 @test280(i8* %ptr, i8 %val) {
define i8 @test281(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test281:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB281_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: extsb 6, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB281_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB281_1
; PPC64LE-NEXT: .LBB281_3:
@@ -4750,14 +4750,14 @@ define i8 @test281(i8* %ptr, i8 %val) {
define i8 @test282(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test282:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB282_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB282_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB282_1
; PPC64LE-NEXT: .LBB282_3:
@@ -4769,14 +4769,14 @@ define i8 @test282(i8* %ptr, i8 %val) {
define i8 @test283(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test283:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB283_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB283_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB283_1
; PPC64LE-NEXT: .LBB283_3:
@@ -4789,14 +4789,14 @@ define i8 @test283(i8* %ptr, i8 %val) {
define i8 @test284(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test284:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB284_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB284_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB284_1
; PPC64LE-NEXT: .LBB284_3:
@@ -4809,13 +4809,13 @@ define i8 @test284(i8* %ptr, i8 %val) {
define i16 @test285(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test285:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB285_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB285_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB285_1
; PPC64LE-NEXT: .LBB285_3:
@@ -4827,14 +4827,14 @@ define i16 @test285(i16* %ptr, i16 %val) {
define i16 @test286(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test286:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB286_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: extsh 6, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB286_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB286_1
; PPC64LE-NEXT: .LBB286_3:
@@ -4846,14 +4846,14 @@ define i16 @test286(i16* %ptr, i16 %val) {
define i16 @test287(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test287:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB287_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB287_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB287_1
; PPC64LE-NEXT: .LBB287_3:
@@ -4865,14 +4865,14 @@ define i16 @test287(i16* %ptr, i16 %val) {
define i16 @test288(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test288:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB288_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB288_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB288_1
; PPC64LE-NEXT: .LBB288_3:
@@ -4885,14 +4885,14 @@ define i16 @test288(i16* %ptr, i16 %val) {
define i16 @test289(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test289:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB289_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB289_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB289_1
; PPC64LE-NEXT: .LBB289_3:
@@ -4905,12 +4905,12 @@ define i16 @test289(i16* %ptr, i16 %val) {
define i32 @test290(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test290:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB290_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: bge 0, .LBB290_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB290_1
; PPC64LE-NEXT: .LBB290_3:
@@ -4922,13 +4922,13 @@ define i32 @test290(i32* %ptr, i32 %val) {
define i32 @test291(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test291:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB291_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: cmpw 4, 3
; PPC64LE-NEXT: bge 0, .LBB291_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB291_1
; PPC64LE-NEXT: .LBB291_3:
@@ -4940,13 +4940,13 @@ define i32 @test291(i32* %ptr, i32 %val) {
define i32 @test292(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test292:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB292_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: bge 0, .LBB292_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB292_1
; PPC64LE-NEXT: .LBB292_3:
@@ -4958,13 +4958,13 @@ define i32 @test292(i32* %ptr, i32 %val) {
define i32 @test293(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test293:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB293_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: bge 0, .LBB293_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB293_1
; PPC64LE-NEXT: .LBB293_3:
@@ -4977,13 +4977,13 @@ define i32 @test293(i32* %ptr, i32 %val) {
define i32 @test294(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test294:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB294_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: bge 0, .LBB294_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB294_1
; PPC64LE-NEXT: .LBB294_3:
@@ -4996,12 +4996,12 @@ define i32 @test294(i32* %ptr, i32 %val) {
define i64 @test295(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test295:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB295_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: bge 0, .LBB295_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB295_1
; PPC64LE-NEXT: .LBB295_3:
@@ -5013,13 +5013,13 @@ define i64 @test295(i64* %ptr, i64 %val) {
define i64 @test296(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test296:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB296_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: cmpd 4, 3
; PPC64LE-NEXT: bge 0, .LBB296_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB296_1
; PPC64LE-NEXT: .LBB296_3:
@@ -5031,13 +5031,13 @@ define i64 @test296(i64* %ptr, i64 %val) {
define i64 @test297(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test297:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB297_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: bge 0, .LBB297_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB297_1
; PPC64LE-NEXT: .LBB297_3:
@@ -5049,13 +5049,13 @@ define i64 @test297(i64* %ptr, i64 %val) {
define i64 @test298(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test298:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB298_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: bge 0, .LBB298_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB298_1
; PPC64LE-NEXT: .LBB298_3:
@@ -5068,13 +5068,13 @@ define i64 @test298(i64* %ptr, i64 %val) {
define i64 @test299(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test299:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB299_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: bge 0, .LBB299_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB299_1
; PPC64LE-NEXT: .LBB299_3:
@@ -5087,12 +5087,12 @@ define i64 @test299(i64* %ptr, i64 %val) {
define i8 @test300(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test300:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB300_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB300_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB300_1
; PPC64LE-NEXT: .LBB300_3:
@@ -5104,13 +5104,13 @@ define i8 @test300(i8* %ptr, i8 %val) {
define i8 @test301(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test301:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB301_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: ble 0, .LBB301_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB301_1
; PPC64LE-NEXT: .LBB301_3:
@@ -5122,13 +5122,13 @@ define i8 @test301(i8* %ptr, i8 %val) {
define i8 @test302(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test302:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB302_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB302_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB302_1
; PPC64LE-NEXT: .LBB302_3:
@@ -5140,13 +5140,13 @@ define i8 @test302(i8* %ptr, i8 %val) {
define i8 @test303(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test303:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB303_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB303_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB303_1
; PPC64LE-NEXT: .LBB303_3:
@@ -5159,13 +5159,13 @@ define i8 @test303(i8* %ptr, i8 %val) {
define i8 @test304(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test304:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB304_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB304_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB304_1
; PPC64LE-NEXT: .LBB304_3:
@@ -5178,12 +5178,12 @@ define i8 @test304(i8* %ptr, i8 %val) {
define i16 @test305(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test305:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB305_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB305_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB305_1
; PPC64LE-NEXT: .LBB305_3:
@@ -5195,13 +5195,13 @@ define i16 @test305(i16* %ptr, i16 %val) {
define i16 @test306(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test306:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB306_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: ble 0, .LBB306_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB306_1
; PPC64LE-NEXT: .LBB306_3:
@@ -5213,13 +5213,13 @@ define i16 @test306(i16* %ptr, i16 %val) {
define i16 @test307(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test307:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB307_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB307_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB307_1
; PPC64LE-NEXT: .LBB307_3:
@@ -5231,13 +5231,13 @@ define i16 @test307(i16* %ptr, i16 %val) {
define i16 @test308(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test308:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB308_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB308_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB308_1
; PPC64LE-NEXT: .LBB308_3:
@@ -5250,13 +5250,13 @@ define i16 @test308(i16* %ptr, i16 %val) {
define i16 @test309(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test309:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB309_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB309_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB309_1
; PPC64LE-NEXT: .LBB309_3:
@@ -5269,12 +5269,12 @@ define i16 @test309(i16* %ptr, i16 %val) {
define i32 @test310(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test310:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB310_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB310_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB310_1
; PPC64LE-NEXT: .LBB310_3:
@@ -5286,13 +5286,13 @@ define i32 @test310(i32* %ptr, i32 %val) {
define i32 @test311(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test311:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB311_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: ble 0, .LBB311_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB311_1
; PPC64LE-NEXT: .LBB311_3:
@@ -5304,13 +5304,13 @@ define i32 @test311(i32* %ptr, i32 %val) {
define i32 @test312(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test312:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB312_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB312_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB312_1
; PPC64LE-NEXT: .LBB312_3:
@@ -5322,13 +5322,13 @@ define i32 @test312(i32* %ptr, i32 %val) {
define i32 @test313(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test313:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB313_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB313_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB313_1
; PPC64LE-NEXT: .LBB313_3:
@@ -5341,13 +5341,13 @@ define i32 @test313(i32* %ptr, i32 %val) {
define i32 @test314(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test314:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB314_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB314_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB314_1
; PPC64LE-NEXT: .LBB314_3:
@@ -5360,12 +5360,12 @@ define i32 @test314(i32* %ptr, i32 %val) {
define i64 @test315(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test315:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB315_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: ble 0, .LBB315_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB315_1
; PPC64LE-NEXT: .LBB315_3:
@@ -5377,13 +5377,13 @@ define i64 @test315(i64* %ptr, i64 %val) {
define i64 @test316(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test316:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB316_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: cmpld 4, 3
; PPC64LE-NEXT: ble 0, .LBB316_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB316_1
; PPC64LE-NEXT: .LBB316_3:
@@ -5395,13 +5395,13 @@ define i64 @test316(i64* %ptr, i64 %val) {
define i64 @test317(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test317:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB317_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: ble 0, .LBB317_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB317_1
; PPC64LE-NEXT: .LBB317_3:
@@ -5413,13 +5413,13 @@ define i64 @test317(i64* %ptr, i64 %val) {
define i64 @test318(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test318:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB318_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: ble 0, .LBB318_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB318_1
; PPC64LE-NEXT: .LBB318_3:
@@ -5432,13 +5432,13 @@ define i64 @test318(i64* %ptr, i64 %val) {
define i64 @test319(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test319:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB319_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: ble 0, .LBB319_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB319_1
; PPC64LE-NEXT: .LBB319_3:
@@ -5451,12 +5451,12 @@ define i64 @test319(i64* %ptr, i64 %val) {
define i8 @test320(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test320:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB320_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB320_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB320_1
; PPC64LE-NEXT: .LBB320_3:
@@ -5468,13 +5468,13 @@ define i8 @test320(i8* %ptr, i8 %val) {
define i8 @test321(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test321:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB321_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: bge 0, .LBB321_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB321_1
; PPC64LE-NEXT: .LBB321_3:
@@ -5486,13 +5486,13 @@ define i8 @test321(i8* %ptr, i8 %val) {
define i8 @test322(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test322:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB322_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB322_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB322_1
; PPC64LE-NEXT: .LBB322_3:
@@ -5504,13 +5504,13 @@ define i8 @test322(i8* %ptr, i8 %val) {
define i8 @test323(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test323:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB323_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB323_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB323_1
; PPC64LE-NEXT: .LBB323_3:
@@ -5523,13 +5523,13 @@ define i8 @test323(i8* %ptr, i8 %val) {
define i8 @test324(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test324:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB324_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB324_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB324_1
; PPC64LE-NEXT: .LBB324_3:
@@ -5542,12 +5542,12 @@ define i8 @test324(i8* %ptr, i8 %val) {
define i16 @test325(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test325:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB325_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB325_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB325_1
; PPC64LE-NEXT: .LBB325_3:
@@ -5559,13 +5559,13 @@ define i16 @test325(i16* %ptr, i16 %val) {
define i16 @test326(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test326:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB326_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: bge 0, .LBB326_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB326_1
; PPC64LE-NEXT: .LBB326_3:
@@ -5577,13 +5577,13 @@ define i16 @test326(i16* %ptr, i16 %val) {
define i16 @test327(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test327:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB327_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB327_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB327_1
; PPC64LE-NEXT: .LBB327_3:
@@ -5595,13 +5595,13 @@ define i16 @test327(i16* %ptr, i16 %val) {
define i16 @test328(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test328:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB328_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB328_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB328_1
; PPC64LE-NEXT: .LBB328_3:
@@ -5614,13 +5614,13 @@ define i16 @test328(i16* %ptr, i16 %val) {
define i16 @test329(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test329:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB329_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB329_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB329_1
; PPC64LE-NEXT: .LBB329_3:
@@ -5633,12 +5633,12 @@ define i16 @test329(i16* %ptr, i16 %val) {
define i32 @test330(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test330:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB330_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB330_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB330_1
; PPC64LE-NEXT: .LBB330_3:
@@ -5650,13 +5650,13 @@ define i32 @test330(i32* %ptr, i32 %val) {
define i32 @test331(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test331:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB331_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: bge 0, .LBB331_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB331_1
; PPC64LE-NEXT: .LBB331_3:
@@ -5668,13 +5668,13 @@ define i32 @test331(i32* %ptr, i32 %val) {
define i32 @test332(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test332:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB332_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB332_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB332_1
; PPC64LE-NEXT: .LBB332_3:
@@ -5686,13 +5686,13 @@ define i32 @test332(i32* %ptr, i32 %val) {
define i32 @test333(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test333:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB333_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB333_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB333_1
; PPC64LE-NEXT: .LBB333_3:
@@ -5705,13 +5705,13 @@ define i32 @test333(i32* %ptr, i32 %val) {
define i32 @test334(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test334:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB334_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB334_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB334_1
; PPC64LE-NEXT: .LBB334_3:
@@ -5724,12 +5724,12 @@ define i32 @test334(i32* %ptr, i32 %val) {
define i64 @test335(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test335:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB335_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: bge 0, .LBB335_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB335_1
; PPC64LE-NEXT: .LBB335_3:
@@ -5741,13 +5741,13 @@ define i64 @test335(i64* %ptr, i64 %val) {
define i64 @test336(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test336:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB336_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: cmpld 4, 3
; PPC64LE-NEXT: bge 0, .LBB336_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB336_1
; PPC64LE-NEXT: .LBB336_3:
@@ -5759,13 +5759,13 @@ define i64 @test336(i64* %ptr, i64 %val) {
define i64 @test337(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test337:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB337_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: bge 0, .LBB337_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB337_1
; PPC64LE-NEXT: .LBB337_3:
@@ -5777,13 +5777,13 @@ define i64 @test337(i64* %ptr, i64 %val) {
define i64 @test338(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test338:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB338_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: bge 0, .LBB338_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB338_1
; PPC64LE-NEXT: .LBB338_3:
@@ -5796,13 +5796,13 @@ define i64 @test338(i64* %ptr, i64 %val) {
define i64 @test339(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test339:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB339_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: bge 0, .LBB339_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB339_1
; PPC64LE-NEXT: .LBB339_3:
@@ -5815,12 +5815,12 @@ define i64 @test339(i64* %ptr, i64 %val) {
define i8 @test340(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test340:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB340_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB340_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") monotonic
@@ -5829,13 +5829,13 @@ define i8 @test340(i8* %ptr, i8 %val) {
define i8 @test341(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test341:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB341_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB341_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acquire
@@ -5844,13 +5844,13 @@ define i8 @test341(i8* %ptr, i8 %val) {
define i8 @test342(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test342:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB342_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB342_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") release
@@ -5859,13 +5859,13 @@ define i8 @test342(i8* %ptr, i8 %val) {
define i8 @test343(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test343:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB343_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB343_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5875,13 +5875,13 @@ define i8 @test343(i8* %ptr, i8 %val) {
define i8 @test344(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test344:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB344_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB344_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5891,12 +5891,12 @@ define i8 @test344(i8* %ptr, i8 %val) {
define i16 @test345(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test345:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB345_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB345_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") monotonic
@@ -5905,13 +5905,13 @@ define i16 @test345(i16* %ptr, i16 %val) {
define i16 @test346(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test346:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB346_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB346_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acquire
@@ -5920,13 +5920,13 @@ define i16 @test346(i16* %ptr, i16 %val) {
define i16 @test347(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test347:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB347_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB347_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") release
@@ -5935,13 +5935,13 @@ define i16 @test347(i16* %ptr, i16 %val) {
define i16 @test348(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test348:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB348_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB348_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5951,13 +5951,13 @@ define i16 @test348(i16* %ptr, i16 %val) {
define i16 @test349(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test349:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB349_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB349_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5967,12 +5967,12 @@ define i16 @test349(i16* %ptr, i16 %val) {
define i32 @test350(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test350:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB350_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB350_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") monotonic
@@ -5981,13 +5981,13 @@ define i32 @test350(i32* %ptr, i32 %val) {
define i32 @test351(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test351:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB351_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB351_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acquire
@@ -5996,13 +5996,13 @@ define i32 @test351(i32* %ptr, i32 %val) {
define i32 @test352(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test352:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB352_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB352_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") release
@@ -6011,13 +6011,13 @@ define i32 @test352(i32* %ptr, i32 %val) {
define i32 @test353(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test353:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB353_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB353_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6027,13 +6027,13 @@ define i32 @test353(i32* %ptr, i32 %val) {
define i32 @test354(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test354:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB354_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB354_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6043,12 +6043,12 @@ define i32 @test354(i32* %ptr, i32 %val) {
define i64 @test355(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test355:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB355_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB355_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") monotonic
@@ -6057,13 +6057,13 @@ define i64 @test355(i64* %ptr, i64 %val) {
define i64 @test356(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test356:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB356_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB356_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acquire
@@ -6072,13 +6072,13 @@ define i64 @test356(i64* %ptr, i64 %val) {
define i64 @test357(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test357:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB357_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB357_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") release
@@ -6087,13 +6087,13 @@ define i64 @test357(i64* %ptr, i64 %val) {
define i64 @test358(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test358:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB358_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB358_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6103,13 +6103,13 @@ define i64 @test358(i64* %ptr, i64 %val) {
define i64 @test359(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test359:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB359_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB359_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6119,13 +6119,13 @@ define i64 @test359(i64* %ptr, i64 %val) {
define i8 @test360(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test360:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB360_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB360_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") monotonic
@@ -6134,14 +6134,14 @@ define i8 @test360(i8* %ptr, i8 %val) {
define i8 @test361(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test361:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB361_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB361_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acquire
@@ -6150,14 +6150,14 @@ define i8 @test361(i8* %ptr, i8 %val) {
define i8 @test362(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test362:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB362_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB362_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") release
@@ -6166,14 +6166,14 @@ define i8 @test362(i8* %ptr, i8 %val) {
define i8 @test363(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test363:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB363_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB363_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6183,14 +6183,14 @@ define i8 @test363(i8* %ptr, i8 %val) {
define i8 @test364(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test364:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB364_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB364_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6200,13 +6200,13 @@ define i8 @test364(i8* %ptr, i8 %val) {
define i16 @test365(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test365:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB365_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB365_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") monotonic
@@ -6215,14 +6215,14 @@ define i16 @test365(i16* %ptr, i16 %val) {
define i16 @test366(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test366:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB366_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB366_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acquire
@@ -6231,14 +6231,14 @@ define i16 @test366(i16* %ptr, i16 %val) {
define i16 @test367(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test367:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB367_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB367_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") release
@@ -6247,14 +6247,14 @@ define i16 @test367(i16* %ptr, i16 %val) {
define i16 @test368(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test368:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB368_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB368_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6264,14 +6264,14 @@ define i16 @test368(i16* %ptr, i16 %val) {
define i16 @test369(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test369:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB369_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB369_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6281,13 +6281,13 @@ define i16 @test369(i16* %ptr, i16 %val) {
define i32 @test370(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test370:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB370_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB370_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") monotonic
@@ -6296,14 +6296,14 @@ define i32 @test370(i32* %ptr, i32 %val) {
define i32 @test371(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test371:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB371_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB371_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acquire
@@ -6312,14 +6312,14 @@ define i32 @test371(i32* %ptr, i32 %val) {
define i32 @test372(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test372:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB372_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB372_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") release
@@ -6328,14 +6328,14 @@ define i32 @test372(i32* %ptr, i32 %val) {
define i32 @test373(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test373:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB373_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB373_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6345,14 +6345,14 @@ define i32 @test373(i32* %ptr, i32 %val) {
define i32 @test374(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test374:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB374_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB374_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6362,13 +6362,13 @@ define i32 @test374(i32* %ptr, i32 %val) {
define i64 @test375(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test375:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB375_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB375_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") monotonic
@@ -6377,14 +6377,14 @@ define i64 @test375(i64* %ptr, i64 %val) {
define i64 @test376(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test376:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB376_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB376_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acquire
@@ -6393,14 +6393,14 @@ define i64 @test376(i64* %ptr, i64 %val) {
define i64 @test377(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test377:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB377_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB377_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") release
@@ -6409,14 +6409,14 @@ define i64 @test377(i64* %ptr, i64 %val) {
define i64 @test378(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test378:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB378_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB378_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6426,14 +6426,14 @@ define i64 @test378(i64* %ptr, i64 %val) {
define i64 @test379(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test379:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB379_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB379_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6443,13 +6443,13 @@ define i64 @test379(i64* %ptr, i64 %val) {
define i8 @test380(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test380:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB380_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB380_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") monotonic
@@ -6458,14 +6458,14 @@ define i8 @test380(i8* %ptr, i8 %val) {
define i8 @test381(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test381:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB381_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: subf 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB381_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acquire
@@ -6474,14 +6474,14 @@ define i8 @test381(i8* %ptr, i8 %val) {
define i8 @test382(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test382:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB382_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB382_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") release
@@ -6490,14 +6490,14 @@ define i8 @test382(i8* %ptr, i8 %val) {
define i8 @test383(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test383:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB383_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB383_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6507,14 +6507,14 @@ define i8 @test383(i8* %ptr, i8 %val) {
define i8 @test384(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test384:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB384_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB384_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6524,13 +6524,13 @@ define i8 @test384(i8* %ptr, i8 %val) {
define i16 @test385(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test385:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB385_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB385_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") monotonic
@@ -6539,14 +6539,14 @@ define i16 @test385(i16* %ptr, i16 %val) {
define i16 @test386(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test386:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB386_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: subf 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB386_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acquire
@@ -6555,14 +6555,14 @@ define i16 @test386(i16* %ptr, i16 %val) {
define i16 @test387(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test387:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB387_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB387_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") release
@@ -6571,14 +6571,14 @@ define i16 @test387(i16* %ptr, i16 %val) {
define i16 @test388(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test388:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB388_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB388_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6588,14 +6588,14 @@ define i16 @test388(i16* %ptr, i16 %val) {
define i16 @test389(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test389:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB389_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB389_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6605,13 +6605,13 @@ define i16 @test389(i16* %ptr, i16 %val) {
define i32 @test390(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test390:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB390_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB390_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") monotonic
@@ -6620,14 +6620,14 @@ define i32 @test390(i32* %ptr, i32 %val) {
define i32 @test391(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test391:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB391_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: subf 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB391_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acquire
@@ -6636,14 +6636,14 @@ define i32 @test391(i32* %ptr, i32 %val) {
define i32 @test392(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test392:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB392_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB392_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") release
@@ -6652,14 +6652,14 @@ define i32 @test392(i32* %ptr, i32 %val) {
define i32 @test393(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test393:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB393_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB393_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6669,14 +6669,14 @@ define i32 @test393(i32* %ptr, i32 %val) {
define i32 @test394(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test394:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB394_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: subf 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB394_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6686,13 +6686,13 @@ define i32 @test394(i32* %ptr, i32 %val) {
define i64 @test395(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test395:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB395_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB395_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") monotonic
@@ -6701,14 +6701,14 @@ define i64 @test395(i64* %ptr, i64 %val) {
define i64 @test396(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test396:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB396_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB396_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acquire
@@ -6717,14 +6717,14 @@ define i64 @test396(i64* %ptr, i64 %val) {
define i64 @test397(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test397:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB397_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB397_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") release
@@ -6733,14 +6733,14 @@ define i64 @test397(i64* %ptr, i64 %val) {
define i64 @test398(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test398:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB398_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB398_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6750,14 +6750,14 @@ define i64 @test398(i64* %ptr, i64 %val) {
define i64 @test399(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test399:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB399_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB399_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6767,13 +6767,13 @@ define i64 @test399(i64* %ptr, i64 %val) {
define i8 @test400(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test400:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB400_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB400_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") monotonic
@@ -6782,14 +6782,14 @@ define i8 @test400(i8* %ptr, i8 %val) {
define i8 @test401(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test401:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB401_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB401_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acquire
@@ -6798,14 +6798,14 @@ define i8 @test401(i8* %ptr, i8 %val) {
define i8 @test402(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test402:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB402_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB402_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") release
@@ -6814,14 +6814,14 @@ define i8 @test402(i8* %ptr, i8 %val) {
define i8 @test403(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test403:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB403_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB403_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6831,14 +6831,14 @@ define i8 @test403(i8* %ptr, i8 %val) {
define i8 @test404(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test404:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB404_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB404_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6848,13 +6848,13 @@ define i8 @test404(i8* %ptr, i8 %val) {
define i16 @test405(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test405:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB405_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB405_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") monotonic
@@ -6863,14 +6863,14 @@ define i16 @test405(i16* %ptr, i16 %val) {
define i16 @test406(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test406:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB406_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB406_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acquire
@@ -6879,14 +6879,14 @@ define i16 @test406(i16* %ptr, i16 %val) {
define i16 @test407(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test407:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB407_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB407_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") release
@@ -6895,14 +6895,14 @@ define i16 @test407(i16* %ptr, i16 %val) {
define i16 @test408(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test408:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB408_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB408_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6912,14 +6912,14 @@ define i16 @test408(i16* %ptr, i16 %val) {
define i16 @test409(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test409:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB409_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB409_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6929,13 +6929,13 @@ define i16 @test409(i16* %ptr, i16 %val) {
define i32 @test410(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test410:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB410_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB410_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") monotonic
@@ -6944,14 +6944,14 @@ define i32 @test410(i32* %ptr, i32 %val) {
define i32 @test411(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test411:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB411_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB411_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acquire
@@ -6960,14 +6960,14 @@ define i32 @test411(i32* %ptr, i32 %val) {
define i32 @test412(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test412:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB412_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB412_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") release
@@ -6976,14 +6976,14 @@ define i32 @test412(i32* %ptr, i32 %val) {
define i32 @test413(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test413:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB413_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB413_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6993,14 +6993,14 @@ define i32 @test413(i32* %ptr, i32 %val) {
define i32 @test414(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test414:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB414_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB414_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7010,13 +7010,13 @@ define i32 @test414(i32* %ptr, i32 %val) {
define i64 @test415(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test415:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB415_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB415_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") monotonic
@@ -7025,14 +7025,14 @@ define i64 @test415(i64* %ptr, i64 %val) {
define i64 @test416(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test416:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB416_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB416_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acquire
@@ -7041,14 +7041,14 @@ define i64 @test416(i64* %ptr, i64 %val) {
define i64 @test417(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test417:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB417_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB417_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") release
@@ -7057,14 +7057,14 @@ define i64 @test417(i64* %ptr, i64 %val) {
define i64 @test418(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test418:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB418_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB418_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7074,14 +7074,14 @@ define i64 @test418(i64* %ptr, i64 %val) {
define i64 @test419(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test419:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB419_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB419_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7091,13 +7091,13 @@ define i64 @test419(i64* %ptr, i64 %val) {
define i8 @test420(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test420:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB420_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB420_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") monotonic
@@ -7106,14 +7106,14 @@ define i8 @test420(i8* %ptr, i8 %val) {
define i8 @test421(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test421:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB421_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB421_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acquire
@@ -7122,14 +7122,14 @@ define i8 @test421(i8* %ptr, i8 %val) {
define i8 @test422(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test422:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB422_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB422_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") release
@@ -7138,14 +7138,14 @@ define i8 @test422(i8* %ptr, i8 %val) {
define i8 @test423(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test423:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB423_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB423_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7155,14 +7155,14 @@ define i8 @test423(i8* %ptr, i8 %val) {
define i8 @test424(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test424:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB424_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB424_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7172,13 +7172,13 @@ define i8 @test424(i8* %ptr, i8 %val) {
define i16 @test425(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test425:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB425_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB425_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") monotonic
@@ -7187,14 +7187,14 @@ define i16 @test425(i16* %ptr, i16 %val) {
define i16 @test426(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test426:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB426_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB426_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acquire
@@ -7203,14 +7203,14 @@ define i16 @test426(i16* %ptr, i16 %val) {
define i16 @test427(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test427:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB427_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB427_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") release
@@ -7219,14 +7219,14 @@ define i16 @test427(i16* %ptr, i16 %val) {
define i16 @test428(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test428:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB428_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB428_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7236,14 +7236,14 @@ define i16 @test428(i16* %ptr, i16 %val) {
define i16 @test429(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test429:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB429_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB429_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7253,13 +7253,13 @@ define i16 @test429(i16* %ptr, i16 %val) {
define i32 @test430(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test430:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB430_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB430_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") monotonic
@@ -7268,14 +7268,14 @@ define i32 @test430(i32* %ptr, i32 %val) {
define i32 @test431(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test431:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB431_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB431_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acquire
@@ -7284,14 +7284,14 @@ define i32 @test431(i32* %ptr, i32 %val) {
define i32 @test432(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test432:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB432_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB432_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") release
@@ -7300,14 +7300,14 @@ define i32 @test432(i32* %ptr, i32 %val) {
define i32 @test433(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test433:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB433_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB433_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7317,14 +7317,14 @@ define i32 @test433(i32* %ptr, i32 %val) {
define i32 @test434(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test434:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB434_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB434_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7334,13 +7334,13 @@ define i32 @test434(i32* %ptr, i32 %val) {
define i64 @test435(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test435:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB435_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB435_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") monotonic
@@ -7349,14 +7349,14 @@ define i64 @test435(i64* %ptr, i64 %val) {
define i64 @test436(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test436:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB436_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB436_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acquire
@@ -7365,14 +7365,14 @@ define i64 @test436(i64* %ptr, i64 %val) {
define i64 @test437(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test437:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB437_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB437_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") release
@@ -7381,14 +7381,14 @@ define i64 @test437(i64* %ptr, i64 %val) {
define i64 @test438(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test438:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB438_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB438_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7398,14 +7398,14 @@ define i64 @test438(i64* %ptr, i64 %val) {
define i64 @test439(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test439:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB439_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB439_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7415,13 +7415,13 @@ define i64 @test439(i64* %ptr, i64 %val) {
define i8 @test440(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test440:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB440_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB440_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") monotonic
@@ -7430,14 +7430,14 @@ define i8 @test440(i8* %ptr, i8 %val) {
define i8 @test441(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test441:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB441_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB441_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acquire
@@ -7446,14 +7446,14 @@ define i8 @test441(i8* %ptr, i8 %val) {
define i8 @test442(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test442:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB442_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB442_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") release
@@ -7462,14 +7462,14 @@ define i8 @test442(i8* %ptr, i8 %val) {
define i8 @test443(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test443:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB443_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB443_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7479,14 +7479,14 @@ define i8 @test443(i8* %ptr, i8 %val) {
define i8 @test444(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test444:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB444_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB444_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7496,13 +7496,13 @@ define i8 @test444(i8* %ptr, i8 %val) {
define i16 @test445(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test445:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB445_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB445_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") monotonic
@@ -7511,14 +7511,14 @@ define i16 @test445(i16* %ptr, i16 %val) {
define i16 @test446(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test446:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB446_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB446_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acquire
@@ -7527,14 +7527,14 @@ define i16 @test446(i16* %ptr, i16 %val) {
define i16 @test447(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test447:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB447_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB447_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") release
@@ -7543,14 +7543,14 @@ define i16 @test447(i16* %ptr, i16 %val) {
define i16 @test448(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test448:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB448_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB448_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7560,14 +7560,14 @@ define i16 @test448(i16* %ptr, i16 %val) {
define i16 @test449(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test449:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB449_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB449_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7577,13 +7577,13 @@ define i16 @test449(i16* %ptr, i16 %val) {
define i32 @test450(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test450:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB450_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB450_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") monotonic
@@ -7592,14 +7592,14 @@ define i32 @test450(i32* %ptr, i32 %val) {
define i32 @test451(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test451:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB451_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB451_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acquire
@@ -7608,14 +7608,14 @@ define i32 @test451(i32* %ptr, i32 %val) {
define i32 @test452(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test452:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB452_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB452_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") release
@@ -7624,14 +7624,14 @@ define i32 @test452(i32* %ptr, i32 %val) {
define i32 @test453(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test453:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB453_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB453_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7641,14 +7641,14 @@ define i32 @test453(i32* %ptr, i32 %val) {
define i32 @test454(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test454:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB454_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB454_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7658,13 +7658,13 @@ define i32 @test454(i32* %ptr, i32 %val) {
define i64 @test455(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test455:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB455_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB455_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") monotonic
@@ -7673,14 +7673,14 @@ define i64 @test455(i64* %ptr, i64 %val) {
define i64 @test456(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test456:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB456_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB456_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acquire
@@ -7689,14 +7689,14 @@ define i64 @test456(i64* %ptr, i64 %val) {
define i64 @test457(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test457:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB457_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB457_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") release
@@ -7705,14 +7705,14 @@ define i64 @test457(i64* %ptr, i64 %val) {
define i64 @test458(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test458:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB458_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB458_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7722,14 +7722,14 @@ define i64 @test458(i64* %ptr, i64 %val) {
define i64 @test459(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test459:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB459_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB459_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7739,13 +7739,13 @@ define i64 @test459(i64* %ptr, i64 %val) {
define i8 @test460(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test460:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB460_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB460_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") monotonic
@@ -7754,14 +7754,14 @@ define i8 @test460(i8* %ptr, i8 %val) {
define i8 @test461(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test461:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB461_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB461_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acquire
@@ -7770,14 +7770,14 @@ define i8 @test461(i8* %ptr, i8 %val) {
define i8 @test462(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test462:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB462_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB462_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") release
@@ -7786,14 +7786,14 @@ define i8 @test462(i8* %ptr, i8 %val) {
define i8 @test463(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test463:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB463_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB463_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7803,14 +7803,14 @@ define i8 @test463(i8* %ptr, i8 %val) {
define i8 @test464(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test464:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB464_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB464_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7820,13 +7820,13 @@ define i8 @test464(i8* %ptr, i8 %val) {
define i16 @test465(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test465:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB465_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB465_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") monotonic
@@ -7835,14 +7835,14 @@ define i16 @test465(i16* %ptr, i16 %val) {
define i16 @test466(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test466:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB466_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB466_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acquire
@@ -7851,14 +7851,14 @@ define i16 @test466(i16* %ptr, i16 %val) {
define i16 @test467(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test467:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB467_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB467_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") release
@@ -7867,14 +7867,14 @@ define i16 @test467(i16* %ptr, i16 %val) {
define i16 @test468(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test468:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB468_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB468_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7884,14 +7884,14 @@ define i16 @test468(i16* %ptr, i16 %val) {
define i16 @test469(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test469:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB469_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB469_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7901,13 +7901,13 @@ define i16 @test469(i16* %ptr, i16 %val) {
define i32 @test470(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test470:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB470_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB470_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") monotonic
@@ -7916,14 +7916,14 @@ define i32 @test470(i32* %ptr, i32 %val) {
define i32 @test471(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test471:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB471_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB471_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acquire
@@ -7932,14 +7932,14 @@ define i32 @test471(i32* %ptr, i32 %val) {
define i32 @test472(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test472:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB472_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB472_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") release
@@ -7948,14 +7948,14 @@ define i32 @test472(i32* %ptr, i32 %val) {
define i32 @test473(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test473:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB473_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB473_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7965,14 +7965,14 @@ define i32 @test473(i32* %ptr, i32 %val) {
define i32 @test474(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test474:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB474_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB474_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7982,13 +7982,13 @@ define i32 @test474(i32* %ptr, i32 %val) {
define i64 @test475(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test475:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB475_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB475_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") monotonic
@@ -7997,14 +7997,14 @@ define i64 @test475(i64* %ptr, i64 %val) {
define i64 @test476(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test476:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB476_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
; PPC64LE-NEXT: bne 0, .LBB476_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acquire
@@ -8013,14 +8013,14 @@ define i64 @test476(i64* %ptr, i64 %val) {
define i64 @test477(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test477:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB477_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB477_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
%ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") release
@@ -8029,14 +8029,14 @@ define i64 @test477(i64* %ptr, i64 %val) {
define i64 @test478(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test478:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB478_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB478_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8046,14 +8046,14 @@ define i64 @test478(i64* %ptr, i64 %val) {
define i64 @test479(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test479:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB479_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: bne 0, .LBB479_1
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8063,13 +8063,13 @@ define i64 @test479(i64* %ptr, i64 %val) {
define i8 @test480(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test480:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB480_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB480_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB480_1
; PPC64LE-NEXT: .LBB480_3:
@@ -8081,14 +8081,14 @@ define i8 @test480(i8* %ptr, i8 %val) {
define i8 @test481(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test481:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB481_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: extsb 6, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB481_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB481_1
; PPC64LE-NEXT: .LBB481_3:
@@ -8100,14 +8100,14 @@ define i8 @test481(i8* %ptr, i8 %val) {
define i8 @test482(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test482:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB482_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB482_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB482_1
; PPC64LE-NEXT: .LBB482_3:
@@ -8119,14 +8119,14 @@ define i8 @test482(i8* %ptr, i8 %val) {
define i8 @test483(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test483:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB483_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB483_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB483_1
; PPC64LE-NEXT: .LBB483_3:
@@ -8139,14 +8139,14 @@ define i8 @test483(i8* %ptr, i8 %val) {
define i8 @test484(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test484:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB484_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB484_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB484_1
; PPC64LE-NEXT: .LBB484_3:
@@ -8159,13 +8159,13 @@ define i8 @test484(i8* %ptr, i8 %val) {
define i16 @test485(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test485:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB485_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB485_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB485_1
; PPC64LE-NEXT: .LBB485_3:
@@ -8177,14 +8177,14 @@ define i16 @test485(i16* %ptr, i16 %val) {
define i16 @test486(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test486:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB486_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: extsh 6, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB486_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB486_1
; PPC64LE-NEXT: .LBB486_3:
@@ -8196,14 +8196,14 @@ define i16 @test486(i16* %ptr, i16 %val) {
define i16 @test487(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test487:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB487_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB487_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB487_1
; PPC64LE-NEXT: .LBB487_3:
@@ -8215,14 +8215,14 @@ define i16 @test487(i16* %ptr, i16 %val) {
define i16 @test488(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test488:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB488_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB488_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB488_1
; PPC64LE-NEXT: .LBB488_3:
@@ -8235,14 +8235,14 @@ define i16 @test488(i16* %ptr, i16 %val) {
define i16 @test489(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test489:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB489_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: ble 0, .LBB489_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB489_1
; PPC64LE-NEXT: .LBB489_3:
@@ -8255,12 +8255,12 @@ define i16 @test489(i16* %ptr, i16 %val) {
define i32 @test490(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test490:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB490_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: ble 0, .LBB490_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB490_1
; PPC64LE-NEXT: .LBB490_3:
@@ -8272,13 +8272,13 @@ define i32 @test490(i32* %ptr, i32 %val) {
define i32 @test491(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test491:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB491_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: cmpw 4, 3
; PPC64LE-NEXT: ble 0, .LBB491_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB491_1
; PPC64LE-NEXT: .LBB491_3:
@@ -8290,13 +8290,13 @@ define i32 @test491(i32* %ptr, i32 %val) {
define i32 @test492(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test492:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB492_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: ble 0, .LBB492_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB492_1
; PPC64LE-NEXT: .LBB492_3:
@@ -8308,13 +8308,13 @@ define i32 @test492(i32* %ptr, i32 %val) {
define i32 @test493(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test493:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB493_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: ble 0, .LBB493_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB493_1
; PPC64LE-NEXT: .LBB493_3:
@@ -8327,13 +8327,13 @@ define i32 @test493(i32* %ptr, i32 %val) {
define i32 @test494(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test494:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB494_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: ble 0, .LBB494_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB494_1
; PPC64LE-NEXT: .LBB494_3:
@@ -8346,12 +8346,12 @@ define i32 @test494(i32* %ptr, i32 %val) {
define i64 @test495(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test495:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB495_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: ble 0, .LBB495_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB495_1
; PPC64LE-NEXT: .LBB495_3:
@@ -8363,13 +8363,13 @@ define i64 @test495(i64* %ptr, i64 %val) {
define i64 @test496(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test496:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB496_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: cmpd 4, 3
; PPC64LE-NEXT: ble 0, .LBB496_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB496_1
; PPC64LE-NEXT: .LBB496_3:
@@ -8381,13 +8381,13 @@ define i64 @test496(i64* %ptr, i64 %val) {
define i64 @test497(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test497:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB497_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: ble 0, .LBB497_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB497_1
; PPC64LE-NEXT: .LBB497_3:
@@ -8399,13 +8399,13 @@ define i64 @test497(i64* %ptr, i64 %val) {
define i64 @test498(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test498:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB498_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: ble 0, .LBB498_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB498_1
; PPC64LE-NEXT: .LBB498_3:
@@ -8418,13 +8418,13 @@ define i64 @test498(i64* %ptr, i64 %val) {
define i64 @test499(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test499:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB499_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: ble 0, .LBB499_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB499_1
; PPC64LE-NEXT: .LBB499_3:
@@ -8437,13 +8437,13 @@ define i64 @test499(i64* %ptr, i64 %val) {
define i8 @test500(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test500:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB500_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB500_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB500_1
; PPC64LE-NEXT: .LBB500_3:
@@ -8455,14 +8455,14 @@ define i8 @test500(i8* %ptr, i8 %val) {
define i8 @test501(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test501:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB501_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: extsb 6, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB501_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB501_1
; PPC64LE-NEXT: .LBB501_3:
@@ -8474,14 +8474,14 @@ define i8 @test501(i8* %ptr, i8 %val) {
define i8 @test502(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test502:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB502_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB502_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB502_1
; PPC64LE-NEXT: .LBB502_3:
@@ -8493,14 +8493,14 @@ define i8 @test502(i8* %ptr, i8 %val) {
define i8 @test503(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test503:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB503_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB503_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB503_1
; PPC64LE-NEXT: .LBB503_3:
@@ -8513,14 +8513,14 @@ define i8 @test503(i8* %ptr, i8 %val) {
define i8 @test504(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test504:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB504_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: extsb 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB504_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB504_1
; PPC64LE-NEXT: .LBB504_3:
@@ -8533,13 +8533,13 @@ define i8 @test504(i8* %ptr, i8 %val) {
define i16 @test505(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test505:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB505_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB505_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB505_1
; PPC64LE-NEXT: .LBB505_3:
@@ -8551,14 +8551,14 @@ define i16 @test505(i16* %ptr, i16 %val) {
define i16 @test506(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test506:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB506_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: extsh 6, 3
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB506_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB506_1
; PPC64LE-NEXT: .LBB506_3:
@@ -8570,14 +8570,14 @@ define i16 @test506(i16* %ptr, i16 %val) {
define i16 @test507(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test507:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB507_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB507_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB507_1
; PPC64LE-NEXT: .LBB507_3:
@@ -8589,14 +8589,14 @@ define i16 @test507(i16* %ptr, i16 %val) {
define i16 @test508(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test508:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB508_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB508_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB508_1
; PPC64LE-NEXT: .LBB508_3:
@@ -8609,14 +8609,14 @@ define i16 @test508(i16* %ptr, i16 %val) {
define i16 @test509(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test509:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB509_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: extsh 6, 5
; PPC64LE-NEXT: cmpw 4, 6
; PPC64LE-NEXT: bge 0, .LBB509_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB509_1
; PPC64LE-NEXT: .LBB509_3:
@@ -8629,12 +8629,12 @@ define i16 @test509(i16* %ptr, i16 %val) {
define i32 @test510(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test510:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB510_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: bge 0, .LBB510_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB510_1
; PPC64LE-NEXT: .LBB510_3:
@@ -8646,13 +8646,13 @@ define i32 @test510(i32* %ptr, i32 %val) {
define i32 @test511(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test511:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB511_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: cmpw 4, 3
; PPC64LE-NEXT: bge 0, .LBB511_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB511_1
; PPC64LE-NEXT: .LBB511_3:
@@ -8664,13 +8664,13 @@ define i32 @test511(i32* %ptr, i32 %val) {
define i32 @test512(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test512:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB512_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: bge 0, .LBB512_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB512_1
; PPC64LE-NEXT: .LBB512_3:
@@ -8682,13 +8682,13 @@ define i32 @test512(i32* %ptr, i32 %val) {
define i32 @test513(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test513:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB513_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: bge 0, .LBB513_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB513_1
; PPC64LE-NEXT: .LBB513_3:
@@ -8701,13 +8701,13 @@ define i32 @test513(i32* %ptr, i32 %val) {
define i32 @test514(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test514:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB514_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmpw 4, 5
; PPC64LE-NEXT: bge 0, .LBB514_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB514_1
; PPC64LE-NEXT: .LBB514_3:
@@ -8720,12 +8720,12 @@ define i32 @test514(i32* %ptr, i32 %val) {
define i64 @test515(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test515:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB515_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: bge 0, .LBB515_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB515_1
; PPC64LE-NEXT: .LBB515_3:
@@ -8737,13 +8737,13 @@ define i64 @test515(i64* %ptr, i64 %val) {
define i64 @test516(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test516:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB516_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: cmpd 4, 3
; PPC64LE-NEXT: bge 0, .LBB516_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB516_1
; PPC64LE-NEXT: .LBB516_3:
@@ -8755,13 +8755,13 @@ define i64 @test516(i64* %ptr, i64 %val) {
define i64 @test517(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test517:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB517_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: bge 0, .LBB517_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB517_1
; PPC64LE-NEXT: .LBB517_3:
@@ -8773,13 +8773,13 @@ define i64 @test517(i64* %ptr, i64 %val) {
define i64 @test518(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test518:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB518_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: bge 0, .LBB518_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB518_1
; PPC64LE-NEXT: .LBB518_3:
@@ -8792,13 +8792,13 @@ define i64 @test518(i64* %ptr, i64 %val) {
define i64 @test519(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test519:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB519_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpd 4, 5
; PPC64LE-NEXT: bge 0, .LBB519_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB519_1
; PPC64LE-NEXT: .LBB519_3:
@@ -8811,12 +8811,12 @@ define i64 @test519(i64* %ptr, i64 %val) {
define i8 @test520(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test520:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB520_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB520_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB520_1
; PPC64LE-NEXT: .LBB520_3:
@@ -8828,13 +8828,13 @@ define i8 @test520(i8* %ptr, i8 %val) {
define i8 @test521(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test521:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB521_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: ble 0, .LBB521_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB521_1
; PPC64LE-NEXT: .LBB521_3:
@@ -8846,13 +8846,13 @@ define i8 @test521(i8* %ptr, i8 %val) {
define i8 @test522(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test522:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB522_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB522_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB522_1
; PPC64LE-NEXT: .LBB522_3:
@@ -8864,13 +8864,13 @@ define i8 @test522(i8* %ptr, i8 %val) {
define i8 @test523(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test523:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB523_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB523_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB523_1
; PPC64LE-NEXT: .LBB523_3:
@@ -8883,13 +8883,13 @@ define i8 @test523(i8* %ptr, i8 %val) {
define i8 @test524(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test524:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB524_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB524_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB524_1
; PPC64LE-NEXT: .LBB524_3:
@@ -8902,12 +8902,12 @@ define i8 @test524(i8* %ptr, i8 %val) {
define i16 @test525(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test525:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB525_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB525_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB525_1
; PPC64LE-NEXT: .LBB525_3:
@@ -8919,13 +8919,13 @@ define i16 @test525(i16* %ptr, i16 %val) {
define i16 @test526(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test526:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB526_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: ble 0, .LBB526_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB526_1
; PPC64LE-NEXT: .LBB526_3:
@@ -8937,13 +8937,13 @@ define i16 @test526(i16* %ptr, i16 %val) {
define i16 @test527(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test527:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB527_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB527_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB527_1
; PPC64LE-NEXT: .LBB527_3:
@@ -8955,13 +8955,13 @@ define i16 @test527(i16* %ptr, i16 %val) {
define i16 @test528(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test528:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB528_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB528_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB528_1
; PPC64LE-NEXT: .LBB528_3:
@@ -8974,13 +8974,13 @@ define i16 @test528(i16* %ptr, i16 %val) {
define i16 @test529(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test529:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB529_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB529_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB529_1
; PPC64LE-NEXT: .LBB529_3:
@@ -8993,12 +8993,12 @@ define i16 @test529(i16* %ptr, i16 %val) {
define i32 @test530(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test530:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB530_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB530_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB530_1
; PPC64LE-NEXT: .LBB530_3:
@@ -9010,13 +9010,13 @@ define i32 @test530(i32* %ptr, i32 %val) {
define i32 @test531(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test531:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB531_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: ble 0, .LBB531_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB531_1
; PPC64LE-NEXT: .LBB531_3:
@@ -9028,13 +9028,13 @@ define i32 @test531(i32* %ptr, i32 %val) {
define i32 @test532(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test532:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB532_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB532_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB532_1
; PPC64LE-NEXT: .LBB532_3:
@@ -9046,13 +9046,13 @@ define i32 @test532(i32* %ptr, i32 %val) {
define i32 @test533(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test533:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB533_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB533_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB533_1
; PPC64LE-NEXT: .LBB533_3:
@@ -9065,13 +9065,13 @@ define i32 @test533(i32* %ptr, i32 %val) {
define i32 @test534(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test534:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB534_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: ble 0, .LBB534_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB534_1
; PPC64LE-NEXT: .LBB534_3:
@@ -9084,12 +9084,12 @@ define i32 @test534(i32* %ptr, i32 %val) {
define i64 @test535(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test535:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB535_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: ble 0, .LBB535_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB535_1
; PPC64LE-NEXT: .LBB535_3:
@@ -9101,13 +9101,13 @@ define i64 @test535(i64* %ptr, i64 %val) {
define i64 @test536(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test536:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB536_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: cmpld 4, 3
; PPC64LE-NEXT: ble 0, .LBB536_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB536_1
; PPC64LE-NEXT: .LBB536_3:
@@ -9119,13 +9119,13 @@ define i64 @test536(i64* %ptr, i64 %val) {
define i64 @test537(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test537:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB537_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: ble 0, .LBB537_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB537_1
; PPC64LE-NEXT: .LBB537_3:
@@ -9137,13 +9137,13 @@ define i64 @test537(i64* %ptr, i64 %val) {
define i64 @test538(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test538:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB538_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: ble 0, .LBB538_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB538_1
; PPC64LE-NEXT: .LBB538_3:
@@ -9156,13 +9156,13 @@ define i64 @test538(i64* %ptr, i64 %val) {
define i64 @test539(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test539:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB539_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: ble 0, .LBB539_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB539_1
; PPC64LE-NEXT: .LBB539_3:
@@ -9175,12 +9175,12 @@ define i64 @test539(i64* %ptr, i64 %val) {
define i8 @test540(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test540:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB540_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB540_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB540_1
; PPC64LE-NEXT: .LBB540_3:
@@ -9192,13 +9192,13 @@ define i8 @test540(i8* %ptr, i8 %val) {
define i8 @test541(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test541:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB541_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: bge 0, .LBB541_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB541_1
; PPC64LE-NEXT: .LBB541_3:
@@ -9210,13 +9210,13 @@ define i8 @test541(i8* %ptr, i8 %val) {
define i8 @test542(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test542:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB542_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB542_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB542_1
; PPC64LE-NEXT: .LBB542_3:
@@ -9228,13 +9228,13 @@ define i8 @test542(i8* %ptr, i8 %val) {
define i8 @test543(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test543:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB543_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB543_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB543_1
; PPC64LE-NEXT: .LBB543_3:
@@ -9247,13 +9247,13 @@ define i8 @test543(i8* %ptr, i8 %val) {
define i8 @test544(i8* %ptr, i8 %val) {
; PPC64LE-LABEL: test544:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB544_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB544_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB544_1
; PPC64LE-NEXT: .LBB544_3:
@@ -9266,12 +9266,12 @@ define i8 @test544(i8* %ptr, i8 %val) {
define i16 @test545(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test545:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB545_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB545_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB545_1
; PPC64LE-NEXT: .LBB545_3:
@@ -9283,13 +9283,13 @@ define i16 @test545(i16* %ptr, i16 %val) {
define i16 @test546(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test546:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB546_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: bge 0, .LBB546_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB546_1
; PPC64LE-NEXT: .LBB546_3:
@@ -9301,13 +9301,13 @@ define i16 @test546(i16* %ptr, i16 %val) {
define i16 @test547(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test547:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB547_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB547_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB547_1
; PPC64LE-NEXT: .LBB547_3:
@@ -9319,13 +9319,13 @@ define i16 @test547(i16* %ptr, i16 %val) {
define i16 @test548(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test548:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB548_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB548_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB548_1
; PPC64LE-NEXT: .LBB548_3:
@@ -9338,13 +9338,13 @@ define i16 @test548(i16* %ptr, i16 %val) {
define i16 @test549(i16* %ptr, i16 %val) {
; PPC64LE-LABEL: test549:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB549_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB549_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB549_1
; PPC64LE-NEXT: .LBB549_3:
@@ -9357,12 +9357,12 @@ define i16 @test549(i16* %ptr, i16 %val) {
define i32 @test550(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test550:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB550_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB550_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB550_1
; PPC64LE-NEXT: .LBB550_3:
@@ -9374,13 +9374,13 @@ define i32 @test550(i32* %ptr, i32 %val) {
define i32 @test551(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test551:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB551_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: cmplw 4, 3
; PPC64LE-NEXT: bge 0, .LBB551_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB551_1
; PPC64LE-NEXT: .LBB551_3:
@@ -9392,13 +9392,13 @@ define i32 @test551(i32* %ptr, i32 %val) {
define i32 @test552(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test552:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB552_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB552_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB552_1
; PPC64LE-NEXT: .LBB552_3:
@@ -9410,13 +9410,13 @@ define i32 @test552(i32* %ptr, i32 %val) {
define i32 @test553(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test553:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB553_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB553_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB553_1
; PPC64LE-NEXT: .LBB553_3:
@@ -9429,13 +9429,13 @@ define i32 @test553(i32* %ptr, i32 %val) {
define i32 @test554(i32* %ptr, i32 %val) {
; PPC64LE-LABEL: test554:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB554_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: cmplw 4, 5
; PPC64LE-NEXT: bge 0, .LBB554_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB554_1
; PPC64LE-NEXT: .LBB554_3:
@@ -9448,12 +9448,12 @@ define i32 @test554(i32* %ptr, i32 %val) {
define i64 @test555(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test555:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB555_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: bge 0, .LBB555_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB555_1
; PPC64LE-NEXT: .LBB555_3:
@@ -9465,13 +9465,13 @@ define i64 @test555(i64* %ptr, i64 %val) {
define i64 @test556(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test556:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB556_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: cmpld 4, 3
; PPC64LE-NEXT: bge 0, .LBB556_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
; PPC64LE-NEXT: bne 0, .LBB556_1
; PPC64LE-NEXT: .LBB556_3:
@@ -9483,13 +9483,13 @@ define i64 @test556(i64* %ptr, i64 %val) {
define i64 @test557(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test557:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB557_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: bge 0, .LBB557_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB557_1
; PPC64LE-NEXT: .LBB557_3:
@@ -9501,13 +9501,13 @@ define i64 @test557(i64* %ptr, i64 %val) {
define i64 @test558(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test558:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB558_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: bge 0, .LBB558_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB558_1
; PPC64LE-NEXT: .LBB558_3:
@@ -9520,13 +9520,13 @@ define i64 @test558(i64* %ptr, i64 %val) {
define i64 @test559(i64* %ptr, i64 %val) {
; PPC64LE-LABEL: test559:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB559_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: cmpld 4, 5
; PPC64LE-NEXT: bge 0, .LBB559_3
-; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
; PPC64LE-NEXT: bne 0, .LBB559_1
; PPC64LE-NEXT: .LBB559_3:
@@ -9540,7 +9540,7 @@ define i64 @test559(i64* %ptr, i64 %val) {
; The second load should never be scheduled before isync.
define i32 @test_ordering0(i32* %ptr1, i32* %ptr2) {
; PPC64LE-LABEL: test_ordering0:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 4, 0(3)
; PPC64LE-NEXT: cmpd 7, 4, 4
; PPC64LE-NEXT: bne- 7, .+4
@@ -9557,7 +9557,7 @@ define i32 @test_ordering0(i32* %ptr1, i32* %ptr2) {
; The second store should never be scheduled before isync.
define i32 @test_ordering1(i32* %ptr1, i32 %val1, i32* %ptr2) {
; PPC64LE-LABEL: test_ordering1:
-; PPC64LE: # BB#0:
+; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: cmpd 7, 3, 3
; PPC64LE-NEXT: bne- 7, .+4
diff --git a/test/CodeGen/PowerPC/branch_coalesce.ll b/test/CodeGen/PowerPC/branch_coalesce.ll
index 007eef27b2d..a57dec111bc 100644
--- a/test/CodeGen/PowerPC/branch_coalesce.ll
+++ b/test/CodeGen/PowerPC/branch_coalesce.ll
@@ -23,10 +23,10 @@ define double @testBranchCoal(double %a, double %b, double %c, i32 %x) {
; CHECK: blr
; CHECK-NOCOALESCE-LABEL: testBranchCoal:
-; CHECK-NOCOALESCE: # BB#0: # %entry
+; CHECK-NOCOALESCE: # %bb.0: # %entry
; CHECK-NOCOALESCE-NEXT: cmplwi 0, 6, 0
; CHECK-NOCOALESCE-NEXT: bne 0, .LBB0_5
-; CHECK-NOCOALESCE-NEXT: # BB#1: # %entry
+; CHECK-NOCOALESCE-NEXT: # %bb.1: # %entry
; CHECK-NOCOALESCE-NEXT: bne 0, .LBB0_6
; CHECK-NOCOALESCE-NEXT: .LBB0_2: # %entry
; CHECK-NOCOALESCE-NEXT: beq 0, .LBB0_4
diff --git a/test/CodeGen/PowerPC/fabs.ll b/test/CodeGen/PowerPC/fabs.ll
index c8cbd00b4dc..369803af979 100644
--- a/test/CodeGen/PowerPC/fabs.ll
+++ b/test/CodeGen/PowerPC/fabs.ll
@@ -2,7 +2,7 @@
define double @fabs(double %f) {
; CHECK-LABEL: fabs:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: fabs f1, f1
; CHECK-NEXT: blr
;
@@ -12,7 +12,7 @@ define double @fabs(double %f) {
define float @bitcast_fabs(float %x) {
; CHECK-LABEL: bitcast_fabs:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: stfs f1, -8(r1)
; CHECK-NEXT: nop
; CHECK-NEXT: nop
diff --git a/test/CodeGen/PowerPC/fma-aggr-FMF.ll b/test/CodeGen/PowerPC/fma-aggr-FMF.ll
index 8e97115bd1f..e861c9df37a 100644
--- a/test/CodeGen/PowerPC/fma-aggr-FMF.ll
+++ b/test/CodeGen/PowerPC/fma-aggr-FMF.ll
@@ -3,7 +3,7 @@
define float @can_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) {
; CHECK-LABEL: can_fma_with_fewer_uses:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xsmulsp 0, 1, 2
; CHECK-NEXT: fmr 1, 0
; CHECK-NEXT: xsmaddasp 1, 3, 4
@@ -21,7 +21,7 @@ define float @can_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4
; around beside the fma.
define float @no_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) {
; CHECK-LABEL: no_fma_with_fewer_uses:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xsmulsp 0, 3, 4
; CHECK-NEXT: xsmulsp 13, 1, 2
; CHECK-NEXT: xsmaddasp 0, 1, 2
diff --git a/test/CodeGen/PowerPC/fp64-to-int16.ll b/test/CodeGen/PowerPC/fp64-to-int16.ll
index 0c5274d9426..360a9866518 100644
--- a/test/CodeGen/PowerPC/fp64-to-int16.ll
+++ b/test/CodeGen/PowerPC/fp64-to-int16.ll
@@ -4,7 +4,7 @@ target triple = "powerpc64le--linux-gnu"
define i1 @Test(double %a) {
; CHECK-LABEL: Test:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws 1, 1
; CHECK-NEXT: mfvsrwz 3, 1
; CHECK-NEXT: xori 3, 3, 65534
diff --git a/test/CodeGen/PowerPC/hello-reloc.s b/test/CodeGen/PowerPC/hello-reloc.s
index bbf1e7cacbd..66bc9a84f5c 100644
--- a/test/CodeGen/PowerPC/hello-reloc.s
+++ b/test/CodeGen/PowerPC/hello-reloc.s
@@ -11,7 +11,7 @@
.globl _main
.align 4
_main: ; @main
-; BB#0: ; %entry
+; %bb.0: ; %entry
mflr r0
stw r31, -4(r1)
stw r0, 8(r1)
diff --git a/test/CodeGen/PowerPC/licm-remat.ll b/test/CodeGen/PowerPC/licm-remat.ll
index cbd1af62b84..393c56bcb86 100644
--- a/test/CodeGen/PowerPC/licm-remat.ll
+++ b/test/CodeGen/PowerPC/licm-remat.ll
@@ -18,7 +18,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture r
define linkonce_odr void @ZN6snappyDecompressor_(%"class.snappy::SnappyDecompressor"* %this, %"class.snappy::SnappyIOVecWriter"* %writer) {
; CHECK-LABEL: ZN6snappyDecompressor_:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: addis 3, 2, _ZN6snappy8internalL8wordmaskE@toc@ha
; CHECK-DAG: addi 25, 3, _ZN6snappy8internalL8wordmaskE@toc@l
; CHECK-DAG: addis 4, 2, _ZN6snappy8internalL10char_tableE@toc@ha
diff --git a/test/CodeGen/PowerPC/licm-tocReg.ll b/test/CodeGen/PowerPC/licm-tocReg.ll
index 824d554991a..efbec9091a5 100644
--- a/test/CodeGen/PowerPC/licm-tocReg.ll
+++ b/test/CodeGen/PowerPC/licm-tocReg.ll
@@ -64,7 +64,7 @@
define signext i32 @test(i32 (i32)* nocapture %FP) local_unnamed_addr #0 {
; CHECK-LABEL: test:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis 6, 2, .LC0@toc@ha
; CHECK-NEXT: addis 4, 2, .LC1@toc@ha
; CHECK-NEXT: ld 5, .LC1@toc@l(4)
diff --git a/test/CodeGen/PowerPC/logic-ops-on-compares.ll b/test/CodeGen/PowerPC/logic-ops-on-compares.ll
index e448afd03ea..b1b26f0ab76 100644
--- a/test/CodeGen/PowerPC/logic-ops-on-compares.ll
+++ b/test/CodeGen/PowerPC/logic-ops-on-compares.ll
@@ -43,11 +43,11 @@ return: ; preds = %if.end, %if.then
define void @neg_truncate_i32_eq(i32 *%ptr) {
; CHECK-LABEL: neg_truncate_i32_eq:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 0(r3)
; CHECK-NEXT: rldicl. r3, r3, 0, 63
; CHECK-NEXT: bclr 12, eq, 0
-; CHECK-NEXT: # BB#1: # %if.end29.thread136
+; CHECK-NEXT: # %bb.1: # %if.end29.thread136
entry:
%0 = load i32, i32* %ptr, align 4
%rem17127 = and i32 %0, 1
@@ -101,11 +101,11 @@ return: ; preds = %if.end, %if.then
define void @neg_truncate_i64_eq(i64 *%ptr) {
; CHECK-LABEL: neg_truncate_i64_eq:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: rldicl. r3, r3, 0, 63
; CHECK-NEXT: bclr 12, eq, 0
-; CHECK-NEXT: # BB#1: # %if.end29.thread136
+; CHECK-NEXT: # %bb.1: # %if.end29.thread136
entry:
%0 = load i64, i64* %ptr, align 4
%rem17127 = and i64 %0, 1
@@ -161,11 +161,11 @@ return: ; preds = %if.end, %if.then
define void @neg_truncate_i64_ne(i64 *%ptr) {
; CHECK-LABEL: neg_truncate_i64_ne:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: andi. r3, r3, 1
; CHECK-NEXT: bclr 12, gt, 0
-; CHECK-NEXT: # BB#1: # %if.end29.thread136
+; CHECK-NEXT: # %bb.1: # %if.end29.thread136
entry:
%0 = load i64, i64* %ptr, align 4
%rem17127 = and i64 %0, 1
diff --git a/test/CodeGen/PowerPC/machine-combiner.ll b/test/CodeGen/PowerPC/machine-combiner.ll
index e026017710e..c7337e3637e 100644
--- a/test/CodeGen/PowerPC/machine-combiner.ll
+++ b/test/CodeGen/PowerPC/machine-combiner.ll
@@ -8,7 +8,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
; CHECK-LABEL: reassociate_adds1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: fadds [[REG0:[0-9]+]], 1, 2
; CHECK: fadds [[REG1:[0-9]+]], 3, 4
; CHECK: fadds 1, [[REG0]], [[REG1]]
@@ -22,7 +22,7 @@ define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
; CHECK-LABEL: reassociate_adds2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: fadds [[REG0:[0-9]+]], 1, 2
; CHECK: fadds [[REG1:[0-9]+]], 3, 4
; CHECK: fadds 1, [[REG0]], [[REG1]]
@@ -36,7 +36,7 @@ define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
; CHECK-LABEL: reassociate_adds3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: fadds [[REG0:[0-9]+]], 1, 2
; CHECK: fadds [[REG1:[0-9]+]], 3, 4
; CHECK: fadds 1, [[REG0]], [[REG1]]
@@ -50,7 +50,7 @@ define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
; CHECK-LABEL: reassociate_adds4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: fadds [[REG0:[0-9]+]], 1, 2
; CHECK: fadds [[REG1:[0-9]+]], 3, 4
; CHECK: fadds 1, [[REG0]], [[REG1]]
@@ -67,7 +67,7 @@ define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
; CHECK-LABEL: reassociate_adds5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: fadds [[REG12:[0-9]+]], 5, 6
; CHECK: fadds [[REG0:[0-9]+]], 1, 2
; CHECK: fadds [[REG11:[0-9]+]], 3, 4
@@ -91,7 +91,7 @@ define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, floa
define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; CHECK-LABEL: vector_reassociate_adds1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2
; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4
; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]]
@@ -108,7 +108,7 @@ define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, <
define <4 x float> @vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; CHECK-LABEL: vector_reassociate_adds2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2
; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4
; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]]
@@ -125,7 +125,7 @@ define <4 x float> @vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, <
define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; CHECK-LABEL: vector_reassociate_adds3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2
; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4
; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]]
@@ -142,7 +142,7 @@ define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, <
define <4 x float> @vector_reassociate_adds4(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; CHECK-LABEL: vector_reassociate_adds4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2
; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4
; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]]
diff --git a/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll b/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
index e91b9acaee0..3bfc0de1b87 100644
--- a/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
+++ b/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
@@ -17,7 +17,7 @@ declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_add
; Check 4 bytes - requires 1 load for each param.
define signext i32 @zeroEqualityTest02(i8* %x, i8* %y) {
; CHECK-LABEL: zeroEqualityTest02:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: lwz 4, 0(4)
; CHECK-NEXT: xor 3, 3, 4
@@ -34,12 +34,12 @@ define signext i32 @zeroEqualityTest02(i8* %x, i8* %y) {
; Check 16 bytes - requires 2 loads for each param (or use vectors?).
define signext i32 @zeroEqualityTest01(i8* %x, i8* %y) {
; CHECK-LABEL: zeroEqualityTest01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: ld 5, 0(3)
; CHECK-NEXT: ld 6, 0(4)
; CHECK-NEXT: cmpld 5, 6
; CHECK-NEXT: bne 0, .LBB1_2
-; CHECK-NEXT: # BB#1: # %loadbb1
+; CHECK-NEXT: # %bb.1: # %loadbb1
; CHECK-NEXT: ld 3, 8(3)
; CHECK-NEXT: ld 4, 8(4)
; CHECK-NEXT: cmpld 3, 4
@@ -59,17 +59,17 @@ define signext i32 @zeroEqualityTest01(i8* %x, i8* %y) {
; Check 7 bytes - requires 3 loads for each param.
define signext i32 @zeroEqualityTest03(i8* %x, i8* %y) {
; CHECK-LABEL: zeroEqualityTest03:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lwz 5, 0(3)
; CHECK-NEXT: lwz 6, 0(4)
; CHECK-NEXT: cmplw 5, 6
; CHECK-NEXT: bne 0, .LBB2_3
-; CHECK-NEXT: # BB#1: # %loadbb1
+; CHECK-NEXT: # %bb.1: # %loadbb1
; CHECK-NEXT: lhz 5, 4(3)
; CHECK-NEXT: lhz 6, 4(4)
; CHECK-NEXT: cmplw 5, 6
; CHECK-NEXT: bne 0, .LBB2_3
-; CHECK-NEXT: # BB#2: # %loadbb2
+; CHECK-NEXT: # %bb.2: # %loadbb2
; CHECK-NEXT: lbz 3, 6(3)
; CHECK-NEXT: lbz 4, 6(4)
; CHECK-NEXT: cmplw 3, 4
@@ -89,7 +89,7 @@ define signext i32 @zeroEqualityTest03(i8* %x, i8* %y) {
; Validate with > 0
define signext i32 @zeroEqualityTest04() {
; CHECK-LABEL: zeroEqualityTest04:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addis 3, 2, .LzeroEqualityTest02.buffer1@toc@ha
; CHECK-NEXT: addis 4, 2, .LzeroEqualityTest02.buffer2@toc@ha
; CHECK-NEXT: addi 6, 3, .LzeroEqualityTest02.buffer1@toc@l
@@ -98,7 +98,7 @@ define signext i32 @zeroEqualityTest04() {
; CHECK-NEXT: ldbrx 4, 0, 5
; CHECK-NEXT: cmpld 3, 4
; CHECK-NEXT: bne 0, .LBB3_2
-; CHECK-NEXT: # BB#1: # %loadbb1
+; CHECK-NEXT: # %bb.1: # %loadbb1
; CHECK-NEXT: li 4, 8
; CHECK-NEXT: ldbrx 3, 6, 4
; CHECK-NEXT: ldbrx 4, 5, 4
@@ -125,7 +125,7 @@ define signext i32 @zeroEqualityTest04() {
; Validate with < 0
define signext i32 @zeroEqualityTest05() {
; CHECK-LABEL: zeroEqualityTest05:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addis 3, 2, .LzeroEqualityTest03.buffer1@toc@ha
; CHECK-NEXT: addis 4, 2, .LzeroEqualityTest03.buffer2@toc@ha
; CHECK-NEXT: addi 6, 3, .LzeroEqualityTest03.buffer1@toc@l
@@ -134,7 +134,7 @@ define signext i32 @zeroEqualityTest05() {
; CHECK-NEXT: ldbrx 4, 0, 5
; CHECK-NEXT: cmpld 3, 4
; CHECK-NEXT: bne 0, .LBB4_2
-; CHECK-NEXT: # BB#1: # %loadbb1
+; CHECK-NEXT: # %bb.1: # %loadbb1
; CHECK-NEXT: li 4, 8
; CHECK-NEXT: ldbrx 3, 6, 4
; CHECK-NEXT: ldbrx 4, 5, 4
@@ -160,7 +160,7 @@ define signext i32 @zeroEqualityTest05() {
; Validate with memcmp()?:
define signext i32 @equalityFoldTwoConstants() {
; CHECK-LABEL: equalityFoldTwoConstants:
-; CHECK: # BB#0: # %endblock
+; CHECK: # %bb.0: # %endblock
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer2 to i8*), i64 16)
@@ -171,13 +171,13 @@ define signext i32 @equalityFoldTwoConstants() {
define signext i32 @equalityFoldOneConstant(i8* %X) {
; CHECK-LABEL: equalityFoldOneConstant:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: li 5, 1
; CHECK-NEXT: ld 4, 0(3)
; CHECK-NEXT: sldi 5, 5, 32
; CHECK-NEXT: cmpld 4, 5
; CHECK-NEXT: bne 0, .LBB6_2
-; CHECK-NEXT: # BB#1: # %loadbb1
+; CHECK-NEXT: # %bb.1: # %loadbb1
; CHECK-NEXT: li 4, 3
; CHECK-NEXT: ld 3, 8(3)
; CHECK-NEXT: sldi 4, 4, 32
@@ -199,7 +199,7 @@ define signext i32 @equalityFoldOneConstant(i8* %X) {
define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) {
; CHECK-LABEL: length2_eq_nobuiltin_attr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mflr 0
; CHECK-NEXT: std 0, 16(1)
; CHECK-NEXT: stdu 1, -32(1)
diff --git a/test/CodeGen/PowerPC/memcmp.ll b/test/CodeGen/PowerPC/memcmp.ll
index 392be4d712c..4aa5b400dd7 100644
--- a/test/CodeGen/PowerPC/memcmp.ll
+++ b/test/CodeGen/PowerPC/memcmp.ll
@@ -3,7 +3,7 @@
define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
; CHECK-LABEL: memcmp8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: ldbrx 3, 0, 3
; CHECK-NEXT: ldbrx 4, 0, 4
; CHECK-NEXT: subfc 5, 3, 4
@@ -23,7 +23,7 @@ define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture rea
define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
; CHECK-LABEL: memcmp4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lwbrx 3, 0, 3
; CHECK-NEXT: lwbrx 4, 0, 4
; CHECK-NEXT: sub 5, 4, 3
@@ -41,7 +41,7 @@ define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture rea
define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
; CHECK-LABEL: memcmp2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lhbrx 3, 0, 3
; CHECK-NEXT: lhbrx 4, 0, 4
; CHECK-NEXT: subf 3, 4, 3
@@ -55,7 +55,7 @@ define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture rea
define signext i32 @memcmp1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
; CHECK-LABEL: memcmp1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lbz 3, 0(3)
; CHECK-NEXT: lbz 4, 0(4)
; CHECK-NEXT: subf 3, 4, 3
diff --git a/test/CodeGen/PowerPC/negate-i1.ll b/test/CodeGen/PowerPC/negate-i1.ll
index c6a7867fe9d..a56048d67a8 100644
--- a/test/CodeGen/PowerPC/negate-i1.ll
+++ b/test/CodeGen/PowerPC/negate-i1.ll
@@ -4,7 +4,7 @@
define i32 @select_i32_neg1_or_0(i1 %a) {
; CHECK-LABEL: select_i32_neg1_or_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: clrldi 3, 3, 63
; CHECK-NEXT: neg 3, 3
; CHECK-NEXT: blr
@@ -15,7 +15,7 @@ define i32 @select_i32_neg1_or_0(i1 %a) {
define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
; CHECK-LABEL: select_i32_neg1_or_0_zeroext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: neg 3, 3
; CHECK-NEXT: blr
;
diff --git a/test/CodeGen/PowerPC/ppc32-nest.ll b/test/CodeGen/PowerPC/ppc32-nest.ll
index 221e8be2951..b933edcf616 100644
--- a/test/CodeGen/PowerPC/ppc32-nest.ll
+++ b/test/CodeGen/PowerPC/ppc32-nest.ll
@@ -7,7 +7,7 @@ target triple = "powerpc-unknown-linux-gnu"
define i8* @nest_receiver(i8* nest %arg) nounwind {
; CHECK-LABEL: nest_receiver:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mr 3, 11
; CHECK-NEXT: blr
diff --git a/test/CodeGen/PowerPC/ppc64-nest.ll b/test/CodeGen/PowerPC/ppc64-nest.ll
index 14872632e81..cd2366cfa45 100644
--- a/test/CodeGen/PowerPC/ppc64-nest.ll
+++ b/test/CodeGen/PowerPC/ppc64-nest.ll
@@ -7,7 +7,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define i8* @nest_receiver(i8* nest %arg) nounwind {
; CHECK-LABEL: nest_receiver:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mr 3, 11
; CHECK-NEXT: blr
diff --git a/test/CodeGen/PowerPC/pr32140.ll b/test/CodeGen/PowerPC/pr32140.ll
index 827a90404e4..3feb9bd9c9e 100644
--- a/test/CodeGen/PowerPC/pr32140.ll
+++ b/test/CodeGen/PowerPC/pr32140.ll
@@ -9,7 +9,7 @@
define void @bswapStorei64Toi32() {
; CHECK-LABEL: bswapStorei64Toi32:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: lwa 3, 0(3)
; CHECK-NEXT: rldicl 3, 3, 32, 32
; CHECK-NEXT: stwbrx 3, 0, 4
@@ -25,7 +25,7 @@ entry:
define void @bswapStorei32Toi16() {
; CHECK-LABEL: bswapStorei32Toi16:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: lha 3, 0(3)
; CHECK-NEXT: srwi 3, 3, 16
; CHECK-NEXT: sthbrx 3, 0, 4
@@ -41,7 +41,7 @@ entry:
define void @bswapStorei64Toi16() {
; CHECK-LABEL: bswapStorei64Toi16:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: lha 3, 0(3)
; CHECK-NEXT: rldicl 3, 3, 16, 48
; CHECK-NEXT: sthbrx 3, 0, 4
diff --git a/test/CodeGen/PowerPC/pr33093.ll b/test/CodeGen/PowerPC/pr33093.ll
index fc28bcfd0ca..af0350e17fd 100644
--- a/test/CodeGen/PowerPC/pr33093.ll
+++ b/test/CodeGen/PowerPC/pr33093.ll
@@ -4,7 +4,7 @@
define zeroext i32 @ReverseBits(i32 zeroext %n) {
; CHECK-LABEL: ReverseBits:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lis 4, -21846
; CHECK-NEXT: lis 5, 21845
; CHECK-NEXT: slwi 6, 3, 1
@@ -68,7 +68,7 @@ entry:
define i64 @ReverseBits64(i64 %n) {
; CHECK-LABEL: ReverseBits64:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lis 4, -21846
; CHECK-NEXT: lis 5, 21845
; CHECK-NEXT: lis 6, -13108
diff --git a/test/CodeGen/PowerPC/select-addrRegRegOnly.ll b/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
index 6be31eaea74..46b23ff04f2 100644
--- a/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
+++ b/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
@@ -4,7 +4,7 @@
; Function Attrs: norecurse nounwind readonly
define float @testSingleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
; CHECK-LABEL: testSingleAccess:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi 3, 3, 8
; CHECK-NEXT: lfiwax 0, 0, 3
; CHECK-NEXT: xscvsxdsp 1, 0
@@ -19,7 +19,7 @@ entry:
; Function Attrs: norecurse nounwind readonly
define float @testMultipleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
; CHECK-LABEL: testMultipleAccess:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 4, 8(3)
; CHECK-NEXT: lwz 12, 12(3)
; CHECK-NEXT: add 3, 12, 4
diff --git a/test/CodeGen/PowerPC/select_const.ll b/test/CodeGen/PowerPC/select_const.ll
index fd864805abd..178d9187e3b 100644
--- a/test/CodeGen/PowerPC/select_const.ll
+++ b/test/CodeGen/PowerPC/select_const.ll
@@ -9,7 +9,7 @@
define i32 @select_0_or_1(i1 %cond) {
; ALL-LABEL: select_0_or_1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: not 3, 3
; ALL-NEXT: clrldi 3, 3, 63
; ALL-NEXT: blr
@@ -19,7 +19,7 @@ define i32 @select_0_or_1(i1 %cond) {
define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
; ALL-LABEL: select_0_or_1_zeroext:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: xori 3, 3, 1
; ALL-NEXT: blr
%sel = select i1 %cond, i32 0, i32 1
@@ -28,7 +28,7 @@ define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
define i32 @select_0_or_1_signext(i1 signext %cond) {
; ALL-LABEL: select_0_or_1_signext:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: not 3, 3
; ALL-NEXT: clrldi 3, 3, 63
; ALL-NEXT: blr
@@ -40,7 +40,7 @@ define i32 @select_0_or_1_signext(i1 signext %cond) {
define i32 @select_1_or_0(i1 %cond) {
; ALL-LABEL: select_1_or_0:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: clrldi 3, 3, 63
; ALL-NEXT: blr
%sel = select i1 %cond, i32 1, i32 0
@@ -49,7 +49,7 @@ define i32 @select_1_or_0(i1 %cond) {
define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
; ALL-LABEL: select_1_or_0_zeroext:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: blr
%sel = select i1 %cond, i32 1, i32 0
ret i32 %sel
@@ -57,7 +57,7 @@ define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
define i32 @select_1_or_0_signext(i1 signext %cond) {
; ALL-LABEL: select_1_or_0_signext:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: clrldi 3, 3, 63
; ALL-NEXT: blr
%sel = select i1 %cond, i32 1, i32 0
@@ -68,7 +68,7 @@ define i32 @select_1_or_0_signext(i1 signext %cond) {
define i32 @select_0_or_neg1(i1 %cond) {
; ISEL-LABEL: select_0_or_neg1:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -77,7 +77,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_0_or_neg1:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
@@ -93,7 +93,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; ISEL-LABEL: select_0_or_neg1_zeroext:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -102,7 +102,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_0_or_neg1_zeroext:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
@@ -118,7 +118,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
define i32 @select_0_or_neg1_signext(i1 signext %cond) {
; ISEL-LABEL: select_0_or_neg1_signext:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -127,7 +127,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_0_or_neg1_signext:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
@@ -145,7 +145,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) {
define i32 @select_neg1_or_0(i1 %cond) {
; ISEL-LABEL: select_neg1_or_0:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -154,13 +154,13 @@ define i32 @select_neg1_or_0(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_neg1_or_0:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
; NO_ISEL-NEXT: ori 3, 3, 65535
; NO_ISEL-NEXT: bclr 12, 1, 0
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i32 -1, i32 0
@@ -169,7 +169,7 @@ define i32 @select_neg1_or_0(i1 %cond) {
define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
; ISEL-LABEL: select_neg1_or_0_zeroext:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -178,13 +178,13 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_neg1_or_0_zeroext:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
; NO_ISEL-NEXT: ori 3, 3, 65535
; NO_ISEL-NEXT: bclr 12, 1, 0
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i32 -1, i32 0
@@ -193,7 +193,7 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
define i32 @select_neg1_or_0_signext(i1 signext %cond) {
; ISEL-LABEL: select_neg1_or_0_signext:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -202,13 +202,13 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_neg1_or_0_signext:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
; NO_ISEL-NEXT: ori 3, 3, 65535
; NO_ISEL-NEXT: bclr 12, 1, 0
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i32 -1, i32 0
@@ -219,7 +219,7 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) {
define i32 @select_Cplus1_C(i1 %cond) {
; ALL-LABEL: select_Cplus1_C:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: clrldi 3, 3, 63
; ALL-NEXT: addi 3, 3, 41
; ALL-NEXT: blr
@@ -229,7 +229,7 @@ define i32 @select_Cplus1_C(i1 %cond) {
define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
; ALL-LABEL: select_Cplus1_C_zeroext:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: addi 3, 3, 41
; ALL-NEXT: blr
%sel = select i1 %cond, i32 42, i32 41
@@ -238,7 +238,7 @@ define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
define i32 @select_Cplus1_C_signext(i1 signext %cond) {
; ALL-LABEL: select_Cplus1_C_signext:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: subfic 3, 3, 41
; ALL-NEXT: blr
%sel = select i1 %cond, i32 42, i32 41
@@ -249,7 +249,7 @@ define i32 @select_Cplus1_C_signext(i1 signext %cond) {
define i32 @select_C_Cplus1(i1 %cond) {
; ALL-LABEL: select_C_Cplus1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: clrldi 3, 3, 63
; ALL-NEXT: subfic 3, 3, 42
; ALL-NEXT: blr
@@ -259,7 +259,7 @@ define i32 @select_C_Cplus1(i1 %cond) {
define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
; ALL-LABEL: select_C_Cplus1_zeroext:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: subfic 3, 3, 42
; ALL-NEXT: blr
%sel = select i1 %cond, i32 41, i32 42
@@ -268,7 +268,7 @@ define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
define i32 @select_C_Cplus1_signext(i1 signext %cond) {
; ALL-LABEL: select_C_Cplus1_signext:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: addi 3, 3, 42
; ALL-NEXT: blr
%sel = select i1 %cond, i32 41, i32 42
@@ -280,7 +280,7 @@ define i32 @select_C_Cplus1_signext(i1 signext %cond) {
define i32 @select_C1_C2(i1 %cond) {
; ISEL-LABEL: select_C1_C2:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: li 4, 421
; ISEL-NEXT: li 3, 42
@@ -288,7 +288,7 @@ define i32 @select_C1_C2(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_C1_C2:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: li 4, 421
; NO_ISEL-NEXT: li 3, 42
@@ -303,7 +303,7 @@ define i32 @select_C1_C2(i1 %cond) {
define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
; ISEL-LABEL: select_C1_C2_zeroext:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: li 4, 421
; ISEL-NEXT: li 3, 42
@@ -311,7 +311,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_C1_C2_zeroext:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: li 4, 421
; NO_ISEL-NEXT: li 3, 42
@@ -326,7 +326,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
define i32 @select_C1_C2_signext(i1 signext %cond) {
; ISEL-LABEL: select_C1_C2_signext:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: li 4, 421
; ISEL-NEXT: li 3, 42
@@ -334,7 +334,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: select_C1_C2_signext:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: li 4, 421
; NO_ISEL-NEXT: li 3, 42
@@ -351,7 +351,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) {
define i8 @sel_constants_add_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_add_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: li 4, 1
; ISEL-NEXT: li 3, 28
@@ -359,7 +359,7 @@ define i8 @sel_constants_add_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_add_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: li 4, 1
; NO_ISEL-NEXT: li 3, 28
@@ -375,7 +375,7 @@ define i8 @sel_constants_add_constant(i1 %cond) {
define i8 @sel_constants_sub_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_sub_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -385,14 +385,14 @@ define i8 @sel_constants_sub_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_sub_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
; NO_ISEL-NEXT: li 4, 18
; NO_ISEL-NEXT: ori 3, 3, 65527
; NO_ISEL-NEXT: bclr 12, 1, 0
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
@@ -402,7 +402,7 @@ define i8 @sel_constants_sub_constant(i1 %cond) {
define i8 @sel_constants_mul_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_mul_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: lis 4, 16383
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: ori 3, 4, 65531
@@ -412,14 +412,14 @@ define i8 @sel_constants_mul_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_mul_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: lis 4, 16383
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: ori 3, 4, 65531
; NO_ISEL-NEXT: li 4, 115
; NO_ISEL-NEXT: sldi 3, 3, 2
; NO_ISEL-NEXT: bclr 12, 1, 0
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
@@ -429,14 +429,14 @@ define i8 @sel_constants_mul_constant(i1 %cond) {
define i8 @sel_constants_sdiv_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_sdiv_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: li 3, 4
; ISEL-NEXT: isel 3, 0, 3, 1
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_sdiv_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: li 3, 4
; NO_ISEL-NEXT: bc 12, 1, .LBB24_1
@@ -451,7 +451,7 @@ define i8 @sel_constants_sdiv_constant(i1 %cond) {
define i8 @sel_constants_udiv_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_udiv_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: li 4, 50
; ISEL-NEXT: li 3, 4
@@ -459,7 +459,7 @@ define i8 @sel_constants_udiv_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_udiv_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: li 4, 50
; NO_ISEL-NEXT: li 3, 4
@@ -475,7 +475,7 @@ define i8 @sel_constants_udiv_constant(i1 %cond) {
define i8 @sel_constants_srem_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_srem_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: lis 4, 16383
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: ori 3, 4, 65535
@@ -485,14 +485,14 @@ define i8 @sel_constants_srem_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_srem_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: lis 4, 16383
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: ori 3, 4, 65535
; NO_ISEL-NEXT: li 4, 3
; NO_ISEL-NEXT: sldi 3, 3, 2
; NO_ISEL-NEXT: bclr 12, 1, 0
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
@@ -502,7 +502,7 @@ define i8 @sel_constants_srem_constant(i1 %cond) {
define i8 @sel_constants_urem_constant(i1 %cond) {
; ALL-LABEL: sel_constants_urem_constant:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: rlwinm 3, 3, 0, 31, 31
; ALL-NEXT: subfic 3, 3, 3
; ALL-NEXT: blr
@@ -513,7 +513,7 @@ define i8 @sel_constants_urem_constant(i1 %cond) {
define i8 @sel_constants_and_constant(i1 %cond) {
; ALL-LABEL: sel_constants_and_constant:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: rlwinm 3, 3, 0, 31, 31
; ALL-NEXT: subfic 3, 3, 5
; ALL-NEXT: blr
@@ -524,7 +524,7 @@ define i8 @sel_constants_and_constant(i1 %cond) {
define i8 @sel_constants_or_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_or_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -534,14 +534,14 @@ define i8 @sel_constants_or_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_or_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
; NO_ISEL-NEXT: li 4, 23
; NO_ISEL-NEXT: ori 3, 3, 65533
; NO_ISEL-NEXT: bclr 12, 1, 0
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
@@ -551,7 +551,7 @@ define i8 @sel_constants_or_constant(i1 %cond) {
define i8 @sel_constants_xor_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_xor_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: li 4, 0
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: oris 3, 4, 65535
@@ -561,14 +561,14 @@ define i8 @sel_constants_xor_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_xor_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: li 4, 0
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: oris 3, 4, 65535
; NO_ISEL-NEXT: li 4, 18
; NO_ISEL-NEXT: ori 3, 3, 65529
; NO_ISEL-NEXT: bclr 12, 1, 0
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: blr
%sel = select i1 %cond, i8 -4, i8 23
@@ -578,7 +578,7 @@ define i8 @sel_constants_xor_constant(i1 %cond) {
define i8 @sel_constants_shl_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_shl_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: lis 5, 511
; ISEL-NEXT: lis 4, 2047
; ISEL-NEXT: andi. 3, 3, 1
@@ -590,7 +590,7 @@ define i8 @sel_constants_shl_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_shl_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: lis 5, 511
; NO_ISEL-NEXT: lis 4, 2047
; NO_ISEL-NEXT: andi. 3, 3, 1
@@ -610,7 +610,7 @@ define i8 @sel_constants_shl_constant(i1 %cond) {
define i8 @sel_constants_lshr_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_lshr_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: li 4, 7
; ISEL-NEXT: li 3, 0
@@ -618,7 +618,7 @@ define i8 @sel_constants_lshr_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_lshr_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: li 4, 7
; NO_ISEL-NEXT: li 3, 0
@@ -634,7 +634,7 @@ define i8 @sel_constants_lshr_constant(i1 %cond) {
define i8 @sel_constants_ashr_constant(i1 %cond) {
; ALL-LABEL: sel_constants_ashr_constant:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: clrldi 3, 3, 63
; ALL-NEXT: neg 3, 3
; ALL-NEXT: blr
@@ -645,7 +645,7 @@ define i8 @sel_constants_ashr_constant(i1 %cond) {
define double @sel_constants_fadd_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_fadd_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha
; ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha
@@ -656,14 +656,14 @@ define double @sel_constants_fadd_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_fadd_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha
; NO_ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha
; NO_ISEL-NEXT: addi 4, 4, .LCPI34_0@toc@l
; NO_ISEL-NEXT: addi 3, 3, .LCPI34_1@toc@l
; NO_ISEL-NEXT: bc 12, 1, .LBB34_2
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: b .LBB34_2
; NO_ISEL-NEXT: .LBB34_2:
@@ -676,7 +676,7 @@ define double @sel_constants_fadd_constant(i1 %cond) {
define double @sel_constants_fsub_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_fsub_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha
; ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha
@@ -687,14 +687,14 @@ define double @sel_constants_fsub_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_fsub_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha
; NO_ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha
; NO_ISEL-NEXT: addi 4, 4, .LCPI35_0@toc@l
; NO_ISEL-NEXT: addi 3, 3, .LCPI35_1@toc@l
; NO_ISEL-NEXT: bc 12, 1, .LBB35_2
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: b .LBB35_2
; NO_ISEL-NEXT: .LBB35_2:
@@ -707,7 +707,7 @@ define double @sel_constants_fsub_constant(i1 %cond) {
define double @sel_constants_fmul_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_fmul_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha
; ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha
@@ -718,14 +718,14 @@ define double @sel_constants_fmul_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_fmul_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha
; NO_ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha
; NO_ISEL-NEXT: addi 4, 4, .LCPI36_0@toc@l
; NO_ISEL-NEXT: addi 3, 3, .LCPI36_1@toc@l
; NO_ISEL-NEXT: bc 12, 1, .LBB36_2
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: b .LBB36_2
; NO_ISEL-NEXT: .LBB36_2:
@@ -738,7 +738,7 @@ define double @sel_constants_fmul_constant(i1 %cond) {
define double @sel_constants_fdiv_constant(i1 %cond) {
; ISEL-LABEL: sel_constants_fdiv_constant:
-; ISEL: # BB#0:
+; ISEL: # %bb.0:
; ISEL-NEXT: andi. 3, 3, 1
; ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha
; ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha
@@ -749,14 +749,14 @@ define double @sel_constants_fdiv_constant(i1 %cond) {
; ISEL-NEXT: blr
;
; NO_ISEL-LABEL: sel_constants_fdiv_constant:
-; NO_ISEL: # BB#0:
+; NO_ISEL: # %bb.0:
; NO_ISEL-NEXT: andi. 3, 3, 1
; NO_ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha
; NO_ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha
; NO_ISEL-NEXT: addi 4, 4, .LCPI37_0@toc@l
; NO_ISEL-NEXT: addi 3, 3, .LCPI37_1@toc@l
; NO_ISEL-NEXT: bc 12, 1, .LBB37_2
-; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: # %bb.1:
; NO_ISEL-NEXT: ori 3, 4, 0
; NO_ISEL-NEXT: b .LBB37_2
; NO_ISEL-NEXT: .LBB37_2:
@@ -769,10 +769,10 @@ define double @sel_constants_fdiv_constant(i1 %cond) {
define double @sel_constants_frem_constant(i1 %cond) {
; ALL-LABEL: sel_constants_frem_constant:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: andi. 3, 3, 1
; ALL-NEXT: bc 12, 1, .LBB38_2
-; ALL-NEXT: # BB#1:
+; ALL-NEXT: # %bb.1:
; ALL-NEXT: addis 3, 2, .LCPI38_0@toc@ha
; ALL-NEXT: addi 3, 3, .LCPI38_0@toc@l
; ALL-NEXT: lxsdx 1, 0, 3
diff --git a/test/CodeGen/PowerPC/setcc-logic.ll b/test/CodeGen/PowerPC/setcc-logic.ll
index 108a6bb2909..b17869f312c 100644
--- a/test/CodeGen/PowerPC/setcc-logic.ll
+++ b/test/CodeGen/PowerPC/setcc-logic.ll
@@ -3,7 +3,7 @@
define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) {
; CHECK-LABEL: all_bits_clear:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: or 3, 3, 4
; CHECK-NEXT: cntlzw 3, 3
; CHECK-NEXT: srwi 3, 3, 5
@@ -16,7 +16,7 @@ define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) {
define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) {
; CHECK-LABEL: all_sign_bits_clear:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: or 3, 3, 4
; CHECK-NEXT: nor 3, 3, 3
; CHECK-NEXT: srwi 3, 3, 31
@@ -29,7 +29,7 @@ define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) {
define zeroext i1 @all_bits_set(i32 %P, i32 %Q) {
; CHECK-LABEL: all_bits_set:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: li 5, -1
; CHECK-NEXT: and 3, 3, 4
; CHECK-NEXT: xor 3, 3, 5
@@ -44,7 +44,7 @@ define zeroext i1 @all_bits_set(i32 %P, i32 %Q) {
define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) {
; CHECK-LABEL: all_sign_bits_set:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: and 3, 3, 4
; CHECK-NEXT: srwi 3, 3, 31
; CHECK-NEXT: blr
@@ -56,7 +56,7 @@ define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) {
define zeroext i1 @any_bits_set(i32 %P, i32 %Q) {
; CHECK-LABEL: any_bits_set:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: or 3, 3, 4
; CHECK-NEXT: cntlzw 3, 3
; CHECK-NEXT: srwi 3, 3, 5
@@ -70,7 +70,7 @@ define zeroext i1 @any_bits_set(i32 %P, i32 %Q) {
define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) {
; CHECK-LABEL: any_sign_bits_set:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: or 3, 3, 4
; CHECK-NEXT: srwi 3, 3, 31
; CHECK-NEXT: blr
@@ -82,7 +82,7 @@ define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) {
define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) {
; CHECK-LABEL: any_bits_clear:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: li 5, -1
; CHECK-NEXT: and 3, 3, 4
; CHECK-NEXT: xor 3, 3, 5
@@ -98,7 +98,7 @@ define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) {
define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) {
; CHECK-LABEL: any_sign_bits_clear:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: and 3, 3, 4
; CHECK-NEXT: nor 3, 3, 3
; CHECK-NEXT: srwi 3, 3, 31
@@ -112,10 +112,10 @@ define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) {
; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
define i32 @all_bits_clear_branch(i32* %P, i32* %Q) {
; CHECK-LABEL: all_bits_clear_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: or. 3, 3, 4
; CHECK-NEXT: bne 0, .LBB8_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: li 3, 4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB8_2: # %return
@@ -136,11 +136,11 @@ return:
define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) {
; CHECK-LABEL: all_sign_bits_clear_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: or 3, 3, 4
; CHECK-NEXT: cmpwi 0, 3, 0
; CHECK-NEXT: blt 0, .LBB9_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: li 3, 4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB9_2: # %return
@@ -161,11 +161,11 @@ return:
define i32 @all_bits_set_branch(i32 %P, i32 %Q) {
; CHECK-LABEL: all_bits_set_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: and 3, 3, 4
; CHECK-NEXT: cmpwi 0, 3, -1
; CHECK-NEXT: bne 0, .LBB10_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: li 3, 4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB10_2: # %return
@@ -186,11 +186,11 @@ return:
define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) {
; CHECK-LABEL: all_sign_bits_set_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: and 3, 3, 4
; CHECK-NEXT: cmpwi 0, 3, -1
; CHECK-NEXT: bgt 0, .LBB11_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: li 3, 4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB11_2: # %return
@@ -212,10 +212,10 @@ return:
; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
define i32 @any_bits_set_branch(i32* %P, i32* %Q) {
; CHECK-LABEL: any_bits_set_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: or. 3, 3, 4
; CHECK-NEXT: beq 0, .LBB12_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: li 3, 4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB12_2: # %return
@@ -236,11 +236,11 @@ return:
define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) {
; CHECK-LABEL: any_sign_bits_set_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: or 3, 3, 4
; CHECK-NEXT: cmpwi 0, 3, -1
; CHECK-NEXT: bgt 0, .LBB13_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: li 3, 4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB13_2: # %return
@@ -261,11 +261,11 @@ return:
define i32 @any_bits_clear_branch(i32 %P, i32 %Q) {
; CHECK-LABEL: any_bits_clear_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: and 3, 3, 4
; CHECK-NEXT: cmpwi 0, 3, -1
; CHECK-NEXT: beq 0, .LBB14_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: li 3, 4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB14_2: # %return
@@ -286,11 +286,11 @@ return:
define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) {
; CHECK-LABEL: any_sign_bits_clear_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: and 3, 3, 4
; CHECK-NEXT: cmpwi 0, 3, 0
; CHECK-NEXT: blt 0, .LBB15_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: li 3, 4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB15_2: # %return
@@ -311,7 +311,7 @@ return:
define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
; CHECK-LABEL: all_bits_clear_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xxlxor 36, 36, 36
; CHECK-NEXT: xxlor 34, 34, 35
; CHECK-NEXT: vcmpequw 2, 2, 4
@@ -324,7 +324,7 @@ define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
; CHECK-LABEL: all_sign_bits_clear_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisb 4, -1
; CHECK-NEXT: xxlor 34, 34, 35
; CHECK-NEXT: vcmpgtsw 2, 2, 4
@@ -337,7 +337,7 @@ define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
; CHECK-LABEL: all_bits_set_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisb 4, -1
; CHECK-NEXT: xxland 34, 34, 35
; CHECK-NEXT: vcmpequw 2, 2, 4
@@ -350,7 +350,7 @@ define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
; CHECK-LABEL: all_sign_bits_set_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xxlxor 36, 36, 36
; CHECK-NEXT: xxland 34, 34, 35
; CHECK-NEXT: vcmpgtsw 2, 4, 2
@@ -363,7 +363,7 @@ define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
; CHECK-LABEL: any_bits_set_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xxlxor 36, 36, 36
; CHECK-NEXT: xxlor 34, 34, 35
; CHECK-NEXT: vcmpequw 2, 2, 4
@@ -377,7 +377,7 @@ define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
; CHECK-LABEL: any_sign_bits_set_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xxlxor 36, 36, 36
; CHECK-NEXT: xxlor 34, 34, 35
; CHECK-NEXT: vcmpgtsw 2, 4, 2
@@ -390,7 +390,7 @@ define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
; CHECK-LABEL: any_bits_clear_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisb 4, -1
; CHECK-NEXT: xxland 34, 34, 35
; CHECK-NEXT: vcmpequw 2, 2, 4
@@ -404,7 +404,7 @@ define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
; CHECK-LABEL: any_sign_bits_clear_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisb 4, -1
; CHECK-NEXT: xxland 34, 34, 35
; CHECK-NEXT: vcmpgtsw 2, 2, 4
@@ -417,7 +417,7 @@ define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) {
; CHECK-LABEL: ne_neg1_and_ne_zero:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: addi 3, 3, 1
; CHECK-NEXT: subfc 3, 3, 4
@@ -434,7 +434,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) {
define zeroext i1 @and_eq(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 zeroext %d) {
; CHECK-LABEL: and_eq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xor 5, 5, 6
; CHECK-NEXT: xor 3, 3, 4
; CHECK-NEXT: or 3, 3, 5
@@ -449,7 +449,7 @@ define zeroext i1 @and_eq(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 z
define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: or_ne:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xor 5, 5, 6
; CHECK-NEXT: xor 3, 3, 4
; CHECK-NEXT: or 3, 3, 5
@@ -467,7 +467,7 @@ define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) {
define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; CHECK-LABEL: and_eq_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: vcmpequw 19, 4, 5
; CHECK-NEXT: xxland 34, 34, 51
diff --git a/test/CodeGen/PowerPC/setcc-to-sub.ll b/test/CodeGen/PowerPC/setcc-to-sub.ll
index 752ebe0c9d8..a143d73c7c0 100644
--- a/test/CodeGen/PowerPC/setcc-to-sub.ll
+++ b/test/CodeGen/PowerPC/setcc-to-sub.ll
@@ -8,7 +8,7 @@
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test1(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: lwz 4, 0(4)
; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
@@ -30,7 +30,7 @@ entry:
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test2(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: lwz 4, 0(4)
; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
@@ -53,7 +53,7 @@ entry:
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test3(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
; CHECK-LABEL: test3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: lwz 4, 0(4)
; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
@@ -75,7 +75,7 @@ entry:
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test4(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
; CHECK-LABEL: test4:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: lwz 4, 0(4)
; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
diff --git a/test/CodeGen/PowerPC/shift_mask.ll b/test/CodeGen/PowerPC/shift_mask.ll
index e9ca9b0bdf0..59382c61531 100644
--- a/test/CodeGen/PowerPC/shift_mask.ll
+++ b/test/CodeGen/PowerPC/shift_mask.ll
@@ -4,7 +4,7 @@ target triple = "powerpc64le-linux-gnu"
define i8 @test000(i8 %a, i8 %b) {
; CHECK-LABEL: test000:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31
; CHECK-NEXT: slw 3, 3, 4
; CHECK-NEXT: blr
@@ -15,7 +15,7 @@ define i8 @test000(i8 %a, i8 %b) {
define i16 @test001(i16 %a, i16 %b) {
; CHECK-LABEL: test001:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31
; CHECK-NEXT: slw 3, 3, 4
; CHECK-NEXT: blr
@@ -26,7 +26,7 @@ define i16 @test001(i16 %a, i16 %b) {
define i32 @test002(i32 %a, i32 %b) {
; CHECK-LABEL: test002:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31
; CHECK-NEXT: slw 3, 3, 4
; CHECK-NEXT: blr
@@ -37,7 +37,7 @@ define i32 @test002(i32 %a, i32 %b) {
define i64 @test003(i64 %a, i64 %b) {
; CHECK-LABEL: test003:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31
; CHECK-NEXT: sld 3, 3, 4
; CHECK-NEXT: blr
@@ -48,7 +48,7 @@ define i64 @test003(i64 %a, i64 %b) {
define <16 x i8> @test010(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test010:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vslb 2, 2, 3
; CHECK-NEXT: blr
%rem = and <16 x i8> %b, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -58,7 +58,7 @@ define <16 x i8> @test010(<16 x i8> %a, <16 x i8> %b) {
define <8 x i16> @test011(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test011:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vslh 2, 2, 3
; CHECK-NEXT: blr
%rem = and <8 x i16> %b, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
@@ -68,7 +68,7 @@ define <8 x i16> @test011(<8 x i16> %a, <8 x i16> %b) {
define <4 x i32> @test012(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test012:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vslw 2, 2, 3
; CHECK-NEXT: blr
%rem = and <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
@@ -78,7 +78,7 @@ define <4 x i32> @test012(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test013(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test013:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsld 2, 2, 3
; CHECK-NEXT: blr
%rem = and <2 x i64> %b, <i64 63, i64 63>
@@ -88,7 +88,7 @@ define <2 x i64> @test013(<2 x i64> %a, <2 x i64> %b) {
define i8 @test100(i8 %a, i8 %b) {
; CHECK-LABEL: test100:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 3, 3, 0, 24, 31
; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31
; CHECK-NEXT: srw 3, 3, 4
@@ -100,7 +100,7 @@ define i8 @test100(i8 %a, i8 %b) {
define i16 @test101(i16 %a, i16 %b) {
; CHECK-LABEL: test101:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 3, 3, 0, 16, 31
; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31
; CHECK-NEXT: srw 3, 3, 4
@@ -112,7 +112,7 @@ define i16 @test101(i16 %a, i16 %b) {
define i32 @test102(i32 %a, i32 %b) {
; CHECK-LABEL: test102:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31
; CHECK-NEXT: srw 3, 3, 4
; CHECK-NEXT: blr
@@ -123,7 +123,7 @@ define i32 @test102(i32 %a, i32 %b) {
define i64 @test103(i64 %a, i64 %b) {
; CHECK-LABEL: test103:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31
; CHECK-NEXT: srd 3, 3, 4
; CHECK-NEXT: blr
@@ -134,7 +134,7 @@ define i64 @test103(i64 %a, i64 %b) {
define <16 x i8> @test110(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test110:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsrb 2, 2, 3
; CHECK-NEXT: blr
%rem = and <16 x i8> %b, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -144,7 +144,7 @@ define <16 x i8> @test110(<16 x i8> %a, <16 x i8> %b) {
define <8 x i16> @test111(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test111:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsrh 2, 2, 3
; CHECK-NEXT: blr
%rem = and <8 x i16> %b, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
@@ -154,7 +154,7 @@ define <8 x i16> @test111(<8 x i16> %a, <8 x i16> %b) {
define <4 x i32> @test112(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test112:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsrw 2, 2, 3
; CHECK-NEXT: blr
%rem = and <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
@@ -164,7 +164,7 @@ define <4 x i32> @test112(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test113(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test113:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsrd 2, 2, 3
; CHECK-NEXT: blr
%rem = and <2 x i64> %b, <i64 63, i64 63>
@@ -174,7 +174,7 @@ define <2 x i64> @test113(<2 x i64> %a, <2 x i64> %b) {
define i8 @test200(i8 %a, i8 %b) {
; CHECK-LABEL: test200:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: extsb 3, 3
; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31
; CHECK-NEXT: sraw 3, 3, 4
@@ -186,7 +186,7 @@ define i8 @test200(i8 %a, i8 %b) {
define i16 @test201(i16 %a, i16 %b) {
; CHECK-LABEL: test201:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: extsh 3, 3
; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31
; CHECK-NEXT: sraw 3, 3, 4
@@ -198,7 +198,7 @@ define i16 @test201(i16 %a, i16 %b) {
define i32 @test202(i32 %a, i32 %b) {
; CHECK-LABEL: test202:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31
; CHECK-NEXT: sraw 3, 3, 4
; CHECK-NEXT: blr
@@ -209,7 +209,7 @@ define i32 @test202(i32 %a, i32 %b) {
define i64 @test203(i64 %a, i64 %b) {
; CHECK-LABEL: test203:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31
; CHECK-NEXT: srad 3, 3, 4
; CHECK-NEXT: blr
@@ -220,7 +220,7 @@ define i64 @test203(i64 %a, i64 %b) {
define <16 x i8> @test210(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test210:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsrab 2, 2, 3
; CHECK-NEXT: blr
%rem = and <16 x i8> %b, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -230,7 +230,7 @@ define <16 x i8> @test210(<16 x i8> %a, <16 x i8> %b) {
define <8 x i16> @test211(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test211:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsrah 2, 2, 3
; CHECK-NEXT: blr
%rem = and <8 x i16> %b, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
@@ -240,7 +240,7 @@ define <8 x i16> @test211(<8 x i16> %a, <8 x i16> %b) {
define <4 x i32> @test212(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test212:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsraw 2, 2, 3
; CHECK-NEXT: blr
%rem = and <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
@@ -250,7 +250,7 @@ define <4 x i32> @test212(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test213(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test213:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsrad 2, 2, 3
; CHECK-NEXT: blr
%rem = and <2 x i64> %b, <i64 63, i64 63>
diff --git a/test/CodeGen/PowerPC/sjlj.ll b/test/CodeGen/PowerPC/sjlj.ll
index 14aec583891..68b53417f01 100644
--- a/test/CodeGen/PowerPC/sjlj.ll
+++ b/test/CodeGen/PowerPC/sjlj.ll
@@ -77,7 +77,7 @@ return: ; preds = %if.end, %if.then
; CHECK: bcl 20, 31, .LBB1_3
; CHECK: li 3, 1
; CHECK: #EH_SjLj_Setup .LBB1_3
-; CHECK: # BB#1:
+; CHECK: # %bb.1:
; CHECK: .LBB1_3:
; CHECK: mflr [[REGL:[0-9]+]]
diff --git a/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll b/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
index 0b101457161..3ff4753200e 100644
--- a/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
+++ b/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
@@ -12,17 +12,17 @@ declare void @f4()
; Function Attrs: nounwind
; CHECK-LABEL: tail_dup_fallthrough_with_branch
-; CHECK: # %entry
+; CHECK: # %bb.{{[0-9]+}}: # %entry
; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
-; CHECK: # %entry
+; CHECK: # %bb.{{[0-9]+}}: # %entry
; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
-; CHECK: # %sw.0
+; CHECK: # %bb.{{[0-9]+}}: # %sw.0
; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
; CHECK: # %sw.1
; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
; CHECK: # %sw.default
; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
-; CHECK: # %if.then
+; CHECK: # %bb.{{[0-9]+}}: # %if.then
; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
; CHECK: # %if.else
; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
diff --git a/test/CodeGen/PowerPC/tail-dup-layout.ll b/test/CodeGen/PowerPC/tail-dup-layout.ll
index 9665901e874..badeed5b30a 100644
--- a/test/CodeGen/PowerPC/tail-dup-layout.ll
+++ b/test/CodeGen/PowerPC/tail-dup-layout.ll
@@ -278,7 +278,7 @@ exit:
;CHECK: addi
;CHECK: .[[CHECKLABEL:[._0-9A-Za-z]+]]: # %for.check
;CHECK: lwz [[TAGREG:[0-9]+]], 0([[TAGPTRREG]])
-;CHECK: # %test1
+;CHECK: # %bb.{{[0-9]+}}: # %test1
;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[._0-9A-Za-z]+]]
;CHECK-NEXT: # %test2
@@ -366,12 +366,12 @@ exit:
; code is independent of the outlining code, which works by choosing the
; "unavoidable" blocks.
; CHECK-LABEL: avoidable_test:
-; CHECK: # %entry
+; CHECK: # %bb.{{[0-9]+}}: # %entry
; CHECK: andi.
-; CHECK: # %test2
+; CHECK: # %bb.{{[0-9]+}}: # %test2
; Make sure then2 falls through from test2
; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
-; CHECK: # %then2
+; CHECK: # %bb.{{[0-9]+}}: # %then2
; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
; CHECK: # %else1
; CHECK: bl a
@@ -420,8 +420,8 @@ end1:
; The f;g->h;i trellis should be resolved as f->i;g->h.
; The h;i->j;ret trellis contains a triangle edge, and should be resolved as
; h->j->ret
-; CHECK: # %entry
-; CHECK: # %c10
+; CHECK: # %bb.{{[0-9]+}}: # %entry
+; CHECK: # %bb.{{[0-9]+}}: # %c10
; CHECK: # %e9
; CHECK: # %g10
; CHECK: # %h10
@@ -504,8 +504,8 @@ ret:
; checking, it's profitable to duplicate G into F. The weights here are not
; really important. They are there to help make the test stable.
; CHECK-LABEL: trellis_then_dup_test
-; CHECK: # %entry
-; CHECK: # %b
+; CHECK: # %bb.{{[0-9]+}}: # %entry
+; CHECK: # %bb.{{[0-9]+}}: # %b
; CHECK: # %d
; CHECK: # %g
; CHECK: # %ret1
@@ -568,8 +568,8 @@ ret:
; Verify that we did not mis-identify triangle trellises if it is not
; really a triangle.
; CHECK-LABEL: trellis_no_triangle
-; CHECK: # %entry
-; CHECK: # %b
+; CHECK: # %bb.{{[0-9]+}}: # %entry
+; CHECK: # %bb.{{[0-9]+}}: # %b
; CHECK: # %d
; CHECK: # %ret
; CHECK: # %c
diff --git a/test/CodeGen/PowerPC/testBitReverse.ll b/test/CodeGen/PowerPC/testBitReverse.ll
index 1508af9e4d0..22fefe45468 100644
--- a/test/CodeGen/PowerPC/testBitReverse.ll
+++ b/test/CodeGen/PowerPC/testBitReverse.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.bitreverse.i32(i32)
define i32 @testBitReverseIntrinsicI32(i32 %arg) {
; CHECK-LABEL: testBitReverseIntrinsicI32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lis 4, -21846
; CHECK-NEXT: lis 5, 21845
; CHECK-NEXT: slwi 6, 3, 1
@@ -44,7 +44,7 @@ define i32 @testBitReverseIntrinsicI32(i32 %arg) {
declare i64 @llvm.bitreverse.i64(i64)
define i64 @testBitReverseIntrinsicI64(i64 %arg) {
; CHECK-LABEL: testBitReverseIntrinsicI64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lis 4, -21846
; CHECK-NEXT: lis 5, 21845
; CHECK-NEXT: lis 6, -13108
diff --git a/test/CodeGen/PowerPC/testComparesi32gtu.ll b/test/CodeGen/PowerPC/testComparesi32gtu.ll
index 1d0cee72823..4341b59390e 100644
--- a/test/CodeGen/PowerPC/testComparesi32gtu.ll
+++ b/test/CodeGen/PowerPC/testComparesi32gtu.ll
@@ -11,7 +11,7 @@ declare signext i32 @fn2(...) local_unnamed_addr #1
; Function Attrs: nounwind
define i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) {
; CHECK-LABEL: testCompare1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: lbz r3, 0(r3)
; CHECK-DAG: clrlwi r3, r3, 31
; CHECK-DAG: clrldi r3, r3, 32
@@ -35,7 +35,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @testCompare2(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: testCompare2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-DAG: rlwinm r3, r3, 0, 31, 31
; CHECK-DAG: rlwinm r4, r4, 0, 31, 31
; CHECK-DAG: clrldi r3, r3, 32
diff --git a/test/CodeGen/PowerPC/testComparesi32leu.ll b/test/CodeGen/PowerPC/testComparesi32leu.ll
index 65a75dacbeb..3ba967b51da 100644
--- a/test/CodeGen/PowerPC/testComparesi32leu.ll
+++ b/test/CodeGen/PowerPC/testComparesi32leu.ll
@@ -8,7 +8,7 @@
define signext i32 @test(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: rlwinm r3, r3, 0, 31, 31
; CHECK-NEXT: rlwinm r4, r4, 0, 31, 31
; CHECK-NEXT: clrldi r3, r3, 32
diff --git a/test/CodeGen/PowerPC/testComparesi32ltu.ll b/test/CodeGen/PowerPC/testComparesi32ltu.ll
index fb6b3f88bb5..9623a63e9bc 100644
--- a/test/CodeGen/PowerPC/testComparesi32ltu.ll
+++ b/test/CodeGen/PowerPC/testComparesi32ltu.ll
@@ -11,7 +11,7 @@ declare signext i32 @fn2(...) local_unnamed_addr #1
; Function Attrs: nounwind
define i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) {
; CHECK-LABEL: testCompare1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: lbz r3, 0(r3)
; CHECK-DAG: clrlwi r3, r3, 31
; CHECK-DAG: clrldi r3, r3, 32
@@ -35,7 +35,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @testCompare2(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: testCompare2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-DAG: rlwinm r3, r3, 0, 31, 31
; CHECK-DAG: rlwinm r4, r4, 0, 31, 31
; CHECK-DAG: clrldi r3, r3, 32
diff --git a/test/CodeGen/PowerPC/testComparesieqsc.ll b/test/CodeGen/PowerPC/testComparesieqsc.ll
index e65abd317f4..aa0211ebb65 100644
--- a/test/CodeGen/PowerPC/testComparesieqsc.ll
+++ b/test/CodeGen/PowerPC/testComparesieqsc.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsc(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_ieqsc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsc_sext(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_ieqsc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -41,7 +41,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsc_z(i8 signext %a) {
; CHECK-LABEL: test_ieqsc_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -54,7 +54,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsc_sext_z(i8 signext %a) {
; CHECK-LABEL: test_ieqsc_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -68,7 +68,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsc_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_ieqsc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -86,7 +86,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsc_sext_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_ieqsc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsc_z_store(i8 signext %a) {
; CHECK-LABEL: test_ieqsc_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsc_sext_z_store(i8 signext %a) {
; CHECK-LABEL: test_ieqsc_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesieqsi.ll b/test/CodeGen/PowerPC/testComparesieqsi.ll
index 81b28ac6fe5..0a6b7b9ca35 100644
--- a/test/CodeGen/PowerPC/testComparesieqsi.ll
+++ b/test/CodeGen/PowerPC/testComparesieqsi.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsi(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_ieqsi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsi_sext(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_ieqsi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -41,7 +41,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsi_z(i32 signext %a) {
; CHECK-LABEL: test_ieqsi_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -54,7 +54,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsi_sext_z(i32 signext %a) {
; CHECK-LABEL: test_ieqsi_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -68,7 +68,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsi_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_ieqsi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -86,7 +86,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsi_sext_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_ieqsi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsi_z_store(i32 signext %a) {
; CHECK-LABEL: test_ieqsi_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsi_sext_z_store(i32 signext %a) {
; CHECK-LABEL: test_ieqsi_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesieqsll.ll b/test/CodeGen/PowerPC/testComparesieqsll.ll
index bedd0ed9c97..1dae985c36c 100644
--- a/test/CodeGen/PowerPC/testComparesieqsll.ll
+++ b/test/CodeGen/PowerPC/testComparesieqsll.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsll(i64 %a, i64 %b) {
; CHECK-LABEL: test_ieqsll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: rldicl r3, r3, 58, 63
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_ieqsll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: subfe r3, r3, r3
@@ -40,7 +40,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsll_z(i64 %a) {
; CHECK-LABEL: test_ieqsll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: rldicl r3, r3, 58, 63
; CHECK-NEXT: blr
@@ -53,7 +53,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqsll_sext_z(i64 %a) {
; CHECK-LABEL: test_ieqsll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: subfe r3, r3, r3
; CHECK-NEXT: blr
@@ -66,7 +66,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_ieqsll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -84,7 +84,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_ieqsll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -102,7 +102,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsll_z_store(i64 %a) {
; CHECK-LABEL: test_ieqsll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -119,7 +119,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqsll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_ieqsll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesieqss.ll b/test/CodeGen/PowerPC/testComparesieqss.ll
index 66a1fa814b9..93a92e17807 100644
--- a/test/CodeGen/PowerPC/testComparesieqss.ll
+++ b/test/CodeGen/PowerPC/testComparesieqss.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqss(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_ieqss:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqss_sext(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_ieqss_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -41,7 +41,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqss_z(i16 signext %a) {
; CHECK-LABEL: test_ieqss_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -54,7 +54,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_ieqss_sext_z(i16 signext %a) {
; CHECK-LABEL: test_ieqss_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -68,7 +68,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqss_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_ieqss_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -86,7 +86,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqss_sext_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_ieqss_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqss_z_store(i16 signext %a) {
; CHECK-LABEL: test_ieqss_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_ieqss_sext_z_store(i16 signext %a) {
; CHECK-LABEL: test_ieqss_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesiequc.ll b/test/CodeGen/PowerPC/testComparesiequc.ll
index 2616ab56d8d..592f7bc83bb 100644
--- a/test/CodeGen/PowerPC/testComparesiequc.ll
+++ b/test/CodeGen/PowerPC/testComparesiequc.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequc(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_iequc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequc_sext(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_iequc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -41,7 +41,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequc_z(i8 zeroext %a) {
; CHECK-LABEL: test_iequc_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -54,7 +54,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequc_sext_z(i8 zeroext %a) {
; CHECK-LABEL: test_iequc_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -68,7 +68,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequc_store(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_iequc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -86,7 +86,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequc_sext_store(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_iequc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequc_z_store(i8 zeroext %a) {
; CHECK-LABEL: test_iequc_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequc_sext_z_store(i8 zeroext %a) {
; CHECK-LABEL: test_iequc_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesiequi.ll b/test/CodeGen/PowerPC/testComparesiequi.ll
index a4a1b7635e8..9a639dc5410 100644
--- a/test/CodeGen/PowerPC/testComparesiequi.ll
+++ b/test/CodeGen/PowerPC/testComparesiequi.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequi(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_iequi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequi_sext(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_iequi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -41,7 +41,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequi_z(i32 zeroext %a) {
; CHECK-LABEL: test_iequi_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -54,7 +54,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequi_sext_z(i32 zeroext %a) {
; CHECK-LABEL: test_iequi_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -68,7 +68,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequi_store(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_iequi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -86,7 +86,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequi_sext_store(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_iequi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequi_z_store(i32 zeroext %a) {
; CHECK-LABEL: test_iequi_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequi_sext_z_store(i32 zeroext %a) {
; CHECK-LABEL: test_iequi_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesiequll.ll b/test/CodeGen/PowerPC/testComparesiequll.ll
index 4d9035813a8..f147478d5ea 100644
--- a/test/CodeGen/PowerPC/testComparesiequll.ll
+++ b/test/CodeGen/PowerPC/testComparesiequll.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequll(i64 %a, i64 %b) {
; CHECK-LABEL: test_iequll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: rldicl r3, r3, 58, 63
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_iequll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: subfe r3, r3, r3
@@ -40,7 +40,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequll_z(i64 %a) {
; CHECK-LABEL: test_iequll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: rldicl r3, r3, 58, 63
; CHECK-NEXT: blr
@@ -53,7 +53,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequll_sext_z(i64 %a) {
; CHECK-LABEL: test_iequll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: subfe r3, r3, r3
; CHECK-NEXT: blr
@@ -66,7 +66,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_iequll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -84,7 +84,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_iequll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -102,7 +102,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequll_z_store(i64 %a) {
; CHECK-LABEL: test_iequll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -119,7 +119,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_iequll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesiequs.ll b/test/CodeGen/PowerPC/testComparesiequs.ll
index 5d47c38f739..195339ddb2e 100644
--- a/test/CodeGen/PowerPC/testComparesiequs.ll
+++ b/test/CodeGen/PowerPC/testComparesiequs.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequs(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_iequs:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequs_sext(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_iequs_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -41,7 +41,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequs_z(i16 zeroext %a) {
; CHECK-LABEL: test_iequs_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -54,7 +54,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iequs_sext_z(i16 zeroext %a) {
; CHECK-LABEL: test_iequs_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -68,7 +68,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequs_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_iequs_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -86,7 +86,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequs_sext_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_iequs_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequs_z_store(i16 zeroext %a) {
; CHECK-LABEL: test_iequs_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iequs_sext_z_store(i16 zeroext %a) {
; CHECK-LABEL: test_iequs_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesigesc.ll b/test/CodeGen/PowerPC/testComparesigesc.ll
index 130127bf351..69dd97fc9c4 100644
--- a/test/CodeGen/PowerPC/testComparesigesc.ll
+++ b/test/CodeGen/PowerPC/testComparesigesc.ll
@@ -9,7 +9,7 @@

define signext i32 @test_igesc(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_igesc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:

define signext i32 @test_igesc_sext(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_igesc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:

define void @test_igesc_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_igesc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:

define void @test_igesc_sext_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_igesc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesigesi.ll b/test/CodeGen/PowerPC/testComparesigesi.ll
index 018fb940a2c..7efc8ae8245 100644
--- a/test/CodeGen/PowerPC/testComparesigesi.ll
+++ b/test/CodeGen/PowerPC/testComparesigesi.ll
@@ -9,7 +9,7 @@

define signext i32 @test_igesi(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_igesi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:

define signext i32 @test_igesi_sext(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_igesi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:

define void @test_igesi_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_igesi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:

define void @test_igesi_sext_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_igesi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesigesll.ll b/test/CodeGen/PowerPC/testComparesigesll.ll
index 8ce71c0fd9c..30efe3da3e9 100644
--- a/test/CodeGen/PowerPC/testComparesigesll.ll
+++ b/test/CodeGen/PowerPC/testComparesigesll.ll
@@ -9,7 +9,7 @@

define signext i32 @test_igesll(i64 %a, i64 %b) {
; CHECK-LABEL: test_igesll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r5, r3, 63
; CHECK-NEXT: rldicl r6, r4, 1, 63
; CHECK-NEXT: subfc r3, r4, r3
@@ -23,7 +23,7 @@ entry:

define signext i32 @test_igesll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_igesll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r5, r3, 63
; CHECK-NEXT: rldicl r6, r4, 1, 63
; CHECK-NEXT: subfc r3, r4, r3
@@ -38,7 +38,7 @@ entry:

define signext i32 @test_igesll_z(i64 %a) {
; CHECK-LABEL: test_igesll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
; CHECK-NEXT: blr
@@ -50,7 +50,7 @@ entry:

define signext i32 @test_igesll_sext_z(i64 %a) {
; CHECK-LABEL: test_igesll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r3, r3, 63
; CHECK-NEXT: not r3, r3
; CHECK-NEXT: blr
@@ -62,7 +62,7 @@ entry:

define void @test_igesll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_igesll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi r6, r3, 63
; CHECK: subfc r3, r4, r3
; CHECK: rldicl r3, r4, 1, 63
@@ -78,7 +78,7 @@ entry:

define void @test_igesll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_igesll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r6, r3, 63
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: subfc r3, r4, r3
@@ -97,7 +97,7 @@ entry:

define void @test_igesll_z_store(i64 %a) {
; CHECK-LABEL: test_igesll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -113,7 +113,7 @@ entry:

define void @test_igesll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_igesll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: sradi r3, r3, 63
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesigess.ll b/test/CodeGen/PowerPC/testComparesigess.ll
index 8773d423a58..231a26c916d 100644
--- a/test/CodeGen/PowerPC/testComparesigess.ll
+++ b/test/CodeGen/PowerPC/testComparesigess.ll
@@ -9,7 +9,7 @@

define signext i32 @test_igess(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_igess:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:

define signext i32 @test_igess_sext(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_igess_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:

define void @test_igess_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_igess_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:

define void @test_igess_sext_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_igess_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesigtsc.ll b/test/CodeGen/PowerPC/testComparesigtsc.ll
index 9af61599398..8009043c45d 100644
--- a/test/CodeGen/PowerPC/testComparesigtsc.ll
+++ b/test/CodeGen/PowerPC/testComparesigtsc.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsc(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_igtsc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
; CHECK-NEXT: blr
@@ -23,7 +23,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsc_sext(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_igtsc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -37,7 +37,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsc_z(i8 signext %a) {
; CHECK-LABEL: test_igtsc_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: neg r3, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: blr
@@ -62,7 +62,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsc_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_igtsc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r4, r3
; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63
entry:
@@ -75,7 +75,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsc_sext_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_igtsc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r4, r3
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
@@ -89,7 +89,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsc_z_store(i8 signext %a) {
; CHECK-LABEL: test_igtsc_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: neg r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesigtsi.ll b/test/CodeGen/PowerPC/testComparesigtsi.ll
index f7a32c9c49b..77dfc3583f1 100644
--- a/test/CodeGen/PowerPC/testComparesigtsi.ll
+++ b/test/CodeGen/PowerPC/testComparesigtsi.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsi(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_igtsi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
; CHECK-NEXT: blr
@@ -23,7 +23,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsi_sext(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_igtsi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -37,7 +37,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsi_z(i32 signext %a) {
; CHECK-LABEL: test_igtsi_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: neg r3, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: blr
@@ -62,7 +62,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsi_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_igtsi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r4, r3
; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63
entry:
@@ -75,7 +75,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsi_sext_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_igtsi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r4, r3
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
@@ -89,7 +89,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsi_z_store(i32 signext %a) {
; CHECK-LABEL: test_igtsi_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: neg r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesigtsll.ll b/test/CodeGen/PowerPC/testComparesigtsll.ll
index bd681f9e168..75314d708f5 100644
--- a/test/CodeGen/PowerPC/testComparesigtsll.ll
+++ b/test/CodeGen/PowerPC/testComparesigtsll.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsll(i64 %a, i64 %b) {
; CHECK-LABEL: test_igtsll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63
; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63
; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_igtsll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63
; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63
; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4
@@ -44,7 +44,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtsll_z(i64 %a) {
; CHECK-LABEL: test_igtsll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r4, r3, -1
; CHECK-NEXT: nor r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
@@ -70,7 +70,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_igtsll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi [[REG1:r[0-9]+]], r4, 63
; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63
; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4
@@ -87,7 +87,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_igtsll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi [[REG1:r[0-9]+]], r4, 63
; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63
; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtsll_z_store(i64 %a) {
; CHECK-LABEL: test_igtsll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addi r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesigtss.ll b/test/CodeGen/PowerPC/testComparesigtss.ll
index 65ea0b58e78..23ddbe30f7e 100644
--- a/test/CodeGen/PowerPC/testComparesigtss.ll
+++ b/test/CodeGen/PowerPC/testComparesigtss.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtss(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_igtss:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG1:r[0-9]+]], r4, r3
; CHECK-NEXT: rldicl r3, [[REG1]], 1, 63
; CHECK-NEXT: blr
@@ -23,7 +23,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtss_sext(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_igtss_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -37,7 +37,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtss_z(i16 signext %a) {
; CHECK-LABEL: test_igtss_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: neg r3, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: blr
@@ -50,7 +50,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_igtss_sext_z(i16 signext %a) {
; CHECK-LABEL: test_igtss_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: neg [[REG2:r[0-9]+]], r3
; CHECK-NEXT: sradi r3, [[REG2]], 63
; CHECK-NEXT: blr
@@ -63,7 +63,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtss_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_igtss_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG1:r[0-9]+]], r4, r3
; CHECK: rldicl {{r[0-9]+}}, [[REG1]], 1, 63
entry:
@@ -76,7 +76,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtss_sext_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_igtss_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r4, r3
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
@@ -90,7 +90,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_igtss_z_store(i16 signext %a) {
; CHECK-LABEL: test_igtss_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: neg r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesilesc.ll b/test/CodeGen/PowerPC/testComparesilesc.ll
index b932867ef31..422dc3adc5d 100644
--- a/test/CodeGen/PowerPC/testComparesilesc.ll
+++ b/test/CodeGen/PowerPC/testComparesilesc.ll
@@ -9,7 +9,7 @@

define signext i32 @test_ilesc(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_ilesc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:

define signext i32 @test_ilesc_sext(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_ilesc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:

define void @test_ilesc_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_ilesc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:

define void @test_ilesc_sext_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_ilesc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesilesi.ll b/test/CodeGen/PowerPC/testComparesilesi.ll
index 250cbc704c1..72439bd9aa3 100644
--- a/test/CodeGen/PowerPC/testComparesilesi.ll
+++ b/test/CodeGen/PowerPC/testComparesilesi.ll
@@ -9,7 +9,7 @@

define signext i32 @test_ilesi(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_ilesi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:

define signext i32 @test_ilesi_sext(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_ilesi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:

define void @test_ilesi_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_ilesi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:

define void @test_ilesi_sext_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_ilesi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesilesll.ll b/test/CodeGen/PowerPC/testComparesilesll.ll
index 8c23dcd95cd..21b67664c30 100644
--- a/test/CodeGen/PowerPC/testComparesilesll.ll
+++ b/test/CodeGen/PowerPC/testComparesilesll.ll
@@ -9,7 +9,7 @@

define signext i32 @test_ilesll(i64 %a, i64 %b) {
; CHECK-LABEL: test_ilesll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r5, r4, 63
; CHECK-NEXT: rldicl r6, r3, 1, 63
; CHECK-NEXT: subfc r12, r3, r4
@@ -23,7 +23,7 @@ entry:

define signext i32 @test_ilesll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_ilesll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r5, r4, 63
; CHECK-NEXT: rldicl r6, r3, 1, 63
; CHECK-NEXT: subfc r12, r3, r4
@@ -38,7 +38,7 @@ entry:

define signext i32 @test_ilesll_z(i64 %a) {
; CHECK-LABEL: test_ilesll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r4, r3, -1
; CHECK-NEXT: or r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
@@ -51,7 +51,7 @@ entry:

define signext i32 @test_ilesll_sext_z(i64 %a) {
; CHECK-LABEL: test_ilesll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r4, r3, -1
; CHECK-NEXT: or r3, r4, r3
; CHECK-NEXT: sradi r3, r3, 63
@@ -64,7 +64,7 @@ entry:

define void @test_ilesll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_ilesll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi r6, r4, 63
; CHECK: subfc r4, r3, r4
; CHECK: rldicl r3, r3, 1, 63
@@ -80,7 +80,7 @@ entry:

define void @test_ilesll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_ilesll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi r6, r4, 63
; CHECK-DAG: rldicl r3, r3, 1, 63
; CHECK-DAG: subfc r4, r3, r4
@@ -97,7 +97,7 @@ entry:

define void @test_ilesll_z_store(i64 %a) {
; CHECK-LABEL: test_ilesll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addi r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -114,7 +114,7 @@ entry:

define void @test_ilesll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_ilesll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addi r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesiless.ll b/test/CodeGen/PowerPC/testComparesiless.ll
index 5e4a455990d..c85ff6078e7 100644
--- a/test/CodeGen/PowerPC/testComparesiless.ll
+++ b/test/CodeGen/PowerPC/testComparesiless.ll
@@ -9,7 +9,7 @@

define signext i32 @test_iless(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_iless:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:

define signext i32 @test_iless_sext(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_iless_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:

define void @test_iless_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_iless_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:

define void @test_iless_sext_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_iless_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesiltsc.ll b/test/CodeGen/PowerPC/testComparesiltsc.ll
index d4f267cc12a..08a023302bd 100644
--- a/test/CodeGen/PowerPC/testComparesiltsc.ll
+++ b/test/CodeGen/PowerPC/testComparesiltsc.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltsc(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_iltsc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
; CHECK-NEXT: blr
@@ -24,7 +24,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltsc_sext(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_iltsc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -48,7 +48,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iltsc_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_iltsc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63
entry:
@@ -61,7 +61,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iltsc_sext_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_iltsc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
diff --git a/test/CodeGen/PowerPC/testComparesiltsi.ll b/test/CodeGen/PowerPC/testComparesiltsi.ll
index 191afd20eaa..39f37387f53 100644
--- a/test/CodeGen/PowerPC/testComparesiltsi.ll
+++ b/test/CodeGen/PowerPC/testComparesiltsi.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltsi(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_iltsi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
; CHECK-NEXT: blr
@@ -24,7 +24,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltsi_sext(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_iltsi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -37,7 +37,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltsi_sext_z(i32 signext %a) {
; CHECK-LABEL: test_iltsi_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: srawi r3, r3, 31
; CHECK-NEXT: blr
entry:
@@ -49,7 +49,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iltsi_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_iltsi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63
entry:
@@ -62,7 +62,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iltsi_sext_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_iltsi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
diff --git a/test/CodeGen/PowerPC/testComparesiltsll.ll b/test/CodeGen/PowerPC/testComparesiltsll.ll
index a0452954917..4152b8556df 100644
--- a/test/CodeGen/PowerPC/testComparesiltsll.ll
+++ b/test/CodeGen/PowerPC/testComparesiltsll.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltsll(i64 %a, i64 %b) {
; CHECK-LABEL: test_iltsll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63
; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63
; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3
@@ -27,7 +27,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltsll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_iltsll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63
; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63
; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3
@@ -44,7 +44,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltsll_sext_z(i64 %a) {
; CHECK-LABEL: test_iltsll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r3, r3, 63
; CHECK-NEXT: blr
entry:
@@ -56,7 +56,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iltsll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_iltsll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi [[REG1:r[0-9]+]], r3, 63
; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63
; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3
@@ -73,7 +73,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iltsll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_iltsll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi [[REG1:r[0-9]+]], r3, 63
; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63
; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3
diff --git a/test/CodeGen/PowerPC/testComparesiltss.ll b/test/CodeGen/PowerPC/testComparesiltss.ll
index 4d66fad13ad..db5a60dfb66 100644
--- a/test/CodeGen/PowerPC/testComparesiltss.ll
+++ b/test/CodeGen/PowerPC/testComparesiltss.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltss(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_iltss:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
; CHECK-NEXT: blr
@@ -24,7 +24,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define signext i32 @test_iltss_sext(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_iltss_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -48,7 +48,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iltss_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_iltss_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63
entry:
@@ -61,7 +61,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_iltss_sext_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_iltss_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
diff --git a/test/CodeGen/PowerPC/testComparesinesll.ll b/test/CodeGen/PowerPC/testComparesinesll.ll
index 5f49d2290a6..cccff24c504 100644
--- a/test/CodeGen/PowerPC/testComparesinesll.ll
+++ b/test/CodeGen/PowerPC/testComparesinesll.ll
@@ -10,7 +10,7 @@

define signext i32 @test_inesll(i64 %a, i64 %b) {
; CHECK-LABEL: test_inesll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addic r4, r3, -1
; CHECK-NEXT: subfe r3, r4, r3
@@ -23,7 +23,7 @@ entry:

define signext i32 @test_inesll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_inesll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: subfe r3, r3, r3
@@ -36,7 +36,7 @@ entry:

define signext i32 @test_inesll_z(i64 %a) {
; CHECK-LABEL: test_inesll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addic r4, r3, -1
; CHECK-NEXT: subfe r3, r4, r3
; CHECK-NEXT: blr
@@ -48,7 +48,7 @@ entry:

define signext i32 @test_inesll_sext_z(i64 %a) {
; CHECK-LABEL: test_inesll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: subfe r3, r3, r3
; CHECK-NEXT: blr
@@ -60,7 +60,7 @@ entry:

define void @test_inesll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_inesll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -77,7 +77,7 @@ entry:

define void @test_inesll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_inesll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -94,7 +94,7 @@ entry:

define void @test_inesll_z_store(i64 %a) {
; CHECK-LABEL: test_inesll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addic r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -110,7 +110,7 @@ entry:

define void @test_inesll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_inesll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesineuc.ll b/test/CodeGen/PowerPC/testComparesineuc.ll
index 3f99fbcd212..c478041b19e 100644
--- a/test/CodeGen/PowerPC/testComparesineuc.ll
+++ b/test/CodeGen/PowerPC/testComparesineuc.ll
@@ -9,7 +9,7 @@

define signext i32 @test_ineuc(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_ineuc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -23,7 +23,7 @@ entry:

define signext i32 @test_ineuc_sext(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_ineuc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -38,7 +38,7 @@ entry:

define signext i32 @test_ineuc_z(i8 zeroext %a) {
; CHECK-LABEL: test_ineuc_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: xori r3, r3, 1
@@ -51,7 +51,7 @@ entry:

define signext i32 @test_ineuc_sext_z(i8 zeroext %a) {
; CHECK-LABEL: test_ineuc_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: xori r3, r3, 1
@@ -65,7 +65,7 @@ entry:

define void @test_ineuc_store(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_ineuc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -83,7 +83,7 @@ entry:

define void @test_ineuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_ineuc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -102,7 +102,7 @@ entry:

define void @test_ineuc_z_store(i8 zeroext %a) {
; CHECK-LABEL: test_ineuc_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -119,7 +119,7 @@ entry:

define void @test_ineuc_sext_z_store(i8 zeroext %a) {
; CHECK-LABEL: test_ineuc_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
diff --git a/test/CodeGen/PowerPC/testComparesineull.ll b/test/CodeGen/PowerPC/testComparesineull.ll
index 6d645f5d33b..ba388a45fad 100644
--- a/test/CodeGen/PowerPC/testComparesineull.ll
+++ b/test/CodeGen/PowerPC/testComparesineull.ll
@@ -10,7 +10,7 @@

define signext i32 @test_ineull(i64 %a, i64 %b) {
; CHECK-LABEL: test_ineull:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addic r4, r3, -1
; CHECK-NEXT: subfe r3, r4, r3
@@ -23,7 +23,7 @@ entry:

define signext i32 @test_ineull_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_ineull_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: subfe r3, r3, r3
@@ -36,7 +36,7 @@ entry:

define signext i32 @test_ineull_z(i64 %a) {
; CHECK-LABEL: test_ineull_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addic r4, r3, -1
; CHECK-NEXT: subfe r3, r4, r3
; CHECK-NEXT: blr
@@ -48,7 +48,7 @@ entry:

define signext i32 @test_ineull_sext_z(i64 %a) {
; CHECK-LABEL: test_ineull_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: subfe r3, r3, r3
; CHECK-NEXT: blr
@@ -60,7 +60,7 @@ entry:

define void @test_ineull_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_ineull_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -77,7 +77,7 @@ entry:

define void @test_ineull_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_ineull_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -94,7 +94,7 @@ entry:

define void @test_ineull_z_store(i64 %a) {
; CHECK-LABEL: test_ineull_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addic r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -110,7 +110,7 @@ entry:

define void @test_ineull_sext_z_store(i64 %a) {
; CHECK-LABEL: test_ineull_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesineus.ll b/test/CodeGen/PowerPC/testComparesineus.ll
index 38c62f7dc3f..a78671b6407 100644
--- a/test/CodeGen/PowerPC/testComparesineus.ll
+++ b/test/CodeGen/PowerPC/testComparesineus.ll
@@ -10,7 +10,7 @@

define signext i32 @test_ineus(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_ineus:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -24,7 +24,7 @@ entry:

define signext i32 @test_ineus_sext(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_ineus_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -39,7 +39,7 @@ entry:

define signext i32 @test_ineus_z(i16 zeroext %a) {
; CHECK-LABEL: test_ineus_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: xori r3, r3, 1
@@ -52,7 +52,7 @@ entry:

define signext i32 @test_ineus_sext_z(i16 zeroext %a) {
; CHECK-LABEL: test_ineus_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: xori r3, r3, 1
@@ -66,7 +66,7 @@ entry:

define void @test_ineus_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_ineus_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -84,7 +84,7 @@ entry:

define void @test_ineus_sext_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_ineus_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -103,7 +103,7 @@ entry:
define void @test_ineus_z_store(i16 zeroext %a) {
; CHECK-LABEL: test_ineus_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -120,7 +120,7 @@ entry:
define void @test_ineus_sext_z_store(i16 zeroext %a) {
; CHECK-LABEL: test_ineus_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
diff --git a/test/CodeGen/PowerPC/testCompareslleqsc.ll b/test/CodeGen/PowerPC/testCompareslleqsc.ll
index 8559665f53d..43fb358efef 100644
--- a/test/CodeGen/PowerPC/testCompareslleqsc.ll
+++ b/test/CodeGen/PowerPC/testCompareslleqsc.ll
@@ -12,7 +12,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsc(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_lleqsc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsc_sext(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_lleqsc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -41,7 +41,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsc_z(i8 signext %a) {
; CHECK-LABEL: test_lleqsc_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -54,7 +54,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsc_sext_z(i8 signext %a) {
; CHECK-LABEL: test_lleqsc_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -68,7 +68,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsc_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_lleqsc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -86,7 +86,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsc_sext_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_lleqsc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsc_z_store(i8 signext %a) {
; CHECK-LABEL: test_lleqsc_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsc_sext_z_store(i8 signext %a) {
; CHECK-LABEL: test_lleqsc_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testCompareslleqsi.ll b/test/CodeGen/PowerPC/testCompareslleqsi.ll
index 131c088a8ab..ae8dffb1e22 100644
--- a/test/CodeGen/PowerPC/testCompareslleqsi.ll
+++ b/test/CodeGen/PowerPC/testCompareslleqsi.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsi(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_lleqsi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -25,7 +25,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsi_sext(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_lleqsi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -40,7 +40,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsi_z(i32 signext %a) {
; CHECK-LABEL: test_lleqsi_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -53,7 +53,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsi_sext_z(i32 signext %a) {
; CHECK-LABEL: test_lleqsi_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -67,7 +67,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsi_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_lleqsi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -85,7 +85,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsi_sext_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_lleqsi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -104,7 +104,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsi_z_store(i32 signext %a) {
; CHECK-LABEL: test_lleqsi_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsi_sext_z_store(i32 signext %a) {
; CHECK-LABEL: test_lleqsi_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testCompareslleqsll.ll b/test/CodeGen/PowerPC/testCompareslleqsll.ll
index 8484586c160..89ef960a6f9 100644
--- a/test/CodeGen/PowerPC/testCompareslleqsll.ll
+++ b/test/CodeGen/PowerPC/testCompareslleqsll.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsll(i64 %a, i64 %b) {
; CHECK-LABEL: test_lleqsll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: rldicl r3, r3, 58, 63
@@ -25,7 +25,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_lleqsll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: subfe r3, r3, r3
@@ -39,7 +39,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsll_z(i64 %a) {
; CHECK-LABEL: test_lleqsll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: rldicl r3, r3, 58, 63
; CHECK-NEXT: blr
@@ -52,7 +52,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqsll_sext_z(i64 %a) {
; CHECK-LABEL: test_lleqsll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: subfe r3, r3, r3
; CHECK-NEXT: blr
@@ -65,7 +65,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_lleqsll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -83,7 +83,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_lleqsll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -101,7 +101,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsll_z_store(i64 %a) {
; CHECK-LABEL: test_lleqsll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -118,7 +118,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqsll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_lleqsll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testCompareslleqss.ll b/test/CodeGen/PowerPC/testCompareslleqss.ll
index 1eac65f268c..5d1945d73e3 100644
--- a/test/CodeGen/PowerPC/testCompareslleqss.ll
+++ b/test/CodeGen/PowerPC/testCompareslleqss.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqss(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_lleqss:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -25,7 +25,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqss_sext(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_lleqss_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -40,7 +40,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqss_z(i16 signext %a) {
; CHECK-LABEL: test_lleqss_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -53,7 +53,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lleqss_sext_z(i16 signext %a) {
; CHECK-LABEL: test_lleqss_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -67,7 +67,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqss_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_lleqss_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -85,7 +85,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqss_sext_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_lleqss_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -104,7 +104,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqss_z_store(i16 signext %a) {
; CHECK-LABEL: test_lleqss_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -121,7 +121,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lleqss_sext_z_store(i16 signext %a) {
; CHECK-LABEL: test_lleqss_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllequc.ll b/test/CodeGen/PowerPC/testComparesllequc.ll
index a1733b86516..0f5d4c6f287 100644
--- a/test/CodeGen/PowerPC/testComparesllequc.ll
+++ b/test/CodeGen/PowerPC/testComparesllequc.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequc(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_llequc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -25,7 +25,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequc_sext(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_llequc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -40,7 +40,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequc_z(i8 zeroext %a) {
; CHECK-LABEL: test_llequc_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -53,7 +53,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequc_sext_z(i8 zeroext %a) {
; CHECK-LABEL: test_llequc_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -67,7 +67,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequc_store(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_llequc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -85,7 +85,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequc_sext_store(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_llequc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -104,7 +104,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequc_z_store(i8 zeroext %a) {
; CHECK-LABEL: test_llequc_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -121,7 +121,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequc_sext_z_store(i8 zeroext %a) {
; CHECK-LABEL: test_llequc_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllequi.ll b/test/CodeGen/PowerPC/testComparesllequi.ll
index ab3176d49ac..350168e0e6c 100644
--- a/test/CodeGen/PowerPC/testComparesllequi.ll
+++ b/test/CodeGen/PowerPC/testComparesllequi.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequi(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_llequi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -25,7 +25,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequi_sext(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_llequi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -40,7 +40,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequi_z(i32 zeroext %a) {
; CHECK-LABEL: test_llequi_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -53,7 +53,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequi_sext_z(i32 zeroext %a) {
; CHECK-LABEL: test_llequi_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -67,7 +67,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequi_store(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_llequi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -85,7 +85,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequi_sext_store(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_llequi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -104,7 +104,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequi_z_store(i32 zeroext %a) {
; CHECK-LABEL: test_llequi_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -121,7 +121,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequi_sext_z_store(i32 zeroext %a) {
; CHECK-LABEL: test_llequi_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllequll.ll b/test/CodeGen/PowerPC/testComparesllequll.ll
index 8ca9767e879..7d1fe527e8a 100644
--- a/test/CodeGen/PowerPC/testComparesllequll.ll
+++ b/test/CodeGen/PowerPC/testComparesllequll.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequll(i64 %a, i64 %b) {
; CHECK-LABEL: test_llequll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: rldicl r3, r3, 58, 63
@@ -25,7 +25,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_llequll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: subfe r3, r3, r3
@@ -39,7 +39,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequll_z(i64 %a) {
; CHECK-LABEL: test_llequll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: rldicl r3, r3, 58, 63
; CHECK-NEXT: blr
@@ -52,7 +52,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequll_sext_z(i64 %a) {
; CHECK-LABEL: test_llequll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: subfe r3, r3, r3
; CHECK-NEXT: blr
@@ -65,7 +65,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llequll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -83,7 +83,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llequll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -101,7 +101,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequll_z_store(i64 %a) {
; CHECK-LABEL: test_llequll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzd r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -118,7 +118,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_llequll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addic r3, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllequs.ll b/test/CodeGen/PowerPC/testComparesllequs.ll
index 0cc2b43bdff..cc215216dfc 100644
--- a/test/CodeGen/PowerPC/testComparesllequs.ll
+++ b/test/CodeGen/PowerPC/testComparesllequs.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequs(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llequs:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -25,7 +25,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequs_sext(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llequs_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
@@ -40,7 +40,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequs_z(i16 zeroext %a) {
; CHECK-LABEL: test_llequs_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: blr
@@ -53,7 +53,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llequs_sext_z(i16 zeroext %a) {
; CHECK-LABEL: test_llequs_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: neg r3, r3
@@ -67,7 +67,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequs_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llequs_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -85,7 +85,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequs_sext_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llequs_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
@@ -104,7 +104,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequs_z_store(i16 zeroext %a) {
; CHECK-LABEL: test_llequs_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -121,7 +121,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llequs_sext_z_store(i16 zeroext %a) {
; CHECK-LABEL: test_llequs_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllgesc.ll b/test/CodeGen/PowerPC/testComparesllgesc.ll
index 744ef362abf..82f54cd6b1b 100644
--- a/test/CodeGen/PowerPC/testComparesllgesc.ll
+++ b/test/CodeGen/PowerPC/testComparesllgesc.ll
@@ -9,7 +9,7 @@
define i64 @test_llgesc(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_llgesc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:
define i64 @test_llgesc_sext(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_llgesc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:
define void @test_llgesc_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_llgesc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:
define void @test_llgesc_sext_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_llgesc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesllgesi.ll b/test/CodeGen/PowerPC/testComparesllgesi.ll
index eec3e671b8f..82c1fa11b8b 100644
--- a/test/CodeGen/PowerPC/testComparesllgesi.ll
+++ b/test/CodeGen/PowerPC/testComparesllgesi.ll
@@ -9,7 +9,7 @@
define i64 @test_llgesi(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_llgesi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:
define i64 @test_llgesi_sext(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_llgesi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:
define void @test_llgesi_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_llgesi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:
define void @test_llgesi_sext_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_llgesi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesllgesll.ll b/test/CodeGen/PowerPC/testComparesllgesll.ll
index 9fa5985ba6e..6fb53977a55 100644
--- a/test/CodeGen/PowerPC/testComparesllgesll.ll
+++ b/test/CodeGen/PowerPC/testComparesllgesll.ll
@@ -9,7 +9,7 @@
define i64 @test_llgesll(i64 %a, i64 %b) {
; CHECK-LABEL: test_llgesll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r5, r3, 63
; CHECK-NEXT: rldicl r6, r4, 1, 63
; CHECK-NEXT: subfc r3, r4, r3
@@ -23,7 +23,7 @@ entry:
define i64 @test_llgesll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_llgesll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r5, r3, 63
; CHECK-NEXT: rldicl r6, r4, 1, 63
; CHECK-NEXT: subfc r3, r4, r3
@@ -38,7 +38,7 @@ entry:
define i64 @test_llgesll_z(i64 %a) {
; CHECK-LABEL: test_llgesll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
; CHECK-NEXT: blr
@@ -50,7 +50,7 @@ entry:
define i64 @test_llgesll_sext_z(i64 %a) {
; CHECK-LABEL: test_llgesll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r3, r3, 63
; CHECK-NEXT: not r3, r3
; CHECK-NEXT: blr
@@ -62,7 +62,7 @@ entry:
define void @test_llgesll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llgesll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi r6, r3, 63
; CHECK: subfc r3, r4, r3
; CHECK: rldicl r3, r4, 1, 63
@@ -78,7 +78,7 @@ entry:
define void @test_llgesll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llgesll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r6, r3, 63
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: subfc r3, r4, r3
@@ -97,7 +97,7 @@ entry:
define void @test_llgesll_z_store(i64 %a) {
; CHECK-LABEL: test_llgesll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -113,7 +113,7 @@ entry:
define void @test_llgesll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_llgesll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: sradi r3, r3, 63
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllgess.ll b/test/CodeGen/PowerPC/testComparesllgess.ll
index 04b07b2200e..1206339a23b 100644
--- a/test/CodeGen/PowerPC/testComparesllgess.ll
+++ b/test/CodeGen/PowerPC/testComparesllgess.ll
@@ -9,7 +9,7 @@
define i64 @test_llgess(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_llgess:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -22,7 +22,7 @@ entry:
define i64 @test_llgess_sext(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_llgess_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -35,7 +35,7 @@ entry:
define void @test_llgess_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_llgess_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -52,7 +52,7 @@ entry:
define void @test_llgess_sext_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_llgess_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesllgtsll.ll b/test/CodeGen/PowerPC/testComparesllgtsll.ll
index 2467468afac..0dc1374374f 100644
--- a/test/CodeGen/PowerPC/testComparesllgtsll.ll
+++ b/test/CodeGen/PowerPC/testComparesllgtsll.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llgtsll(i64 %a, i64 %b) {
; CHECK-LABEL: test_llgtsll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63
; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63
; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4
@@ -26,7 +26,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llgtsll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_llgtsll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63
; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63
; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4
@@ -44,7 +44,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llgtsll_z(i64 %a) {
; CHECK-LABEL: test_llgtsll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r4, r3, -1
; CHECK-NEXT: nor r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
@@ -70,7 +70,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llgtsll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llgtsll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi [[REG1:r[0-9]+]], r4, 63
; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63
; CHECK-DAG: subfc [[REG3:r[0-9]+]], r3, r4
@@ -87,7 +87,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llgtsll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llgtsll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi [[REG1:r[0-9]+]], r4, 63
; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63
; CHECK-DAG: subfc [[REG3:r[0-9]+]], r3, r4
@@ -105,7 +105,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llgtsll_z_store(i64 %a) {
; CHECK-LABEL: test_llgtsll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addi r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllgtus.ll b/test/CodeGen/PowerPC/testComparesllgtus.ll
index 8d06b4b3790..3758e8e097c 100644
--- a/test/CodeGen/PowerPC/testComparesllgtus.ll
+++ b/test/CodeGen/PowerPC/testComparesllgtus.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llgtus(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llgtus:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
; CHECK-NEXT: blr
@@ -23,7 +23,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llgtus_sext(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llgtus_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -36,7 +36,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llgtus_z(i16 zeroext %a) {
; CHECK-LABEL: test_llgtus_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: xori r3, r3, 1
@@ -50,7 +50,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llgtus_sext_z(i16 zeroext %a) {
; CHECK-LABEL: test_llgtus_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
; CHECK-NEXT: xori r3, r3, 1
@@ -65,7 +65,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llgtus_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llgtus_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r4, r3
; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63
entry:
@@ -78,7 +78,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llgtus_sext_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llgtus_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r4, r3
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
@@ -91,7 +91,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llgtus_z_store(i16 zeroext %a) {
; CHECK-LABEL: test_llgtus_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -109,7 +109,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llgtus_sext_z_store(i16 zeroext %a) {
; CHECK-LABEL: test_llgtus_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: cntlzw r3, r3
; CHECK-NEXT: srwi r3, r3, 5
diff --git a/test/CodeGen/PowerPC/testCompareslllesc.ll b/test/CodeGen/PowerPC/testCompareslllesc.ll
index cb564cc721b..f9352990f2c 100644
--- a/test/CodeGen/PowerPC/testCompareslllesc.ll
+++ b/test/CodeGen/PowerPC/testCompareslllesc.ll
@@ -10,7 +10,7 @@
define i64 @test_lllesc(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_lllesc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -23,7 +23,7 @@ entry:
define i64 @test_lllesc_sext(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_lllesc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -36,7 +36,7 @@ entry:
define void @test_lllesc_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_lllesc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -53,7 +53,7 @@ entry:
define void @test_lllesc_sext_store(i8 signext %a, i8 signext %b) {
; CHECK-LABEL: test_lllesc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testCompareslllesi.ll b/test/CodeGen/PowerPC/testCompareslllesi.ll
index d39f61fbd88..42062692a08 100644
--- a/test/CodeGen/PowerPC/testCompareslllesi.ll
+++ b/test/CodeGen/PowerPC/testCompareslllesi.ll
@@ -10,7 +10,7 @@
define i64 @test_lllesi(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_lllesi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -23,7 +23,7 @@ entry:
define i64 @test_lllesi_sext(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_lllesi_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -36,7 +36,7 @@ entry:
define void @test_lllesi_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_lllesi_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -53,7 +53,7 @@ entry:
define void @test_lllesi_sext_store(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: test_lllesi_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testCompareslllesll.ll b/test/CodeGen/PowerPC/testCompareslllesll.ll
index 375bd2295a7..8db1ee19ebb 100644
--- a/test/CodeGen/PowerPC/testCompareslllesll.ll
+++ b/test/CodeGen/PowerPC/testCompareslllesll.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_lllesll(i64 %a, i64 %b) {
; CHECK-LABEL: test_lllesll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r5, r4, 63
; CHECK-NEXT: rldicl r6, r3, 1, 63
; CHECK-NEXT: subfc r12, r3, r4
@@ -25,7 +25,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lllesll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_lllesll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r5, r4, 63
; CHECK-NEXT: rldicl r6, r3, 1, 63
; CHECK-NEXT: subfc r12, r3, r4
@@ -41,7 +41,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lllesll_z(i64 %a) {
; CHECK-LABEL: test_lllesll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r4, r3, -1
; CHECK-NEXT: or r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
@@ -55,7 +55,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_lllesll_sext_z(i64 %a) {
; CHECK-LABEL: test_lllesll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r4, r3, -1
; CHECK-NEXT: or r3, r4, r3
; CHECK-NEXT: sradi r3, r3, 63
@@ -69,7 +69,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lllesll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_lllesll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi r6, r4, 63
; CHECK: subfc r4, r3, r4
; CHECK: rldicl r3, r3, 1, 63
@@ -86,7 +86,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lllesll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_lllesll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi r6, r4, 63
; CHECK-DAG: rldicl r3, r3, 1, 63
; CHECK-DAG: subfc r4, r3, r4
@@ -104,7 +104,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lllesll_z_store(i64 %a) {
; CHECK-LABEL: test_lllesll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addi r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_lllesll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_lllesll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addi r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllless.ll b/test/CodeGen/PowerPC/testComparesllless.ll
index 4971fb75900..a6f3b5e3988 100644
--- a/test/CodeGen/PowerPC/testComparesllless.ll
+++ b/test/CodeGen/PowerPC/testComparesllless.ll
@@ -10,7 +10,7 @@
define i64 @test_llless(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_llless:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: xori r3, r3, 1
@@ -23,7 +23,7 @@ entry:
define i64 @test_llless_sext(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_llless_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: rldicl r3, r3, 1, 63
; CHECK-NEXT: addi r3, r3, -1
@@ -36,7 +36,7 @@ entry:
define void @test_llless_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_llless_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -53,7 +53,7 @@ entry:
define void @test_llless_sext_store(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: test_llless_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sub r3, r4, r3
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
diff --git a/test/CodeGen/PowerPC/testComparesllltsll.ll b/test/CodeGen/PowerPC/testComparesllltsll.ll
index 887c14faf7b..3e37daf046f 100644
--- a/test/CodeGen/PowerPC/testComparesllltsll.ll
+++ b/test/CodeGen/PowerPC/testComparesllltsll.ll
@@ -11,7 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltsll(i64 %a, i64 %b) {
; CHECK-LABEL: test_llltsll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63
; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63
; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3
@@ -27,7 +27,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltsll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_llltsll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63
; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63
; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3
@@ -44,7 +44,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltsll_sext_z(i64 %a) {
; CHECK-LABEL: test_llltsll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sradi r3, r3, 63
; CHECK-NEXT: blr
entry:
@@ -56,7 +56,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltsll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llltsll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi [[REG1:r[0-9]+]], r3, 63
; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63
; CHECK-DAG: subfc [[REG3:r[0-9]+]], r4, r3
@@ -73,7 +73,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltsll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llltsll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sradi [[REG1:r[0-9]+]], r3, 63
; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63
; CHECK-DAG: subfc [[REG3:r[0-9]+]], r4, r3
diff --git a/test/CodeGen/PowerPC/testComparesllltuc.ll b/test/CodeGen/PowerPC/testComparesllltuc.ll
index a02452554b5..a8244e757b1 100644
--- a/test/CodeGen/PowerPC/testComparesllltuc.ll
+++ b/test/CodeGen/PowerPC/testComparesllltuc.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltuc(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_llltuc:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
; CHECK-NEXT: blr
@@ -23,7 +23,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltuc_sext(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_llltuc_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -36,7 +36,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltuc_store(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_llltuc_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[2-9]+]], r3, r4
; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63
entry:
@@ -49,7 +49,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: test_llltuc_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
diff --git a/test/CodeGen/PowerPC/testComparesllltui.ll b/test/CodeGen/PowerPC/testComparesllltui.ll
index bea180168da..e785942b3c9 100644
--- a/test/CodeGen/PowerPC/testComparesllltui.ll
+++ b/test/CodeGen/PowerPC/testComparesllltui.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltui(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_llltui:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NOT: clrldi
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
@@ -24,7 +24,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltui_sext(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_llltui_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -37,7 +37,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltui_z(i32 zeroext %a) {
; CHECK-LABEL: test_llltui_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: blr
entry:
@@ -47,7 +47,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltui_sext_z(i32 zeroext %a) {
; CHECK-LABEL: test_llltui_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: blr
entry:
@@ -57,7 +57,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltui_store(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_llltui_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NOT: clrldi
; CHECK: sub [[REG:r[2-9]+]], r3, r4
; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63
@@ -71,7 +71,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltui_sext_store(i32 zeroext %a, i32 zeroext %b) {
; CHECK-LABEL: test_llltui_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NOT: clrldi
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
@@ -85,7 +85,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltui_z_store(i32 zeroext %a) {
; CHECK-LABEL: test_llltui_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: li [[REG:r[0-9]+]], 0
; CHECK: stw [[REG]], 0(r3)
; CHECK-NEXT: blr
@@ -97,7 +97,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltui_sext_z_store(i32 zeroext %a) {
; CHECK-LABEL: test_llltui_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: li [[REG:r[0-9]+]], 0
; CHECK: stw [[REG]], 0(r3)
; CHECK-NEXT: blr
diff --git a/test/CodeGen/PowerPC/testComparesllltus.ll b/test/CodeGen/PowerPC/testComparesllltus.ll
index 713bc220442..e997d0aa8b8 100644
--- a/test/CodeGen/PowerPC/testComparesllltus.ll
+++ b/test/CodeGen/PowerPC/testComparesllltus.ll
@@ -10,7 +10,7 @@
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltus(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llltus:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: rldicl r3, [[REG]], 1, 63
; CHECK-NEXT: blr
@@ -23,7 +23,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i64 @test_llltus_sext(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llltus_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4
; CHECK-NEXT: sradi r3, [[REG]], 63
; CHECK-NEXT: blr
@@ -48,7 +48,7 @@ entry:
; Function Attrs: norecurse nounwind
define void @test_llltus_sext_store(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: test_llltus_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK: sub [[REG:r[0-9]+]], r3, r4
; CHECK: sradi {{r[0-9]+}}, [[REG]], 63
entry:
diff --git a/test/CodeGen/PowerPC/testComparesllnesll.ll b/test/CodeGen/PowerPC/testComparesllnesll.ll
index 6b2b662dcc7..cdd272f57bd 100644
--- a/test/CodeGen/PowerPC/testComparesllnesll.ll
+++ b/test/CodeGen/PowerPC/testComparesllnesll.ll
@@ -10,7 +10,7 @@
define i64 @test_llnesll(i64 %a, i64 %b) {
; CHECK-LABEL: test_llnesll:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addic r4, r3, -1
; CHECK-NEXT: subfe r3, r4, r3
@@ -23,7 +23,7 @@ entry:
define i64 @test_llnesll_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_llnesll_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: subfe r3, r3, r3
@@ -36,7 +36,7 @@ entry:
define i64 @test_llnesll_z(i64 %a) {
; CHECK-LABEL: test_llnesll_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addic r4, r3, -1
; CHECK-NEXT: subfe r3, r4, r3
; CHECK-NEXT: blr
@@ -48,7 +48,7 @@ entry:
define i64 @test_llnesll_sext_z(i64 %a) {
; CHECK-LABEL: test_llnesll_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: subfe r3, r3, r3
; CHECK-NEXT: blr
@@ -60,7 +60,7 @@ entry:
define void @test_llnesll_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llnesll_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -77,7 +77,7 @@ entry:
define void @test_llnesll_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llnesll_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -94,7 +94,7 @@ entry:
define void @test_llnesll_z_store(i64 %a) {
; CHECK-LABEL: test_llnesll_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addic r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -110,7 +110,7 @@ entry:
define void @test_llnesll_sext_z_store(i64 %a) {
; CHECK-LABEL: test_llnesll_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/testComparesllneull.ll b/test/CodeGen/PowerPC/testComparesllneull.ll
index 0cf47e4ac03..7956881f495 100644
--- a/test/CodeGen/PowerPC/testComparesllneull.ll
+++ b/test/CodeGen/PowerPC/testComparesllneull.ll
@@ -10,7 +10,7 @@
define i64 @test_llneull(i64 %a, i64 %b) {
; CHECK-LABEL: test_llneull:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: addic r4, r3, -1
; CHECK-NEXT: subfe r3, r4, r3
@@ -23,7 +23,7 @@ entry:
define i64 @test_llneull_sext(i64 %a, i64 %b) {
; CHECK-LABEL: test_llneull_sext:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: subfe r3, r3, r3
@@ -36,7 +36,7 @@ entry:
define i64 @test_llneull_z(i64 %a) {
; CHECK-LABEL: test_llneull_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addic r4, r3, -1
; CHECK-NEXT: subfe r3, r4, r3
; CHECK-NEXT: blr
@@ -48,7 +48,7 @@ entry:
define i64 @test_llneull_sext_z(i64 %a) {
; CHECK-LABEL: test_llneull_sext_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: subfe r3, r3, r3
; CHECK-NEXT: blr
@@ -60,7 +60,7 @@ entry:
define void @test_llneull_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llneull_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -77,7 +77,7 @@ entry:
define void @test_llneull_sext_store(i64 %a, i64 %b) {
; CHECK-LABEL: test_llneull_sext_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: xor r3, r3, r4
; CHECK-NEXT: ld r12, .LC0@toc@l(r5)
@@ -94,7 +94,7 @@ entry:
define void @test_llneull_z_store(i64 %a) {
; CHECK-LABEL: test_llneull_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: addic r5, r3, -1
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
@@ -110,7 +110,7 @@ entry:
define void @test_llneull_sext_z_store(i64 %a) {
; CHECK-LABEL: test_llneull_sext_z_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: subfic r3, r3, 0
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
diff --git a/test/CodeGen/PowerPC/vec_add_sub_quadword.ll b/test/CodeGen/PowerPC/vec_add_sub_quadword.ll
index f42f7d11783..8f3864ff268 100644
--- a/test/CodeGen/PowerPC/vec_add_sub_quadword.ll
+++ b/test/CodeGen/PowerPC/vec_add_sub_quadword.ll
@@ -8,7 +8,7 @@ define <1 x i128> @out_of_bounds_insertelement(<1 x i128> %x, i128 %val) nounwin
%result = add <1 x i128> %x, %tmpvec
ret <1 x i128> %result
; CHECK-LABEL: @out_of_bounds_insertelement
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blr
}
diff --git a/test/CodeGen/PowerPC/vec_extract_p9.ll b/test/CodeGen/PowerPC/vec_extract_p9.ll
index b07c905ceec..7e397f54684 100644
--- a/test/CodeGen/PowerPC/vec_extract_p9.ll
+++ b/test/CodeGen/PowerPC/vec_extract_p9.ll
@@ -4,12 +4,12 @@
define zeroext i8 @test1(<16 x i8> %a, i32 signext %index) {
; CHECK-LE-LABEL: test1:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextubrx 3, 5, 2
; CHECK-LE-NEXT: clrldi 3, 3, 56
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test1:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextublx 3, 5, 2
; CHECK-BE-NEXT: clrldi 3, 3, 56
; CHECK-BE-NEXT: blr
@@ -21,12 +21,12 @@ entry:
define signext i8 @test2(<16 x i8> %a, i32 signext %index) {
; CHECK-LE-LABEL: test2:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextubrx 3, 5, 2
; CHECK-LE-NEXT: extsb 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test2:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextublx 3, 5, 2
; CHECK-BE-NEXT: extsb 3, 3
; CHECK-BE-NEXT: blr
@@ -38,13 +38,13 @@ entry:
define zeroext i16 @test3(<8 x i16> %a, i32 signext %index) {
; CHECK-LE-LABEL: test3:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30
; CHECK-LE-NEXT: vextuhrx 3, 3, 2
; CHECK-LE-NEXT: clrldi 3, 3, 48
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test3:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30
; CHECK-BE-NEXT: vextuhlx 3, 3, 2
; CHECK-BE-NEXT: clrldi 3, 3, 48
@@ -57,13 +57,13 @@ entry:
define signext i16 @test4(<8 x i16> %a, i32 signext %index) {
; CHECK-LE-LABEL: test4:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30
; CHECK-LE-NEXT: vextuhrx 3, 3, 2
; CHECK-LE-NEXT: extsh 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test4:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30
; CHECK-BE-NEXT: vextuhlx 3, 3, 2
; CHECK-BE-NEXT: extsh 3, 3
@@ -76,12 +76,12 @@ entry:
define zeroext i32 @test5(<4 x i32> %a, i32 signext %index) {
; CHECK-LE-LABEL: test5:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29
; CHECK-LE-NEXT: vextuwrx 3, 3, 2
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test5:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: blr
@@ -93,13 +93,13 @@ entry:
define signext i32 @test6(<4 x i32> %a, i32 signext %index) {
; CHECK-LE-LABEL: test6:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29
; CHECK-LE-NEXT: vextuwrx 3, 3, 2
; CHECK-LE-NEXT: extsw 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test6:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: extsw 3, 3
@@ -113,13 +113,13 @@ entry:
; Test with immediate index
define zeroext i8 @test7(<16 x i8> %a) {
; CHECK-LE-LABEL: test7:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: li 3, 1
; CHECK-LE-NEXT: vextubrx 3, 3, 2
; CHECK-LE-NEXT: clrldi 3, 3, 56
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test7:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: li 3, 1
; CHECK-BE-NEXT: vextublx 3, 3, 2
; CHECK-BE-NEXT: clrldi 3, 3, 56
@@ -132,13 +132,13 @@ entry:
define zeroext i16 @test8(<8 x i16> %a) {
; CHECK-LE-LABEL: test8:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: li 3, 2
; CHECK-LE-NEXT: vextuhrx 3, 3, 2
; CHECK-LE-NEXT: clrldi 3, 3, 48
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test8:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: li 3, 2
; CHECK-BE-NEXT: vextuhlx 3, 3, 2
; CHECK-BE-NEXT: clrldi 3, 3, 48
@@ -151,12 +151,12 @@ entry:
define zeroext i32 @test9(<4 x i32> %a) {
; CHECK-LE-LABEL: test9:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: li 3, 12
; CHECK-LE-NEXT: vextuwrx 3, 3, 2
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test9:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: li 3, 12
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: blr
diff --git a/test/CodeGen/PowerPC/vec_extract_p9_2.ll b/test/CodeGen/PowerPC/vec_extract_p9_2.ll
index 9734a88fdec..f2ce7924ed9 100644
--- a/test/CodeGen/PowerPC/vec_extract_p9_2.ll
+++ b/test/CodeGen/PowerPC/vec_extract_p9_2.ll
@@ -4,13 +4,13 @@
define zeroext i8 @test_add1(<16 x i8> %a, i32 signext %index, i8 zeroext %c) {
; CHECK-LE-LABEL: test_add1:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextubrx 3, 5, 2
; CHECK-LE-NEXT: add 3, 3, 6
; CHECK-LE-NEXT: rlwinm 3, 3, 0, 24, 31
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test_add1:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextublx 3, 5, 2
; CHECK-BE-NEXT: add 3, 3, 6
; CHECK-BE-NEXT: rlwinm 3, 3, 0, 24, 31
@@ -26,13 +26,13 @@ entry:
define signext i8 @test_add2(<16 x i8> %a, i32 signext %index, i8 signext %c) {
; CHECK-LE-LABEL: test_add2:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextubrx 3, 5, 2
; CHECK-LE-NEXT: add 3, 3, 6
; CHECK-LE-NEXT: extsb 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test_add2:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextublx 3, 5, 2
; CHECK-BE-NEXT: add 3, 3, 6
; CHECK-BE-NEXT: extsb 3, 3
@@ -48,14 +48,14 @@ entry:
define zeroext i16 @test_add3(<8 x i16> %a, i32 signext %index, i16 zeroext %c) {
; CHECK-LE-LABEL: test_add3:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30
; CHECK-LE-NEXT: vextuhrx 3, 3, 2
; CHECK-LE-NEXT: add 3, 3, 6
; CHECK-LE-NEXT: rlwinm 3, 3, 0, 16, 31
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test_add3:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30
; CHECK-BE-NEXT: vextuhlx 3, 3, 2
; CHECK-BE-NEXT: add 3, 3, 6
@@ -72,14 +72,14 @@ entry:
define signext i16 @test_add4(<8 x i16> %a, i32 signext %index, i16 signext %c) {
; CHECK-LE-LABEL: test_add4:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30
; CHECK-LE-NEXT: vextuhrx 3, 3, 2
; CHECK-LE-NEXT: add 3, 3, 6
; CHECK-LE-NEXT: extsh 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test_add4:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30
; CHECK-BE-NEXT: vextuhlx 3, 3, 2
; CHECK-BE-NEXT: add 3, 3, 6
@@ -96,14 +96,14 @@ entry:
define zeroext i32 @test_add5(<4 x i32> %a, i32 signext %index, i32 zeroext %c) {
; CHECK-LE-LABEL: test_add5:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29
; CHECK-LE-NEXT: vextuwrx 3, 3, 2
; CHECK-LE-NEXT: add 3, 3, 6
; CHECK-LE-NEXT: clrldi 3, 3, 32
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test_add5:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: add 3, 3, 6
@@ -117,14 +117,14 @@ entry:
define signext i32 @test_add6(<4 x i32> %a, i32 signext %index, i32 signext %c) {
; CHECK-LE-LABEL: test_add6:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29
; CHECK-LE-NEXT: vextuwrx 3, 3, 2
; CHECK-LE-NEXT: add 3, 3, 6
; CHECK-LE-NEXT: extsw 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test_add6:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: add 3, 3, 6
@@ -139,11 +139,11 @@ entry:
; When extracting word element 2 on LE, it's better to use mfvsrwz rather than vextuwrx
define zeroext i32 @test7(<4 x i32> %a) {
; CHECK-LE-LABEL: test7:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: mfvsrwz 3, 34
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test7:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: li 3, 8
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: blr
@@ -154,13 +154,13 @@ entry:
define zeroext i32 @testadd_7(<4 x i32> %a, i32 zeroext %c) {
; CHECK-LE-LABEL: testadd_7:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: mfvsrwz 3, 34
; CHECK-LE-NEXT: add 3, 3, 5
; CHECK-LE-NEXT: clrldi 3, 3, 32
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: testadd_7:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: li 3, 8
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: add 3, 3, 5
@@ -174,12 +174,12 @@ entry:
define signext i32 @test8(<4 x i32> %a) {
; CHECK-LE-LABEL: test8:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: mfvsrwz 3, 34
; CHECK-LE-NEXT: extsw 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test8:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: li 3, 8
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: extsw 3, 3
@@ -191,13 +191,13 @@ entry:
define signext i32 @testadd_8(<4 x i32> %a, i32 signext %c) {
; CHECK-LE-LABEL: testadd_8:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: mfvsrwz 3, 34
; CHECK-LE-NEXT: add 3, 3, 5
; CHECK-LE-NEXT: extsw 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: testadd_8:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: li 3, 8
; CHECK-BE-NEXT: vextuwlx 3, 3, 2
; CHECK-BE-NEXT: add 3, 3, 5
@@ -212,13 +212,13 @@ entry:
; When extracting word element 1 on BE, it's better to use mfvsrwz rather than vextuwlx
define signext i32 @test9(<4 x i32> %a) {
; CHECK-LE-LABEL: test9:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: li 3, 4
; CHECK-LE-NEXT: vextuwrx 3, 3, 2
; CHECK-LE-NEXT: extsw 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: test9:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: mfvsrwz 3, 34
; CHECK-BE-NEXT: extsw 3, 3
; CHECK-BE-NEXT: blr
@@ -229,14 +229,14 @@ entry:
define signext i32 @testadd_9(<4 x i32> %a, i32 signext %c) {
; CHECK-LE-LABEL: testadd_9:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: li 3, 4
; CHECK-LE-NEXT: vextuwrx 3, 3, 2
; CHECK-LE-NEXT: add 3, 3, 5
; CHECK-LE-NEXT: extsw 3, 3
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: testadd_9:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: mfvsrwz 3, 34
; CHECK-BE-NEXT: add 3, 3, 5
; CHECK-BE-NEXT: extsw 3, 3
diff --git a/test/CodeGen/PowerPC/vec_int_ext.ll b/test/CodeGen/PowerPC/vec_int_ext.ll
index d7bed503318..1c86e38d060 100644
--- a/test/CodeGen/PowerPC/vec_int_ext.ll
+++ b/test/CodeGen/PowerPC/vec_int_ext.ll
@@ -4,11 +4,11 @@
define <4 x i32> @vextsb2wLE(<16 x i8> %a) {
; CHECK-LE-LABEL: vextsb2wLE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextsb2w 2, 2
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: vextsb2wLE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE: vperm 2, 2, 2, 3
; CHECK-BE-NEXT: vextsb2w 2, 2
; CHECK-BE-NEXT: blr
@@ -31,11 +31,11 @@ entry:
define <2 x i64> @vextsb2dLE(<16 x i8> %a) {
; CHECK-LE-LABEL: vextsb2dLE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextsb2d 2, 2
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: vextsb2dLE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE: vperm 2, 2, 2, 3
; CHECK-BE-NEXT: vextsb2d 2, 2
; CHECK-BE-NEXT: blr
@@ -52,11 +52,11 @@ entry:
define <4 x i32> @vextsh2wLE(<8 x i16> %a) {
; CHECK-LE-LABEL: vextsh2wLE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextsh2w 2, 2
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: vextsh2wLE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE: vperm 2, 2, 2, 3
; CHECK-BE-NEXT: vextsh2w 2, 2
; CHECK-BE-NEXT: blr
@@ -79,11 +79,11 @@ entry:
define <2 x i64> @vextsh2dLE(<8 x i16> %a) {
; CHECK-LE-LABEL: vextsh2dLE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextsh2d 2, 2
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: vextsh2dLE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE: vperm 2, 2, 2, 3
; CHECK-BE-NEXT: vextsh2d 2, 2
; CHECK-BE-NEXT: blr
@@ -100,11 +100,11 @@ entry:
define <2 x i64> @vextsw2dLE(<4 x i32> %a) {
; CHECK-LE-LABEL: vextsw2dLE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vextsw2d 2, 2
; CHECK-LE-NEXT: blr
; CHECK-BE-LABEL: vextsw2dLE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE: vmrgew
; CHECK-BE-NEXT: vextsw2d 2, 2
; CHECK-BE-NEXT: blr
@@ -121,11 +121,11 @@ entry:
define <4 x i32> @vextsb2wBE(<16 x i8> %a) {
; CHECK-BE-LABEL: vextsb2wBE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextsb2w 2, 2
; CHECK-BE-NEXT: blr
; CHECK-LE-LABEL: vextsb2wBE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vsldoi 2, 2, 2, 13
; CHECK-LE-NEXT: vextsb2w 2, 2
; CHECK-LE-NEXT: blr
@@ -147,11 +147,11 @@ entry:
define <2 x i64> @vextsb2dBE(<16 x i8> %a) {
; CHECK-BE-LABEL: vextsb2dBE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextsb2d 2, 2
; CHECK-BE-NEXT: blr
; CHECK-LE-LABEL: vextsb2dBE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vsldoi 2, 2, 2, 9
; CHECK-LE-NEXT: vextsb2d 2, 2
; CHECK-LE-NEXT: blr
@@ -167,11 +167,11 @@ entry:
define <4 x i32> @vextsh2wBE(<8 x i16> %a) {
; CHECK-BE-LABEL: vextsh2wBE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextsh2w 2, 2
; CHECK-BE-NEXT: blr
; CHECK-LE-LABEL: vextsh2wBE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vsldoi 2, 2, 2, 14
; CHECK-LE-NEXT: vextsh2w 2, 2
; CHECK-LE-NEXT: blr
@@ -193,11 +193,11 @@ entry:
define <2 x i64> @vextsh2dBE(<8 x i16> %a) {
; CHECK-BE-LABEL: vextsh2dBE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextsh2d 2, 2
; CHECK-BE-NEXT: blr
; CHECK-LE-LABEL: vextsh2dBE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vsldoi 2, 2, 2, 10
; CHECK-LE-NEXT: vextsh2d 2, 2
; CHECK-LE-NEXT: blr
@@ -213,11 +213,11 @@ entry:
define <2 x i64> @vextsw2dBE(<4 x i32> %a) {
; CHECK-BE-LABEL: vextsw2dBE:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NEXT: vextsw2d 2, 2
; CHECK-BE-NEXT: blr
; CHECK-LE-LABEL: vextsw2dBE:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: vsldoi 2, 2, 2, 12
; CHECK-LE-NEXT: vextsw2d 2, 2
; CHECK-LE-NEXT: blr
@@ -233,11 +233,11 @@ entry:
define <2 x i64> @vextDiffVectors(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LE-LABEL: vextDiffVectors:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NOT: vextsw2d
; CHECK-BE-LABEL: vextDiffVectors:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NOT: vextsw2d
entry:
%vecext = extractelement <4 x i32> %a, i32 0
@@ -252,11 +252,11 @@ entry:
define <8 x i16> @testInvalidExtend(<16 x i8> %a) {
entry:
; CHECK-LE-LABEL: testInvalidExtend:
-; CHECK-LE: # BB#0: # %entry
+; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NOT: vexts
; CHECK-BE-LABEL: testInvalidExtend:
-; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: # %bb.0: # %entry
; CHECK-BE-NOT: vexts
%vecext = extractelement <16 x i8> %a, i32 0
diff --git a/test/CodeGen/PowerPC/vec_revb.ll b/test/CodeGen/PowerPC/vec_revb.ll
index c09164bae13..00c08a1204f 100644
--- a/test/CodeGen/PowerPC/vec_revb.ll
+++ b/test/CodeGen/PowerPC/vec_revb.ll
@@ -3,7 +3,7 @@
define <8 x i16> @testXXBRH(<8 x i16> %a) {
; CHECK-LABEL: testXXBRH:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxbrh 34, 34
; CHECK-NEXT: blr
@@ -16,7 +16,7 @@ entry:
define <4 x i32> @testXXBRW(<4 x i32> %a) {
; CHECK-LABEL: testXXBRW:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxbrw 34, 34
; CHECK-NEXT: blr
@@ -29,7 +29,7 @@ entry:
define <2 x double> @testXXBRD(<2 x double> %a) {
; CHECK-LABEL: testXXBRD:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxbrd 34, 34
; CHECK-NEXT: blr
@@ -42,7 +42,7 @@ entry:
define <1 x i128> @testXXBRQ(<1 x i128> %a) {
; CHECK-LABEL: testXXBRQ:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxbrq 34, 34
; CHECK-NEXT: blr
diff --git a/test/CodeGen/PowerPC/vselect-constants.ll b/test/CodeGen/PowerPC/vselect-constants.ll
index 077eb2defc0..5f23c3e40de 100644
--- a/test/CodeGen/PowerPC/vselect-constants.ll
+++ b/test/CodeGen/PowerPC/vselect-constants.ll
@@ -9,7 +9,7 @@
define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_C1_or_C2_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, -16
; CHECK-NEXT: vspltisw 4, 15
; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha
@@ -29,7 +29,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_C1_or_C2_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: addis 3, 2, .LCPI1_0@toc@ha
; CHECK-NEXT: addis 4, 2, .LCPI1_1@toc@ha
@@ -46,7 +46,7 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_Cplus1_or_C_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, 1
; CHECK-NEXT: addis 3, 2, .LCPI2_0@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI2_0@toc@l
@@ -60,7 +60,7 @@ define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_Cplus1_or_C_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: addis 3, 2, .LCPI3_0@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI3_0@toc@l
@@ -74,7 +74,7 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_Cminus1_or_C_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, -16
; CHECK-NEXT: vspltisw 4, 15
; CHECK-NEXT: addis 3, 2, .LCPI4_0@toc@ha
@@ -91,7 +91,7 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_Cminus1_or_C_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: addis 3, 2, .LCPI5_0@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI5_0@toc@l
@@ -105,7 +105,7 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_minus1_or_0_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, -16
; CHECK-NEXT: vspltisw 4, 15
; CHECK-NEXT: vsubuwm 3, 4, 3
@@ -118,7 +118,7 @@ define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_minus1_or_0_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
@@ -128,7 +128,7 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_0_or_minus1_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, 1
; CHECK-NEXT: vspltisb 4, -1
; CHECK-NEXT: xxland 34, 34, 35
@@ -140,7 +140,7 @@ define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_0_or_minus1_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: xxlnor 34, 34, 34
; CHECK-NEXT: blr
@@ -151,7 +151,7 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_1_or_0_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, 1
; CHECK-NEXT: xxland 34, 34, 35
; CHECK-NEXT: blr
@@ -161,7 +161,7 @@ define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_1_or_0_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: vspltisw 19, 1
; CHECK-NEXT: xxland 34, 34, 51
@@ -173,7 +173,7 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_0_or_1_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, 1
; CHECK-NEXT: xxlandc 34, 35, 34
; CHECK-NEXT: blr
@@ -183,7 +183,7 @@ define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_0_or_1_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: vspltisw 19, 1
; CHECK-NEXT: xxlnor 0, 34, 34
diff --git a/test/CodeGen/RISCV/addc-adde-sube-subc.ll b/test/CodeGen/RISCV/addc-adde-sube-subc.ll
index 50de47d7c1f..54f5482b9e7 100644
--- a/test/CodeGen/RISCV/addc-adde-sube-subc.ll
+++ b/test/CodeGen/RISCV/addc-adde-sube-subc.ll
@@ -6,7 +6,7 @@
define i64 @addc_adde(i64 %a, i64 %b) {
; RV32I-LABEL: addc_adde:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: add a1, a1, a3
; RV32I-NEXT: add a2, a0, a2
; RV32I-NEXT: sltu a0, a2, a0
@@ -19,7 +19,7 @@ define i64 @addc_adde(i64 %a, i64 %b) {
define i64 @subc_sube(i64 %a, i64 %b) {
; RV32I-LABEL: subc_sube:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sltu a3, a0, a2
; RV32I-NEXT: sub a1, a1, a3
diff --git a/test/CodeGen/RISCV/alu32.ll b/test/CodeGen/RISCV/alu32.ll
index 9aa6058c2a0..e7c82181027 100644
--- a/test/CodeGen/RISCV/alu32.ll
+++ b/test/CodeGen/RISCV/alu32.ll
@@ -10,7 +10,7 @@
define i32 @addi(i32 %a) nounwind {
; RV32I-LABEL: addi:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
%1 = add i32 %a, 1
@@ -19,7 +19,7 @@ define i32 @addi(i32 %a) nounwind {
define i32 @slti(i32 %a) nounwind {
; RV32I-LABEL: slti:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slti a0, a0, 2
; RV32I-NEXT: jalr zero, ra, 0
%1 = icmp slt i32 %a, 2
@@ -29,7 +29,7 @@ define i32 @slti(i32 %a) nounwind {
define i32 @sltiu(i32 %a) nounwind {
; RV32I-LABEL: sltiu:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sltiu a0, a0, 3
; RV32I-NEXT: jalr zero, ra, 0
%1 = icmp ult i32 %a, 3
@@ -39,7 +39,7 @@ define i32 @sltiu(i32 %a) nounwind {
define i32 @xori(i32 %a) nounwind {
; RV32I-LABEL: xori:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: xori a0, a0, 4
; RV32I-NEXT: jalr zero, ra, 0
%1 = xor i32 %a, 4
@@ -48,7 +48,7 @@ define i32 @xori(i32 %a) nounwind {
define i32 @ori(i32 %a) nounwind {
; RV32I-LABEL: ori:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: ori a0, a0, 5
; RV32I-NEXT: jalr zero, ra, 0
%1 = or i32 %a, 5
@@ -57,7 +57,7 @@ define i32 @ori(i32 %a) nounwind {
define i32 @andi(i32 %a) nounwind {
; RV32I-LABEL: andi:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 6
; RV32I-NEXT: jalr zero, ra, 0
%1 = and i32 %a, 6
@@ -66,7 +66,7 @@ define i32 @andi(i32 %a) nounwind {
define i32 @slli(i32 %a) nounwind {
; RV32I-LABEL: slli:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 7
; RV32I-NEXT: jalr zero, ra, 0
%1 = shl i32 %a, 7
@@ -75,7 +75,7 @@ define i32 @slli(i32 %a) nounwind {
define i32 @srli(i32 %a) nounwind {
; RV32I-LABEL: srli:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: srli a0, a0, 8
; RV32I-NEXT: jalr zero, ra, 0
%1 = lshr i32 %a, 8
@@ -84,7 +84,7 @@ define i32 @srli(i32 %a) nounwind {
define i32 @srai(i32 %a) nounwind {
; RV32I-LABEL: srai:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: srai a0, a0, 9
; RV32I-NEXT: jalr zero, ra, 0
%1 = ashr i32 %a, 9
@@ -95,7 +95,7 @@ define i32 @srai(i32 %a) nounwind {
define i32 @add(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: add:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = add i32 %a, %b
@@ -104,7 +104,7 @@ define i32 @add(i32 %a, i32 %b) nounwind {
define i32 @sub(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sub:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = sub i32 %a, %b
@@ -113,7 +113,7 @@ define i32 @sub(i32 %a, i32 %b) nounwind {
define i32 @sll(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sll:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sll a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = shl i32 %a, %b
@@ -122,7 +122,7 @@ define i32 @sll(i32 %a, i32 %b) nounwind {
define i32 @slt(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: slt:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slt a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = icmp slt i32 %a, %b
@@ -132,7 +132,7 @@ define i32 @slt(i32 %a, i32 %b) nounwind {
define i32 @sltu(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sltu:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sltu a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = icmp ult i32 %a, %b
@@ -142,7 +142,7 @@ define i32 @sltu(i32 %a, i32 %b) nounwind {
define i32 @xor(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: xor:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = xor i32 %a, %b
@@ -151,7 +151,7 @@ define i32 @xor(i32 %a, i32 %b) nounwind {
define i32 @srl(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: srl:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: srl a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = lshr i32 %a, %b
@@ -160,7 +160,7 @@ define i32 @srl(i32 %a, i32 %b) nounwind {
define i32 @sra(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sra:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sra a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = ashr i32 %a, %b
@@ -169,7 +169,7 @@ define i32 @sra(i32 %a, i32 %b) nounwind {
define i32 @or(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: or:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = or i32 %a, %b
@@ -178,7 +178,7 @@ define i32 @or(i32 %a, i32 %b) nounwind {
define i32 @and(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: and:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = and i32 %a, %b
diff --git a/test/CodeGen/RISCV/bare-select.ll b/test/CodeGen/RISCV/bare-select.ll
index ec98b6d18b2..a46afe27143 100644
--- a/test/CodeGen/RISCV/bare-select.ll
+++ b/test/CodeGen/RISCV/bare-select.ll
@@ -4,10 +4,10 @@
define i32 @bare_select(i1 %a, i32 %b, i32 %c) {
; RV32I-LABEL: bare_select:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: bne a0, zero, .LBB0_2
-; RV32I-NEXT: # BB#1:
+; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: addi a1, a2, 0
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: addi a0, a1, 0
diff --git a/test/CodeGen/RISCV/blockaddress.ll b/test/CodeGen/RISCV/blockaddress.ll
index f51598ff5a7..9eb4e3d404d 100644
--- a/test/CodeGen/RISCV/blockaddress.ll
+++ b/test/CodeGen/RISCV/blockaddress.ll
@@ -6,7 +6,7 @@
define void @test_blockaddress() nounwind {
; RV32I-LABEL: test_blockaddress:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 0(s0)
; RV32I-NEXT: lui a0, %hi(addr)
; RV32I-NEXT: addi a0, a0, %lo(addr)
diff --git a/test/CodeGen/RISCV/branch.ll b/test/CodeGen/RISCV/branch.ll
index 194083b07c7..e2593d3309b 100644
--- a/test/CodeGen/RISCV/branch.ll
+++ b/test/CodeGen/RISCV/branch.ll
@@ -4,7 +4,7 @@
define void @foo(i32 %a, i32 *%b, i1 %c) {
; RV32I-LABEL: foo:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: beq a3, a0, .LBB0_12
; RV32I-NEXT: jal zero, .LBB0_1
diff --git a/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
index 6521f66cf6a..150dfed3573 100644
--- a/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
+++ b/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -14,7 +14,7 @@ declare i32 @llvm.ctpop.i32(i32)
define i16 @test_bswap_i16(i16 %a) nounwind {
; RV32I-LABEL: test_bswap_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 4080
; RV32I-NEXT: addi a1, a1, 0
; RV32I-NEXT: slli a2, a0, 8
@@ -29,7 +29,7 @@ define i16 @test_bswap_i16(i16 %a) nounwind {
define i32 @test_bswap_i32(i32 %a) nounwind {
; RV32I-LABEL: test_bswap_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -256
; RV32I-NEXT: srli a2, a0, 8
@@ -50,7 +50,7 @@ define i32 @test_bswap_i32(i32 %a) nounwind {
define i64 @test_bswap_i64(i64 %a) nounwind {
; RV32I-LABEL: test_bswap_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, 16
; RV32I-NEXT: addi a3, a2, -256
; RV32I-NEXT: srli a2, a1, 8
@@ -81,7 +81,7 @@ define i64 @test_bswap_i64(i64 %a) nounwind {
define i8 @test_cttz_i8(i8 %a) nounwind {
; RV32I-LABEL: test_cttz_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: addi a1, a0, 0
; RV32I-NEXT: addi a0, zero, 8
@@ -123,7 +123,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
define i16 @test_cttz_i16(i16 %a) nounwind {
; RV32I-LABEL: test_cttz_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: addi a1, a0, 0
; RV32I-NEXT: addi a0, zero, 16
@@ -167,7 +167,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
define i32 @test_cttz_i32(i32 %a) nounwind {
; RV32I-LABEL: test_cttz_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: addi a1, a0, 0
; RV32I-NEXT: addi a0, zero, 32
@@ -208,7 +208,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I-LABEL: test_ctlz_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: addi a1, a0, 0
; RV32I-NEXT: addi a0, zero, 32
@@ -257,7 +257,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
define i64 @test_cttz_i64(i64 %a) nounwind {
; RV32I-LABEL: test_cttz_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 28(s0)
; RV32I-NEXT: sw s1, 24(s0)
; RV32I-NEXT: sw s2, 20(s0)
@@ -311,7 +311,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
; RV32I-NEXT: addi a1, s3, 0
; RV32I-NEXT: jalr ra, s6, 0
; RV32I-NEXT: bne s2, zero, .LBB7_2
-; RV32I-NEXT: # BB#1:
+; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: addi s1, a0, 32
; RV32I-NEXT: .LBB7_2:
@@ -332,7 +332,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
; RV32I-LABEL: test_cttz_i8_zero_undef:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: addi a1, a0, -1
; RV32I-NEXT: xori a0, a0, -1
@@ -367,7 +367,7 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
; RV32I-LABEL: test_cttz_i16_zero_undef:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: addi a1, a0, -1
; RV32I-NEXT: xori a0, a0, -1
@@ -402,7 +402,7 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
; RV32I-LABEL: test_cttz_i32_zero_undef:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: addi a1, a0, -1
; RV32I-NEXT: xori a0, a0, -1
@@ -437,7 +437,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
; RV32I-LABEL: test_cttz_i64_zero_undef:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 28(s0)
; RV32I-NEXT: sw s1, 24(s0)
; RV32I-NEXT: sw s2, 20(s0)
@@ -491,7 +491,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: addi a1, s3, 0
; RV32I-NEXT: jalr ra, s6, 0
; RV32I-NEXT: bne s2, zero, .LBB11_2
-; RV32I-NEXT: # BB#1:
+; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: addi s1, a0, 32
; RV32I-NEXT: .LBB11_2:
@@ -512,7 +512,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32I-LABEL: test_ctpop_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a1, 349525
; RV32I-NEXT: addi a1, a1, 1365
diff --git a/test/CodeGen/RISCV/calls.ll b/test/CodeGen/RISCV/calls.ll
index 8abe5e92a8e..77f61290705 100644
--- a/test/CodeGen/RISCV/calls.ll
+++ b/test/CodeGen/RISCV/calls.ll
@@ -6,7 +6,7 @@ declare i32 @external_function(i32)
define i32 @test_call_external(i32 %a) nounwind {
; RV32I-LABEL: test_call_external:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a1, %hi(external_function)
; RV32I-NEXT: addi a1, a1, %lo(external_function)
@@ -19,7 +19,7 @@ define i32 @test_call_external(i32 %a) nounwind {
define i32 @defined_function(i32 %a) nounwind {
; RV32I-LABEL: defined_function:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
%1 = add i32 %a, 1
@@ -28,7 +28,7 @@ define i32 @defined_function(i32 %a) nounwind {
define i32 @test_call_defined(i32 %a) nounwind {
; RV32I-LABEL: test_call_defined:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a1, %hi(defined_function)
; RV32I-NEXT: addi a1, a1, %lo(defined_function)
@@ -41,7 +41,7 @@ define i32 @test_call_defined(i32 %a) nounwind {
define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind {
; RV32I-LABEL: test_call_indirect:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: addi a2, a0, 0
; RV32I-NEXT: addi a0, a1, 0
@@ -57,7 +57,7 @@ define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind {
define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: fastcc_function:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = add i32 %a, %b
@@ -66,7 +66,7 @@ define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind {
define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: test_call_fastcc:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: sw s1, 8(s0)
; RV32I-NEXT: addi s1, a0, 0
diff --git a/test/CodeGen/RISCV/div.ll b/test/CodeGen/RISCV/div.ll
index 4c0f5de0358..a53c51c94d8 100644
--- a/test/CodeGen/RISCV/div.ll
+++ b/test/CodeGen/RISCV/div.ll
@@ -4,7 +4,7 @@
define i32 @udiv(i32 %a, i32 %b) {
; RV32I-LABEL: udiv:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a2, %hi(__udivsi3)
; RV32I-NEXT: addi a2, a2, %lo(__udivsi3)
@@ -17,7 +17,7 @@ define i32 @udiv(i32 %a, i32 %b) {
define i32 @udiv_constant(i32 %a) {
; RV32I-LABEL: udiv_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a1, %hi(__udivsi3)
; RV32I-NEXT: addi a2, a1, %lo(__udivsi3)
@@ -31,7 +31,7 @@ define i32 @udiv_constant(i32 %a) {
define i32 @udiv_pow2(i32 %a) {
; RV32I-LABEL: udiv_pow2:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: srli a0, a0, 3
; RV32I-NEXT: jalr zero, ra, 0
%1 = udiv i32 %a, 8
@@ -40,7 +40,7 @@ define i32 @udiv_pow2(i32 %a) {
define i64 @udiv64(i64 %a, i64 %b) {
; RV32I-LABEL: udiv64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a4, %hi(__udivdi3)
; RV32I-NEXT: addi a4, a4, %lo(__udivdi3)
@@ -53,7 +53,7 @@ define i64 @udiv64(i64 %a, i64 %b) {
define i64 @udiv64_constant(i64 %a) {
; RV32I-LABEL: udiv64_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a2, %hi(__udivdi3)
; RV32I-NEXT: addi a4, a2, %lo(__udivdi3)
@@ -68,7 +68,7 @@ define i64 @udiv64_constant(i64 %a) {
define i32 @sdiv(i32 %a, i32 %b) {
; RV32I-LABEL: sdiv:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a2, %hi(__divsi3)
; RV32I-NEXT: addi a2, a2, %lo(__divsi3)
@@ -81,7 +81,7 @@ define i32 @sdiv(i32 %a, i32 %b) {
define i32 @sdiv_constant(i32 %a) {
; RV32I-LABEL: sdiv_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a1, %hi(__divsi3)
; RV32I-NEXT: addi a2, a1, %lo(__divsi3)
@@ -95,7 +95,7 @@ define i32 @sdiv_constant(i32 %a) {
define i32 @sdiv_pow2(i32 %a) {
; RV32I-LABEL: sdiv_pow2:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srli a1, a1, 29
; RV32I-NEXT: add a0, a0, a1
@@ -107,7 +107,7 @@ define i32 @sdiv_pow2(i32 %a) {
define i64 @sdiv64(i64 %a, i64 %b) {
; RV32I-LABEL: sdiv64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a4, %hi(__divdi3)
; RV32I-NEXT: addi a4, a4, %lo(__divdi3)
@@ -120,7 +120,7 @@ define i64 @sdiv64(i64 %a, i64 %b) {
define i64 @sdiv64_constant(i64 %a) {
; RV32I-LABEL: sdiv64_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a2, %hi(__divdi3)
; RV32I-NEXT: addi a4, a2, %lo(__divdi3)
diff --git a/test/CodeGen/RISCV/i32-icmp.ll b/test/CodeGen/RISCV/i32-icmp.ll
index 4d86ced2584..bc06ec805e9 100644
--- a/test/CodeGen/RISCV/i32-icmp.ll
+++ b/test/CodeGen/RISCV/i32-icmp.ll
@@ -7,7 +7,7 @@
define i32 @icmp_eq(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_eq:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sltiu a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
@@ -18,7 +18,7 @@ define i32 @icmp_eq(i32 %a, i32 %b) nounwind {
define i32 @icmp_ne(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_ne:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sltu a0, zero, a0
; RV32I-NEXT: jalr zero, ra, 0
@@ -29,7 +29,7 @@ define i32 @icmp_ne(i32 %a, i32 %b) nounwind {
define i32 @icmp_ugt(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_ugt:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sltu a0, a1, a0
; RV32I-NEXT: jalr zero, ra, 0
%1 = icmp ugt i32 %a, %b
@@ -39,7 +39,7 @@ define i32 @icmp_ugt(i32 %a, i32 %b) nounwind {
define i32 @icmp_uge(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_uge:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sltu a0, a0, a1
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
@@ -50,7 +50,7 @@ define i32 @icmp_uge(i32 %a, i32 %b) nounwind {
define i32 @icmp_ult(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_ult:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sltu a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = icmp ult i32 %a, %b
@@ -60,7 +60,7 @@ define i32 @icmp_ult(i32 %a, i32 %b) nounwind {
define i32 @icmp_ule(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_ule:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sltu a0, a1, a0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
@@ -71,7 +71,7 @@ define i32 @icmp_ule(i32 %a, i32 %b) nounwind {
define i32 @icmp_sgt(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_sgt:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slt a0, a1, a0
; RV32I-NEXT: jalr zero, ra, 0
%1 = icmp sgt i32 %a, %b
@@ -81,7 +81,7 @@ define i32 @icmp_sgt(i32 %a, i32 %b) nounwind {
define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_sge:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slt a0, a0, a1
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
@@ -92,7 +92,7 @@ define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
define i32 @icmp_slt(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_slt:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slt a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
%1 = icmp slt i32 %a, %b
@@ -102,7 +102,7 @@ define i32 @icmp_slt(i32 %a, i32 %b) nounwind {
define i32 @icmp_sle(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: icmp_sle:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slt a0, a1, a0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
diff --git a/test/CodeGen/RISCV/imm.ll b/test/CodeGen/RISCV/imm.ll
index c52638da02e..ddefa22835a 100644
--- a/test/CodeGen/RISCV/imm.ll
+++ b/test/CodeGen/RISCV/imm.ll
@@ -6,7 +6,7 @@
define i32 @zero() nounwind {
; RV32I-LABEL: zero:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, zero, 0
; RV32I-NEXT: jalr zero, ra, 0
ret i32 0
@@ -14,7 +14,7 @@ define i32 @zero() nounwind {
define i32 @pos_small() nounwind {
; RV32I-LABEL: pos_small:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, zero, 2047
; RV32I-NEXT: jalr zero, ra, 0
ret i32 2047
@@ -22,7 +22,7 @@ define i32 @pos_small() nounwind {
define i32 @neg_small() nounwind {
; RV32I-LABEL: neg_small:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, zero, -2048
; RV32I-NEXT: jalr zero, ra, 0
ret i32 -2048
@@ -30,7 +30,7 @@ define i32 @neg_small() nounwind {
define i32 @pos_i32() nounwind {
; RV32I-LABEL: pos_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a0, 423811
; RV32I-NEXT: addi a0, a0, -1297
; RV32I-NEXT: jalr zero, ra, 0
@@ -39,7 +39,7 @@ define i32 @pos_i32() nounwind {
define i32 @neg_i32() nounwind {
; RV32I-LABEL: neg_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a0, 912092
; RV32I-NEXT: addi a0, a0, -273
; RV32I-NEXT: jalr zero, ra, 0
diff --git a/test/CodeGen/RISCV/indirectbr.ll b/test/CodeGen/RISCV/indirectbr.ll
index 0a51e3d0b2e..40641da6d6f 100644
--- a/test/CodeGen/RISCV/indirectbr.ll
+++ b/test/CodeGen/RISCV/indirectbr.ll
@@ -4,7 +4,7 @@
define i32 @indirectbr(i8* %target) nounwind {
; RV32I-LABEL: indirectbr:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 0(s0)
; RV32I-NEXT: jalr zero, a0, 0
; RV32I-NEXT: .LBB0_1: # %ret
@@ -20,7 +20,7 @@ ret:
define i32 @indirectbr_with_offset(i8* %a) nounwind {
; RV32I-LABEL: indirectbr_with_offset:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 0(s0)
; RV32I-NEXT: jalr zero, a0, 1380
; RV32I-NEXT: .LBB1_1: # %ret
diff --git a/test/CodeGen/RISCV/jumptable.ll b/test/CodeGen/RISCV/jumptable.ll
index 98144c7c1e6..68f4f1cb721 100644
--- a/test/CodeGen/RISCV/jumptable.ll
+++ b/test/CodeGen/RISCV/jumptable.ll
@@ -4,7 +4,7 @@
define void @jt(i32 %in, i32* %out) {
; RV32I-LABEL: jt:
-; RV32I: # BB#0: # %entry
+; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi a2, zero, 2
; RV32I-NEXT: blt a2, a0, .LBB0_3
; RV32I-NEXT: jal zero, .LBB0_1
diff --git a/test/CodeGen/RISCV/mem.ll b/test/CodeGen/RISCV/mem.ll
index b06382f8742..6446034e542 100644
--- a/test/CodeGen/RISCV/mem.ll
+++ b/test/CodeGen/RISCV/mem.ll
@@ -6,7 +6,7 @@
define i32 @lb(i8 *%a) nounwind {
; RV32I-LABEL: lb:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lb a1, 0(a0)
; RV32I-NEXT: lb a0, 1(a0)
; RV32I-NEXT: jalr zero, ra, 0
@@ -20,7 +20,7 @@ define i32 @lb(i8 *%a) nounwind {
define i32 @lh(i16 *%a) nounwind {
; RV32I-LABEL: lh:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lh a1, 0(a0)
; RV32I-NEXT: lh a0, 4(a0)
; RV32I-NEXT: jalr zero, ra, 0
@@ -34,7 +34,7 @@ define i32 @lh(i16 *%a) nounwind {
define i32 @lw(i32 *%a) nounwind {
; RV32I-LABEL: lw:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lw a1, 0(a0)
; RV32I-NEXT: lw a0, 12(a0)
; RV32I-NEXT: jalr zero, ra, 0
@@ -46,7 +46,7 @@ define i32 @lw(i32 *%a) nounwind {
define i32 @lbu(i8 *%a) nounwind {
; RV32I-LABEL: lbu:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lbu a1, 0(a0)
; RV32I-NEXT: lbu a0, 4(a0)
; RV32I-NEXT: add a0, a0, a1
@@ -62,7 +62,7 @@ define i32 @lbu(i8 *%a) nounwind {
define i32 @lhu(i16 *%a) nounwind {
; RV32I-LABEL: lhu:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lhu a1, 0(a0)
; RV32I-NEXT: lhu a0, 10(a0)
; RV32I-NEXT: add a0, a0, a1
@@ -80,7 +80,7 @@ define i32 @lhu(i16 *%a) nounwind {
define void @sb(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: sb:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sb a1, 6(a0)
; RV32I-NEXT: sb a1, 0(a0)
; RV32I-NEXT: jalr zero, ra, 0
@@ -92,7 +92,7 @@ define void @sb(i8 *%a, i8 %b) nounwind {
define void @sh(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: sh:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sh a1, 14(a0)
; RV32I-NEXT: sh a1, 0(a0)
; RV32I-NEXT: jalr zero, ra, 0
@@ -104,7 +104,7 @@ define void @sh(i16 *%a, i16 %b) nounwind {
define void @sw(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: sw:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw a1, 32(a0)
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: jalr zero, ra, 0
@@ -117,7 +117,7 @@ define void @sw(i32 *%a, i32 %b) nounwind {
; Check load and store to an i1 location
define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
; RV32I-LABEL: load_sext_zext_anyext_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lb a1, 0(a0)
; RV32I-NEXT: lbu a1, 1(a0)
; RV32I-NEXT: lbu a0, 2(a0)
@@ -139,7 +139,7 @@ define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
; RV32I-LABEL: load_sext_zext_anyext_i1_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lb a1, 0(a0)
; RV32I-NEXT: lbu a1, 1(a0)
; RV32I-NEXT: lbu a0, 2(a0)
@@ -165,7 +165,7 @@ define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
define i32 @lw_sw_global(i32 %a) nounwind {
; TODO: the addi should be folded into the lw/sw operations
; RV32I-LABEL: lw_sw_global:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, %hi(G)
; RV32I-NEXT: addi a2, a1, %lo(G)
; RV32I-NEXT: lw a1, 0(a2)
@@ -188,7 +188,7 @@ define i32 @lw_sw_global(i32 %a) nounwind {
define i32 @lw_sw_constant(i32 %a) nounwind {
; TODO: the addi should be folded into the lw/sw
; RV32I-LABEL: lw_sw_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 912092
; RV32I-NEXT: addi a2, a1, -273
; RV32I-NEXT: lw a1, 0(a2)
diff --git a/test/CodeGen/RISCV/mul.ll b/test/CodeGen/RISCV/mul.ll
index 41653256deb..2eb5db79d1b 100644
--- a/test/CodeGen/RISCV/mul.ll
+++ b/test/CodeGen/RISCV/mul.ll
@@ -4,7 +4,7 @@
define i32 @square(i32 %a) {
; RV32I-LABEL: square:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a1, %hi(__mulsi3)
; RV32I-NEXT: addi a2, a1, %lo(__mulsi3)
@@ -18,7 +18,7 @@ define i32 @square(i32 %a) {
define i32 @mul(i32 %a, i32 %b) {
; RV32I-LABEL: mul:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a2, %hi(__mulsi3)
; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
@@ -31,7 +31,7 @@ define i32 @mul(i32 %a, i32 %b) {
define i32 @mul_constant(i32 %a) {
; RV32I-LABEL: mul_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a1, %hi(__mulsi3)
; RV32I-NEXT: addi a2, a1, %lo(__mulsi3)
@@ -45,7 +45,7 @@ define i32 @mul_constant(i32 %a) {
define i32 @mul_pow2(i32 %a) {
; RV32I-LABEL: mul_pow2:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 3
; RV32I-NEXT: jalr zero, ra, 0
%1 = mul i32 %a, 8
@@ -54,7 +54,7 @@ define i32 @mul_pow2(i32 %a) {
define i64 @mul64(i64 %a, i64 %b) {
; RV32I-LABEL: mul64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a4, %hi(__muldi3)
; RV32I-NEXT: addi a4, a4, %lo(__muldi3)
@@ -67,7 +67,7 @@ define i64 @mul64(i64 %a, i64 %b) {
define i64 @mul64_constant(i64 %a) {
; RV32I-LABEL: mul64_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a2, %hi(__muldi3)
; RV32I-NEXT: addi a4, a2, %lo(__muldi3)
diff --git a/test/CodeGen/RISCV/rem.ll b/test/CodeGen/RISCV/rem.ll
index 80f79817b74..c9e2a90521d 100644
--- a/test/CodeGen/RISCV/rem.ll
+++ b/test/CodeGen/RISCV/rem.ll
@@ -4,7 +4,7 @@
define i32 @urem(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: urem:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a2, %hi(__umodsi3)
; RV32I-NEXT: addi a2, a2, %lo(__umodsi3)
@@ -17,7 +17,7 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
define i32 @srem(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: srem:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a2, %hi(__modsi3)
; RV32I-NEXT: addi a2, a2, %lo(__modsi3)
diff --git a/test/CodeGen/RISCV/rotl-rotr.ll b/test/CodeGen/RISCV/rotl-rotr.ll
index bf0689feafa..b2331051fcd 100644
--- a/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/test/CodeGen/RISCV/rotl-rotr.ll
@@ -7,7 +7,7 @@
define i32 @rotl(i32 %x, i32 %y) {
; RV32I-LABEL: rotl:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: addi a2, zero, 32
; RV32I-NEXT: sub a2, a2, a1
; RV32I-NEXT: sll a1, a0, a1
@@ -23,7 +23,7 @@ define i32 @rotl(i32 %x, i32 %y) {
define i32 @rotr(i32 %x, i32 %y) {
; RV32I-LABEL: rotr:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: addi a2, zero, 32
; RV32I-NEXT: sub a2, a2, a1
; RV32I-NEXT: srl a1, a0, a1
diff --git a/test/CodeGen/RISCV/select-cc.ll b/test/CodeGen/RISCV/select-cc.ll
index c1a570c5c98..ddc5983525e 100644
--- a/test/CodeGen/RISCV/select-cc.ll
+++ b/test/CodeGen/RISCV/select-cc.ll
@@ -4,55 +4,55 @@
define i32 @foo(i32 %a, i32 *%b) {
; RV32I-LABEL: foo:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: beq a0, a2, .LBB0_2
-; RV32I-NEXT: # BB#1:
+; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bne a0, a2, .LBB0_4
-; RV32I-NEXT: # BB#3:
+; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_4:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bltu a2, a0, .LBB0_6
-; RV32I-NEXT: # BB#5:
+; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_6:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bgeu a0, a2, .LBB0_8
-; RV32I-NEXT: # BB#7:
+; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_8:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bltu a0, a2, .LBB0_10
-; RV32I-NEXT: # BB#9:
+; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_10:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bgeu a2, a0, .LBB0_12
-; RV32I-NEXT: # BB#11:
+; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_12:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: blt a2, a0, .LBB0_14
-; RV32I-NEXT: # BB#13:
+; RV32I-NEXT: # %bb.13:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_14:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bge a0, a2, .LBB0_16
-; RV32I-NEXT: # BB#15:
+; RV32I-NEXT: # %bb.15:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_16:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: blt a0, a2, .LBB0_18
-; RV32I-NEXT: # BB#17:
+; RV32I-NEXT: # %bb.17:
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: .LBB0_18:
; RV32I-NEXT: lw a1, 0(a1)
; RV32I-NEXT: bge a1, a0, .LBB0_20
-; RV32I-NEXT: # BB#19:
+; RV32I-NEXT: # %bb.19:
; RV32I-NEXT: addi a0, a1, 0
; RV32I-NEXT: .LBB0_20:
; RV32I-NEXT: jalr zero, ra, 0
diff --git a/test/CodeGen/RISCV/sext-zext-trunc.ll b/test/CodeGen/RISCV/sext-zext-trunc.ll
index 7c5f1205b76..80bd2d2b204 100644
--- a/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -4,7 +4,7 @@
define i8 @sext_i1_to_i8(i1 %a) {
; RV32I-LABEL: sext_i1_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: sub a0, zero, a0
; RV32I-NEXT: jalr zero, ra, 0
@@ -14,7 +14,7 @@ define i8 @sext_i1_to_i8(i1 %a) {
define i16 @sext_i1_to_i16(i1 %a) {
; RV32I-LABEL: sext_i1_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: sub a0, zero, a0
; RV32I-NEXT: jalr zero, ra, 0
@@ -24,7 +24,7 @@ define i16 @sext_i1_to_i16(i1 %a) {
define i32 @sext_i1_to_i32(i1 %a) {
; RV32I-LABEL: sext_i1_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: sub a0, zero, a0
; RV32I-NEXT: jalr zero, ra, 0
@@ -34,7 +34,7 @@ define i32 @sext_i1_to_i32(i1 %a) {
define i64 @sext_i1_to_i64(i1 %a) {
; RV32I-LABEL: sext_i1_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: sub a0, zero, a0
; RV32I-NEXT: addi a1, a0, 0
@@ -45,7 +45,7 @@ define i64 @sext_i1_to_i64(i1 %a) {
define i16 @sext_i8_to_i16(i8 %a) {
; RV32I-LABEL: sext_i8_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: jalr zero, ra, 0
@@ -55,7 +55,7 @@ define i16 @sext_i8_to_i16(i8 %a) {
define i32 @sext_i8_to_i32(i8 %a) {
; RV32I-LABEL: sext_i8_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: jalr zero, ra, 0
@@ -65,7 +65,7 @@ define i32 @sext_i8_to_i32(i8 %a) {
define i64 @sext_i8_to_i64(i8 %a) {
; RV32I-LABEL: sext_i8_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 24
; RV32I-NEXT: srai a0, a1, 24
; RV32I-NEXT: srai a1, a1, 31
@@ -76,7 +76,7 @@ define i64 @sext_i8_to_i64(i8 %a) {
define i32 @sext_i16_to_i32(i16 %a) {
; RV32I-LABEL: sext_i16_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: jalr zero, ra, 0
@@ -86,7 +86,7 @@ define i32 @sext_i16_to_i32(i16 %a) {
define i64 @sext_i16_to_i64(i16 %a) {
; RV32I-LABEL: sext_i16_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 16
; RV32I-NEXT: srai a0, a1, 16
; RV32I-NEXT: srai a1, a1, 31
@@ -97,7 +97,7 @@ define i64 @sext_i16_to_i64(i16 %a) {
define i64 @sext_i32_to_i64(i32 %a) {
; RV32I-LABEL: sext_i32_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: jalr zero, ra, 0
%1 = sext i32 %a to i64
@@ -106,7 +106,7 @@ define i64 @sext_i32_to_i64(i32 %a) {
define i8 @zext_i1_to_i8(i1 %a) {
; RV32I-LABEL: zext_i1_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
%1 = zext i1 %a to i8
@@ -115,7 +115,7 @@ define i8 @zext_i1_to_i8(i1 %a) {
define i16 @zext_i1_to_i16(i1 %a) {
; RV32I-LABEL: zext_i1_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
%1 = zext i1 %a to i16
@@ -124,7 +124,7 @@ define i16 @zext_i1_to_i16(i1 %a) {
define i32 @zext_i1_to_i32(i1 %a) {
; RV32I-LABEL: zext_i1_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: jalr zero, ra, 0
%1 = zext i1 %a to i32
@@ -133,7 +133,7 @@ define i32 @zext_i1_to_i32(i1 %a) {
define i64 @zext_i1_to_i64(i1 %a) {
; RV32I-LABEL: zext_i1_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: addi a1, zero, 0
; RV32I-NEXT: jalr zero, ra, 0
@@ -143,7 +143,7 @@ define i64 @zext_i1_to_i64(i1 %a) {
define i16 @zext_i8_to_i16(i8 %a) {
; RV32I-LABEL: zext_i8_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: jalr zero, ra, 0
%1 = zext i8 %a to i16
@@ -152,7 +152,7 @@ define i16 @zext_i8_to_i16(i8 %a) {
define i32 @zext_i8_to_i32(i8 %a) {
; RV32I-LABEL: zext_i8_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: jalr zero, ra, 0
%1 = zext i8 %a to i32
@@ -161,7 +161,7 @@ define i32 @zext_i8_to_i32(i8 %a) {
define i64 @zext_i8_to_i64(i8 %a) {
; RV32I-LABEL: zext_i8_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: addi a1, zero, 0
; RV32I-NEXT: jalr zero, ra, 0
@@ -171,7 +171,7 @@ define i64 @zext_i8_to_i64(i8 %a) {
define i32 @zext_i16_to_i32(i16 %a) {
; RV32I-LABEL: zext_i16_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: and a0, a0, a1
@@ -182,7 +182,7 @@ define i32 @zext_i16_to_i32(i16 %a) {
define i64 @zext_i16_to_i64(i16 %a) {
; RV32I-LABEL: zext_i16_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: and a0, a0, a1
@@ -194,7 +194,7 @@ define i64 @zext_i16_to_i64(i16 %a) {
define i64 @zext_i32_to_i64(i32 %a) {
; RV32I-LABEL: zext_i32_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: addi a1, zero, 0
; RV32I-NEXT: jalr zero, ra, 0
%1 = zext i32 %a to i64
@@ -206,7 +206,7 @@ define i64 @zext_i32_to_i64(i32 %a) {
define i1 @trunc_i8_to_i1(i8 %a) {
; RV32I-LABEL: trunc_i8_to_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i8 %a to i1
ret i1 %1
@@ -214,7 +214,7 @@ define i1 @trunc_i8_to_i1(i8 %a) {
define i1 @trunc_i16_to_i1(i16 %a) {
; RV32I-LABEL: trunc_i16_to_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i16 %a to i1
ret i1 %1
@@ -222,7 +222,7 @@ define i1 @trunc_i16_to_i1(i16 %a) {
define i1 @trunc_i32_to_i1(i32 %a) {
; RV32I-LABEL: trunc_i32_to_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i32 %a to i1
ret i1 %1
@@ -230,7 +230,7 @@ define i1 @trunc_i32_to_i1(i32 %a) {
define i1 @trunc_i64_to_i1(i64 %a) {
; RV32I-LABEL: trunc_i64_to_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i64 %a to i1
ret i1 %1
@@ -238,7 +238,7 @@ define i1 @trunc_i64_to_i1(i64 %a) {
define i8 @trunc_i16_to_i8(i16 %a) {
; RV32I-LABEL: trunc_i16_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i16 %a to i8
ret i8 %1
@@ -246,7 +246,7 @@ define i8 @trunc_i16_to_i8(i16 %a) {
define i8 @trunc_i32_to_i8(i32 %a) {
; RV32I-LABEL: trunc_i32_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i32 %a to i8
ret i8 %1
@@ -254,7 +254,7 @@ define i8 @trunc_i32_to_i8(i32 %a) {
define i8 @trunc_i64_to_i8(i64 %a) {
; RV32I-LABEL: trunc_i64_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i64 %a to i8
ret i8 %1
@@ -262,7 +262,7 @@ define i8 @trunc_i64_to_i8(i64 %a) {
define i16 @trunc_i32_to_i16(i32 %a) {
; RV32I-LABEL: trunc_i32_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i32 %a to i16
ret i16 %1
@@ -270,7 +270,7 @@ define i16 @trunc_i32_to_i16(i32 %a) {
define i16 @trunc_i64_to_i16(i64 %a) {
; RV32I-LABEL: trunc_i64_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i64 %a to i16
ret i16 %1
@@ -278,7 +278,7 @@ define i16 @trunc_i64_to_i16(i64 %a) {
define i32 @trunc_i64_to_i32(i64 %a) {
; RV32I-LABEL: trunc_i64_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: jalr zero, ra, 0
%1 = trunc i64 %a to i32
ret i32 %1
diff --git a/test/CodeGen/RISCV/shifts.ll b/test/CodeGen/RISCV/shifts.ll
index d773a6ad62a..c4033c574ef 100644
--- a/test/CodeGen/RISCV/shifts.ll
+++ b/test/CodeGen/RISCV/shifts.ll
@@ -7,7 +7,7 @@
define i64 @lshr64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: lshr64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a3, %hi(__lshrdi3)
; RV32I-NEXT: addi a3, a3, %lo(__lshrdi3)
@@ -20,7 +20,7 @@ define i64 @lshr64(i64 %a, i64 %b) nounwind {
define i64 @ashr64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: ashr64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a3, %hi(__ashrdi3)
; RV32I-NEXT: addi a3, a3, %lo(__ashrdi3)
@@ -33,7 +33,7 @@ define i64 @ashr64(i64 %a, i64 %b) nounwind {
define i64 @shl64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: shl64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: sw ra, 12(s0)
; RV32I-NEXT: lui a3, %hi(__ashldi3)
; RV32I-NEXT: addi a3, a3, %lo(__ashldi3)
diff --git a/test/CodeGen/RISCV/wide-mem.ll b/test/CodeGen/RISCV/wide-mem.ll
index 18ab52aaf13..cbb89f631a5 100644
--- a/test/CodeGen/RISCV/wide-mem.ll
+++ b/test/CodeGen/RISCV/wide-mem.ll
@@ -6,7 +6,7 @@
define i64 @load_i64(i64 *%a) nounwind {
; RV32I-LABEL: load_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lw a2, 0(a0)
; RV32I-NEXT: lw a1, 4(a0)
; RV32I-NEXT: addi a0, a2, 0
@@ -21,7 +21,7 @@ define i64 @load_i64(i64 *%a) nounwind {
; generate two addi
define i64 @load_i64_global() nounwind {
; RV32I-LABEL: load_i64_global:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
; RV32I-NEXT: lui a0, %hi(val64)
; RV32I-NEXT: addi a0, a0, %lo(val64)
; RV32I-NEXT: lw a0, 0(a0)
diff --git a/test/CodeGen/SPARC/analyze-branch.ll b/test/CodeGen/SPARC/analyze-branch.ll
index 7d2096033a0..c39dde5a2b8 100644
--- a/test/CodeGen/SPARC/analyze-branch.ll
+++ b/test/CodeGen/SPARC/analyze-branch.ll
@@ -18,7 +18,7 @@ define void @test_Bcc_fallthrough_taken(i32 %in) nounwind {
; CHECK: cmp {{%[goli][0-9]+}}, 42
; CHECK: bne [[FALSE:.LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: nop
-; CHECK-NEXT: ! BB#
+; CHECK-NEXT: ! %bb.
; CHECK-NEXT: call test_true
; CHECK: [[FALSE]]:
@@ -42,7 +42,7 @@ define void @test_Bcc_fallthrough_nottaken(i32 %in) nounwind {
; CHECK: be [[TRUE:.LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: nop
-; CHECK-NEXT: ! BB#
+; CHECK-NEXT: ! %bb.
; CHECK-NEXT: call test_false
; CHECK: [[TRUE]]:
diff --git a/test/CodeGen/SPARC/vector-extract-elt.ll b/test/CodeGen/SPARC/vector-extract-elt.ll
index 702f063bfcc..47f39d5b9fb 100644
--- a/test/CodeGen/SPARC/vector-extract-elt.ll
+++ b/test/CodeGen/SPARC/vector-extract-elt.ll
@@ -5,7 +5,7 @@
; look-thru for extractelement then we know that the add will yield a
; non-negative result.
define i1 @test1(<4 x i16>* %in) {
-; CHECK-LABEL: ! BB#0:
+; CHECK-LABEL: ! %bb.0:
; CHECK-NEXT: retl
; CHECK-NEXT: sethi 0, %o0
%vec2 = load <4 x i16>, <4 x i16>* %in, align 1
diff --git a/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll b/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll
index 8c31f073276..a42f625a536 100644
--- a/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll
+++ b/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll
@@ -9,7 +9,7 @@
; store i1 true, i1* %g_717.sink.i, align 4
; %.b = load i1, i1* @g_2, align 4
-; CHECK: # BB#6: # %crc32_gentab.exit
+; CHECK: # %bb.6: # %crc32_gentab.exit
; CHECK: larl %r2, g_2
; CHECK-NEXT: llc %r3, 0(%r2)
; CHECK-NOT: %r2
diff --git a/test/CodeGen/SystemZ/dag-combine-02.ll b/test/CodeGen/SystemZ/dag-combine-02.ll
index b20133facb8..2d96aafb938 100644
--- a/test/CodeGen/SystemZ/dag-combine-02.ll
+++ b/test/CodeGen/SystemZ/dag-combine-02.ll
@@ -93,7 +93,7 @@ define signext i32 @main(i32 signext, i8** nocapture readonly) local_unnamed_add
br i1 %60, label %61, label %13
; <label>:61: ; preds = %13
-; CHECK-LABEL: BB#6:
+; CHECK-LABEL: %bb.6:
; CHECK: stgrl %r1, g_56
; CHECK: llhrl %r1, g_56+6
; CHECK: stgrl %r2, g_56
diff --git a/test/CodeGen/SystemZ/int-cmp-51.ll b/test/CodeGen/SystemZ/int-cmp-51.ll
index 85a0e4b4d3a..6d00dd843ae 100644
--- a/test/CodeGen/SystemZ/int-cmp-51.ll
+++ b/test/CodeGen/SystemZ/int-cmp-51.ll
@@ -8,7 +8,7 @@ declare void @bar(i8)
; Check the low end of the CH range.
define void @f1(i32 %lhs) {
-; CHECK-LABEL: BB#1:
+; CHECK-LABEL: %bb.1:
; CHECK-NOT: cijlh %r0, 1, .LBB0_3
entry:
diff --git a/test/CodeGen/SystemZ/pr32372.ll b/test/CodeGen/SystemZ/pr32372.ll
index c18e238fbaf..d252a9a96de 100644
--- a/test/CodeGen/SystemZ/pr32372.ll
+++ b/test/CodeGen/SystemZ/pr32372.ll
@@ -3,7 +3,7 @@
define void @pr32372(i8*) {
; CHECK-LABEL: pr32372:
-; CHECK: # BB#0: # %BB
+; CHECK: # %bb.0: # %BB
; CHECK-NEXT: llc %r1, 0(%r2)
; CHECK-NEXT: mvhhi 0(%r1), -3825
; CHECK-NEXT: llill %r0, 0
diff --git a/test/CodeGen/SystemZ/pr32505.ll b/test/CodeGen/SystemZ/pr32505.ll
index c5382b27181..288d0b83863 100644
--- a/test/CodeGen/SystemZ/pr32505.ll
+++ b/test/CodeGen/SystemZ/pr32505.ll
@@ -5,7 +5,7 @@ target triple = "s390x-ibm-linux"
define <2 x float> @pr32505(<2 x i8> * %a) {
; CHECK-LABEL: pr32505:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lbh %r0, 1(%r2)
; CHECK-NEXT: lbh %r1, 0(%r2)
; CHECK-NEXT: ldgr %f0, %r1
diff --git a/test/CodeGen/SystemZ/strcmp-01.ll b/test/CodeGen/SystemZ/strcmp-01.ll
index a30663a13f1..ef05d832e73 100644
--- a/test/CodeGen/SystemZ/strcmp-01.ll
+++ b/test/CodeGen/SystemZ/strcmp-01.ll
@@ -11,7 +11,7 @@ define i32 @f1(i8 *%src1, i8 *%src2) {
; CHECK: [[LABEL:\.[^:]*]]:
; CHECK: clst %r2, %r3
; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
; CHECK-NEXT: ipm [[REG:%r[0-5]]]
; CHECK: srl [[REG]], 28
; CHECK: rll %r2, [[REG]], 31
@@ -27,7 +27,7 @@ define void @f2(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK: [[LABEL:\.[^:]*]]:
; CHECK: clst %r2, %r3
; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
; CHECK-NEXT: ber %r14
; CHECK: br %r14
%res = call i32 @strcmp(i8 *%src1, i8 *%src2)
@@ -50,7 +50,7 @@ define i32 @f3(i8 *%src1, i8 *%src2, i32 *%dest) {
; CHECK: [[LABEL:\.[^:]*]]:
; CHECK: clst %r2, %r3
; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
; CHECK-NEXT: ipm [[REG:%r[0-5]]]
; CHECK: srl [[REG]], 28
; CHECK: rll %r2, [[REG]], 31
diff --git a/test/CodeGen/SystemZ/strlen-01.ll b/test/CodeGen/SystemZ/strlen-01.ll
index 16161d4d2c8..2fb63425fe0 100644
--- a/test/CodeGen/SystemZ/strlen-01.ll
+++ b/test/CodeGen/SystemZ/strlen-01.ll
@@ -15,7 +15,7 @@ define i64 @f1(i32 %dummy, i8 *%src) {
; CHECK: [[LABEL:\.[^:]*]]:
; CHECK-NEXT: srst %r2, [[REG]]
; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
; CHECK-NEXT: sgr %r2, %r3
; CHECK: br %r14
%res = call i64 @strlen(i8 *%src)
@@ -31,7 +31,7 @@ define i64 @f2(i64 %len, i8 *%src) {
; CHECK: [[LABEL:\.[^:]*]]:
; CHECK-NEXT: srst %r2, [[REG]]
; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
; CHECK-NEXT: sgr %r2, %r3
; CHECK: br %r14
%res = call i64 @strnlen(i8 *%src, i64 %len)
diff --git a/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll b/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll
index eafb0122e90..ac12861603a 100644
--- a/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll
+++ b/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll
@@ -7,7 +7,7 @@
define <2 x i8> @fun0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i8> %val5, <2 x i8> %val6) {
; CHECK-LABEL: fun0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqb [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vceqb [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vn %v0, [[REG0]], [[REG1]]
@@ -22,7 +22,7 @@ define <2 x i8> @fun0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %
define <2 x i16> @fun1(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i16> %val5, <2 x i16> %val6) {
; CHECK-LABEL: fun1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqb [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vceqb [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vn %v0, [[REG0]], [[REG1]]
@@ -38,7 +38,7 @@ define <2 x i16> @fun1(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8>
define <16 x i8> @fun2(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i8> %val5, <16 x i8> %val6) {
; CHECK-LABEL: fun2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqh [[REG0:%v[0-9]+]], %v30, %v27
; CHECK-DAG: vceqh [[REG1:%v[0-9]+]], %v28, %v25
; CHECK-DAG: vceqb [[REG2:%v[0-9]+]], %v24, %v26
@@ -55,7 +55,7 @@ define <16 x i8> @fun2(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x
define <16 x i16> @fun3(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i16> %val5, <16 x i16> %val6) {
; CHECK-LABEL: fun3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqb [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vuphb [[REG2:%v[0-9]+]], [[REG0]]
; CHECK-DAG: vmrlg [[REG1:%v[0-9]+]], [[REG0]], [[REG0]]
@@ -78,7 +78,7 @@ define <16 x i16> @fun3(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16
define <32 x i8> @fun4(<32 x i8> %val1, <32 x i8> %val2, <32 x i8> %val3, <32 x i8> %val4, <32 x i8> %val5, <32 x i8> %val6) {
; CHECK-LABEL: fun4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqb [[REG0:%v[0-9]+]], %v24, %v28
; CHECK-DAG: vceqb [[REG1:%v[0-9]+]], %v26, %v30
; CHECK-DAG: vceqb [[REG2:%v[0-9]+]], %v25, %v29
@@ -101,7 +101,7 @@ define <32 x i8> @fun4(<32 x i8> %val1, <32 x i8> %val2, <32 x i8> %val3, <32 x
define <2 x i8> @fun5(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i8> %val5, <2 x i8> %val6) {
; CHECK-LABEL: fun5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqh [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vpkh [[REG1:%v[0-9]+]], [[REG0]], [[REG0]]
; CHECK-DAG: vceqb [[REG2:%v[0-9]+]], %v28, %v30
@@ -117,7 +117,7 @@ define <2 x i8> @fun5(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8>
define <2 x i16> @fun6(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i16> %val5, <2 x i16> %val6) {
; CHECK-LABEL: fun6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqb %v1, %v28, %v30
; CHECK-NEXT: vceqh %v0, %v24, %v26
; CHECK-NEXT: vuphb %v1, %v1
@@ -133,7 +133,7 @@ define <2 x i16> @fun6(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8
define <2 x i32> @fun7(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i32> %val5, <2 x i32> %val6) {
; CHECK-LABEL: fun7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqb %v1, %v28, %v30
; CHECK-NEXT: vceqh %v0, %v24, %v26
; CHECK-NEXT: vuphb %v1, %v1
@@ -150,7 +150,7 @@ define <2 x i32> @fun7(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8
define <8 x i8> @fun8(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i8> %val5, <8 x i8> %val6) {
; CHECK-LABEL: fun8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqh [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vceqh [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vx %v0, [[REG0]], [[REG1]]
@@ -166,7 +166,7 @@ define <8 x i8> @fun8(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i1
define <8 x i16> @fun9(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i16> %val5, <8 x i16> %val6) {
; CHECK-LABEL: fun9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqh [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vceqh [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vx %v0, [[REG0]], [[REG1]]
@@ -181,7 +181,7 @@ define <8 x i16> @fun9(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i
define <8 x i32> @fun10(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i32> %val5, <8 x i32> %val6) {
; CHECK-LABEL: fun10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqh [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vceqh [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vx [[REG2:%v[0-9]+]], [[REG0]], [[REG1]]
@@ -200,7 +200,7 @@ define <8 x i32> @fun10(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x
define <16 x i8> @fun11(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i8> %val5, <16 x i8> %val6) {
; CHECK-LABEL: fun11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vl [[REG0:%v[0-9]+]], 192(%r15)
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 208(%r15)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 160(%r15)
@@ -229,7 +229,7 @@ define <16 x i8> @fun11(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <1
define <16 x i16> @fun12(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i16> %val5, <16 x i16> %val6) {
; CHECK-LABEL: fun12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vl [[REG0:%v[0-9]+]], 192(%r15)
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 208(%r15)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 160(%r15)
@@ -260,7 +260,7 @@ define <16 x i16> @fun12(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <
define <2 x i16> @fun13(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i16> %val5, <2 x i16> %val6) {
; CHECK-LABEL: fun13:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqg %v1, %v28, %v30
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vpkg %v1, %v1, %v1
@@ -277,7 +277,7 @@ define <2 x i16> @fun13(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x
define <2 x i32> @fun14(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i32> %val5, <2 x i32> %val6) {
; CHECK-LABEL: fun14:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqg %v1, %v28, %v30
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vpkg %v1, %v1, %v1
@@ -293,7 +293,7 @@ define <2 x i32> @fun14(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x
define <2 x i64> @fun15(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
; CHECK-LABEL: fun15:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqf [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vuphf [[REG1:%v[0-9]+]], [[REG0]]
; CHECK-DAG: vceqg [[REG2:%v[0-9]+]], %v28, %v30
@@ -309,7 +309,7 @@ define <2 x i64> @fun15(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x
define <4 x i16> @fun16(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4, <4 x i16> %val5, <4 x i16> %val6) {
; CHECK-LABEL: fun16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqf [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vpkf [[REG1:%v[0-9]+]], [[REG0]], [[REG0]]
; CHECK-DAG: vceqh [[REG2:%v[0-9]+]], %v28, %v30
@@ -325,7 +325,7 @@ define <4 x i16> @fun16(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x
define <4 x i32> @fun17(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4, <4 x i32> %val5, <4 x i32> %val6) {
; CHECK-LABEL: fun17:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v1, %v28, %v30
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vuphh %v1, %v1
@@ -341,7 +341,7 @@ define <4 x i32> @fun17(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x
define <4 x i64> @fun18(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4, <4 x i64> %val5, <4 x i64> %val6) {
; CHECK-LABEL: fun18:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v1, %v28, %v30
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vuphh %v1, %v1
@@ -361,7 +361,7 @@ define <4 x i64> @fun18(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x
define <8 x i16> @fun19(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x i32> %val4, <8 x i16> %val5, <8 x i16> %val6) {
; CHECK-LABEL: fun19:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqf [[REG0:%v[0-9]+]], %v24, %v28
; CHECK-DAG: vceqf [[REG1:%v[0-9]+]], %v26, %v30
; CHECK-DAG: vceqf [[REG2:%v[0-9]+]], %v25, %v29
@@ -382,7 +382,7 @@ define <8 x i16> @fun19(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x
define <8 x i32> @fun20(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x i32> %val4, <8 x i32> %val5, <8 x i32> %val6) {
; CHECK-LABEL: fun20:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqf [[REG0:%v[0-9]+]], %v24, %v28
; CHECK-DAG: vceqf [[REG1:%v[0-9]+]], %v26, %v30
; CHECK-DAG: vceqf [[REG2:%v[0-9]+]], %v25, %v29
@@ -405,7 +405,7 @@ define <8 x i32> @fun20(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x
define <2 x i32> @fun21(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i32> %val5, <2 x i32> %val6) {
; CHECK-LABEL: fun21:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqg [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vceqg [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vn %v0, [[REG0]], [[REG1]]
@@ -421,7 +421,7 @@ define <2 x i32> @fun21(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x
define <2 x i64> @fun22(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
; CHECK-LABEL: fun22:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqg [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vceqg [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vn %v0, [[REG0]], [[REG1]]
@@ -436,7 +436,7 @@ define <2 x i64> @fun22(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x
define <4 x i32> @fun23(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i32> %val5, <4 x i32> %val6) {
; CHECK-LABEL: fun23:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqg %v0, %v26, %v30
; CHECK-NEXT: vceqg %v1, %v24, %v28
; CHECK-NEXT: vpkg %v0, %v1, %v0
@@ -453,7 +453,7 @@ define <4 x i32> @fun23(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x
define <4 x i64> @fun24(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i64> %val5, <4 x i64> %val6) {
; CHECK-LABEL: fun24:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqf [[REG0:%v[0-9]+]], %v25, %v27
; CHECK-NEXT: vuphf [[REG1:%v[0-9]+]], [[REG0]]
; CHECK-NEXT: vmrlg [[REG2:%v[0-9]+]], [[REG0]], [[REG0]]
@@ -476,7 +476,7 @@ define <4 x i64> @fun24(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x
define <2 x float> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4, <2 x float> %val5, <2 x float> %val6) {
; CHECK-LABEL: fun25:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf %v0, %v26, %v26
; CHECK-NEXT: vmrlf %v1, %v24, %v24
; CHECK-NEXT: vldeb %v0, %v0
@@ -495,7 +495,7 @@ define <2 x float> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x double> %va
; CHECK-NEXT: br %r14
;
; CHECK-Z14-LABEL: fun25:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchdb %v1, %v28, %v30
; CHECK-Z14-NEXT: vfchsb %v0, %v24, %v26
; CHECK-Z14-NEXT: vpkg %v1, %v1, %v1
@@ -511,7 +511,7 @@ define <2 x float> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x double> %va
define <2 x double> @fun26(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4, <2 x double> %val5, <2 x double> %val6) {
; CHECK-LABEL: fun26:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf %v0, %v26, %v26
; CHECK-NEXT: vmrlf %v1, %v24, %v24
; CHECK-NEXT: vldeb %v0, %v0
@@ -530,7 +530,7 @@ define <2 x double> @fun26(<2 x float> %val1, <2 x float> %val2, <2 x double> %v
; CHECK-NEXT: br %r14
;
; CHECK-Z14-LABEL: fun26:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchsb %v0, %v24, %v26
; CHECK-Z14-NEXT: vuphf %v0, %v0
; CHECK-Z14-NEXT: vfchdb %v1, %v28, %v30
@@ -547,7 +547,7 @@ define <2 x double> @fun26(<2 x float> %val1, <2 x float> %val2, <2 x double> %v
; Also check a widening select of a vector of floats
define <2 x float> @fun27(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x float> %val5, <2 x float> %val6) {
; CHECK-LABEL: fun27:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqb [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vceqb [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vo %v0, [[REG0]], [[REG1]]
@@ -564,7 +564,7 @@ define <2 x float> @fun27(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i
define <4 x float> @fun28(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x float> %val5, <4 x float> %val6) {
; CHECK-LABEL: fun28:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vmrlf [[REG0:%v[0-9]+]], %v26, %v26
; CHECK-DAG: vmrlf [[REG1:%v[0-9]+]], %v24, %v24
; CHECK-DAG: vldeb [[REG2:%v[0-9]+]], [[REG0]]
@@ -592,7 +592,7 @@ define <4 x float> @fun28(<4 x float> %val1, <4 x float> %val2, <4 x float> %val
; CHECK-NEXT: br %r14
;
; CHECK-Z14-LABEL: fun28:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchsb %v0, %v24, %v26
; CHECK-Z14-NEXT: vfchsb %v1, %v28, %v30
; CHECK-Z14-NEXT: vx %v0, %v0, %v1
@@ -607,7 +607,7 @@ define <4 x float> @fun28(<4 x float> %val1, <4 x float> %val2, <4 x float> %val
define <4 x double> @fun29(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x double> %val5, <4 x double> %val6) {
; CHECK-LABEL: fun29:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf %v0, %v26, %v26
; CHECK-NEXT: vmrlf %v1, %v24, %v24
; CHECK-NEXT: vldeb %v0, %v0
@@ -639,7 +639,7 @@ define <4 x double> @fun29(<4 x float> %val1, <4 x float> %val2, <4 x float> %va
; CHECK-NEXT: br %r14
;
; CHECK-Z14-LABEL: fun29:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchsb %v0, %v24, %v26
; CHECK-Z14-NEXT: vfchsb %v1, %v28, %v30
; CHECK-Z14-NEXT: vx %v0, %v0, %v1
@@ -658,7 +658,7 @@ define <4 x double> @fun29(<4 x float> %val1, <4 x float> %val2, <4 x float> %va
define <8 x float> @fun30(<8 x float> %val1, <8 x float> %val2, <8 x double> %val3, <8 x double> %val4, <8 x float> %val5, <8 x float> %val6) {
; CHECK-LABEL: fun30:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf %v16, %v28, %v28
; CHECK-NEXT: vmrlf %v17, %v24, %v24
; CHECK-NEXT: vldeb %v16, %v16
@@ -702,7 +702,7 @@ define <8 x float> @fun30(<8 x float> %val1, <8 x float> %val2, <8 x double> %va
; CHECK-NEXT: br %r14
;
; CHECK-Z14-LABEL: fun30:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vl %v4, 192(%r15)
; CHECK-Z14-NEXT: vl %v5, 208(%r15)
; CHECK-Z14-NEXT: vl %v6, 160(%r15)
@@ -733,7 +733,7 @@ define <8 x float> @fun30(<8 x float> %val1, <8 x float> %val2, <8 x double> %va
define <2 x float> @fun31(<2 x double> %val1, <2 x double> %val2, <2 x double> %val3, <2 x double> %val4, <2 x float> %val5, <2 x float> %val6) {
; CHECK-LABEL: fun31:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vfchdb [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vfchdb [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vx %v0, [[REG0]], [[REG1]]
@@ -749,7 +749,7 @@ define <2 x float> @fun31(<2 x double> %val1, <2 x double> %val2, <2 x double> %
define <2 x double> @fun32(<2 x double> %val1, <2 x double> %val2, <2 x double> %val3, <2 x double> %val4, <2 x double> %val5, <2 x double> %val6) {
; CHECK-LABEL: fun32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vfchdb [[REG0:%v[0-9]+]], %v24, %v26
; CHECK-DAG: vfchdb [[REG1:%v[0-9]+]], %v28, %v30
; CHECK-NEXT: vx %v0, [[REG0]], [[REG1]]
@@ -764,7 +764,7 @@ define <2 x double> @fun32(<2 x double> %val1, <2 x double> %val2, <2 x double>
define <4 x float> @fun33(<4 x double> %val1, <4 x double> %val2, <4 x float> %val3, <4 x float> %val4, <4 x float> %val5, <4 x float> %val6) {
; CHECK-LABEL: fun33:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfchdb %v0, %v26, %v30
; CHECK-NEXT: vfchdb %v1, %v24, %v28
; CHECK-NEXT: vpkg %v0, %v1, %v0
@@ -784,7 +784,7 @@ define <4 x float> @fun33(<4 x double> %val1, <4 x double> %val2, <4 x float> %v
; CHECK-NEXT: br %r14
;
; CHECK-Z14-LABEL: fun33:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchdb %v0, %v26, %v30
; CHECK-Z14-NEXT: vfchdb %v1, %v24, %v28
; CHECK-Z14-NEXT: vpkg %v0, %v1, %v0
@@ -801,7 +801,7 @@ define <4 x float> @fun33(<4 x double> %val1, <4 x double> %val2, <4 x float> %v
define <4 x double> @fun34(<4 x double> %val1, <4 x double> %val2, <4 x float> %val3, <4 x float> %val4, <4 x double> %val5, <4 x double> %val6) {
; CHECK-LABEL: fun34:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf [[REG0:%v[0-9]+]], %v27, %v27
; CHECK-NEXT: vmrlf [[REG1:%v[0-9]+]], %v25, %v25
; CHECK-NEXT: vldeb [[REG2:%v[0-9]+]], [[REG0]]
@@ -827,7 +827,7 @@ define <4 x double> @fun34(<4 x double> %val1, <4 x double> %val2, <4 x float> %
; CHECK-NEXT: br %r14
;
; CHECK-Z14-LABEL: fun34:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchsb %v4, %v25, %v27
; CHECK-Z14-NEXT: vuphf %v5, %v4
; CHECK-Z14-NEXT: vmrlg %v4, %v4, %v4
diff --git a/test/CodeGen/SystemZ/vec-cmpsel.ll b/test/CodeGen/SystemZ/vec-cmpsel.ll
index fb8ee56b990..200c25179d3 100644
--- a/test/CodeGen/SystemZ/vec-cmpsel.ll
+++ b/test/CodeGen/SystemZ/vec-cmpsel.ll
@@ -6,7 +6,7 @@
define <2 x i8> @fun0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4) {
; CHECK-LABEL: fun0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqb %v0, %v24, %v26
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-NEXT: br %r14
@@ -17,7 +17,7 @@ define <2 x i8> @fun0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %
define <2 x i16> @fun1(<2 x i8> %val1, <2 x i8> %val2, <2 x i16> %val3, <2 x i16> %val4) {
; CHECK-LABEL: fun1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqb %v0, %v24, %v26
; CHECK-NEXT: vuphb %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -29,7 +29,7 @@ define <2 x i16> @fun1(<2 x i8> %val1, <2 x i8> %val2, <2 x i16> %val3, <2 x i16
define <16 x i8> @fun2(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4) {
; CHECK-LABEL: fun2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqb %v0, %v24, %v26
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-NEXT: br %r14
@@ -40,7 +40,7 @@ define <16 x i8> @fun2(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x
define <16 x i16> @fun3(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4) {
; CHECK-LABEL: fun3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqb %v0, %v24, %v26
; CHECK-DAG: vuphb [[REG0:%v[0-9]+]], %v0
; CHECK-DAG: vmrlg [[REG1:%v[0-9]+]], %v0, %v0
@@ -55,7 +55,7 @@ define <16 x i16> @fun3(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16
define <32 x i8> @fun4(<32 x i8> %val1, <32 x i8> %val2, <32 x i8> %val3, <32 x i8> %val4) {
; CHECK-LABEL: fun4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqb [[REG0:%v[0-9]+]], %v26, %v30
; CHECK-DAG: vceqb [[REG1:%v[0-9]+]], %v24, %v28
; CHECK-DAG: vsel %v24, %v25, %v29, [[REG1]]
@@ -68,7 +68,7 @@ define <32 x i8> @fun4(<32 x i8> %val1, <32 x i8> %val2, <32 x i8> %val3, <32 x
define <2 x i8> @fun5(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4) {
; CHECK-LABEL: fun5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v0, %v24, %v26
; CHECK-NEXT: vpkh %v0, %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -80,7 +80,7 @@ define <2 x i8> @fun5(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8>
define <2 x i16> @fun6(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4) {
; CHECK-LABEL: fun6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v0, %v24, %v26
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-NEXT: br %r14
@@ -91,7 +91,7 @@ define <2 x i16> @fun6(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i
define <2 x i32> @fun7(<2 x i16> %val1, <2 x i16> %val2, <2 x i32> %val3, <2 x i32> %val4) {
; CHECK-LABEL: fun7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v0, %v24, %v26
; CHECK-NEXT: vuphh %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -103,7 +103,7 @@ define <2 x i32> @fun7(<2 x i16> %val1, <2 x i16> %val2, <2 x i32> %val3, <2 x i
define <8 x i8> @fun8(<8 x i16> %val1, <8 x i16> %val2, <8 x i8> %val3, <8 x i8> %val4) {
; CHECK-LABEL: fun8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v0, %v24, %v26
; CHECK-NEXT: vpkh %v0, %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -115,7 +115,7 @@ define <8 x i8> @fun8(<8 x i16> %val1, <8 x i16> %val2, <8 x i8> %val3, <8 x i8>
define <8 x i16> @fun9(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4) {
; CHECK-LABEL: fun9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v0, %v24, %v26
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-NEXT: br %r14
@@ -126,7 +126,7 @@ define <8 x i16> @fun9(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i
define <8 x i32> @fun10(<8 x i16> %val1, <8 x i16> %val2, <8 x i32> %val3, <8 x i32> %val4) {
; CHECK-LABEL: fun10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v0, %v24, %v26
; CHECK-DAG: vuphh [[REG0:%v[0-9]+]], %v0
; CHECK-DAG: vmrlg [[REG1:%v[0-9]+]], %v0, %v0
@@ -141,7 +141,7 @@ define <8 x i32> @fun10(<8 x i16> %val1, <8 x i16> %val2, <8 x i32> %val3, <8 x
define <16 x i8> @fun11(<16 x i16> %val1, <16 x i16> %val2, <16 x i8> %val3, <16 x i8> %val4) {
; CHECK-LABEL: fun11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqh %v0, %v26, %v30
; CHECK-NEXT: vceqh %v1, %v24, %v28
; CHECK-NEXT: vpkh %v0, %v1, %v0
@@ -154,7 +154,7 @@ define <16 x i8> @fun11(<16 x i16> %val1, <16 x i16> %val2, <16 x i8> %val3, <16
define <16 x i16> @fun12(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4) {
; CHECK-LABEL: fun12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqh [[REG0:%v[0-9]+]], %v26, %v30
; CHECK-DAG: vceqh [[REG1:%v[0-9]+]], %v24, %v28
; CHECK-DAG: vsel %v24, %v25, %v29, [[REG1]]
@@ -167,7 +167,7 @@ define <16 x i16> @fun12(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <
define <2 x i16> @fun13(<2 x i32> %val1, <2 x i32> %val2, <2 x i16> %val3, <2 x i16> %val4) {
; CHECK-LABEL: fun13:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vpkf %v0, %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -179,7 +179,7 @@ define <2 x i16> @fun13(<2 x i32> %val1, <2 x i32> %val2, <2 x i16> %val3, <2 x
define <2 x i32> @fun14(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4) {
; CHECK-LABEL: fun14:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-NEXT: br %r14
@@ -190,7 +190,7 @@ define <2 x i32> @fun14(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x
define <2 x i64> @fun15(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4) {
; CHECK-LABEL: fun15:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vuphf %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -202,7 +202,7 @@ define <2 x i64> @fun15(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x
define <4 x i16> @fun16(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4) {
; CHECK-LABEL: fun16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vpkf %v0, %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -214,7 +214,7 @@ define <4 x i16> @fun16(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x
define <4 x i32> @fun17(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4) {
; CHECK-LABEL: fun17:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-NEXT: br %r14
@@ -225,7 +225,7 @@ define <4 x i32> @fun17(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x
define <4 x i64> @fun18(<4 x i32> %val1, <4 x i32> %val2, <4 x i64> %val3, <4 x i64> %val4) {
; CHECK-LABEL: fun18:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqf %v0, %v24, %v26
; CHECK-DAG: vuphf [[REG0:%v[0-9]+]], %v0
; CHECK-DAG: vmrlg [[REG1:%v[0-9]+]], %v0, %v0
@@ -240,7 +240,7 @@ define <4 x i64> @fun18(<4 x i32> %val1, <4 x i32> %val2, <4 x i64> %val3, <4 x
define <8 x i16> @fun19(<8 x i32> %val1, <8 x i32> %val2, <8 x i16> %val3, <8 x i16> %val4) {
; CHECK-LABEL: fun19:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqf %v0, %v26, %v30
; CHECK-NEXT: vceqf %v1, %v24, %v28
; CHECK-NEXT: vpkf %v0, %v1, %v0
@@ -253,7 +253,7 @@ define <8 x i16> @fun19(<8 x i32> %val1, <8 x i32> %val2, <8 x i16> %val3, <8 x
define <8 x i32> @fun20(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x i32> %val4) {
; CHECK-LABEL: fun20:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqf [[REG0:%v[0-9]+]], %v26, %v30
; CHECK-DAG: vceqf [[REG1:%v[0-9]+]], %v24, %v28
; CHECK-DAG: vsel %v24, %v25, %v29, [[REG1]]
@@ -266,7 +266,7 @@ define <8 x i32> @fun20(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x
define <2 x i32> @fun21(<2 x i64> %val1, <2 x i64> %val2, <2 x i32> %val3, <2 x i32> %val4) {
; CHECK-LABEL: fun21:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqg %v0, %v24, %v26
; CHECK-NEXT: vpkg %v0, %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -278,7 +278,7 @@ define <2 x i32> @fun21(<2 x i64> %val1, <2 x i64> %val2, <2 x i32> %val3, <2 x
define <2 x i64> @fun22(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4) {
; CHECK-LABEL: fun22:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqg %v0, %v24, %v26
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-NEXT: br %r14
@@ -289,7 +289,7 @@ define <2 x i64> @fun22(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x
define <4 x i32> @fun23(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x i32> %val4) {
; CHECK-LABEL: fun23:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqg %v0, %v26, %v30
; CHECK-NEXT: vceqg %v1, %v24, %v28
; CHECK-NEXT: vpkg %v0, %v1, %v0
@@ -302,7 +302,7 @@ define <4 x i32> @fun23(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x
define <4 x i64> @fun24(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4) {
; CHECK-LABEL: fun24:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vceqg [[REG0:%v[0-9]+]], %v26, %v30
; CHECK-DAG: vceqg [[REG1:%v[0-9]+]], %v24, %v28
; CHECK-DAG: vsel %v24, %v25, %v29, [[REG1]]
@@ -315,7 +315,7 @@ define <4 x i64> @fun24(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x
define <2 x float> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x float> %val3, <2 x float> %val4) {
; CHECK-LABEL: fun25:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf %v0, %v26, %v26
; CHECK-NEXT: vmrlf %v1, %v24, %v24
; CHECK-NEXT: vldeb %v0, %v0
@@ -331,7 +331,7 @@ define <2 x float> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x float> %val
; CHECK-NEXT: br %r14
; CHECK-Z14-LABEL: fun25:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchsb %v0, %v24, %v26
; CHECK-Z14-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-Z14-NEXT: br %r14
@@ -343,7 +343,7 @@ define <2 x float> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x float> %val
define <2 x double> @fun26(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4) {
; CHECK-LABEL: fun26:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf %v0, %v26, %v26
; CHECK-NEXT: vmrlf %v1, %v24, %v24
; CHECK-NEXT: vldeb %v0, %v0
@@ -360,7 +360,7 @@ define <2 x double> @fun26(<2 x float> %val1, <2 x float> %val2, <2 x double> %v
; CHECK-NEXT: br %r14
; CHECK-Z14-LABEL: fun26:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchsb %v0, %v24, %v26
; CHECK-Z14-NEXT: vuphf %v0, %v0
; CHECK-Z14-NEXT: vsel %v24, %v28, %v30, %v0
@@ -374,7 +374,7 @@ define <2 x double> @fun26(<2 x float> %val1, <2 x float> %val2, <2 x double> %v
; Test a widening select of floats.
define <2 x float> @fun27(<2 x i8> %val1, <2 x i8> %val2, <2 x float> %val3, <2 x float> %val4) {
; CHECK-LABEL: fun27:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vceqb %v0, %v24, %v26
; CHECK-NEXT: vuphb %v0, %v0
; CHECK-NEXT: vuphh %v0, %v0
@@ -388,7 +388,7 @@ define <2 x float> @fun27(<2 x i8> %val1, <2 x i8> %val2, <2 x float> %val3, <2
define <4 x float> @fun28(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4) {
; CHECK-LABEL: fun28:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf %v0, %v26, %v26
; CHECK-NEXT: vmrlf %v1, %v24, %v24
; CHECK-NEXT: vldeb %v0, %v0
@@ -404,7 +404,7 @@ define <4 x float> @fun28(<4 x float> %val1, <4 x float> %val2, <4 x float> %val
; CHECK-NEXT: br %r14
; CHECK-Z14-LABEL: fun28:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchsb %v0, %v24, %v26
; CHECK-Z14-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-Z14-NEXT: br %r14
@@ -416,7 +416,7 @@ define <4 x float> @fun28(<4 x float> %val1, <4 x float> %val2, <4 x float> %val
define <4 x double> @fun29(<4 x float> %val1, <4 x float> %val2, <4 x double> %val3, <4 x double> %val4) {
; CHECK-LABEL: fun29:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmrlf %v0, %v26, %v26
; CHECK-NEXT: vmrlf %v1, %v24, %v24
; CHECK-NEXT: vldeb %v0, %v0
@@ -436,7 +436,7 @@ define <4 x double> @fun29(<4 x float> %val1, <4 x float> %val2, <4 x double> %v
; CHECK-NEXT: br %r14
; CHECK-Z14-LABEL: fun29:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-NEXT: vfchsb %v0, %v24, %v26
; CHECK-Z14-DAG: vuphf [[REG0:%v[0-9]+]], %v0
; CHECK-Z14-DAG: vmrlg [[REG1:%v[0-9]+]], %v0, %v0
@@ -452,7 +452,7 @@ define <4 x double> @fun29(<4 x float> %val1, <4 x float> %val2, <4 x double> %v
define <8 x float> @fun30(<8 x float> %val1, <8 x float> %val2, <8 x float> %val3, <8 x float> %val4) {
; CHECK-Z14-LABEL: fun30:
-; CHECK-Z14: # BB#0:
+; CHECK-Z14: # %bb.0:
; CHECK-Z14-DAG: vfchsb [[REG0:%v[0-9]+]], %v26, %v30
; CHECK-Z14-DAG: vfchsb [[REG1:%v[0-9]+]], %v24, %v28
; CHECK-Z14-DAG: vsel %v24, %v25, %v29, [[REG1]]
@@ -465,7 +465,7 @@ define <8 x float> @fun30(<8 x float> %val1, <8 x float> %val2, <8 x float> %val
define <2 x float> @fun31(<2 x double> %val1, <2 x double> %val2, <2 x float> %val3, <2 x float> %val4) {
; CHECK-LABEL: fun31:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfchdb %v0, %v24, %v26
; CHECK-NEXT: vpkg %v0, %v0, %v0
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
@@ -478,7 +478,7 @@ define <2 x float> @fun31(<2 x double> %val1, <2 x double> %val2, <2 x float> %v
define <2 x double> @fun32(<2 x double> %val1, <2 x double> %val2, <2 x double> %val3, <2 x double> %val4) {
; CHECK-LABEL: fun32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfchdb %v0, %v24, %v26
; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
; CHECK-NEXT: br %r14
@@ -489,7 +489,7 @@ define <2 x double> @fun32(<2 x double> %val1, <2 x double> %val2, <2 x double>
define <4 x float> @fun33(<4 x double> %val1, <4 x double> %val2, <4 x float> %val3, <4 x float> %val4) {
; CHECK-LABEL: fun33:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfchdb %v0, %v26, %v30
; CHECK-NEXT: vfchdb %v1, %v24, %v28
; CHECK-NEXT: vpkg %v0, %v1, %v0
@@ -502,7 +502,7 @@ define <4 x float> @fun33(<4 x double> %val1, <4 x double> %val2, <4 x float> %v
define <4 x double> @fun34(<4 x double> %val1, <4 x double> %val2, <4 x double> %val3, <4 x double> %val4) {
; CHECK-LABEL: fun34:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-DAG: vfchdb [[REG0:%v[0-9]+]], %v26, %v30
; CHECK-DAG: vfchdb [[REG1:%v[0-9]+]], %v24, %v28
; CHECK-DAG: vsel %v24, %v25, %v29, [[REG1]]
diff --git a/test/CodeGen/SystemZ/vec-trunc-to-i1.ll b/test/CodeGen/SystemZ/vec-trunc-to-i1.ll
index 705fe3dbac9..73d4c47a840 100644
--- a/test/CodeGen/SystemZ/vec-trunc-to-i1.ll
+++ b/test/CodeGen/SystemZ/vec-trunc-to-i1.ll
@@ -7,7 +7,7 @@
define void @pr32275(<4 x i8> %B15) {
; CHECK-LABEL: pr32275:
-; CHECK: # BB#0: # %BB
+; CHECK: # %bb.0: # %BB
; CHECK-NEXT: vrepif %v0, 1
; CHECK-NEXT: .LBB0_1: # %CF34
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -22,7 +22,7 @@ define void @pr32275(<4 x i8> %B15) {
; CHECK-NEXT: vlgvf %r0, %v1, 3
; CHECK-NEXT: tmll %r0, 1
; CHECK-NEXT: jne .LBB0_1
-; CHECK-NEXT: # BB#2: # %CF36
+; CHECK-NEXT: # %bb.2: # %CF36
; CHECK-NEXT: br %r14
BB:
br label %CF34
diff --git a/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll b/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
index 65ee4283b3f..be539a6c620 100644
--- a/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
+++ b/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
@@ -13,7 +13,7 @@ declare void @_ZNSsC1EPKcRKSaIcE() unnamed_addr #0
; It isn't valid to If-Convert the following function, even though the calls
; are in common. The calls clobber the predicate info.
; CHECK: cbnz r{{[0-9]+}}, .LBB0_2
-; CHECK: BB#1
+; CHECK: %bb.1
; CHECK: .LBB0_2
; Function Attrs: nounwind
define hidden void @_ZN4llvm14DOTGraphTraitsIPNS_13ScheduleDAGMIEE17getEdgeAttributesEPKNS_5SUnitENS_13SUnitIteratorEPKNS_11ScheduleDAGE() #0 align 2 {
diff --git a/test/CodeGen/WebAssembly/dbgvalue.ll b/test/CodeGen/WebAssembly/dbgvalue.ll
index dc108ff9b1f..438bea33282 100644
--- a/test/CodeGen/WebAssembly/dbgvalue.ll
+++ b/test/CodeGen/WebAssembly/dbgvalue.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s
-; CHECK: BB#0
+; CHECK: %bb.0
; CHECK: #DEBUG_VALUE: usage:self <- %4
-; CHECK: BB#1
+; CHECK: %bb.1
; CHECK: DW_TAG_variable
source_filename = "test/CodeGen/WebAssembly/dbgvalue.ll"
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
diff --git a/test/CodeGen/WebAssembly/signext-arg.ll b/test/CodeGen/WebAssembly/signext-arg.ll
index cd116c645b4..32d74a20b75 100644
--- a/test/CodeGen/WebAssembly/signext-arg.ll
+++ b/test/CodeGen/WebAssembly/signext-arg.ll
@@ -5,7 +5,7 @@ declare i32 @get_int(i16 %arg)
define i32 @func_1(i16 %arg1 , i32 %arg2) #0 {
; CHECK-LABEL: func_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: i32.const $push1=, 16
; CHECK-NEXT: i32.shl $push2=, $0, $pop1
; CHECK-NEXT: i32.const $push4=, 16
diff --git a/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll b/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
index 48f5bc3e298..288e2921daa 100644
--- a/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
+++ b/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
@@ -10,7 +10,7 @@ target triple = "i686-unknown-unknown"
define i32 @test5(i32 %B, i8 %C) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movb {{[0-9]+}}(%esp), %cl
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: movl A, %eax
diff --git a/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll b/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
index ca3eb9cda37..4bc6b1a53d9 100644
--- a/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
+++ b/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
@@ -3,7 +3,7 @@
define i32 @f(i32 %a, i32 %b) {
; CHECK-LABEL: f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl %ecx, %edx
diff --git a/test/CodeGen/X86/2008-02-14-BitMiscompile.ll b/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
index fdc1c3bb67b..d3fa16a0747 100644
--- a/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
+++ b/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
@@ -3,7 +3,7 @@
define i32 @test(i1 %A) {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: negl %eax
diff --git a/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll b/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll
index 4f8df0533aa..363053fe341 100644
--- a/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll
+++ b/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll
@@ -11,7 +11,7 @@ declare %0 @llvm.sadd.with.overflow.i32(i32, i32) nounwind
define fastcc i32 @test() nounwind {
entry:
; CHECK-LABEL: test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: addl $0, %eax
; CHECK-NEXT: seto %cl
diff --git a/test/CodeGen/X86/2010-05-12-FastAllocKills.ll b/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
index 7a98f778bb9..c564b72e397 100644
--- a/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
+++ b/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
@@ -3,18 +3,18 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-apple-darwin"
; This test causes a virtual FP register to be redefined while it is live:
-;BB#5: derived from LLVM BB %bb10
-; Predecessors according to CFG: BB#4 BB#5
+;%bb.5: derived from LLVM BB %bb10
+; Predecessors according to CFG: %bb.4 %bb.5
; %reg1024<def> = MOV_Fp8080 %reg1034
; %reg1025<def> = MUL_Fp80m32 %reg1024, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
; %reg1034<def> = MOV_Fp8080 %reg1025
; FP_REG_KILL %fp0<imp-def>, %fp1<imp-def>, %fp2<imp-def>, %fp3<imp-def>, %fp4<imp-def>, %fp5<imp-def>, %fp6<imp-def>
-; JMP_4 <BB#5>
-; Successors according to CFG: BB#5
+; JMP_4 <%bb.5>
+; Successors according to CFG: %bb.5
;
; The X86FP pass needs good kill flags, like on %fp0 representing %reg1034:
-;BB#5: derived from LLVM BB %bb10
-; Predecessors according to CFG: BB#4 BB#5
+;%bb.5: derived from LLVM BB %bb10
+; Predecessors according to CFG: %bb.4 %bb.5
; %fp0<def> = LD_Fp80m <fi#3>, 1, %reg0, 0, %reg0; mem:LD10[FixedStack3](align=4)
; %fp1<def> = MOV_Fp8080 %fp0<kill>
; %fp2<def> = MUL_Fp80m32 %fp1, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
@@ -23,8 +23,8 @@ target triple = "x86_64-apple-darwin"
; ST_FpP80m <fi#4>, 1, %reg0, 0, %reg0, %fp1<kill>; mem:ST10[FixedStack4](align=4)
; ST_FpP80m <fi#5>, 1, %reg0, 0, %reg0, %fp2<kill>; mem:ST10[FixedStack5](align=4)
; FP_REG_KILL %fp0<imp-def>, %fp1<imp-def>, %fp2<imp-def>, %fp3<imp-def>, %fp4<imp-def>, %fp5<imp-def>, %fp6<imp-def>
-; JMP_4 <BB#5>
-; Successors according to CFG: BB#5
+; JMP_4 <%bb.5>
+; Successors according to CFG: %bb.5
define fastcc i32 @sqlite3AtoF(i8* %z, double* nocapture %pResult) nounwind ssp {
entry:
diff --git a/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll b/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
index cffefc2bee6..9bbd86ca646 100644
--- a/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
+++ b/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
@@ -8,14 +8,14 @@
define i32 @main() nounwind {
; CHECK-LABEL: main:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq {{.*}}(%rip), %rax
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: andl $150, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jle .LBB0_1
-; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: # %bb.2: # %if.then
; CHECK-NEXT: movl $1, {{.*}}(%rip)
; CHECK-NEXT: movl $1, %esi
; CHECK-NEXT: jmp .LBB0_3
diff --git a/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
index 416761ffef4..c98bafcd565 100644
--- a/test/CodeGen/X86/2011-10-19-widen_vselect.ll
+++ b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
@@ -7,13 +7,13 @@
define void @simple_widen(<2 x float> %a, <2 x float> %b) {
; X32-LABEL: simple_widen:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: extractps $1, %xmm1, (%eax)
; X32-NEXT: movss %xmm1, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: simple_widen:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movlps %xmm1, (%rax)
; X64-NEXT: retq
entry:
@@ -24,7 +24,7 @@ entry:
define void @complex_inreg_work(<2 x float> %a, <2 x float> %b) {
; X32-LABEL: complex_inreg_work:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movaps %xmm0, %xmm2
; X32-NEXT: cmpordps %xmm0, %xmm0
; X32-NEXT: blendvps %xmm0, %xmm2, %xmm1
@@ -33,7 +33,7 @@ define void @complex_inreg_work(<2 x float> %a, <2 x float> %b) {
; X32-NEXT: retl
;
; X64-LABEL: complex_inreg_work:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps %xmm0, %xmm2
; X64-NEXT: cmpordps %xmm0, %xmm0
; X64-NEXT: blendvps %xmm0, %xmm2, %xmm1
@@ -48,14 +48,14 @@ entry:
define void @zero_test() {
; X32-LABEL: zero_test:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: extractps $1, %xmm0, (%eax)
; X32-NEXT: movss %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: zero_test:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movlps %xmm0, (%rax)
; X64-NEXT: retq
@@ -67,7 +67,7 @@ entry:
define void @full_test() {
; X32-LABEL: full_test:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
@@ -91,7 +91,7 @@ define void @full_test() {
; X32-NEXT: retl
;
; X64-LABEL: full_test:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; X64-NEXT: cvttps2dq %xmm2, %xmm0
; X64-NEXT: cvtdq2ps %xmm0, %xmm1
diff --git a/test/CodeGen/X86/2011-10-21-widen-cmp.ll b/test/CodeGen/X86/2011-10-21-widen-cmp.ll
index 9232eba213b..812faaf473d 100644
--- a/test/CodeGen/X86/2011-10-21-widen-cmp.ll
+++ b/test/CodeGen/X86/2011-10-21-widen-cmp.ll
@@ -6,7 +6,7 @@
define void @cmp_2_floats(<2 x float> %a, <2 x float> %b) {
; CHECK-LABEL: cmp_2_floats:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movaps %xmm0, %xmm2
; CHECK-NEXT: cmpordps %xmm0, %xmm0
; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
@@ -21,7 +21,7 @@ entry:
define void @cmp_2_doubles(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: cmp_2_doubles:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd %xmm0, %xmm2
; CHECK-NEXT: cmpordpd %xmm0, %xmm0
; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -36,7 +36,7 @@ entry:
define void @mp_11193(<8 x float> * nocapture %aFOO, <8 x float>* nocapture %RET) nounwind {
; CHECK-LABEL: mp_11193:
-; CHECK: # BB#0: # %allocas
+; CHECK: # %bb.0: # %allocas
; CHECK-NEXT: movl $-1082130432, (%rsi) # imm = 0xBF800000
; CHECK-NEXT: retq
allocas:
diff --git a/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll b/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
index c87b04485e4..ad52d58bde1 100644
--- a/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
+++ b/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
@@ -9,12 +9,12 @@
define <4 x i32> @test(<4 x i32>* %p) {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps (%rdi), %xmm0
; CHECK-NEXT: extractps $2, %xmm0, %eax
; CHECK-NEXT: cmpl $3, %eax
; CHECK-NEXT: je .LBB0_2
-; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: .LBB0_2:
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/2011-12-8-bitcastintprom.ll b/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
index e2ccaa1b837..36ef1bac1a7 100644
--- a/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
+++ b/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
@@ -5,7 +5,7 @@
; Make sure that the conversion from v4i8 to v2i16 is not a simple bitcast.
define void @prom_bug(<4 x i8> %t, i16* %p) {
; SSE2-LABEL: prom_bug:
-; SSE2: ## BB#0:
+; SSE2: ## %bb.0:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
@@ -16,7 +16,7 @@ define void @prom_bug(<4 x i8> %t, i16* %p) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: prom_bug:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE41-NEXT: pextrw $0, %xmm0, (%rdi)
; SSE41-NEXT: retq
diff --git a/test/CodeGen/X86/2011-20-21-zext-ui2fp.ll b/test/CodeGen/X86/2011-20-21-zext-ui2fp.ll
index 539d5547d5f..f1543d5262f 100644
--- a/test/CodeGen/X86/2011-20-21-zext-ui2fp.ll
+++ b/test/CodeGen/X86/2011-20-21-zext-ui2fp.ll
@@ -6,7 +6,7 @@
define void @ui_to_fp_conv(<8 x float> * nocapture %aFOO, <8 x float>* nocapture %RET) nounwind {
; CHECK-LABEL: ui_to_fp_conv:
-; CHECK: # BB#0: # %allocas
+; CHECK: # %bb.0: # %allocas
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,0.000000e+00,0.000000e+00]
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: movups %xmm1, 16(%rsi)
diff --git a/test/CodeGen/X86/2012-01-11-split-cv.ll b/test/CodeGen/X86/2012-01-11-split-cv.ll
index 34ec48a0251..c8424fa69aa 100644
--- a/test/CodeGen/X86/2012-01-11-split-cv.ll
+++ b/test/CodeGen/X86/2012-01-11-split-cv.ll
@@ -3,7 +3,7 @@
define void @add18i16(<18 x i16>* nocapture sret %ret, <18 x i16>* %bp) nounwind {
; CHECK-LABEL: add18i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: vmovups (%ecx), %ymm0
diff --git a/test/CodeGen/X86/2012-01-12-extract-sv.ll b/test/CodeGen/X86/2012-01-12-extract-sv.ll
index bcfbb0a7e79..156e373a5af 100644
--- a/test/CodeGen/X86/2012-01-12-extract-sv.ll
+++ b/test/CodeGen/X86/2012-01-12-extract-sv.ll
@@ -2,7 +2,7 @@
define void @endless_loop() {
; CHECK-LABEL: endless_loop:
-; CHECK-NEXT: # BB#0:
+; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: vmovaps (%eax), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
diff --git a/test/CodeGen/X86/2012-04-26-sdglue.ll b/test/CodeGen/X86/2012-04-26-sdglue.ll
index f5f43b7d994..8066b76f3bf 100644
--- a/test/CodeGen/X86/2012-04-26-sdglue.ll
+++ b/test/CodeGen/X86/2012-04-26-sdglue.ll
@@ -6,7 +6,7 @@
define void @func() nounwind ssp {
; CHECK-LABEL: func:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups 0, %xmm0
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
diff --git a/test/CodeGen/X86/2012-07-10-extload64.ll b/test/CodeGen/X86/2012-07-10-extload64.ll
index a41123e40a5..e1f9839340c 100644
--- a/test/CodeGen/X86/2012-07-10-extload64.ll
+++ b/test/CodeGen/X86/2012-07-10-extload64.ll
@@ -3,7 +3,7 @@
define void @load_store(<4 x i16>* %in) {
; CHECK-LABEL: load_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; CHECK-NEXT: paddw %xmm0, %xmm0
@@ -20,7 +20,7 @@ entry:
; Make sure that we store a 64-bit value, even on 32-bit systems.
define void @store_64(<2 x i32>* %ptr) {
; CHECK-LABEL: store_64:
-; CHECK: # BB#0: # %BB
+; CHECK: # %bb.0: # %BB
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: movlps %xmm0, (%eax)
@@ -32,7 +32,7 @@ BB:
define <2 x i32> @load_64(<2 x i32>* %ptr) {
; CHECK-LABEL: load_64:
-; CHECK: # BB#0: # %BB
+; CHECK: # %bb.0: # %BB
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/2012-08-16-setcc.ll b/test/CodeGen/X86/2012-08-16-setcc.ll
index cba208e62a1..a31b651b3e3 100644
--- a/test/CodeGen/X86/2012-08-16-setcc.ll
+++ b/test/CodeGen/X86/2012-08-16-setcc.ll
@@ -5,7 +5,7 @@
define i32 @and_1(i8 zeroext %a, i8 zeroext %b, i32 %x) {
; CHECK-LABEL: and_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: andb %dil, %sil
; CHECK-NEXT: cmovnel %edx, %eax
@@ -18,7 +18,7 @@ define i32 @and_1(i8 zeroext %a, i8 zeroext %b, i32 %x) {
define zeroext i1 @and_2(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: and_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andb %dil, %sil
; CHECK-NEXT: setne %al
; CHECK-NEXT: retq
@@ -29,7 +29,7 @@ define zeroext i1 @and_2(i8 zeroext %a, i8 zeroext %b) {
define i32 @xor_1(i8 zeroext %a, i8 zeroext %b, i32 %x) {
; CHECK-LABEL: xor_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorb %dil, %sil
; CHECK-NEXT: cmovnel %edx, %eax
@@ -42,7 +42,7 @@ define i32 @xor_1(i8 zeroext %a, i8 zeroext %b, i32 %x) {
define zeroext i1 @xor_2(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: xor_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb %dil, %sil
; CHECK-NEXT: setne %al
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/2012-1-10-buildvector.ll b/test/CodeGen/X86/2012-1-10-buildvector.ll
index 9f17ce4b2ec..03044ac3722 100644
--- a/test/CodeGen/X86/2012-1-10-buildvector.ll
+++ b/test/CodeGen/X86/2012-1-10-buildvector.ll
@@ -3,7 +3,7 @@
define void @bad_cast() {
; CHECK-LABEL: bad_cast:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovaps %xmm0, (%eax)
; CHECK-NEXT: movl $0, (%eax)
@@ -16,7 +16,7 @@ define void @bad_cast() {
define void @bad_insert(i32 %t) {
; CHECK-LABEL: bad_insert:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vmovaps %ymm0, (%eax)
; CHECK-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/2012-12-1-merge-multiple.ll b/test/CodeGen/X86/2012-12-1-merge-multiple.ll
index 365853842ec..83a71974220 100644
--- a/test/CodeGen/X86/2012-12-1-merge-multiple.ll
+++ b/test/CodeGen/X86/2012-12-1-merge-multiple.ll
@@ -3,7 +3,7 @@
define void @multiple_stores_on_chain(i16 * %A) {
; CHECK-LABEL: multiple_stores_on_chain:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movabsq $844433520132096, %rax # imm = 0x3000200010000
; CHECK-NEXT: movq %rax, (%rdi)
; CHECK-NEXT: movabsq $1970350607106052, %rax # imm = 0x7000600050004
diff --git a/test/CodeGen/X86/3dnow-schedule.ll b/test/CodeGen/X86/3dnow-schedule.ll
index 1dc27c0e892..d8ecfb8114a 100644
--- a/test/CodeGen/X86/3dnow-schedule.ll
+++ b/test/CodeGen/X86/3dnow-schedule.ll
@@ -3,7 +3,7 @@
define void @test_femms() optsize {
; CHECK-LABEL: test_femms:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: femms
; CHECK-NEXT: retq # sched: [1:1.00]
call void @llvm.x86.mmx.femms()
@@ -13,7 +13,7 @@ declare void @llvm.x86.mmx.femms() nounwind readnone
define i64 @test_pavgusb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pavgusb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pavgusb %mm1, %mm0 # sched: [5:1.00]
; CHECK-NEXT: pavgusb (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -28,7 +28,7 @@ declare x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pf2id(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pf2id:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pf2id (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: pf2id %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -43,7 +43,7 @@ declare x86_mmx @llvm.x86.3dnow.pf2id(x86_mmx) nounwind readnone
define i64 @test_pf2iw(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pf2iw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pf2iw (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: pf2iw %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -58,7 +58,7 @@ declare x86_mmx @llvm.x86.3dnowa.pf2iw(x86_mmx) nounwind readnone
define i64 @test_pfacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfacc:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfacc %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfacc (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -73,7 +73,7 @@ declare x86_mmx @llvm.x86.3dnow.pfacc(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfadd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfadd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfadd %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfadd (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -88,7 +88,7 @@ declare x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfcmpeq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfcmpeq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfcmpeq %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfcmpeq (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -103,7 +103,7 @@ declare x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfcmpge(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfcmpge:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfcmpge %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfcmpge (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -118,7 +118,7 @@ declare x86_mmx @llvm.x86.3dnow.pfcmpge(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfcmpgt(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfcmpgt:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfcmpgt %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfcmpgt (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -133,7 +133,7 @@ declare x86_mmx @llvm.x86.3dnow.pfcmpgt(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfmax(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfmax:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfmax %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfmax (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -148,7 +148,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfmin(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfmin:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfmin %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfmin (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -163,7 +163,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfmul(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfmul:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfmul %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfmul (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -178,7 +178,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfnacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfnacc:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfnacc %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfnacc (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -193,7 +193,7 @@ declare x86_mmx @llvm.x86.3dnowa.pfnacc(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfpnacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfpnacc:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfpnacc %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfpnacc (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -208,7 +208,7 @@ declare x86_mmx @llvm.x86.3dnowa.pfpnacc(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfrcp(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pfrcp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfrcp (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: pfrcp %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -223,7 +223,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrcp(x86_mmx) nounwind readnone
define i64 @test_pfrcpit1(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfrcpit1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfrcpit1 %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfrcpit1 (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -238,7 +238,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrcpit1(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfrcpit2(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfrcpit2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfrcpit2 %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfrcpit2 (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -253,7 +253,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrcpit2(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfrsqit1(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfrsqit1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfrsqit1 %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfrsqit1 (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -268,7 +268,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrsqit1(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfrsqrt(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pfrsqrt:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfrsqrt (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: pfrsqrt %mm0, %mm0 # sched: [3:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -283,7 +283,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrsqrt(x86_mmx) nounwind readnone
define i64 @test_pfsub(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfsub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfsub %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfsub (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -298,7 +298,7 @@ declare x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pfsubr(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pfsubr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pfsubr %mm1, %mm0 # sched: [3:1.00]
; CHECK-NEXT: pfsubr (%rdi), %mm0 # sched: [7:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -313,7 +313,7 @@ declare x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pi2fd(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pi2fd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pi2fd (%rdi), %mm0 # sched: [8:1.00]
; CHECK-NEXT: pi2fd %mm0, %mm0 # sched: [4:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -328,7 +328,7 @@ declare x86_mmx @llvm.x86.3dnow.pi2fd(x86_mmx) nounwind readnone
define i64 @test_pi2fw(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pi2fw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pi2fw (%rdi), %mm0 # sched: [8:1.00]
; CHECK-NEXT: pi2fw %mm0, %mm0 # sched: [4:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -343,7 +343,7 @@ declare x86_mmx @llvm.x86.3dnowa.pi2fw(x86_mmx) nounwind readnone
define i64 @test_pmulhrw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; CHECK-LABEL: test_pmulhrw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pmulhrw %mm1, %mm0 # sched: [5:1.00]
; CHECK-NEXT: pmulhrw (%rdi), %mm0 # sched: [9:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
@@ -358,7 +358,7 @@ declare x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx, x86_mmx) nounwind readnone
define void @test_prefetch(i8* %a0) optsize {
; CHECK-LABEL: test_prefetch:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: #APP
; CHECK-NEXT: prefetch (%rdi) # sched: [5:0.50]
; CHECK-NEXT: #NO_APP
@@ -369,7 +369,7 @@ define void @test_prefetch(i8* %a0) optsize {
define void @test_prefetchw(i8* %a0) optsize {
; CHECK-LABEL: test_prefetchw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: #APP
; CHECK-NEXT: prefetchw (%rdi) # sched: [5:0.50]
; CHECK-NEXT: #NO_APP
@@ -380,7 +380,7 @@ define void @test_prefetchw(i8* %a0) optsize {
define i64 @test_pswapd(x86_mmx* %a0) optsize {
; CHECK-LABEL: test_pswapd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pswapd (%rdi), %mm0 # mm0 = mem[1,0] sched: [5:1.00]
; CHECK-NEXT: pswapd %mm0, %mm0 # mm0 = mm0[1,0] sched: [1:1.00]
; CHECK-NEXT: movd %mm0, %rax # sched: [1:0.33]
diff --git a/test/CodeGen/X86/GlobalISel/GV.ll b/test/CodeGen/X86/GlobalISel/GV.ll
index 44862ab5a96..09a2fe665c4 100644
--- a/test/CodeGen/X86/GlobalISel/GV.ll
+++ b/test/CodeGen/X86/GlobalISel/GV.ll
@@ -9,22 +9,22 @@
; Function Attrs: noinline nounwind optnone uwtable
define i32* @test_global_ptrv() #3 {
; X64-LABEL: test_global_ptrv:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: leaq g_int, %rax
; X64-NEXT: retq
;
; X64_DARWIN_PIC-LABEL: test_global_ptrv:
-; X64_DARWIN_PIC: ## BB#0: ## %entry
+; X64_DARWIN_PIC: ## %bb.0: ## %entry
; X64_DARWIN_PIC-NEXT: leaq _g_int(%rip), %rax
; X64_DARWIN_PIC-NEXT: retq
;
; X32-LABEL: test_global_ptrv:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: leal g_int, %eax
; X32-NEXT: retl
;
; X32ABI-LABEL: test_global_ptrv:
-; X32ABI: # BB#0: # %entry
+; X32ABI: # %bb.0: # %entry
; X32ABI-NEXT: leal g_int, %eax
; X32ABI-NEXT: retq
entry:
@@ -34,25 +34,25 @@ entry:
; Function Attrs: noinline nounwind optnone uwtable
define i32 @test_global_valv() #3 {
; X64-LABEL: test_global_valv:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: leaq g_int, %rax
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: retq
;
; X64_DARWIN_PIC-LABEL: test_global_valv:
-; X64_DARWIN_PIC: ## BB#0: ## %entry
+; X64_DARWIN_PIC: ## %bb.0: ## %entry
; X64_DARWIN_PIC-NEXT: leaq _g_int(%rip), %rax
; X64_DARWIN_PIC-NEXT: movl (%rax), %eax
; X64_DARWIN_PIC-NEXT: retq
;
; X32-LABEL: test_global_valv:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: leal g_int, %eax
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: retl
;
; X32ABI-LABEL: test_global_valv:
-; X32ABI: # BB#0: # %entry
+; X32ABI: # %bb.0: # %entry
; X32ABI-NEXT: leal g_int, %eax
; X32ABI-NEXT: movl (%eax), %eax
; X32ABI-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/add-scalar.ll b/test/CodeGen/X86/GlobalISel/add-scalar.ll
index cb30b2508a8..3ed6103d07b 100644
--- a/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -4,12 +4,12 @@
define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
; X64-LABEL: test_add_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq (%rsi,%rdi), %rax
; X64-NEXT: retq
;
; X32-LABEL: test_add_i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -27,14 +27,14 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_add_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: leal (%rsi,%rdi), %eax
; X64-NEXT: retq
;
; X32-LABEL: test_add_i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
@@ -44,7 +44,7 @@ define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
; X64-LABEL: test_add_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: leal (%rsi,%rdi), %eax
@@ -52,7 +52,7 @@ define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
; X64-NEXT: retq
;
; X32-LABEL: test_add_i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addw {{[0-9]+}}(%esp), %ax
; X32-NEXT: retl
@@ -62,13 +62,13 @@ define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
; X64-LABEL: test_add_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addb %dil, %sil
; X64-NEXT: movl %esi, %eax
; X64-NEXT: retq
;
; X32-LABEL: test_add_i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: addb {{[0-9]+}}(%esp), %al
; X32-NEXT: retl
@@ -78,7 +78,7 @@ define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
define i32 @test_add_i1(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_add_i1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: sete %al
; X64-NEXT: addb %al, %al
@@ -87,7 +87,7 @@ define i32 @test_add_i1(i32 %arg1, i32 %arg2) {
; X64-NEXT: retq
;
; X32-LABEL: test_add_i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: cmpl %eax, {{[0-9]+}}(%esp)
; X32-NEXT: sete %al
diff --git a/test/CodeGen/X86/GlobalISel/add-vec.ll b/test/CodeGen/X86/GlobalISel/add-vec.ll
index 0ea1cf820c0..6bebf09b26b 100644
--- a/test/CodeGen/X86/GlobalISel/add-vec.ll
+++ b/test/CodeGen/X86/GlobalISel/add-vec.ll
@@ -6,7 +6,7 @@
define <16 x i8> @test_add_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
; ALL-LABEL: test_add_v16i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
%ret = add <16 x i8> %arg1, %arg2
@@ -15,7 +15,7 @@ define <16 x i8> @test_add_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
define <8 x i16> @test_add_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
; ALL-LABEL: test_add_v8i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
%ret = add <8 x i16> %arg1, %arg2
@@ -24,7 +24,7 @@ define <8 x i16> @test_add_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
; ALL-LABEL: test_add_v4i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
%ret = add <4 x i32> %arg1, %arg2
@@ -33,7 +33,7 @@ define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
define <2 x i64> @test_add_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
; ALL-LABEL: test_add_v2i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
%ret = add <2 x i64> %arg1, %arg2
@@ -42,17 +42,17 @@ define <2 x i64> @test_add_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
define <32 x i8> @test_add_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
; SKX-LABEL: test_add_v32i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
;
; AVX2-LABEL: test_add_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX1-LABEL: test_add_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -65,17 +65,17 @@ define <32 x i8> @test_add_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
define <16 x i16> @test_add_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
; SKX-LABEL: test_add_v16i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
;
; AVX2-LABEL: test_add_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX1-LABEL: test_add_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -88,17 +88,17 @@ define <16 x i16> @test_add_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
define <8 x i32> @test_add_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
; SKX-LABEL: test_add_v8i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
;
; AVX2-LABEL: test_add_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX1-LABEL: test_add_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
@@ -111,17 +111,17 @@ define <8 x i32> @test_add_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
define <4 x i64> @test_add_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
; SKX-LABEL: test_add_v4i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
;
; AVX2-LABEL: test_add_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX1-LABEL: test_add_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
@@ -134,18 +134,18 @@ define <4 x i64> @test_add_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
define <64 x i8> @test_add_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
; SKX-LABEL: test_add_v64i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
;
; AVX2-LABEL: test_add_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX1-LABEL: test_add_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
@@ -163,18 +163,18 @@ define <64 x i8> @test_add_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
define <32 x i16> @test_add_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
; SKX-LABEL: test_add_v32i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
;
; AVX2-LABEL: test_add_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX1-LABEL: test_add_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
@@ -192,18 +192,18 @@ define <32 x i16> @test_add_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
define <16 x i32> @test_add_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
; SKX-LABEL: test_add_v16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
;
; AVX2-LABEL: test_add_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX1-LABEL: test_add_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
@@ -221,18 +221,18 @@ define <16 x i32> @test_add_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
define <8 x i64> @test_add_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
; SKX-LABEL: test_add_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
;
; AVX2-LABEL: test_add_v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX1-LABEL: test_add_v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
diff --git a/test/CodeGen/X86/GlobalISel/and-scalar.ll b/test/CodeGen/X86/GlobalISel/and-scalar.ll
index 8156e057bae..b2370139414 100644
--- a/test/CodeGen/X86/GlobalISel/and-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/and-scalar.ll
@@ -3,7 +3,7 @@
define i32 @test_and_i1(i32 %arg1, i32 %arg2) {
; ALL-LABEL: test_and_i1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: sete %al
; ALL-NEXT: andb %al, %al
@@ -18,7 +18,7 @@ define i32 @test_and_i1(i32 %arg1, i32 %arg2) {
define i8 @test_and_i8(i8 %arg1, i8 %arg2) {
; ALL-LABEL: test_and_i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: andb %dil, %sil
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -28,7 +28,7 @@ define i8 @test_and_i8(i8 %arg1, i8 %arg2) {
define i16 @test_and_i16(i16 %arg1, i16 %arg2) {
; ALL-LABEL: test_and_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: andw %di, %si
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -38,7 +38,7 @@ define i16 @test_and_i16(i16 %arg1, i16 %arg2) {
define i32 @test_and_i32(i32 %arg1, i32 %arg2) {
; ALL-LABEL: test_and_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: andl %edi, %esi
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -48,7 +48,7 @@ define i32 @test_and_i32(i32 %arg1, i32 %arg2) {
define i64 @test_and_i64(i64 %arg1, i64 %arg2) {
; ALL-LABEL: test_and_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: andq %rdi, %rsi
; ALL-NEXT: movq %rsi, %rax
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/binop.ll b/test/CodeGen/X86/GlobalISel/binop.ll
index d7ae4435682..a0efcffa66f 100644
--- a/test/CodeGen/X86/GlobalISel/binop.ll
+++ b/test/CodeGen/X86/GlobalISel/binop.ll
@@ -6,7 +6,7 @@
define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
; ALL-LABEL: test_sub_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: subq %rsi, %rdi
; ALL-NEXT: movq %rdi, %rax
; ALL-NEXT: retq
@@ -16,7 +16,7 @@ define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
; ALL-LABEL: test_sub_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: subl %esi, %edi
; ALL-NEXT: movl %edi, %eax
; ALL-NEXT: retq
@@ -26,12 +26,12 @@ define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
define float @test_add_float(float %arg1, float %arg2) {
; SSE-LABEL: test_add_float:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_add_float:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT: retq
%ret = fadd float %arg1, %arg2
@@ -40,12 +40,12 @@ define float @test_add_float(float %arg1, float %arg2) {
define double @test_add_double(double %arg1, double %arg2) {
; SSE-LABEL: test_add_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_add_double:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT: retq
%ret = fadd double %arg1, %arg2
@@ -54,12 +54,12 @@ define double @test_add_double(double %arg1, double %arg2) {
define float @test_sub_float(float %arg1, float %arg2) {
; SSE-LABEL: test_sub_float:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm1, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_sub_float:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT: retq
%ret = fsub float %arg1, %arg2
@@ -68,12 +68,12 @@ define float @test_sub_float(float %arg1, float %arg2) {
define double @test_sub_double(double %arg1, double %arg2) {
; SSE-LABEL: test_sub_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_sub_double:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT: retq
%ret = fsub double %arg1, %arg2
@@ -82,12 +82,12 @@ define double @test_sub_double(double %arg1, double %arg2) {
define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
; SSE-LABEL: test_add_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_add_v4i32:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT: retq
%ret = add <4 x i32> %arg1, %arg2
@@ -96,12 +96,12 @@ define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
; SSE-LABEL: test_sub_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_sub_v4i32:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT: retq
%ret = sub <4 x i32> %arg1, %arg2
@@ -110,12 +110,12 @@ define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
define <4 x float> @test_add_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
; SSE-LABEL: test_add_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_add_v4f32:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT: retq
%ret = fadd <4 x float> %arg1, %arg2
@@ -124,12 +124,12 @@ define <4 x float> @test_add_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
define <4 x float> @test_sub_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
; SSE-LABEL: test_sub_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subps %xmm1, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_sub_v4f32:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vsubps %xmm1, %xmm0, %xmm0
; ALL_AVX-NEXT: retq
%ret = fsub <4 x float> %arg1, %arg2
@@ -138,12 +138,12 @@ define <4 x float> @test_sub_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
define i32 @test_copy_float(float %val) {
; SSE-LABEL: test_copy_float:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_copy_float:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vmovd %xmm0, %eax
; ALL_AVX-NEXT: retq
%r = bitcast float %val to i32
@@ -152,12 +152,12 @@ define i32 @test_copy_float(float %val) {
define float @test_copy_i32(i32 %val) {
; SSE-LABEL: test_copy_i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: retq
;
; ALL_AVX-LABEL: test_copy_i32:
-; ALL_AVX: # BB#0:
+; ALL_AVX: # %bb.0:
; ALL_AVX-NEXT: vmovd %edi, %xmm0
; ALL_AVX-NEXT: retq
%r = bitcast i32 %val to float
diff --git a/test/CodeGen/X86/GlobalISel/br.ll b/test/CodeGen/X86/GlobalISel/br.ll
index 387e8797f0c..2c07a4d326e 100644
--- a/test/CodeGen/X86/GlobalISel/br.ll
+++ b/test/CodeGen/X86/GlobalISel/br.ll
@@ -3,7 +3,7 @@
define void @uncondbr() {
; CHECK-LABEL: uncondbr:
-; CHECK: # BB#1: # %entry
+; CHECK: # %bb.1: # %entry
; CHECK-NEXT: jmp .LBB0_3
; CHECK-NEXT: .LBB0_2: # %end
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/brcond.ll b/test/CodeGen/X86/GlobalISel/brcond.ll
index 917ee6f5bd8..e92573115c3 100644
--- a/test/CodeGen/X86/GlobalISel/brcond.ll
+++ b/test/CodeGen/X86/GlobalISel/brcond.ll
@@ -4,12 +4,12 @@
define i32 @test_1(i32 %a, i32 %b, i32 %tValue, i32 %fValue) {
; X64-LABEL: test_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: setl %al
; X64-NEXT: testb $1, %al
; X64-NEXT: je .LBB0_2
-; X64-NEXT: # BB#1: # %if.then
+; X64-NEXT: # %bb.1: # %if.then
; X64-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-NEXT: retq
@@ -19,7 +19,7 @@ define i32 @test_1(i32 %a, i32 %b, i32 %tValue, i32 %fValue) {
; X64-NEXT: retq
;
; X32-LABEL: test_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -27,7 +27,7 @@ define i32 @test_1(i32 %a, i32 %b, i32 %tValue, i32 %fValue) {
; X32-NEXT: setl %al
; X32-NEXT: testb $1, %al
; X32-NEXT: je .LBB0_2
-; X32-NEXT: # BB#1: # %if.then
+; X32-NEXT: # %bb.1: # %if.then
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: jmp .LBB0_3
; X32-NEXT: .LBB0_2: # %if.else
@@ -57,10 +57,10 @@ return:
define i32 @test_2(i32 %a) {
; X64-LABEL: test_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: testb $1, %dil
; X64-NEXT: je .LBB1_2
-; X64-NEXT: # BB#1: # %if.then
+; X64-NEXT: # %bb.1: # %if.then
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
; X64-NEXT: .LBB1_2: # %if.else
@@ -68,11 +68,11 @@ define i32 @test_2(i32 %a) {
; X64-NEXT: retq
;
; X32-LABEL: test_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testb $1, %al
; X32-NEXT: je .LBB1_2
-; X32-NEXT: # BB#1: # %if.then
+; X32-NEXT: # %bb.1: # %if.then
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: retl
; X32-NEXT: .LBB1_2: # %if.else
diff --git a/test/CodeGen/X86/GlobalISel/callingconv.ll b/test/CodeGen/X86/GlobalISel/callingconv.ll
index 4100a7217ac..238f1fa21cf 100644
--- a/test/CodeGen/X86/GlobalISel/callingconv.ll
+++ b/test/CodeGen/X86/GlobalISel/callingconv.ll
@@ -4,12 +4,12 @@
define i32 @test_ret_i32() {
; X32-LABEL: test_ret_i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $20, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_ret_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $20, %eax
; X64-NEXT: retq
ret i32 20
@@ -17,13 +17,13 @@ define i32 @test_ret_i32() {
define i64 @test_ret_i64() {
; X32-LABEL: test_ret_i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF
; X32-NEXT: movl $15, %edx
; X32-NEXT: retl
;
; X64-LABEL: test_ret_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movabsq $68719476735, %rax # imm = 0xFFFFFFFFF
; X64-NEXT: retq
ret i64 68719476735
@@ -31,12 +31,12 @@ define i64 @test_ret_i64() {
define i8 @test_arg_i8(i8 %a) {
; X32-LABEL: test_arg_i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb 4(%esp), %al
; X32-NEXT: retl
;
; X64-LABEL: test_arg_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
ret i8 %a
@@ -44,12 +44,12 @@ define i8 @test_arg_i8(i8 %a) {
define i16 @test_arg_i16(i16 %a) {
; X32-LABEL: test_arg_i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl 4(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_arg_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
ret i16 %a
@@ -57,12 +57,12 @@ define i16 @test_arg_i16(i16 %a) {
define i32 @test_arg_i32(i32 %a) {
; X32-LABEL: test_arg_i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl 4(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_arg_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
ret i32 %a
@@ -70,13 +70,13 @@ define i32 @test_arg_i32(i32 %a) {
define i64 @test_arg_i64(i64 %a) {
; X32-LABEL: test_arg_i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl 4(%esp), %eax
; X32-NEXT: movl 8(%esp), %edx
; X32-NEXT: retl
;
; X64-LABEL: test_arg_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
ret i64 %a
@@ -84,13 +84,13 @@ define i64 @test_arg_i64(i64 %a) {
define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
; X32-LABEL: test_i64_args_8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl 60(%esp), %eax
; X32-NEXT: movl 64(%esp), %edx
; X32-NEXT: retl
;
; X64-LABEL: test_i64_args_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq 16(%rsp), %rax
; X64-NEXT: retq
ret i64 %arg8
@@ -98,12 +98,12 @@ define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg
define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
; X32-LABEL: test_v4i32_args:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_v4i32_args:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
ret <4 x i32> %arg2
@@ -111,7 +111,7 @@ define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
define <8 x i32> @test_v8i32_args(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-LABEL: test_v8i32_args:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: .cfi_def_cfa_offset 16
; X32-NEXT: movups 16(%esp), %xmm1
@@ -120,7 +120,7 @@ define <8 x i32> @test_v8i32_args(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-NEXT: retl
;
; X64-LABEL: test_v8i32_args:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm2, %xmm0
; X64-NEXT: movaps %xmm3, %xmm1
; X64-NEXT: retq
@@ -130,7 +130,7 @@ define <8 x i32> @test_v8i32_args(<8 x i32> %arg1, <8 x i32> %arg2) {
declare void @trivial_callee()
define void @test_trivial_call() {
; X32-LABEL: test_trivial_call:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: .cfi_def_cfa_offset 16
; X32-NEXT: calll trivial_callee
@@ -138,7 +138,7 @@ define void @test_trivial_call() {
; X32-NEXT: retl
;
; X64-LABEL: test_trivial_call:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: callq trivial_callee
@@ -151,7 +151,7 @@ define void @test_trivial_call() {
declare void @simple_arg_callee(i32 %in0, i32 %in1)
define void @test_simple_arg_call(i32 %in0, i32 %in1) {
; X32-LABEL: test_simple_arg_call:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: .cfi_def_cfa_offset 16
; X32-NEXT: movl 16(%esp), %eax
@@ -163,7 +163,7 @@ define void @test_simple_arg_call(i32 %in0, i32 %in1) {
; X32-NEXT: retl
;
; X64-LABEL: test_simple_arg_call:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: movl %edi, %eax
@@ -179,7 +179,7 @@ define void @test_simple_arg_call(i32 %in0, i32 %in1) {
declare void @simple_arg8_callee(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8)
define void @test_simple_arg8_call(i32 %in0) {
; X32-LABEL: test_simple_arg8_call:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $44, %esp
; X32-NEXT: .cfi_def_cfa_offset 48
; X32-NEXT: movl 48(%esp), %eax
@@ -196,7 +196,7 @@ define void @test_simple_arg8_call(i32 %in0) {
; X32-NEXT: retl
;
; X64-LABEL: test_simple_arg8_call:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: .cfi_def_cfa_offset 32
; X64-NEXT: movl %edi, (%rsp)
@@ -216,7 +216,7 @@ define void @test_simple_arg8_call(i32 %in0) {
declare i32 @simple_return_callee(i32 %in0)
define i32 @test_simple_return_callee() {
; X32-LABEL: test_simple_return_callee:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: .cfi_def_cfa_offset 16
; X32-NEXT: movl $5, %eax
@@ -227,7 +227,7 @@ define i32 @test_simple_return_callee() {
; X32-NEXT: retl
;
; X64-LABEL: test_simple_return_callee:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: movl $5, %edi
@@ -243,7 +243,7 @@ define i32 @test_simple_return_callee() {
declare <8 x i32> @split_return_callee(<8 x i32> %in0)
define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-LABEL: test_split_return_callee:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $44, %esp
; X32-NEXT: .cfi_def_cfa_offset 48
; X32-NEXT: movaps %xmm0, (%esp) # 16-byte Spill
@@ -257,7 +257,7 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-NEXT: retl
;
; X64-LABEL: test_split_return_callee:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subq $40, %rsp
; X64-NEXT: .cfi_def_cfa_offset 48
; X64-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
@@ -276,7 +276,7 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
define void @test_indirect_call(void()* %func) {
; X32-LABEL: test_indirect_call:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: .cfi_def_cfa_offset 16
; X32-NEXT: calll *16(%esp)
@@ -284,7 +284,7 @@ define void @test_indirect_call(void()* %func) {
; X32-NEXT: retl
;
; X64-LABEL: test_indirect_call:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: callq *%rdi
@@ -297,7 +297,7 @@ define void @test_indirect_call(void()* %func) {
declare void @take_char(i8)
define void @test_abi_exts_call(i8* %addr) {
; X32-LABEL: test_abi_exts_call:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: pushl %esi
@@ -322,7 +322,7 @@ define void @test_abi_exts_call(i8* %addr) {
; X32-NEXT: retl
;
; X64-LABEL: test_abi_exts_call:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rbx
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: .cfi_offset %rbx, -16
@@ -346,7 +346,7 @@ define void @test_abi_exts_call(i8* %addr) {
declare void @variadic_callee(i8*, ...)
define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
; X32-LABEL: test_variadic_call_1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: .cfi_def_cfa_offset 16
; X32-NEXT: movl 16(%esp), %eax
@@ -360,7 +360,7 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
; X32-NEXT: retl
;
; X64-LABEL: test_variadic_call_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: movq (%rdi), %rdi
@@ -378,7 +378,7 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
; X32-LABEL: test_variadic_call_2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: .cfi_def_cfa_offset 16
; X32-NEXT: movl 16(%esp), %eax
@@ -396,7 +396,7 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
; X32-NEXT: retl
;
; X64-LABEL: test_variadic_call_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: movq (%rdi), %rdi
diff --git a/test/CodeGen/X86/GlobalISel/cmp.ll b/test/CodeGen/X86/GlobalISel/cmp.ll
index 39fee409d78..085f5e32675 100644
--- a/test/CodeGen/X86/GlobalISel/cmp.ll
+++ b/test/CodeGen/X86/GlobalISel/cmp.ll
@@ -3,7 +3,7 @@
define i32 @test_icmp_eq_i8(i8 %a, i8 %b) {
; ALL-LABEL: test_icmp_eq_i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpb %sil, %dil
; ALL-NEXT: sete %al
; ALL-NEXT: andl $1, %eax
@@ -15,7 +15,7 @@ define i32 @test_icmp_eq_i8(i8 %a, i8 %b) {
define i32 @test_icmp_eq_i16(i16 %a, i16 %b) {
; ALL-LABEL: test_icmp_eq_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpw %si, %di
; ALL-NEXT: sete %al
; ALL-NEXT: andl $1, %eax
@@ -27,7 +27,7 @@ define i32 @test_icmp_eq_i16(i16 %a, i16 %b) {
define i32 @test_icmp_eq_i64(i64 %a, i64 %b) {
; ALL-LABEL: test_icmp_eq_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpq %rsi, %rdi
; ALL-NEXT: sete %al
; ALL-NEXT: andl $1, %eax
@@ -39,7 +39,7 @@ define i32 @test_icmp_eq_i64(i64 %a, i64 %b) {
define i32 @test_icmp_eq_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_eq_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: sete %al
; ALL-NEXT: andl $1, %eax
@@ -51,7 +51,7 @@ define i32 @test_icmp_eq_i32(i32 %a, i32 %b) {
define i32 @test_icmp_ne_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_ne_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setne %al
; ALL-NEXT: andl $1, %eax
@@ -63,7 +63,7 @@ define i32 @test_icmp_ne_i32(i32 %a, i32 %b) {
define i32 @test_icmp_ugt_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_ugt_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: seta %al
; ALL-NEXT: andl $1, %eax
@@ -75,7 +75,7 @@ define i32 @test_icmp_ugt_i32(i32 %a, i32 %b) {
define i32 @test_icmp_uge_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_uge_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setae %al
; ALL-NEXT: andl $1, %eax
@@ -87,7 +87,7 @@ define i32 @test_icmp_uge_i32(i32 %a, i32 %b) {
define i32 @test_icmp_ult_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_ult_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setb %al
; ALL-NEXT: andl $1, %eax
@@ -99,7 +99,7 @@ define i32 @test_icmp_ult_i32(i32 %a, i32 %b) {
define i32 @test_icmp_ule_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_ule_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setbe %al
; ALL-NEXT: andl $1, %eax
@@ -111,7 +111,7 @@ define i32 @test_icmp_ule_i32(i32 %a, i32 %b) {
define i32 @test_icmp_sgt_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_sgt_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: andl $1, %eax
@@ -123,7 +123,7 @@ define i32 @test_icmp_sgt_i32(i32 %a, i32 %b) {
define i32 @test_icmp_sge_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_sge_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setge %al
; ALL-NEXT: andl $1, %eax
@@ -135,7 +135,7 @@ define i32 @test_icmp_sge_i32(i32 %a, i32 %b) {
define i32 @test_icmp_slt_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_slt_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setl %al
; ALL-NEXT: andl $1, %eax
@@ -147,7 +147,7 @@ define i32 @test_icmp_slt_i32(i32 %a, i32 %b) {
define i32 @test_icmp_sle_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_sle_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setle %al
; ALL-NEXT: andl $1, %eax
diff --git a/test/CodeGen/X86/GlobalISel/constant.ll b/test/CodeGen/X86/GlobalISel/constant.ll
index 5b512f9ce93..f6ebb70fcf5 100644
--- a/test/CodeGen/X86/GlobalISel/constant.ll
+++ b/test/CodeGen/X86/GlobalISel/constant.ll
@@ -3,7 +3,7 @@
define i8 @const_i8() {
; ALL-LABEL: const_i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movb $2, %al
; ALL-NEXT: retq
ret i8 2
@@ -11,7 +11,7 @@ define i8 @const_i8() {
define i16 @const_i16() {
; ALL-LABEL: const_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movw $3, %ax
; ALL-NEXT: retq
ret i16 3
@@ -19,7 +19,7 @@ define i16 @const_i16() {
define i32 @const_i32() {
; ALL-LABEL: const_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl $4, %eax
; ALL-NEXT: retq
ret i32 4
@@ -27,7 +27,7 @@ define i32 @const_i32() {
define i64 @const_i64() {
; ALL-LABEL: const_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movabsq $68719476720, %rax # imm = 0xFFFFFFFF0
; ALL-NEXT: retq
ret i64 68719476720
@@ -36,7 +36,7 @@ define i64 @const_i64() {
;i64 value fit into u32
define i64 @const_i64_u32() {
; ALL-LABEL: const_i64_u32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq $1879048192, %rax # imm = 0x70000000
; ALL-NEXT: retq
ret i64 1879048192
@@ -45,7 +45,7 @@ define i64 @const_i64_u32() {
;i64 value fit into i32
define i64 @const_i64_i32() {
; ALL-LABEL: const_i64_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq $-1, %rax
; ALL-NEXT: retq
ret i64 -1
@@ -53,7 +53,7 @@ define i64 @const_i64_i32() {
define void @main(i32 ** %data) {
; ALL-LABEL: main:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq $0, %rax
; ALL-NEXT: movq %rax, (%rdi)
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
index e8afafd0e12..ab9a2253a4e 100644
--- a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
+++ b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
@@ -5,7 +5,7 @@
define i64 @test_zext_i1(i8 %a) {
; X64-LABEL: test_zext_i1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: andq $1, %rdi
; X64-NEXT: movq %rdi, %rax
@@ -17,7 +17,7 @@ define i64 @test_zext_i1(i8 %a) {
define i64 @test_sext_i8(i8 %val) {
; X64-LABEL: test_sext_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsbq %dil, %rax
; X64-NEXT: retq
%r = sext i8 %val to i64
@@ -26,7 +26,7 @@ define i64 @test_sext_i8(i8 %val) {
define i64 @test_sext_i16(i16 %val) {
; X64-LABEL: test_sext_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movswq %di, %rax
; X64-NEXT: retq
%r = sext i16 %val to i64
diff --git a/test/CodeGen/X86/GlobalISel/ext.ll b/test/CodeGen/X86/GlobalISel/ext.ll
index 3b0b35797a8..3fb4979d2cf 100644
--- a/test/CodeGen/X86/GlobalISel/ext.ll
+++ b/test/CodeGen/X86/GlobalISel/ext.ll
@@ -4,13 +4,13 @@
define i8 @test_zext_i1toi8(i32 %a) {
; X64-LABEL: test_zext_i1toi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: test_zext_i1toi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -22,13 +22,13 @@ define i8 @test_zext_i1toi8(i32 %a) {
define i16 @test_zext_i1toi16(i32 %a) {
; X64-LABEL: test_zext_i1toi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andw $1, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: test_zext_i1toi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andw $1, %ax
; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -40,13 +40,13 @@ define i16 @test_zext_i1toi16(i32 %a) {
define i32 @test_zext_i1(i32 %a) {
; X64-LABEL: test_zext_i1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $1, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: test_zext_i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: retl
@@ -57,12 +57,12 @@ define i32 @test_zext_i1(i32 %a) {
define i32 @test_zext_i8(i8 %val) {
; X64-LABEL: test_zext_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: retq
;
; X32-LABEL: test_zext_i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
%r = zext i8 %val to i32
@@ -71,12 +71,12 @@ define i32 @test_zext_i8(i8 %val) {
define i32 @test_zext_i16(i16 %val) {
; X64-LABEL: test_zext_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl %di, %eax
; X64-NEXT: retq
;
; X32-LABEL: test_zext_i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
%r = zext i16 %val to i32
@@ -85,12 +85,12 @@ define i32 @test_zext_i16(i16 %val) {
define i32 @test_sext_i8(i8 %val) {
; X64-LABEL: test_sext_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsbl %dil, %eax
; X64-NEXT: retq
;
; X32-LABEL: test_sext_i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
%r = sext i8 %val to i32
@@ -99,12 +99,12 @@ define i32 @test_sext_i8(i8 %val) {
define i32 @test_sext_i16(i16 %val) {
; X64-LABEL: test_sext_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movswl %di, %eax
; X64-NEXT: retq
;
; X32-LABEL: test_sext_i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movswl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
%r = sext i16 %val to i32
diff --git a/test/CodeGen/X86/GlobalISel/fadd-scalar.ll b/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
index 6aee06a75f6..0fa1142c30a 100644
--- a/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
define float @test_fadd_float(float %arg1, float %arg2) {
; ALL-LABEL: test_fadd_float:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: addss %xmm1, %xmm0
; ALL-NEXT: retq
%ret = fadd float %arg1, %arg2
@@ -11,7 +11,7 @@ define float @test_fadd_float(float %arg1, float %arg2) {
define double @test_fadd_double(double %arg1, double %arg2) {
; ALL-LABEL: test_fadd_double:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: addsd %xmm1, %xmm0
; ALL-NEXT: retq
%ret = fadd double %arg1, %arg2
diff --git a/test/CodeGen/X86/GlobalISel/fconstant.ll b/test/CodeGen/X86/GlobalISel/fconstant.ll
index 2df013a1ed7..6c3586acd37 100644
--- a/test/CodeGen/X86/GlobalISel/fconstant.ll
+++ b/test/CodeGen/X86/GlobalISel/fconstant.ll
@@ -7,7 +7,7 @@
define void @test_float(float* %a , float %b) {
; CHECK_SMALL64-LABEL: test_float:
-; CHECK_SMALL64: # BB#0: # %entry
+; CHECK_SMALL64: # %bb.0: # %entry
; CHECK_SMALL64-NEXT: movss .LCPI0_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
; CHECK_SMALL64-NEXT: addss %xmm0, %xmm1
; CHECK_SMALL64-NEXT: movd %xmm1, %eax
@@ -15,7 +15,7 @@ define void @test_float(float* %a , float %b) {
; CHECK_SMALL64-NEXT: retq
;
; CHECK_LARGE64-LABEL: test_float:
-; CHECK_LARGE64: # BB#0: # %entry
+; CHECK_LARGE64: # %bb.0: # %entry
; CHECK_LARGE64-NEXT: movabsq $.LCPI0_0, %rax
; CHECK_LARGE64-NEXT: addss (%rax), %xmm0
; CHECK_LARGE64-NEXT: movd %xmm0, %eax
@@ -23,7 +23,7 @@ define void @test_float(float* %a , float %b) {
; CHECK_LARGE64-NEXT: retq
;
; CHECK32-LABEL: test_float:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl 4(%esp), %eax
; CHECK32-NEXT: movl 8(%esp), %ecx
; CHECK32-NEXT: movss .LCPI0_0, %xmm0 # xmm0 = mem[0],zero,zero,zero
diff --git a/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll b/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
index 268802dc06a..e05a36c4997 100644
--- a/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
define float @test_fdiv_float(float %arg1, float %arg2) {
; ALL-LABEL: test_fdiv_float:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: divss %xmm1, %xmm0
; ALL-NEXT: retq
%ret = fdiv float %arg1, %arg2
@@ -11,7 +11,7 @@ define float @test_fdiv_float(float %arg1, float %arg2) {
define double @test_fdiv_double(double %arg1, double %arg2) {
; ALL-LABEL: test_fdiv_double:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: divsd %xmm1, %xmm0
; ALL-NEXT: retq
%ret = fdiv double %arg1, %arg2
diff --git a/test/CodeGen/X86/GlobalISel/fmul-scalar.ll b/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
index c7a37a14c33..c2244cb8a5c 100644
--- a/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
define float @test_fmul_float(float %arg1, float %arg2) {
; ALL-LABEL: test_fmul_float:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: mulss %xmm1, %xmm0
; ALL-NEXT: retq
%ret = fmul float %arg1, %arg2
@@ -11,7 +11,7 @@ define float @test_fmul_float(float %arg1, float %arg2) {
define double @test_fmul_double(double %arg1, double %arg2) {
; ALL-LABEL: test_fmul_double:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: mulsd %xmm1, %xmm0
; ALL-NEXT: retq
%ret = fmul double %arg1, %arg2
diff --git a/test/CodeGen/X86/GlobalISel/fpext-scalar.ll b/test/CodeGen/X86/GlobalISel/fpext-scalar.ll
index c22a4da5789..8501009e291 100644
--- a/test/CodeGen/X86/GlobalISel/fpext-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/fpext-scalar.ll
@@ -3,7 +3,7 @@
define double @test(float %a) {
; CHECK-LABEL: test:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/GlobalISel/frameIndex.ll b/test/CodeGen/X86/GlobalISel/frameIndex.ll
index 7b2a050f153..1faa82b37c1 100644
--- a/test/CodeGen/X86/GlobalISel/frameIndex.ll
+++ b/test/CodeGen/X86/GlobalISel/frameIndex.ll
@@ -8,12 +8,12 @@
define i32* @allocai32() {
; X64-LABEL: allocai32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -4(%rsp), %rax
; X64-NEXT: retq
;
; X32-LABEL: allocai32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movl %esp, %eax
@@ -21,7 +21,7 @@ define i32* @allocai32() {
; X32-NEXT: retl
;
; X32ABI-LABEL: allocai32:
-; X32ABI: # BB#0:
+; X32ABI: # %bb.0:
; X32ABI-NEXT: leal -4(%rsp), %eax
; X32ABI-NEXT: retq
%ptr1 = alloca i32
diff --git a/test/CodeGen/X86/GlobalISel/fsub-scalar.ll b/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
index 32c25a3a082..7fc9dd31490 100644
--- a/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
define float @test_fsub_float(float %arg1, float %arg2) {
; ALL-LABEL: test_fsub_float:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: subss %xmm1, %xmm0
; ALL-NEXT: retq
%ret = fsub float %arg1, %arg2
@@ -11,7 +11,7 @@ define float @test_fsub_float(float %arg1, float %arg2) {
define double @test_fsub_double(double %arg1, double %arg2) {
; ALL-LABEL: test_fsub_double:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: subsd %xmm1, %xmm0
; ALL-NEXT: retq
%ret = fsub double %arg1, %arg2
diff --git a/test/CodeGen/X86/GlobalISel/gep.ll b/test/CodeGen/X86/GlobalISel/gep.ll
index ee66accc77d..fd30f8b782e 100644
--- a/test/CodeGen/X86/GlobalISel/gep.ll
+++ b/test/CodeGen/X86/GlobalISel/gep.ll
@@ -4,7 +4,7 @@
define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
; X64_GISEL-LABEL: test_gep_i8:
-; X64_GISEL: # BB#0:
+; X64_GISEL: # %bb.0:
; X64_GISEL-NEXT: movq $4, %rax
; X64_GISEL-NEXT: movsbq %sil, %rcx
; X64_GISEL-NEXT: imulq %rax, %rcx
@@ -12,7 +12,7 @@ define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: movsbq %sil, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
@@ -23,13 +23,13 @@ define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
define i32* @test_gep_i8_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i8_const:
-; X64_GISEL: # BB#0:
+; X64_GISEL: # %bb.0:
; X64_GISEL-NEXT: movq $80, %rax
; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i8_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq 80(%rdi), %rax
; X64-NEXT: retq
%arrayidx = getelementptr i32, i32* %arr, i8 20
@@ -38,7 +38,7 @@ define i32* @test_gep_i8_const(i32 *%arr) {
define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
; X64_GISEL-LABEL: test_gep_i16:
-; X64_GISEL: # BB#0:
+; X64_GISEL: # %bb.0:
; X64_GISEL-NEXT: movq $4, %rax
; X64_GISEL-NEXT: movswq %si, %rcx
; X64_GISEL-NEXT: imulq %rax, %rcx
@@ -46,7 +46,7 @@ define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: movswq %si, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
@@ -57,13 +57,13 @@ define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
define i32* @test_gep_i16_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i16_const:
-; X64_GISEL: # BB#0:
+; X64_GISEL: # %bb.0:
; X64_GISEL-NEXT: movq $80, %rax
; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i16_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq 80(%rdi), %rax
; X64-NEXT: retq
%arrayidx = getelementptr i32, i32* %arr, i16 20
@@ -72,7 +72,7 @@ define i32* @test_gep_i16_const(i32 *%arr) {
define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
; X64_GISEL-LABEL: test_gep_i32:
-; X64_GISEL: # BB#0:
+; X64_GISEL: # %bb.0:
; X64_GISEL-NEXT: movq $4, %rax
; X64_GISEL-NEXT: movslq %esi, %rcx
; X64_GISEL-NEXT: imulq %rax, %rcx
@@ -80,7 +80,7 @@ define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movslq %esi, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
@@ -90,13 +90,13 @@ define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
define i32* @test_gep_i32_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i32_const:
-; X64_GISEL: # BB#0:
+; X64_GISEL: # %bb.0:
; X64_GISEL-NEXT: movq $20, %rax
; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i32_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq 20(%rdi), %rax
; X64-NEXT: retq
%arrayidx = getelementptr i32, i32* %arr, i32 5
@@ -105,14 +105,14 @@ define i32* @test_gep_i32_const(i32 *%arr) {
define i32* @test_gep_i64(i32 *%arr, i64 %ind) {
; X64_GISEL-LABEL: test_gep_i64:
-; X64_GISEL: # BB#0:
+; X64_GISEL: # %bb.0:
; X64_GISEL-NEXT: movq $4, %rax
; X64_GISEL-NEXT: imulq %rsi, %rax
; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq (%rdi,%rsi,4), %rax
; X64-NEXT: retq
%arrayidx = getelementptr i32, i32* %arr, i64 %ind
@@ -121,13 +121,13 @@ define i32* @test_gep_i64(i32 *%arr, i64 %ind) {
define i32* @test_gep_i64_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i64_const:
-; X64_GISEL: # BB#0:
+; X64_GISEL: # %bb.0:
; X64_GISEL-NEXT: movq $20, %rax
; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i64_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq 20(%rdi), %rax
; X64-NEXT: retq
%arrayidx = getelementptr i32, i32* %arr, i64 5
diff --git a/test/CodeGen/X86/GlobalISel/legalize-brcond.mir b/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
index 0346912785e..946e7385f38 100644
--- a/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
+++ b/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
@@ -27,25 +27,25 @@ registers:
- { id: 2, class: _, preferred-register: '' }
- { id: 3, class: _, preferred-register: '' }
# ALL: %1:_(s1) = G_TRUNC %0(s32)
-# ALL-NEXT: G_BRCOND %1(s1), %[[TRUE:bb.[0-9]+.if.then]]
-# ALL-NEXT: G_BR %[[FALSE:bb.[0-9]+.if.else]]
-# ALL: [[TRUE]]:
+# ALL-NEXT: G_BRCOND %1(s1), %[[TRUE:bb.[0-9]+]]
+# ALL-NEXT: G_BR %[[FALSE:bb.[0-9]+]]
+# ALL: [[TRUE]].{{[a-zA-Z0-9.]+}}:
# ALL-NEXT: %eax = COPY %2(s32)
# ALL-NEXT: RET 0, implicit %eax
-# ALL: [[FALSE]]:
+# ALL: [[FALSE]].{{[a-zA-Z0-9.]+}}:
# ALL-NEXT: %eax = COPY %3(s32)
# ALL-NEXT: RET 0, implicit %eax
body: |
bb.1.entry:
- successors: %bb.2.if.then(0x40000000), %bb.3.if.else(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi
%0(s32) = COPY %edi
%2(s32) = G_CONSTANT i32 0
%3(s32) = G_CONSTANT i32 1
%1(s1) = G_TRUNC %0(s32)
- G_BRCOND %1(s1), %bb.2.if.then
- G_BR %bb.3.if.else
+ G_BRCOND %1(s1), %bb.2
+ G_BR %bb.3
bb.2.if.then:
%eax = COPY %2(s32)
diff --git a/test/CodeGen/X86/GlobalISel/legalize-phi.mir b/test/CodeGen/X86/GlobalISel/legalize-phi.mir
index a045205a799..44db405f165 100644
--- a/test/CodeGen/X86/GlobalISel/legalize-phi.mir
+++ b/test/CodeGen/X86/GlobalISel/legalize-phi.mir
@@ -140,29 +140,29 @@ constants:
body: |
; ALL-LABEL: name: test_i1
- ; ALL: bb.0.entry:
- ; ALL: successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+ ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; ALL: liveins: %edi, %edx, %esi
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
; ALL: [[COPY1:%[0-9]+]]:_(s1) = COPY %esi
; ALL: [[COPY2:%[0-9]+]]:_(s1) = COPY %edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1.cond.true
- ; ALL: G_BR %bb.2.cond.false
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
+ ; ALL: successors: %bb.3(0x80000000)
; ALL: [[ANYEXT:%[0-9]+]]:_(s8) = G_ANYEXT [[COPY1]](s1)
- ; ALL: G_BR %bb.3.cond.end
+ ; ALL: G_BR %bb.3
; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
+ ; ALL: successors: %bb.3(0x80000000)
; ALL: [[ANYEXT1:%[0-9]+]]:_(s8) = G_ANYEXT [[COPY2]](s1)
; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[ANYEXT]](s8), %bb.1.cond.true, [[ANYEXT1]](s8), %bb.2.cond.false
+ ; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[ANYEXT]](s8), %bb.1, [[ANYEXT1]](s8), %bb.2
; ALL: %al = COPY
; ALL: RET 0, implicit %al
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %edx, %esi
%0(s32) = COPY %edi
@@ -170,20 +170,20 @@ body: |
%2(s1) = COPY %edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s1) = G_PHI %1(s1), %bb.2.cond.true, %2(s1), %bb.3.cond.false
+ %5(s1) = G_PHI %1(s1), %bb.2, %2(s1), %bb.3
%6(s8) = G_ZEXT %5(s1)
%al = COPY %6(s8)
RET 0, implicit %al
@@ -211,27 +211,27 @@ constants:
body: |
; ALL-LABEL: name: test_i8
- ; ALL: bb.0.entry:
- ; ALL: successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+ ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; ALL: liveins: %edi, %edx, %esi
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
; ALL: [[COPY1:%[0-9]+]]:_(s8) = COPY %sil
; ALL: [[COPY2:%[0-9]+]]:_(s8) = COPY %edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1.cond.true
- ; ALL: G_BR %bb.2.cond.false
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
- ; ALL: G_BR %bb.3.cond.end
+ ; ALL: successors: %bb.3(0x80000000)
+ ; ALL: G_BR %bb.3
; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
+ ; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[COPY1]](s8), %bb.1.cond.true, [[COPY2]](s8), %bb.2.cond.false
+ ; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[COPY1]](s8), %bb.1, [[COPY2]](s8), %bb.2
; ALL: %al = COPY [[PHI]](s8)
; ALL: RET 0, implicit %al
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %edx, %esi
%0(s32) = COPY %edi
@@ -239,20 +239,20 @@ body: |
%2(s8) = COPY %edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s8) = G_PHI %1(s8), %bb.2.cond.true, %2(s8), %bb.3.cond.false
+ %5(s8) = G_PHI %1(s8), %bb.2, %2(s8), %bb.3
%al = COPY %5(s8)
RET 0, implicit %al
@@ -279,27 +279,27 @@ constants:
body: |
; ALL-LABEL: name: test_i16
- ; ALL: bb.0.entry:
- ; ALL: successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+ ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; ALL: liveins: %edi, %edx, %esi
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
; ALL: [[COPY1:%[0-9]+]]:_(s16) = COPY %si
; ALL: [[COPY2:%[0-9]+]]:_(s16) = COPY %edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1.cond.true
- ; ALL: G_BR %bb.2.cond.false
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
- ; ALL: G_BR %bb.3.cond.end
+ ; ALL: successors: %bb.3(0x80000000)
+ ; ALL: G_BR %bb.3
; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
+ ; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[COPY1]](s16), %bb.1.cond.true, [[COPY2]](s16), %bb.2.cond.false
+ ; ALL: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[COPY1]](s16), %bb.1, [[COPY2]](s16), %bb.2
; ALL: %ax = COPY [[PHI]](s16)
; ALL: RET 0, implicit %ax
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %edx, %esi
%0(s32) = COPY %edi
@@ -307,20 +307,20 @@ body: |
%2(s16) = COPY %edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s16) = G_PHI %1(s16), %bb.2.cond.true, %2(s16), %bb.3.cond.false
+ %5(s16) = G_PHI %1(s16), %bb.2, %2(s16), %bb.3
%ax = COPY %5(s16)
RET 0, implicit %ax
@@ -347,27 +347,27 @@ constants:
body: |
; ALL-LABEL: name: test_i32
- ; ALL: bb.0.entry:
- ; ALL: successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+ ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; ALL: liveins: %edi, %edx, %esi
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1.cond.true
- ; ALL: G_BR %bb.2.cond.false
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
- ; ALL: G_BR %bb.3.cond.end
+ ; ALL: successors: %bb.3(0x80000000)
+ ; ALL: G_BR %bb.3
; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
+ ; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1.cond.true, [[COPY2]](s32), %bb.2.cond.false
+ ; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
; ALL: %eax = COPY [[PHI]](s32)
; ALL: RET 0, implicit %eax
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %edx, %esi
%0(s32) = COPY %edi
@@ -375,20 +375,20 @@ body: |
%2(s32) = COPY %edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.2.cond.true, %2(s32), %bb.3.cond.false
+ %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
%eax = COPY %5(s32)
RET 0, implicit %eax
@@ -415,27 +415,27 @@ constants:
body: |
; ALL-LABEL: name: test_i64
- ; ALL: bb.0.entry:
- ; ALL: successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+ ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; ALL: liveins: %edi, %rdx, %rsi
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY %rdx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1.cond.true
- ; ALL: G_BR %bb.2.cond.false
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
- ; ALL: G_BR %bb.3.cond.end
+ ; ALL: successors: %bb.3(0x80000000)
+ ; ALL: G_BR %bb.3
; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
+ ; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1.cond.true, [[COPY2]](s64), %bb.2.cond.false
+ ; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
; ALL: %rax = COPY [[PHI]](s64)
; ALL: RET 0, implicit %rax
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %rdx, %rsi
%0(s32) = COPY %edi
@@ -443,20 +443,20 @@ body: |
%2(s64) = COPY %rdx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s64) = G_PHI %1(s64), %bb.2.cond.true, %2(s64), %bb.3.cond.false
+ %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
%rax = COPY %5(s64)
RET 0, implicit %rax
@@ -483,27 +483,27 @@ constants:
body: |
; ALL-LABEL: name: test_float
- ; ALL: bb.0.entry:
- ; ALL: successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+ ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; ALL: liveins: %edi, %xmm0, %xmm1
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm0
; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %xmm1
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1.cond.true
- ; ALL: G_BR %bb.2.cond.false
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
- ; ALL: G_BR %bb.3.cond.end
+ ; ALL: successors: %bb.3(0x80000000)
+ ; ALL: G_BR %bb.3
; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
+ ; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1.cond.true, [[COPY2]](s32), %bb.2.cond.false
+ ; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
; ALL: %xmm0 = COPY [[PHI]](s32)
; ALL: RET 0, implicit %xmm0
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %xmm0, %xmm1
%0(s32) = COPY %edi
@@ -511,20 +511,20 @@ body: |
%2(s32) = COPY %xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.2.cond.true, %2(s32), %bb.3.cond.false
+ %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
%xmm0 = COPY %5(s32)
RET 0, implicit %xmm0
@@ -551,27 +551,27 @@ constants:
body: |
; ALL-LABEL: name: test_double
- ; ALL: bb.0.entry:
- ; ALL: successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+ ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; ALL: liveins: %edi, %xmm0, %xmm1
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm0
; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY %xmm1
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1.cond.true
- ; ALL: G_BR %bb.2.cond.false
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
- ; ALL: G_BR %bb.3.cond.end
+ ; ALL: successors: %bb.3(0x80000000)
+ ; ALL: G_BR %bb.3
; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3.cond.end(0x80000000)
+ ; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1.cond.true, [[COPY2]](s64), %bb.2.cond.false
+ ; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
; ALL: %xmm0 = COPY [[PHI]](s64)
; ALL: RET 0, implicit %xmm0
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %xmm0, %xmm1
%0(s32) = COPY %edi
@@ -579,20 +579,20 @@ body: |
%2(s64) = COPY %xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s64) = G_PHI %1(s64), %bb.2.cond.true, %2(s64), %bb.3.cond.false
+ %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
%xmm0 = COPY %5(s64)
RET 0, implicit %xmm0
diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
index 1c719b1bf74..31fb11179bb 100644
--- a/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
@@ -6,7 +6,7 @@
define i1 @test_load_i1(i1 * %p1) {
; ALL-LABEL: test_load_i1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movb (%eax), %al
; ALL-NEXT: retl
@@ -16,7 +16,7 @@ define i1 @test_load_i1(i1 * %p1) {
define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movb (%eax), %al
; ALL-NEXT: retl
@@ -26,7 +26,7 @@ define i8 @test_load_i8(i8 * %p1) {
define i16 @test_load_i16(i16 * %p1) {
; ALL-LABEL: test_load_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movzwl (%eax), %eax
; ALL-NEXT: retl
@@ -36,7 +36,7 @@ define i16 @test_load_i16(i16 * %p1) {
define i32 @test_load_i32(i32 * %p1) {
; ALL-LABEL: test_load_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movl (%eax), %eax
; ALL-NEXT: retl
@@ -46,7 +46,7 @@ define i32 @test_load_i32(i32 * %p1) {
define i1 * @test_store_i1(i1 %val, i1 * %p1) {
; ALL-LABEL: test_store_i1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movb 4(%esp), %cl
; ALL-NEXT: movl 8(%esp), %eax
; ALL-NEXT: andb $1, %cl
@@ -58,7 +58,7 @@ define i1 * @test_store_i1(i1 %val, i1 * %p1) {
define i8 * @test_store_i8(i8 %val, i8 * %p1) {
; ALL-LABEL: test_store_i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movb 4(%esp), %cl
; ALL-NEXT: movl 8(%esp), %eax
; ALL-NEXT: movb %cl, (%eax)
@@ -69,7 +69,7 @@ define i8 * @test_store_i8(i8 %val, i8 * %p1) {
define i16 * @test_store_i16(i16 %val, i16 * %p1) {
; ALL-LABEL: test_store_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movzwl 4(%esp), %ecx
; ALL-NEXT: movl 8(%esp), %eax
; ALL-NEXT: movw %cx, (%eax)
@@ -80,7 +80,7 @@ define i16 * @test_store_i16(i16 %val, i16 * %p1) {
define i32 * @test_store_i32(i32 %val, i32 * %p1) {
; ALL-LABEL: test_store_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl 4(%esp), %ecx
; ALL-NEXT: movl 8(%esp), %eax
; ALL-NEXT: movl %ecx, (%eax)
@@ -91,7 +91,7 @@ define i32 * @test_store_i32(i32 %val, i32 * %p1) {
define i32* @test_load_ptr(i32** %ptr1) {
; ALL-LABEL: test_load_ptr:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movl (%eax), %eax
; ALL-NEXT: retl
@@ -101,7 +101,7 @@ define i32* @test_load_ptr(i32** %ptr1) {
define void @test_store_ptr(i32** %ptr1, i32* %a) {
; ALL-LABEL: test_store_ptr:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movl 8(%esp), %ecx
; ALL-NEXT: movl %ecx, (%eax)
diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index 2097a3b0bfc..0355c395780 100644
--- a/test/CodeGen/X86/GlobalISel/memop-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -4,7 +4,7 @@
define i1 @test_load_i1(i1 * %p1) {
; ALL-LABEL: test_load_i1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movb (%rdi), %al
; ALL-NEXT: retq
%r = load i1, i1* %p1
@@ -13,7 +13,7 @@ define i1 @test_load_i1(i1 * %p1) {
define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movb (%rdi), %al
; ALL-NEXT: retq
%r = load i8, i8* %p1
@@ -22,7 +22,7 @@ define i8 @test_load_i8(i8 * %p1) {
define i16 @test_load_i16(i16 * %p1) {
; ALL-LABEL: test_load_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movzwl (%rdi), %eax
; ALL-NEXT: retq
%r = load i16, i16* %p1
@@ -31,7 +31,7 @@ define i16 @test_load_i16(i16 * %p1) {
define i32 @test_load_i32(i32 * %p1) {
; ALL-LABEL: test_load_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl (%rdi), %eax
; ALL-NEXT: retq
%r = load i32, i32* %p1
@@ -40,7 +40,7 @@ define i32 @test_load_i32(i32 * %p1) {
define i64 @test_load_i64(i64 * %p1) {
; ALL-LABEL: test_load_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq (%rdi), %rax
; ALL-NEXT: retq
%r = load i64, i64* %p1
@@ -49,13 +49,13 @@ define i64 @test_load_i64(i64 * %p1) {
define float @test_load_float(float * %p1) {
; SSE-LABEL: test_load_float:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl (%rdi), %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: retq
;
; ALL-LABEL: test_load_float:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl (%rdi), %eax
; ALL-NEXT: movd %eax, %xmm0
; ALL-NEXT: retq
@@ -65,13 +65,13 @@ define float @test_load_float(float * %p1) {
define double @test_load_double(double * %p1) {
; SSE-LABEL: test_load_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq (%rdi), %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
; ALL-LABEL: test_load_double:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq (%rdi), %rax
; ALL-NEXT: movq %rax, %xmm0
; ALL-NEXT: retq
@@ -81,7 +81,7 @@ define double @test_load_double(double * %p1) {
define i1 * @test_store_i1(i1 %val, i1 * %p1) {
; ALL-LABEL: test_store_i1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: andb $1, %dil
; ALL-NEXT: movb %dil, (%rsi)
; ALL-NEXT: movq %rsi, %rax
@@ -92,7 +92,7 @@ define i1 * @test_store_i1(i1 %val, i1 * %p1) {
define i32 * @test_store_i32(i32 %val, i32 * %p1) {
; ALL-LABEL: test_store_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl %edi, (%rsi)
; ALL-NEXT: movq %rsi, %rax
; ALL-NEXT: retq
@@ -102,7 +102,7 @@ define i32 * @test_store_i32(i32 %val, i32 * %p1) {
define i64 * @test_store_i64(i64 %val, i64 * %p1) {
; ALL-LABEL: test_store_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq %rdi, (%rsi)
; ALL-NEXT: movq %rsi, %rax
; ALL-NEXT: retq
@@ -113,14 +113,14 @@ define i64 * @test_store_i64(i64 %val, i64 * %p1) {
define float * @test_store_float(float %val, float * %p1) {
;
; SSE_FAST-LABEL: test_store_float:
-; SSE_FAST: # BB#0:
+; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: movd %xmm0, %eax
; SSE_FAST-NEXT: movl %eax, (%rdi)
; SSE_FAST-NEXT: movq %rdi, %rax
; SSE_FAST-NEXT: retq
;
; SSE_GREEDY-LABEL: test_store_float:
-; SSE_GREEDY: # BB#0:
+; SSE_GREEDY: # %bb.0:
; SSE_GREEDY-NEXT: movss %xmm0, (%rdi)
; SSE_GREEDY-NEXT: movq %rdi, %rax
; SSE_GREEDY-NEXT: retq
@@ -131,14 +131,14 @@ define float * @test_store_float(float %val, float * %p1) {
define double * @test_store_double(double %val, double * %p1) {
;
; SSE_FAST-LABEL: test_store_double:
-; SSE_FAST: # BB#0:
+; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: movq %xmm0, %rax
; SSE_FAST-NEXT: movq %rax, (%rdi)
; SSE_FAST-NEXT: movq %rdi, %rax
; SSE_FAST-NEXT: retq
;
; SSE_GREEDY-LABEL: test_store_double:
-; SSE_GREEDY: # BB#0:
+; SSE_GREEDY: # %bb.0:
; SSE_GREEDY-NEXT: movsd %xmm0, (%rdi)
; SSE_GREEDY-NEXT: movq %rdi, %rax
; SSE_GREEDY-NEXT: retq
@@ -148,7 +148,7 @@ define double * @test_store_double(double %val, double * %p1) {
define i32* @test_load_ptr(i32** %ptr1) {
; ALL-LABEL: test_load_ptr:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq (%rdi), %rax
; ALL-NEXT: retq
%p = load i32*, i32** %ptr1
@@ -157,7 +157,7 @@ define i32* @test_load_ptr(i32** %ptr1) {
define void @test_store_ptr(i32** %ptr1, i32* %a) {
; ALL-LABEL: test_store_ptr:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq %rsi, (%rdi)
; ALL-NEXT: retq
store i32* %a, i32** %ptr1
@@ -166,7 +166,7 @@ define void @test_store_ptr(i32** %ptr1, i32* %a) {
define i32 @test_gep_folding(i32* %arr, i32 %val) {
; ALL-LABEL: test_gep_folding:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl %esi, 20(%rdi)
; ALL-NEXT: movl 20(%rdi), %eax
; ALL-NEXT: retq
@@ -179,7 +179,7 @@ define i32 @test_gep_folding(i32* %arr, i32 %val) {
; check that the gep index doesn't get folded into the memory operand
define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
; ALL-LABEL: test_gep_folding_largeGepIndex:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movabsq $228719476720, %rax # imm = 0x3540BE3FF0
; ALL-NEXT: leaq (%rdi,%rax), %rax
; ALL-NEXT: movl %esi, (%rax)
diff --git a/test/CodeGen/X86/GlobalISel/memop-vec.ll b/test/CodeGen/X86/GlobalISel/memop-vec.ll
index 870e812bbb6..a5fbd6c76a4 100644
--- a/test/CodeGen/X86/GlobalISel/memop-vec.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-vec.ll
@@ -4,7 +4,7 @@
define <4 x i32> @test_load_v4i32_noalign(<4 x i32> * %p1) {
; SKX-LABEL: test_load_v4i32_noalign:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %xmm0
; SKX-NEXT: retq
%r = load <4 x i32>, <4 x i32>* %p1, align 1
@@ -13,7 +13,7 @@ define <4 x i32> @test_load_v4i32_noalign(<4 x i32> * %p1) {
define <4 x i32> @test_load_v4i32_align(<4 x i32> * %p1) {
; SKX-LABEL: test_load_v4i32_align:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps (%rdi), %xmm0
; SKX-NEXT: retq
%r = load <4 x i32>, <4 x i32>* %p1, align 16
@@ -22,7 +22,7 @@ define <4 x i32> @test_load_v4i32_align(<4 x i32> * %p1) {
define <8 x i32> @test_load_v8i32_noalign(<8 x i32> * %p1) {
; SKX-LABEL: test_load_v8i32_noalign:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %ymm0
; SKX-NEXT: retq
%r = load <8 x i32>, <8 x i32>* %p1, align 1
@@ -31,7 +31,7 @@ define <8 x i32> @test_load_v8i32_noalign(<8 x i32> * %p1) {
define <8 x i32> @test_load_v8i32_align(<8 x i32> * %p1) {
; SKX-LABEL: test_load_v8i32_align:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps (%rdi), %ymm0
; SKX-NEXT: retq
%r = load <8 x i32>, <8 x i32>* %p1, align 32
@@ -40,7 +40,7 @@ define <8 x i32> @test_load_v8i32_align(<8 x i32> * %p1) {
define <16 x i32> @test_load_v16i32_noalign(<16 x i32> * %p1) {
; SKX-LABEL: test_load_v16i32_noalign:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %zmm0
; SKX-NEXT: retq
%r = load <16 x i32>, <16 x i32>* %p1, align 1
@@ -49,7 +49,7 @@ define <16 x i32> @test_load_v16i32_noalign(<16 x i32> * %p1) {
define <16 x i32> @test_load_v16i32_align(<16 x i32> * %p1) {
; SKX-LABEL: test_load_v16i32_align:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %zmm0
; SKX-NEXT: retq
%r = load <16 x i32>, <16 x i32>* %p1, align 32
@@ -58,7 +58,7 @@ define <16 x i32> @test_load_v16i32_align(<16 x i32> * %p1) {
define void @test_store_v4i32_noalign(<4 x i32> %val, <4 x i32>* %p1) {
; SKX-LABEL: test_store_v4i32_noalign:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: retq
store <4 x i32> %val, <4 x i32>* %p1, align 1
@@ -67,7 +67,7 @@ define void @test_store_v4i32_noalign(<4 x i32> %val, <4 x i32>* %p1) {
define void @test_store_v4i32_align(<4 x i32> %val, <4 x i32>* %p1) {
; SKX-LABEL: test_store_v4i32_align:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: retq
store <4 x i32> %val, <4 x i32>* %p1, align 16
@@ -76,7 +76,7 @@ define void @test_store_v4i32_align(<4 x i32> %val, <4 x i32>* %p1) {
define void @test_store_v8i32_noalign(<8 x i32> %val, <8 x i32>* %p1) {
; SKX-LABEL: test_store_v8i32_noalign:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -86,7 +86,7 @@ define void @test_store_v8i32_noalign(<8 x i32> %val, <8 x i32>* %p1) {
define void @test_store_v8i32_align(<8 x i32> %val, <8 x i32>* %p1) {
; SKX-LABEL: test_store_v8i32_align:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -96,7 +96,7 @@ define void @test_store_v8i32_align(<8 x i32> %val, <8 x i32>* %p1) {
define void @test_store_v16i32_noalign(<16 x i32> %val, <16 x i32>* %p1) {
; SKX-LABEL: test_store_v16i32_noalign:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups %zmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -106,7 +106,7 @@ define void @test_store_v16i32_noalign(<16 x i32> %val, <16 x i32>* %p1) {
define void @test_store_v16i32_align(<16 x i32> %val, <16 x i32>* %p1) {
; SKX-LABEL: test_store_v16i32_align:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps %zmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/mul-scalar.ll b/test/CodeGen/X86/GlobalISel/mul-scalar.ll
index 450c3839797..5fd64c4bcce 100644
--- a/test/CodeGen/X86/GlobalISel/mul-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/mul-scalar.ll
@@ -9,7 +9,7 @@
define i16 @test_mul_i16(i16 %arg1, i16 %arg2) {
; X64-LABEL: test_mul_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: imulw %di, %si
; X64-NEXT: movl %esi, %eax
; X64-NEXT: retq
@@ -19,7 +19,7 @@ define i16 @test_mul_i16(i16 %arg1, i16 %arg2) {
define i32 @test_mul_i32(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_mul_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: imull %edi, %esi
; X64-NEXT: movl %esi, %eax
; X64-NEXT: retq
@@ -29,7 +29,7 @@ define i32 @test_mul_i32(i32 %arg1, i32 %arg2) {
define i64 @test_mul_i64(i64 %arg1, i64 %arg2) {
; X64-LABEL: test_mul_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: imulq %rdi, %rsi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/mul-vec.ll b/test/CodeGen/X86/GlobalISel/mul-vec.ll
index b2e211470f3..37e17453115 100644
--- a/test/CodeGen/X86/GlobalISel/mul-vec.ll
+++ b/test/CodeGen/X86/GlobalISel/mul-vec.ll
@@ -3,7 +3,7 @@
define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
; SKX-LABEL: test_mul_v8i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; SKX-NEXT: retq
%ret = mul <8 x i16> %arg1, %arg2
@@ -12,7 +12,7 @@ define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
; SKX-LABEL: test_mul_v4i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; SKX-NEXT: retq
%ret = mul <4 x i32> %arg1, %arg2
@@ -21,7 +21,7 @@ define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
; SKX-LABEL: test_mul_v2i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; SKX-NEXT: retq
%ret = mul <2 x i64> %arg1, %arg2
@@ -30,7 +30,7 @@ define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
; SKX-LABEL: test_mul_v16i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
%ret = mul <16 x i16> %arg1, %arg2
@@ -39,7 +39,7 @@ define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
; SKX-LABEL: test_mul_v8i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
%ret = mul <8 x i32> %arg1, %arg2
@@ -48,7 +48,7 @@ define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
; SKX-LABEL: test_mul_v4i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
%ret = mul <4 x i64> %arg1, %arg2
@@ -57,7 +57,7 @@ define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
; SKX-LABEL: test_mul_v32i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%ret = mul <32 x i16> %arg1, %arg2
@@ -66,7 +66,7 @@ define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
; SKX-LABEL: test_mul_v16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%ret = mul <16 x i32> %arg1, %arg2
@@ -75,7 +75,7 @@ define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
; SKX-LABEL: test_mul_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%ret = mul <8 x i64> %arg1, %arg2
diff --git a/test/CodeGen/X86/GlobalISel/or-scalar.ll b/test/CodeGen/X86/GlobalISel/or-scalar.ll
index a4cfcfe8ce5..397deaaf906 100644
--- a/test/CodeGen/X86/GlobalISel/or-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/or-scalar.ll
@@ -3,7 +3,7 @@
define i32 @test_or_i1(i32 %arg1, i32 %arg2) {
; ALL-LABEL: test_or_i1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: sete %al
; ALL-NEXT: orb %al, %al
@@ -18,7 +18,7 @@ define i32 @test_or_i1(i32 %arg1, i32 %arg2) {
define i8 @test_or_i8(i8 %arg1, i8 %arg2) {
; ALL-LABEL: test_or_i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: orb %dil, %sil
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -28,7 +28,7 @@ define i8 @test_or_i8(i8 %arg1, i8 %arg2) {
define i16 @test_or_i16(i16 %arg1, i16 %arg2) {
; ALL-LABEL: test_or_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: orw %di, %si
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -38,7 +38,7 @@ define i16 @test_or_i16(i16 %arg1, i16 %arg2) {
define i32 @test_or_i32(i32 %arg1, i32 %arg2) {
; ALL-LABEL: test_or_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: orl %edi, %esi
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -48,7 +48,7 @@ define i32 @test_or_i32(i32 %arg1, i32 %arg2) {
define i64 @test_or_i64(i64 %arg1, i64 %arg2) {
; ALL-LABEL: test_or_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: orq %rdi, %rsi
; ALL-NEXT: movq %rsi, %rax
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/phi.ll b/test/CodeGen/X86/GlobalISel/phi.ll
index 71bd7fecc05..21570819884 100644
--- a/test/CodeGen/X86/GlobalISel/phi.ll
+++ b/test/CodeGen/X86/GlobalISel/phi.ll
@@ -3,13 +3,13 @@
define i8 @test_i8(i32 %a, i8 %f, i8 %t) {
; ALL-LABEL: test_i8:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %eax, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: testb $1, %al
; ALL-NEXT: jne .LBB0_2
-; ALL-NEXT: # BB#1: # %cond.false
+; ALL-NEXT: # %bb.1: # %cond.false
; ALL-NEXT: movl %edx, %esi
; ALL-NEXT: .LBB0_2: # %cond.end
; ALL-NEXT: movl %esi, %eax
@@ -31,13 +31,13 @@ cond.end: ; preds = %cond.false, %cond.t
define i16 @test_i16(i32 %a, i16 %f, i16 %t) {
; ALL-LABEL: test_i16:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %eax, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: testb $1, %al
; ALL-NEXT: jne .LBB1_2
-; ALL-NEXT: # BB#1: # %cond.false
+; ALL-NEXT: # %bb.1: # %cond.false
; ALL-NEXT: movl %edx, %esi
; ALL-NEXT: .LBB1_2: # %cond.end
; ALL-NEXT: movl %esi, %eax
@@ -59,13 +59,13 @@ cond.end: ; preds = %cond.false, %cond.t
define i32 @test_i32(i32 %a, i32 %f, i32 %t) {
; ALL-LABEL: test_i32:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %eax, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: testb $1, %al
; ALL-NEXT: jne .LBB2_2
-; ALL-NEXT: # BB#1: # %cond.false
+; ALL-NEXT: # %bb.1: # %cond.false
; ALL-NEXT: movl %edx, %esi
; ALL-NEXT: .LBB2_2: # %cond.end
; ALL-NEXT: movl %esi, %eax
@@ -87,13 +87,13 @@ cond.end: ; preds = %cond.false, %cond.t
define i64 @test_i64(i32 %a, i64 %f, i64 %t) {
; ALL-LABEL: test_i64:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %eax, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: testb $1, %al
; ALL-NEXT: jne .LBB3_2
-; ALL-NEXT: # BB#1: # %cond.false
+; ALL-NEXT: # %bb.1: # %cond.false
; ALL-NEXT: movq %rdx, %rsi
; ALL-NEXT: .LBB3_2: # %cond.end
; ALL-NEXT: movq %rsi, %rax
@@ -115,13 +115,13 @@ cond.end: ; preds = %cond.false, %cond.t
define float @test_float(i32 %a, float %f, float %t) {
; ALL-LABEL: test_float:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %eax, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: testb $1, %al
; ALL-NEXT: jne .LBB4_2
-; ALL-NEXT: # BB#1: # %cond.false
+; ALL-NEXT: # %bb.1: # %cond.false
; ALL-NEXT: movaps %xmm1, %xmm0
; ALL-NEXT: .LBB4_2: # %cond.end
; ALL-NEXT: retq
@@ -142,13 +142,13 @@ cond.end: ; preds = %cond.false, %cond.t
define double @test_double(i32 %a, double %f, double %t) {
; ALL-LABEL: test_double:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %eax, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: testb $1, %al
; ALL-NEXT: jne .LBB5_2
-; ALL-NEXT: # BB#1: # %cond.false
+; ALL-NEXT: # %bb.1: # %cond.false
; ALL-NEXT: movaps %xmm1, %xmm0
; ALL-NEXT: .LBB5_2: # %cond.end
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index ad72d301ea3..d6c881c1219 100644
--- a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -1311,12 +1311,12 @@ registers:
- { id: 4, class: _, preferred-register: '' }
- { id: 5, class: _, preferred-register: '' }
# CHECK: bb.3.cond.end:
-# CHECK-NEXT: %5:gpr(s32) = G_PHI %1(s32), %bb.1.cond.true, %2(s32), %bb.2.cond.false
+# CHECK-NEXT: %5:gpr(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
# CHECK-NEXT: %eax = COPY %5(s32)
# CHECK-NEXT: RET 0, implicit %eax
body: |
bb.0.entry:
- successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
liveins: %edi, %edx, %esi
%0(s32) = COPY %edi
@@ -1324,19 +1324,19 @@ body: |
%2(s32) = COPY %edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.1.cond.true
- G_BR %bb.2.cond.false
+ G_BRCOND %4(s1), %bb.1
+ G_BR %bb.2
bb.1.cond.true:
- successors: %bb.3.cond.end(0x80000000)
+ successors: %bb.3(0x80000000)
- G_BR %bb.3.cond.end
+ G_BR %bb.3
bb.2.cond.false:
- successors: %bb.3.cond.end(0x80000000)
+ successors: %bb.3(0x80000000)
bb.3.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.1.cond.true, %2(s32), %bb.2.cond.false
+ %5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
%eax = COPY %5(s32)
RET 0, implicit %eax
@@ -1363,12 +1363,12 @@ registers:
- { id: 4, class: _, preferred-register: '' }
- { id: 5, class: _, preferred-register: '' }
# CHECK: bb.3.cond.end:
-# CHECK-NEXT: %5:vecr(s32) = G_PHI %1(s32), %bb.1.cond.true, %2(s32), %bb.2.cond.false
+# CHECK-NEXT: %5:vecr(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
# CHECK-NEXT: %xmm0 = COPY %5(s32)
# CHECK-NEXT: RET 0, implicit %xmm0
body: |
bb.0.entry:
- successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
liveins: %edi, %xmm0, %xmm1
%0(s32) = COPY %edi
@@ -1376,19 +1376,19 @@ body: |
%2(s32) = COPY %xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.1.cond.true
- G_BR %bb.2.cond.false
+ G_BRCOND %4(s1), %bb.1
+ G_BR %bb.2
bb.1.cond.true:
- successors: %bb.3.cond.end(0x80000000)
+ successors: %bb.3(0x80000000)
- G_BR %bb.3.cond.end
+ G_BR %bb.3
bb.2.cond.false:
- successors: %bb.3.cond.end(0x80000000)
+ successors: %bb.3(0x80000000)
bb.3.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.1.cond.true, %2(s32), %bb.2.cond.false
+ %5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
%xmm0 = COPY %5(s32)
RET 0, implicit %xmm0
diff --git a/test/CodeGen/X86/GlobalISel/select-br.mir b/test/CodeGen/X86/GlobalISel/select-br.mir
index 9d2a878e757..8d231cc26ba 100644
--- a/test/CodeGen/X86/GlobalISel/select-br.mir
+++ b/test/CodeGen/X86/GlobalISel/select-br.mir
@@ -20,20 +20,20 @@ name: uncondbr
alignment: 4
legalized: true
regBankSelected: true
-# CHECK: JMP_1 %bb.2.bb2
-# CHECK: JMP_1 %bb.1.end
+# CHECK: JMP_1 %bb.2
+# CHECK: JMP_1 %bb.1
body: |
bb.1.entry:
- successors: %bb.3.bb2(0x80000000)
+ successors: %bb.3(0x80000000)
- G_BR %bb.3.bb2
+ G_BR %bb.3
bb.2.end:
RET 0
bb.3.bb2:
- successors: %bb.2.end(0x80000000)
+ successors: %bb.2(0x80000000)
- G_BR %bb.2.end
+ G_BR %bb.2
...
diff --git a/test/CodeGen/X86/GlobalISel/select-brcond.mir b/test/CodeGen/X86/GlobalISel/select-brcond.mir
index 3d099a99df4..00a9cc044ea 100644
--- a/test/CodeGen/X86/GlobalISel/select-brcond.mir
+++ b/test/CodeGen/X86/GlobalISel/select-brcond.mir
@@ -33,27 +33,27 @@ registers:
# CHECK-NEXT: %3:gr32 = MOV32ri 1
# CHECK-NEXT: %1:gr8 = COPY %0.sub_8bit
# CHECK-NEXT: TEST8ri %1, 1, implicit-def %eflags
-# CHECK-NEXT: JNE_1 %[[TRUE:bb.[0-9].true]], implicit %eflags
-# CHECK-NEXT: JMP_1 %[[FALSE:bb.[0-9].false]]
-# CHECK: [[TRUE]]:
+# CHECK-NEXT: JNE_1 %[[TRUE:bb.[0-9]+]], implicit %eflags
+# CHECK-NEXT: JMP_1 %[[FALSE:bb.[0-9]+]]
+# CHECK: [[TRUE]].{{[a-zA-Z0-9]+}}:
# CHECK-NEXT: %eax = COPY %2
# CHECK-NEXT: RET 0, implicit %eax
-# CHECK: [[FALSE]]:
+# CHECK: [[FALSE]].{{[a-zA-Z0-9]+}}:
# CHECK-NEXT: %eax = COPY %3
# CHECK-NEXT: RET 0, implicit %eax
body: |
bb.1.entry:
- successors: %bb.2.true(0x40000000), %bb.3.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi
%0(s32) = COPY %edi
%2(s32) = G_CONSTANT i32 0
%3(s32) = G_CONSTANT i32 1
%1(s1) = G_TRUNC %0(s32)
- G_BRCOND %1(s1), %bb.2.true
- G_BR %bb.3.false
+ G_BRCOND %1(s1), %bb.2
+ G_BR %bb.3
bb.2.true:
%eax = COPY %2(s32)
diff --git a/test/CodeGen/X86/GlobalISel/select-phi.mir b/test/CodeGen/X86/GlobalISel/select-phi.mir
index f92ba0d71c2..7792d8c208d 100644
--- a/test/CodeGen/X86/GlobalISel/select-phi.mir
+++ b/test/CodeGen/X86/GlobalISel/select-phi.mir
@@ -121,12 +121,12 @@ registers:
- { id: 4, class: gpr, preferred-register: '' }
- { id: 5, class: gpr, preferred-register: '' }
# ALL-LABEL: bb.3.cond.end:
-# ALL: %5:gr8 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL: %5:gr8 = PHI %1, %bb.1, %2, %bb.2
# ALL-NEXT: %al = COPY %5
# ALL-NEXT: RET 0, implicit %al
body: |
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %edx, %esi
%0(s32) = COPY %edi
@@ -134,20 +134,20 @@ body: |
%2(s8) = COPY %edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s8) = G_PHI %1(s8), %bb.2.cond.true, %2(s8), %bb.3.cond.false
+ %5(s8) = G_PHI %1(s8), %bb.2, %2(s8), %bb.3
%al = COPY %5(s8)
RET 0, implicit %al
@@ -174,12 +174,12 @@ registers:
- { id: 4, class: gpr, preferred-register: '' }
- { id: 5, class: gpr, preferred-register: '' }
# ALL-LABEL: bb.3.cond.end:
-# ALL: %5:gr16 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL: %5:gr16 = PHI %1, %bb.1, %2, %bb.2
# ALL-NEXT: %ax = COPY %5
# ALL-NEXT: RET 0, implicit %ax
body: |
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %edx, %esi
%0(s32) = COPY %edi
@@ -187,20 +187,20 @@ body: |
%2(s16) = COPY %edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s16) = G_PHI %1(s16), %bb.2.cond.true, %2(s16), %bb.3.cond.false
+ %5(s16) = G_PHI %1(s16), %bb.2, %2(s16), %bb.3
%ax = COPY %5(s16)
RET 0, implicit %ax
@@ -227,12 +227,12 @@ registers:
- { id: 4, class: gpr, preferred-register: '' }
- { id: 5, class: gpr, preferred-register: '' }
# ALL-LABEL: bb.3.cond.end:
-# ALL: %5:gr32 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL: %5:gr32 = PHI %1, %bb.1, %2, %bb.2
# ALL-NEXT: %eax = COPY %5
# ALL-NEXT: RET 0, implicit %eax
body: |
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %edx, %esi
%0(s32) = COPY %edi
@@ -240,20 +240,20 @@ body: |
%2(s32) = COPY %edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.2.cond.true, %2(s32), %bb.3.cond.false
+ %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
%eax = COPY %5(s32)
RET 0, implicit %eax
@@ -280,12 +280,12 @@ registers:
- { id: 4, class: gpr, preferred-register: '' }
- { id: 5, class: gpr, preferred-register: '' }
# ALL-LABEL: bb.3.cond.end:
-# ALL: %5:gr64 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL: %5:gr64 = PHI %1, %bb.1, %2, %bb.2
# ALL-NEXT: %rax = COPY %5
# ALL-NEXT: RET 0, implicit %rax
body: |
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %rdx, %rsi
%0(s32) = COPY %edi
@@ -293,20 +293,20 @@ body: |
%2(s64) = COPY %rdx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s64) = G_PHI %1(s64), %bb.2.cond.true, %2(s64), %bb.3.cond.false
+ %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
%rax = COPY %5(s64)
RET 0, implicit %rax
@@ -337,12 +337,12 @@ fixedStack:
stack:
constants:
# ALL-LABEL: bb.3.cond.end:
-# ALL: %5:fr32 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL: %5:fr32 = PHI %1, %bb.1, %2, %bb.2
# ALL-NEXT: %xmm0 = COPY %5
# ALL-NEXT: RET 0, implicit %xmm0
body: |
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %xmm0, %xmm1
%0(s32) = COPY %edi
@@ -350,20 +350,20 @@ body: |
%2(s32) = COPY %xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.2.cond.true, %2(s32), %bb.3.cond.false
+ %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
%xmm0 = COPY %5(s32)
RET 0, implicit %xmm0
@@ -390,12 +390,12 @@ registers:
- { id: 4, class: gpr, preferred-register: '' }
- { id: 5, class: vecr, preferred-register: '' }
# ALL-LABEL: bb.3.cond.end:
-# ALL: %5:fr64 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL: %5:fr64 = PHI %1, %bb.1, %2, %bb.2
# ALL-NEXT: %xmm0 = COPY %5
# ALL-NEXT: RET 0, implicit %xmm0
body: |
bb.1.entry:
- successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
liveins: %edi, %xmm0, %xmm1
%0(s32) = COPY %edi
@@ -403,20 +403,20 @@ body: |
%2(s64) = COPY %xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2.cond.true
- G_BR %bb.3.cond.false
+ G_BRCOND %4(s1), %bb.2
+ G_BR %bb.3
bb.2.cond.true:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
- G_BR %bb.4.cond.end
+ G_BR %bb.4
bb.3.cond.false:
- successors: %bb.4.cond.end(0x80000000)
+ successors: %bb.4(0x80000000)
bb.4.cond.end:
- %5(s64) = G_PHI %1(s64), %bb.2.cond.true, %2(s64), %bb.3.cond.false
+ %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
%xmm0 = COPY %5(s64)
RET 0, implicit %xmm0
diff --git a/test/CodeGen/X86/GlobalISel/sub-scalar.ll b/test/CodeGen/X86/GlobalISel/sub-scalar.ll
index ab1e39399b7..f8d825dff38 100644
--- a/test/CodeGen/X86/GlobalISel/sub-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/sub-scalar.ll
@@ -3,7 +3,7 @@
define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
; X64-LABEL: test_sub_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subq %rsi, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
@@ -13,7 +13,7 @@ define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_sub_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subl %esi, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -23,7 +23,7 @@ define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
define i16 @test_sub_i16(i16 %arg1, i16 %arg2) {
; X64-LABEL: test_sub_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subw %si, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -33,7 +33,7 @@ define i16 @test_sub_i16(i16 %arg1, i16 %arg2) {
define i8 @test_sub_i8(i8 %arg1, i8 %arg2) {
; X64-LABEL: test_sub_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subb %sil, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -43,7 +43,7 @@ define i8 @test_sub_i8(i8 %arg1, i8 %arg2) {
define i32 @test_sub_i1(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_sub_i1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subb %sil, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: andl $1, %eax
diff --git a/test/CodeGen/X86/GlobalISel/sub-vec.ll b/test/CodeGen/X86/GlobalISel/sub-vec.ll
index 9caf18f0c0c..8186026836f 100644
--- a/test/CodeGen/X86/GlobalISel/sub-vec.ll
+++ b/test/CodeGen/X86/GlobalISel/sub-vec.ll
@@ -3,7 +3,7 @@
define <16 x i8> @test_sub_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
; SKX-LABEL: test_sub_v16i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; SKX-NEXT: retq
%ret = sub <16 x i8> %arg1, %arg2
@@ -12,7 +12,7 @@ define <16 x i8> @test_sub_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
define <8 x i16> @test_sub_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
; SKX-LABEL: test_sub_v8i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
; SKX-NEXT: retq
%ret = sub <8 x i16> %arg1, %arg2
@@ -21,7 +21,7 @@ define <8 x i16> @test_sub_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
; SKX-LABEL: test_sub_v4i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; SKX-NEXT: retq
%ret = sub <4 x i32> %arg1, %arg2
@@ -30,7 +30,7 @@ define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
define <2 x i64> @test_sub_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
; SKX-LABEL: test_sub_v2i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; SKX-NEXT: retq
%ret = sub <2 x i64> %arg1, %arg2
@@ -39,7 +39,7 @@ define <2 x i64> @test_sub_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
define <32 x i8> @test_sub_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
; SKX-LABEL: test_sub_v32i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
%ret = sub <32 x i8> %arg1, %arg2
@@ -48,7 +48,7 @@ define <32 x i8> @test_sub_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
define <16 x i16> @test_sub_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
; SKX-LABEL: test_sub_v16i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
%ret = sub <16 x i16> %arg1, %arg2
@@ -57,7 +57,7 @@ define <16 x i16> @test_sub_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
define <8 x i32> @test_sub_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
; SKX-LABEL: test_sub_v8i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
%ret = sub <8 x i32> %arg1, %arg2
@@ -66,7 +66,7 @@ define <8 x i32> @test_sub_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
define <4 x i64> @test_sub_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
; SKX-LABEL: test_sub_v4i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
%ret = sub <4 x i64> %arg1, %arg2
@@ -75,7 +75,7 @@ define <4 x i64> @test_sub_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
define <64 x i8> @test_sub_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
; SKX-LABEL: test_sub_v64i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubb %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%ret = sub <64 x i8> %arg1, %arg2
@@ -84,7 +84,7 @@ define <64 x i8> @test_sub_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
define <32 x i16> @test_sub_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
; SKX-LABEL: test_sub_v32i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubw %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%ret = sub <32 x i16> %arg1, %arg2
@@ -93,7 +93,7 @@ define <32 x i16> @test_sub_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
define <16 x i32> @test_sub_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
; SKX-LABEL: test_sub_v16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%ret = sub <16 x i32> %arg1, %arg2
@@ -102,7 +102,7 @@ define <16 x i32> @test_sub_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
define <8 x i64> @test_sub_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
; SKX-LABEL: test_sub_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%ret = sub <8 x i64> %arg1, %arg2
diff --git a/test/CodeGen/X86/GlobalISel/trunc.ll b/test/CodeGen/X86/GlobalISel/trunc.ll
index 6c0f01673af..6c4729f3021 100644
--- a/test/CodeGen/X86/GlobalISel/trunc.ll
+++ b/test/CodeGen/X86/GlobalISel/trunc.ll
@@ -3,7 +3,7 @@
define i1 @trunc_i32toi1(i32 %a) {
; CHECK-LABEL: trunc_i32toi1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%r = trunc i32 %a to i1
@@ -12,7 +12,7 @@ define i1 @trunc_i32toi1(i32 %a) {
define i8 @trunc_i32toi8(i32 %a) {
; CHECK-LABEL: trunc_i32toi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%r = trunc i32 %a to i8
@@ -21,7 +21,7 @@ define i8 @trunc_i32toi8(i32 %a) {
define i16 @trunc_i32toi16(i32 %a) {
; CHECK-LABEL: trunc_i32toi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%r = trunc i32 %a to i16
@@ -30,7 +30,7 @@ define i16 @trunc_i32toi16(i32 %a) {
define i8 @trunc_i64toi8(i64 %a) {
; CHECK-LABEL: trunc_i64toi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%r = trunc i64 %a to i8
@@ -39,7 +39,7 @@ define i8 @trunc_i64toi8(i64 %a) {
define i16 @trunc_i64toi16(i64 %a) {
; CHECK-LABEL: trunc_i64toi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%r = trunc i64 %a to i16
@@ -48,7 +48,7 @@ define i16 @trunc_i64toi16(i64 %a) {
define i32 @trunc_i64toi32(i64 %a) {
; CHECK-LABEL: trunc_i64toi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%r = trunc i64 %a to i32
diff --git a/test/CodeGen/X86/GlobalISel/undef.ll b/test/CodeGen/X86/GlobalISel/undef.ll
index cd82766be97..6edd0bfed50 100644
--- a/test/CodeGen/X86/GlobalISel/undef.ll
+++ b/test/CodeGen/X86/GlobalISel/undef.ll
@@ -3,14 +3,14 @@
define i8 @test() {
; ALL-LABEL: test:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
ret i8 undef
}
define i8 @test2(i8 %a) {
; ALL-LABEL: test2:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: addb %al, %dil
; ALL-NEXT: movl %edi, %eax
; ALL-NEXT: retq
@@ -21,14 +21,14 @@ define i8 @test2(i8 %a) {
define float @test3() {
; ALL-LABEL: test3:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
ret float undef
}
define float @test4(float %a) {
; ALL-LABEL: test4:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: addss %xmm0, %xmm0
; ALL-NEXT: retq
%r = fadd float %a, undef
diff --git a/test/CodeGen/X86/GlobalISel/xor-scalar.ll b/test/CodeGen/X86/GlobalISel/xor-scalar.ll
index 177ace600fc..069edaadee9 100644
--- a/test/CodeGen/X86/GlobalISel/xor-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/xor-scalar.ll
@@ -3,7 +3,7 @@
define i32 @test_xor_i1(i32 %arg1, i32 %arg2) {
; ALL-LABEL: test_xor_i1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: sete %al
; ALL-NEXT: xorb %al, %al
@@ -18,7 +18,7 @@ define i32 @test_xor_i1(i32 %arg1, i32 %arg2) {
define i8 @test_xor_i8(i8 %arg1, i8 %arg2) {
; ALL-LABEL: test_xor_i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: xorb %dil, %sil
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -28,7 +28,7 @@ define i8 @test_xor_i8(i8 %arg1, i8 %arg2) {
define i16 @test_xor_i16(i16 %arg1, i16 %arg2) {
; ALL-LABEL: test_xor_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: xorw %di, %si
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -38,7 +38,7 @@ define i16 @test_xor_i16(i16 %arg1, i16 %arg2) {
define i32 @test_xor_i32(i32 %arg1, i32 %arg2) {
; ALL-LABEL: test_xor_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: xorl %edi, %esi
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: retq
@@ -48,7 +48,7 @@ define i32 @test_xor_i32(i32 %arg1, i32 %arg2) {
define i64 @test_xor_i64(i64 %arg1, i64 %arg2) {
; ALL-LABEL: test_xor_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: xorq %rdi, %rsi
; ALL-NEXT: movq %rsi, %rax
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/MachineBranchProb.ll b/test/CodeGen/X86/MachineBranchProb.ll
index ee1c658d4c5..e6a56651837 100644
--- a/test/CodeGen/X86/MachineBranchProb.ll
+++ b/test/CodeGen/X86/MachineBranchProb.ll
@@ -17,10 +17,10 @@ for.cond2: ; preds = %for.inc, %for.cond
%cmp4 = icmp eq i32 %i.1, %v3
%or.cond = or i1 %tobool, %cmp4
br i1 %or.cond, label %for.inc20, label %for.inc, !prof !0
-; CHECK: BB#1: derived from LLVM BB %for.cond2
-; CHECK: Successors according to CFG: BB#3({{[0-9a-fx/= ]+}}1.53%) BB#4({{[0-9a-fx/= ]+}}98.47%)
-; CHECK: BB#4: derived from LLVM BB %for.cond2
-; CHECK: Successors according to CFG: BB#3({{[0-9a-fx/= ]+}}1.55%) BB#2({{[0-9a-fx/= ]+}}98.45%)
+; CHECK: %bb.1: derived from LLVM BB %for.cond2
+; CHECK: Successors according to CFG: %bb.3({{[0-9a-fx/= ]+}}1.53%) %bb.4({{[0-9a-fx/= ]+}}98.47%)
+; CHECK: %bb.4: derived from LLVM BB %for.cond2
+; CHECK: Successors according to CFG: %bb.3({{[0-9a-fx/= ]+}}1.55%) %bb.2({{[0-9a-fx/= ]+}}98.45%)
for.inc: ; preds = %for.cond2
%shl = shl i32 %bit.0, 1
diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll
index 5058f1f5ec9..fd4e9891bac 100644
--- a/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -8,10 +8,10 @@
; save 1,2,3 ... as one big integer.
define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwtable noinline ssp {
; CHECK-LABEL: merge_const_store:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jle .LBB0_3
-; CHECK-NEXT: # BB#1: # %.lr.ph.preheader
+; CHECK-NEXT: # %bb.1: # %.lr.ph.preheader
; CHECK-NEXT: movabsq $578437695752307201, %rax # imm = 0x807060504030201
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_2: # %.lr.ph
@@ -54,7 +54,7 @@ define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwt
; No vectors because we use noimplicitfloat
define void @merge_const_store_no_vec(i32 %count, %struct.B* nocapture %p) noimplicitfloat{
; CHECK-LABEL: merge_const_store_no_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jle .LBB1_2
; CHECK-NEXT: .p2align 4, 0x90
@@ -101,10 +101,10 @@ define void @merge_const_store_no_vec(i32 %count, %struct.B* nocapture %p) noimp
; Move the constants using a single vector store.
define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind uwtable noinline ssp {
; CHECK-LABEL: merge_const_store_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jle .LBB2_3
-; CHECK-NEXT: # BB#1: # %.lr.ph.preheader
+; CHECK-NEXT: # %bb.1: # %.lr.ph.preheader
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB2_2: # %.lr.ph
@@ -148,7 +148,7 @@ define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind
; Move the first 4 constants as a single vector. Move the rest as scalars.
define void @merge_nonconst_store(i32 %count, i8 %zz, %struct.A* nocapture %p) nounwind uwtable noinline ssp {
; CHECK-LABEL: merge_nonconst_store:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jle .LBB3_2
; CHECK-NEXT: .p2align 4, 0x90
@@ -194,7 +194,7 @@ define void @merge_nonconst_store(i32 %count, i8 %zz, %struct.A* nocapture %p) n
define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp {
; BWON-LABEL: merge_loads_i16:
-; BWON: # BB#0:
+; BWON: # %bb.0:
; BWON-NEXT: testl %edi, %edi
; BWON-NEXT: jle .LBB4_2
; BWON-NEXT: .p2align 4, 0x90
@@ -208,7 +208,7 @@ define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struc
; BWON-NEXT: retq
;
; BWOFF-LABEL: merge_loads_i16:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: testl %edi, %edi
; BWOFF-NEXT: jle .LBB4_2
; BWOFF-NEXT: .p2align 4, 0x90
@@ -249,7 +249,7 @@ define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struc
; The loads and the stores are interleaved. Can't merge them.
define void @no_merge_loads(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp {
; BWON-LABEL: no_merge_loads:
-; BWON: # BB#0:
+; BWON: # %bb.0:
; BWON-NEXT: testl %edi, %edi
; BWON-NEXT: jle .LBB5_2
; BWON-NEXT: .p2align 4, 0x90
@@ -266,7 +266,7 @@ define void @no_merge_loads(i32 %count, %struct.A* noalias nocapture %q, %struct
; BWON-NEXT: retq
;
; BWOFF-LABEL: no_merge_loads:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: testl %edi, %edi
; BWOFF-NEXT: jle .LBB5_2
; BWOFF-NEXT: .p2align 4, 0x90
@@ -309,7 +309,7 @@ a4: ; preds = %4, %.lr.ph
define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
; CHECK-LABEL: merge_loads_integer:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jle .LBB6_2
; CHECK-NEXT: .p2align 4, 0x90
@@ -349,7 +349,7 @@ define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %s
define void @merge_loads_vector(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
; CHECK-LABEL: merge_loads_vector:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jle .LBB7_2
; CHECK-NEXT: .p2align 4, 0x90
@@ -399,7 +399,7 @@ block4: ; preds = %4, %.lr.ph
; On x86, even unaligned copies can be merged to vector ops.
define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
; CHECK-LABEL: merge_loads_no_align:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jle .LBB8_2
; CHECK-NEXT: .p2align 4, 0x90
@@ -450,7 +450,7 @@ block4: ; preds = %4, %.lr.ph
; word (16 bit) instead of a byte copy.
define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
; BWON-LABEL: MergeLoadStoreBaseIndexOffset:
-; BWON: # BB#0:
+; BWON: # %bb.0:
; BWON-NEXT: movl %ecx, %r8d
; BWON-NEXT: xorl %ecx, %ecx
; BWON-NEXT: .p2align 4, 0x90
@@ -461,11 +461,11 @@ define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
; BWON-NEXT: incq %rcx
; BWON-NEXT: cmpl %ecx, %r8d
; BWON-NEXT: jne .LBB9_1
-; BWON-NEXT: # BB#2:
+; BWON-NEXT: # %bb.2:
; BWON-NEXT: retq
;
; BWOFF-LABEL: MergeLoadStoreBaseIndexOffset:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: movl %ecx, %r8d
; BWOFF-NEXT: xorl %ecx, %ecx
; BWOFF-NEXT: .p2align 4, 0x90
@@ -476,7 +476,7 @@ define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
; BWOFF-NEXT: incq %rcx
; BWOFF-NEXT: cmpl %ecx, %r8d
; BWOFF-NEXT: jne .LBB9_1
-; BWOFF-NEXT: # BB#2:
+; BWOFF-NEXT: # %bb.2:
; BWOFF-NEXT: retq
br label %1
@@ -507,7 +507,7 @@ define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
; word (16 bit) instead of a byte copy for complicated address calculation.
define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i64 %n) {
; BWON-LABEL: MergeLoadStoreBaseIndexOffsetComplicated:
-; BWON: # BB#0:
+; BWON: # %bb.0:
; BWON-NEXT: xorl %r8d, %r8d
; BWON-NEXT: .p2align 4, 0x90
; BWON-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
@@ -518,11 +518,11 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i6
; BWON-NEXT: addq $2, %r8
; BWON-NEXT: cmpq %rcx, %r8
; BWON-NEXT: jl .LBB10_1
-; BWON-NEXT: # BB#2:
+; BWON-NEXT: # %bb.2:
; BWON-NEXT: retq
;
; BWOFF-LABEL: MergeLoadStoreBaseIndexOffsetComplicated:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: xorl %r8d, %r8d
; BWOFF-NEXT: .p2align 4, 0x90
; BWOFF-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
@@ -533,7 +533,7 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i6
; BWOFF-NEXT: addq $2, %r8
; BWOFF-NEXT: cmpq %rcx, %r8
; BWOFF-NEXT: jl .LBB10_1
-; BWOFF-NEXT: # BB#2:
+; BWOFF-NEXT: # %bb.2:
; BWOFF-NEXT: retq
br label %1
@@ -566,7 +566,7 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i6
; extensions.
define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
; BWON-LABEL: MergeLoadStoreBaseIndexOffsetSext:
-; BWON: # BB#0:
+; BWON: # %bb.0:
; BWON-NEXT: movl %ecx, %r8d
; BWON-NEXT: xorl %ecx, %ecx
; BWON-NEXT: .p2align 4, 0x90
@@ -577,11 +577,11 @@ define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
; BWON-NEXT: incq %rcx
; BWON-NEXT: cmpl %ecx, %r8d
; BWON-NEXT: jne .LBB11_1
-; BWON-NEXT: # BB#2:
+; BWON-NEXT: # %bb.2:
; BWON-NEXT: retq
;
; BWOFF-LABEL: MergeLoadStoreBaseIndexOffsetSext:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: movl %ecx, %r8d
; BWOFF-NEXT: xorl %ecx, %ecx
; BWOFF-NEXT: .p2align 4, 0x90
@@ -592,7 +592,7 @@ define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
; BWOFF-NEXT: incq %rcx
; BWOFF-NEXT: cmpl %ecx, %r8d
; BWOFF-NEXT: jne .LBB11_1
-; BWOFF-NEXT: # BB#2:
+; BWOFF-NEXT: # %bb.2:
; BWOFF-NEXT: retq
br label %1
@@ -624,7 +624,7 @@ define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
; computations.
define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
; BWON-LABEL: loadStoreBaseIndexOffsetSextNoSex:
-; BWON: # BB#0:
+; BWON: # %bb.0:
; BWON-NEXT: movl %ecx, %r8d
; BWON-NEXT: xorl %ecx, %ecx
; BWON-NEXT: .p2align 4, 0x90
@@ -639,11 +639,11 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
; BWON-NEXT: incq %rcx
; BWON-NEXT: cmpl %ecx, %r8d
; BWON-NEXT: jne .LBB12_1
-; BWON-NEXT: # BB#2:
+; BWON-NEXT: # %bb.2:
; BWON-NEXT: retq
;
; BWOFF-LABEL: loadStoreBaseIndexOffsetSextNoSex:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: movl %ecx, %r8d
; BWOFF-NEXT: xorl %ecx, %ecx
; BWOFF-NEXT: .p2align 4, 0x90
@@ -658,7 +658,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
; BWOFF-NEXT: incq %rcx
; BWOFF-NEXT: cmpl %ecx, %r8d
; BWOFF-NEXT: jne .LBB12_1
-; BWOFF-NEXT: # BB#2:
+; BWOFF-NEXT: # %bb.2:
; BWOFF-NEXT: retq
br label %1
@@ -690,7 +690,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
; PR21711 (http://llvm.org/bugs/show_bug.cgi?id=21711)
define void @merge_vec_element_store(<8 x float> %v, float* %ptr) {
; CHECK-LABEL: merge_vec_element_store:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -725,7 +725,7 @@ define void @merge_vec_element_store(<8 x float> %v, float* %ptr) {
; These should be merged into 32-byte stores.
define void @merge_vec_extract_stores(<8 x float> %v1, <8 x float> %v2, <4 x float>* %ptr) {
; CHECK-LABEL: merge_vec_extract_stores:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm0, 48(%rdi)
; CHECK-NEXT: vmovups %ymm1, 80(%rdi)
; CHECK-NEXT: vzeroupper
@@ -749,7 +749,7 @@ define void @merge_vec_extract_stores(<8 x float> %v1, <8 x float> %v2, <4 x flo
; Merging vector stores when sourced from vector loads.
define void @merge_vec_stores_from_loads(<4 x float>* %v, <4 x float>* %ptr) {
; CHECK-LABEL: merge_vec_stores_from_loads:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovups (%rdi), %ymm0
; CHECK-NEXT: vmovups %ymm0, (%rsi)
; CHECK-NEXT: vzeroupper
@@ -769,7 +769,7 @@ define void @merge_vec_stores_from_loads(<4 x float>* %v, <4 x float>* %ptr) {
; Merging vector stores when sourced from a constant vector is not currently handled.
define void @merge_vec_stores_of_constants(<4 x i32>* %ptr) {
; CHECK-LABEL: merge_vec_stores_of_constants:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovaps %xmm0, 48(%rdi)
; CHECK-NEXT: vmovaps %xmm0, 64(%rdi)
@@ -786,7 +786,7 @@ define void @merge_vec_stores_of_constants(<4 x i32>* %ptr) {
; This should now be merged.
define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
; CHECK-LABEL: merge_vec_element_and_scalar_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovups (%rdi), %xmm0
; CHECK-NEXT: vmovups %xmm0, 32(%rdi)
; CHECK-NEXT: retq
@@ -809,7 +809,7 @@ define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
; Don't let a non-consecutive store thwart merging of the last two.
define void @almost_consecutive_stores(i8* %p) {
; CHECK-LABEL: almost_consecutive_stores:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movb $0, (%rdi)
; CHECK-NEXT: movb $1, 42(%rdi)
; CHECK-NEXT: movw $770, 2(%rdi) # imm = 0x302
@@ -827,7 +827,7 @@ define void @almost_consecutive_stores(i8* %p) {
; We should be able to merge these.
define void @merge_bitcast(<4 x i32> %v, float* %ptr) {
; CHECK-LABEL: merge_bitcast:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %xmm0, (%rdi)
; CHECK-NEXT: retq
%fv = bitcast <4 x i32> %v to <4 x float>
diff --git a/test/CodeGen/X86/SwizzleShuff.ll b/test/CodeGen/X86/SwizzleShuff.ll
index 4477a103001..e6519a60a4b 100644
--- a/test/CodeGen/X86/SwizzleShuff.ll
+++ b/test/CodeGen/X86/SwizzleShuff.ll
@@ -5,7 +5,7 @@
define void @pull_bitcast(<4 x i8>* %pA, <4 x i8>* %pB) {
; CHECK-LABEL: pull_bitcast:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: xorl %eax, (%rdi)
; CHECK-NEXT: retq
@@ -18,7 +18,7 @@ define void @pull_bitcast(<4 x i8>* %pA, <4 x i8>* %pB) {
define <4 x i32> @multi_use_swizzle(<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK-LABEL: multi_use_swizzle:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],mem[1,2]
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,2,2]
@@ -36,7 +36,7 @@ define <4 x i32> @multi_use_swizzle(<4 x i32>* %pA, <4 x i32>* %pB) {
define <4 x i8> @pull_bitcast2(<4 x i8>* %pA, <4 x i8>* %pB, <4 x i8>* %pC) {
; CHECK-LABEL: pull_bitcast2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl (%rdi), %eax
; CHECK-NEXT: movl %eax, (%rdx)
; CHECK-NEXT: xorl (%rsi), %eax
@@ -53,7 +53,7 @@ define <4 x i8> @pull_bitcast2(<4 x i8>* %pA, <4 x i8>* %pB, <4 x i8>* %pC) {
define <4 x i32> @reverse_1(<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK-LABEL: reverse_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0
; CHECK-NEXT: retq
%A = load <4 x i32>, <4 x i32>* %pA
@@ -65,7 +65,7 @@ define <4 x i32> @reverse_1(<4 x i32>* %pA, <4 x i32>* %pB) {
define <4 x i32> @no_reverse_shuff(<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK-LABEL: no_reverse_shuff:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; CHECK-NEXT: retq
%A = load <4 x i32>, <4 x i32>* %pA
diff --git a/test/CodeGen/X86/TruncAssertSext.ll b/test/CodeGen/X86/TruncAssertSext.ll
index d4f9a5d4873..9ab7622ef9d 100644
--- a/test/CodeGen/X86/TruncAssertSext.ll
+++ b/test/CodeGen/X86/TruncAssertSext.ll
@@ -6,7 +6,7 @@
define i64 @main(i64 %a) {
; CHECK-LABEL: main:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: orq $-2, %rdi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/TruncAssertZext.ll b/test/CodeGen/X86/TruncAssertZext.ll
index b9ae57ca011..80f8e0f647a 100644
--- a/test/CodeGen/X86/TruncAssertZext.ll
+++ b/test/CodeGen/X86/TruncAssertZext.ll
@@ -6,7 +6,7 @@
define i64 @foo() {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq $-1, %rax
; CHECK-NEXT: retq
ret i64 -1
@@ -14,7 +14,7 @@ define i64 @foo() {
define i64 @main() {
; CHECK-LABEL: main:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: callq foo
diff --git a/test/CodeGen/X86/WidenArith.ll b/test/CodeGen/X86/WidenArith.ll
index 7470416ba7e..cb9bf03b64c 100644
--- a/test/CodeGen/X86/WidenArith.ll
+++ b/test/CodeGen/X86/WidenArith.ll
@@ -4,7 +4,7 @@
define <8 x i32> @test(<8 x float> %a, <8 x float> %b) {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vaddps %ymm1, %ymm0, %ymm2
; X86-NEXT: vmulps %ymm0, %ymm1, %ymm1
; X86-NEXT: vsubps %ymm2, %ymm1, %ymm3
@@ -15,7 +15,7 @@ define <8 x i32> @test(<8 x float> %a, <8 x float> %b) {
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vaddps %ymm1, %ymm0, %ymm2
; X64-NEXT: vmulps %ymm0, %ymm1, %ymm1
; X64-NEXT: vsubps %ymm2, %ymm1, %ymm3
diff --git a/test/CodeGen/X86/add-ext.ll b/test/CodeGen/X86/add-ext.ll
index 7a157ecd3fe..16646fa71ca 100644
--- a/test/CodeGen/X86/add-ext.ll
+++ b/test/CodeGen/X86/add-ext.ll
@@ -8,7 +8,7 @@
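; Note: these checks rely on the adds being marked nsw; that lets the sign
; extension be hoisted past the add so the arithmetic folds into a single
; movslq plus a leaq/addq addressing-mode computation.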
define i64 @add_nsw_consts(i32 %i) {
; CHECK-LABEL: add_nsw_consts:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: addq $12, %rax
; CHECK-NEXT: retq
@@ -24,7 +24,7 @@ define i64 @add_nsw_consts(i32 %i) {
define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext_add:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq 5(%rsi,%rax), %rax
; CHECK-NEXT: retq
@@ -40,7 +40,7 @@ define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
define i64 @add_nsw_sext_lsh_add(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext_lsh_add:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq -40(%rsi,%rax,8), %rax
; CHECK-NEXT: retq
@@ -57,7 +57,7 @@ define i64 @add_nsw_sext_lsh_add(i32 %i, i64 %x) {
define i64 @add_nsw_sext(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addl $5, %edi
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: retq
@@ -71,7 +71,7 @@ define i64 @add_nsw_sext(i32 %i, i64 %x) {
define i8* @gep8(i32 %i, i8* %x) {
; CHECK-LABEL: gep8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq 5(%rsi,%rax), %rax
; CHECK-NEXT: retq
@@ -84,7 +84,7 @@ define i8* @gep8(i32 %i, i8* %x) {
define i16* @gep16(i32 %i, i16* %x) {
; CHECK-LABEL: gep16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq -10(%rsi,%rax,2), %rax
; CHECK-NEXT: retq
@@ -97,7 +97,7 @@ define i16* @gep16(i32 %i, i16* %x) {
define i32* @gep32(i32 %i, i32* %x) {
; CHECK-LABEL: gep32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq 20(%rsi,%rax,4), %rax
; CHECK-NEXT: retq
@@ -110,7 +110,7 @@ define i32* @gep32(i32 %i, i32* %x) {
define i64* @gep64(i32 %i, i64* %x) {
; CHECK-LABEL: gep64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq -40(%rsi,%rax,8), %rax
; CHECK-NEXT: retq
@@ -125,7 +125,7 @@ define i64* @gep64(i32 %i, i64* %x) {
define i128* @gep128(i32 %i, i128* %x) {
; CHECK-LABEL: gep128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: shlq $4, %rax
; CHECK-NEXT: leaq 80(%rsi,%rax), %rax
@@ -143,7 +143,7 @@ define i128* @gep128(i32 %i, i128* %x) {
define void @PR20134(i32* %a, i32 %i) {
; CHECK-LABEL: PR20134:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %esi, %rax
; CHECK-NEXT: movl 4(%rdi,%rax,4), %ecx
; CHECK-NEXT: addl 8(%rdi,%rax,4), %ecx
@@ -169,7 +169,7 @@ define void @PR20134(i32* %a, i32 %i) {
; The same as @PR20134, but the sign extension is replaced with zero extension.
define void @PR20134_zext(i32* %a, i32 %i) {
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: movl 4(%rdi,%rax,4), %ecx
; CHECK-NEXT: addl 8(%rdi,%rax,4), %ecx
diff --git a/test/CodeGen/X86/add-of-carry.ll b/test/CodeGen/X86/add-of-carry.ll
index ad82b8cfb77..1149ae57552 100644
--- a/test/CodeGen/X86/add-of-carry.ll
+++ b/test/CodeGen/X86/add-of-carry.ll
@@ -8,7 +8,7 @@
define i32 @test1(i32 %sum, i32 %x) nounwind readnone ssp {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %edx
@@ -26,7 +26,7 @@ define i32 @test1(i32 %sum, i32 %x) nounwind readnone ssp {
define i32 @test2(i32 %x, i32 %y, i32 %res) nounwind uwtable readnone ssp {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
diff --git a/test/CodeGen/X86/add-sub-nsw-nuw.ll b/test/CodeGen/X86/add-sub-nsw-nuw.ll
index 721b2fe7261..39dfe7b94b3 100644
--- a/test/CodeGen/X86/add-sub-nsw-nuw.ll
+++ b/test/CodeGen/X86/add-sub-nsw-nuw.ll
@@ -7,7 +7,7 @@
define i8 @PR30841(i64 %argc) {
; CHECK-LABEL: PR30841:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: negl %eax
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
diff --git a/test/CodeGen/X86/add.ll b/test/CodeGen/X86/add.ll
index 95368d100b2..5a8e6c37505 100644
--- a/test/CodeGen/X86/add.ll
+++ b/test/CodeGen/X86/add.ll
@@ -10,18 +10,18 @@ declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)
; instruction is a sub instead of an add.
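; Note: 128 is outside the sign-extended 8-bit immediate range [-128, 127]
; while -128 is inside it, so subl $-128 gets the shorter imm8 encoding that
; addl $128 cannot use.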
define i32 @test1(i32 inreg %a) nounwind {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: subl $-128, %eax
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test1:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subl $-128, %edi
; X64-LINUX-NEXT: movl %edi, %eax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test1:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subl $-128, %ecx
; X64-WIN32-NEXT: movl %ecx, %eax
; X64-WIN32-NEXT: retq
@@ -31,19 +31,19 @@ entry:
}
define i64 @test2(i64 inreg %a) nounwind {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: addl $-2147483648, %eax # imm = 0x80000000
; X32-NEXT: adcl $0, %edx
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test2:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subq $-2147483648, %rdi # imm = 0x80000000
; X64-LINUX-NEXT: movq %rdi, %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test2:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subq $-2147483648, %rcx # imm = 0x80000000
; X64-WIN32-NEXT: movq %rcx, %rax
; X64-WIN32-NEXT: retq
@@ -53,19 +53,19 @@ entry:
}
define i64 @test3(i64 inreg %a) nounwind {
; X32-LABEL: test3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: addl $128, %eax
; X32-NEXT: adcl $0, %edx
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test3:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subq $-128, %rdi
; X64-LINUX-NEXT: movq %rdi, %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test3:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subq $-128, %rcx
; X64-WIN32-NEXT: movq %rcx, %rax
; X64-WIN32-NEXT: retq
@@ -76,11 +76,11 @@ entry:
define i1 @test4(i32 %v1, i32 %v2, i32* %X) nounwind {
; X32-LABEL: test4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: jo .LBB3_2
-; X32-NEXT: # BB#1: # %normal
+; X32-NEXT: # %bb.1: # %normal
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, (%eax)
; X32-NEXT: .LBB3_2: # %overflow
@@ -88,20 +88,20 @@ define i1 @test4(i32 %v1, i32 %v2, i32* %X) nounwind {
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test4:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: addl %esi, %edi
; X64-LINUX-NEXT: jo .LBB3_2
-; X64-LINUX-NEXT: # BB#1: # %normal
+; X64-LINUX-NEXT: # %bb.1: # %normal
; X64-LINUX-NEXT: movl $0, (%rdx)
; X64-LINUX-NEXT: .LBB3_2: # %overflow
; X64-LINUX-NEXT: xorl %eax, %eax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test4:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: addl %edx, %ecx
; X64-WIN32-NEXT: jo .LBB3_2
-; X64-WIN32-NEXT: # BB#1: # %normal
+; X64-WIN32-NEXT: # %bb.1: # %normal
; X64-WIN32-NEXT: movl $0, (%r8)
; X64-WIN32-NEXT: .LBB3_2: # %overflow
; X64-WIN32-NEXT: xorl %eax, %eax
@@ -122,11 +122,11 @@ overflow:
define i1 @test5(i32 %v1, i32 %v2, i32* %X) nounwind {
; X32-LABEL: test5:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: jb .LBB4_2
-; X32-NEXT: # BB#1: # %normal
+; X32-NEXT: # %bb.1: # %normal
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, (%eax)
; X32-NEXT: .LBB4_2: # %carry
@@ -134,20 +134,20 @@ define i1 @test5(i32 %v1, i32 %v2, i32* %X) nounwind {
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test5:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: addl %esi, %edi
; X64-LINUX-NEXT: jb .LBB4_2
-; X64-LINUX-NEXT: # BB#1: # %normal
+; X64-LINUX-NEXT: # %bb.1: # %normal
; X64-LINUX-NEXT: movl $0, (%rdx)
; X64-LINUX-NEXT: .LBB4_2: # %carry
; X64-LINUX-NEXT: xorl %eax, %eax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test5:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: addl %edx, %ecx
; X64-WIN32-NEXT: jb .LBB4_2
-; X64-WIN32-NEXT: # BB#1: # %normal
+; X64-WIN32-NEXT: # %bb.1: # %normal
; X64-WIN32-NEXT: movl $0, (%r8)
; X64-WIN32-NEXT: .LBB4_2: # %carry
; X64-WIN32-NEXT: xorl %eax, %eax
@@ -168,21 +168,21 @@ carry:
define i64 @test6(i64 %A, i32 %B) nounwind {
; X32-LABEL: test6:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test6:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-LINUX-NEXT: shlq $32, %rsi
; X64-LINUX-NEXT: leaq (%rsi,%rdi), %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test6:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; X64-WIN32-NEXT: shlq $32, %rdx
; X64-WIN32-NEXT: leaq (%rdx,%rcx), %rax
@@ -196,21 +196,21 @@ entry:
define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
; X32-LABEL: test7:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: setb %dl
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test7:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: addl %esi, %edi
; X64-LINUX-NEXT: setb %dl
; X64-LINUX-NEXT: movl %edi, %eax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test7:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: addl %edx, %ecx
; X64-WIN32-NEXT: setb %dl
; X64-WIN32-NEXT: movl %ecx, %eax
@@ -223,7 +223,7 @@ entry:
; PR5443
define {i64, i1} @test8(i64 %left, i64 %right) nounwind {
; X32-LABEL: test8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -232,14 +232,14 @@ define {i64, i1} @test8(i64 %left, i64 %right) nounwind {
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test8:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: addq %rsi, %rdi
; X64-LINUX-NEXT: setb %dl
; X64-LINUX-NEXT: movq %rdi, %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test8:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: addq %rdx, %rcx
; X64-WIN32-NEXT: setb %dl
; X64-WIN32-NEXT: movq %rcx, %rax
@@ -258,7 +258,7 @@ entry:
define i32 @test9(i32 %x, i32 %y) nounwind readnone {
; X32-LABEL: test9:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: cmpl $10, {{[0-9]+}}(%esp)
@@ -267,7 +267,7 @@ define i32 @test9(i32 %x, i32 %y) nounwind readnone {
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test9:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: xorl %eax, %eax
; X64-LINUX-NEXT: cmpl $10, %edi
; X64-LINUX-NEXT: sete %al
@@ -276,7 +276,7 @@ define i32 @test9(i32 %x, i32 %y) nounwind readnone {
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test9:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: xorl %eax, %eax
; X64-WIN32-NEXT: cmpl $10, %ecx
; X64-WIN32-NEXT: sete %al
@@ -292,20 +292,20 @@ entry:
define i1 @test10(i32 %x) nounwind {
; X32-LABEL: test10:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: incl %eax
; X32-NEXT: seto %al
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test10:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: incl %edi
; X64-LINUX-NEXT: seto %al
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test10:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: incl %ecx
; X64-WIN32-NEXT: seto %al
; X64-WIN32-NEXT: retq
@@ -317,17 +317,17 @@ entry:
define void @test11(i32* inreg %a) nounwind {
; X32-LABEL: test11:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: subl $-128, (%eax)
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test11:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subl $-128, (%rdi)
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test11:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subl $-128, (%rcx)
; X64-WIN32-NEXT: retq
entry:
@@ -339,18 +339,18 @@ entry:
define void @test12(i64* inreg %a) nounwind {
; X32-LABEL: test12:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: addl $-2147483648, (%eax) # imm = 0x80000000
; X32-NEXT: adcl $0, 4(%eax)
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test12:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subq $-2147483648, (%rdi) # imm = 0x80000000
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test12:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subq $-2147483648, (%rcx) # imm = 0x80000000
; X64-WIN32-NEXT: retq
entry:
@@ -362,18 +362,18 @@ entry:
define void @test13(i64* inreg %a) nounwind {
; X32-LABEL: test13:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: addl $128, (%eax)
; X32-NEXT: adcl $0, 4(%eax)
; X32-NEXT: retl
;
; X64-LINUX-LABEL: test13:
-; X64-LINUX: # BB#0: # %entry
+; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subq $-128, (%rdi)
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test13:
-; X64-WIN32: # BB#0: # %entry
+; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subq $-128, (%rcx)
; X64-WIN32-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/addcarry.ll b/test/CodeGen/X86/addcarry.ll
index ebeb85c2c83..8ca9fb3120b 100644
--- a/test/CodeGen/X86/addcarry.ll
+++ b/test/CodeGen/X86/addcarry.ll
@@ -3,7 +3,7 @@
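; Note: the carry chains below stay in EFLAGS: the first add sets CF and the
; following adc consumes it, instead of materializing the carry as an i1.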
define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: a:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq %rcx, %rdx
; CHECK-NEXT: adcq $0, %r8
; CHECK-NEXT: movq %r8, (%rdi)
@@ -26,7 +26,7 @@ entry:
define void @b(i32* nocapture %r, i64 %a, i64 %b, i32 %c) nounwind {
; CHECK-LABEL: b:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq %rdx, %rsi
; CHECK-NEXT: adcl $0, %ecx
; CHECK-NEXT: movl %ecx, (%rdi)
@@ -45,7 +45,7 @@ entry:
define void @c(i16* nocapture %r, i64 %a, i64 %b, i16 %c) nounwind {
; CHECK-LABEL: c:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq %rdx, %rsi
; CHECK-NEXT: adcw $0, %cx
; CHECK-NEXT: movw %cx, (%rdi)
@@ -64,7 +64,7 @@ entry:
define void @d(i8* nocapture %r, i64 %a, i64 %b, i8 %c) nounwind {
; CHECK-LABEL: d:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq %rdx, %rsi
; CHECK-NEXT: adcb $0, %cl
; CHECK-NEXT: movb %cl, (%rdi)
@@ -83,7 +83,7 @@ entry:
define i8 @e(i32* nocapture %a, i32 %b) nounwind {
; CHECK-LABEL: e:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: movl (%rdi), %ecx
; CHECK-NEXT: leal (%rsi,%rcx), %edx
@@ -109,7 +109,7 @@ define i8 @e(i32* nocapture %a, i32 %b) nounwind {
define %scalar @pr31719(%scalar* nocapture readonly %this, %scalar %arg.b) {
; CHECK-LABEL: pr31719:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq (%rsi), %rdx
; CHECK-NEXT: adcq 8(%rsi), %rcx
; CHECK-NEXT: adcq 16(%rsi), %r8
@@ -168,7 +168,7 @@ entry:
define void @muladd(%accumulator* nocapture %this, i64 %arg.a, i64 %arg.b) {
; CHECK-LABEL: muladd:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: mulq %rsi
; CHECK-NEXT: addq %rax, (%rdi)
@@ -205,7 +205,7 @@ entry:
define i64 @shiftadd(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: shiftadd:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq %rsi, %rdi
; CHECK-NEXT: adcq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -225,7 +225,7 @@ entry:
define %S @readd(%S* nocapture readonly %this, %S %arg.b) {
; CHECK-LABEL: readd:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq (%rsi), %rdx
; CHECK-NEXT: movq 8(%rsi), %r10
; CHECK-NEXT: adcq $0, %r10
diff --git a/test/CodeGen/X86/adx-intrinsics.ll b/test/CodeGen/X86/adx-intrinsics.ll
index 7af15a73a4f..bc8e2be4de0 100644
--- a/test/CodeGen/X86/adx-intrinsics.ll
+++ b/test/CodeGen/X86/adx-intrinsics.ll
@@ -6,7 +6,7 @@ declare i8 @llvm.x86.addcarryx.u32(i8, i32, i32, i8*)
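; Note: the encoding checks pin the exact instruction bytes, which is what
; separates the ADX form adcxl (66 0f 38 f6) from the plain adcl (11) in the
; NOADX/ADX runs below.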
define i8 @test_addcarryx_u32(i8 %c, i32 %a, i32 %b, i8* %ptr) {
; NOADX-LABEL: test_addcarryx_u32:
-; NOADX: ## BB#0:
+; NOADX: ## %bb.0:
; NOADX-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; NOADX-NEXT: adcl %edx, %esi ## encoding: [0x11,0xd6]
; NOADX-NEXT: movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -14,7 +14,7 @@ define i8 @test_addcarryx_u32(i8 %c, i32 %a, i32 %b, i8* %ptr) {
; NOADX-NEXT: retq ## encoding: [0xc3]
;
; ADX-LABEL: test_addcarryx_u32:
-; ADX: ## BB#0:
+; ADX: ## %bb.0:
; ADX-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; ADX-NEXT: adcxl %edx, %esi ## encoding: [0x66,0x0f,0x38,0xf6,0xf2]
; ADX-NEXT: movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -28,7 +28,7 @@ declare i8 @llvm.x86.addcarryx.u64(i8, i64, i64, i8*)
define i8 @test_addcarryx_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
; NOADX-LABEL: test_addcarryx_u64:
-; NOADX: ## BB#0:
+; NOADX: ## %bb.0:
; NOADX-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; NOADX-NEXT: adcq %rdx, %rsi ## encoding: [0x48,0x11,0xd6]
; NOADX-NEXT: movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -36,7 +36,7 @@ define i8 @test_addcarryx_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
; NOADX-NEXT: retq ## encoding: [0xc3]
;
; ADX-LABEL: test_addcarryx_u64:
-; ADX: ## BB#0:
+; ADX: ## %bb.0:
; ADX-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; ADX-NEXT: adcxq %rdx, %rsi ## encoding: [0x66,0x48,0x0f,0x38,0xf6,0xf2]
; ADX-NEXT: movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -50,7 +50,7 @@ declare i8 @llvm.x86.addcarry.u32(i8, i32, i32, i8*)
define i8 @test_addcarry_u32(i8 %c, i32 %a, i32 %b, i8* %ptr) {
; NOADX-LABEL: test_addcarry_u32:
-; NOADX: ## BB#0:
+; NOADX: ## %bb.0:
; NOADX-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; NOADX-NEXT: adcl %edx, %esi ## encoding: [0x11,0xd6]
; NOADX-NEXT: movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -58,7 +58,7 @@ define i8 @test_addcarry_u32(i8 %c, i32 %a, i32 %b, i8* %ptr) {
; NOADX-NEXT: retq ## encoding: [0xc3]
;
; ADX-LABEL: test_addcarry_u32:
-; ADX: ## BB#0:
+; ADX: ## %bb.0:
; ADX-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; ADX-NEXT: adcxl %edx, %esi ## encoding: [0x66,0x0f,0x38,0xf6,0xf2]
; ADX-NEXT: movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -72,7 +72,7 @@ declare i8 @llvm.x86.addcarry.u64(i8, i64, i64, i8*)
define i8 @test_addcarry_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
; NOADX-LABEL: test_addcarry_u64:
-; NOADX: ## BB#0:
+; NOADX: ## %bb.0:
; NOADX-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; NOADX-NEXT: adcq %rdx, %rsi ## encoding: [0x48,0x11,0xd6]
; NOADX-NEXT: movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -80,7 +80,7 @@ define i8 @test_addcarry_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
; NOADX-NEXT: retq ## encoding: [0xc3]
;
; ADX-LABEL: test_addcarry_u64:
-; ADX: ## BB#0:
+; ADX: ## %bb.0:
; ADX-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; ADX-NEXT: adcxq %rdx, %rsi ## encoding: [0x66,0x48,0x0f,0x38,0xf6,0xf2]
; ADX-NEXT: movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -94,7 +94,7 @@ declare i8 @llvm.x86.subborrow.u32(i8, i32, i32, i8*)
define i8 @test_subborrow_u32(i8 %c, i32 %a, i32 %b, i8* %ptr) {
; CHECK-LABEL: test_subborrow_u32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; CHECK-NEXT: sbbl %edx, %esi ## encoding: [0x19,0xd6]
; CHECK-NEXT: movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -108,7 +108,7 @@ declare i8 @llvm.x86.subborrow.u64(i8, i64, i64, i8*)
define i8 @test_subborrow_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
; CHECK-LABEL: test_subborrow_u64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
; CHECK-NEXT: sbbq %rdx, %rsi ## encoding: [0x48,0x19,0xd6]
; CHECK-NEXT: movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -121,7 +121,7 @@ define i8 @test_subborrow_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
; Try a version with loads. Previously we crashed on this.
define i32 @load_crash(i64* nocapture readonly %a, i64* nocapture readonly %b, i64* %res) {
; NOADX-LABEL: load_crash:
-; NOADX: ## BB#0:
+; NOADX: ## %bb.0:
; NOADX-NEXT: movq (%rdi), %rax ## encoding: [0x48,0x8b,0x07]
; NOADX-NEXT: xorl %ecx, %ecx ## encoding: [0x31,0xc9]
; NOADX-NEXT: addb $-1, %cl ## encoding: [0x80,0xc1,0xff]
@@ -132,7 +132,7 @@ define i32 @load_crash(i64* nocapture readonly %a, i64* nocapture readonly %b, i
; NOADX-NEXT: retq ## encoding: [0xc3]
;
; ADX-LABEL: load_crash:
-; ADX: ## BB#0:
+; ADX: ## %bb.0:
; ADX-NEXT: movq (%rdi), %rax ## encoding: [0x48,0x8b,0x07]
; ADX-NEXT: xorl %ecx, %ecx ## encoding: [0x31,0xc9]
; ADX-NEXT: addb $-1, %cl ## encoding: [0x80,0xc1,0xff]
@@ -152,7 +152,7 @@ define i32 @load_crash(i64* nocapture readonly %a, i64* nocapture readonly %b, i
; Try a really simple all-zero input case, which also used to crash.
define void @allzeros() {
; CHECK-LABEL: allzeros:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; CHECK-NEXT: addb $-1, %al ## encoding: [0x04,0xff]
; CHECK-NEXT: sbbq %rax, %rax ## encoding: [0x48,0x19,0xc0]
diff --git a/test/CodeGen/X86/aes-schedule.ll b/test/CodeGen/X86/aes-schedule.ll
index ba22f175757..a829a774867 100644
--- a/test/CodeGen/X86/aes-schedule.ll
+++ b/test/CodeGen/X86/aes-schedule.ll
@@ -12,49 +12,49 @@
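; Note: in these checks the sched: [N:M] annotation gives the modeled
; latency N (in cycles) and reciprocal throughput M for each target CPU.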
define <2 x i64> @test_aesdec(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_aesdec:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: aesdec %xmm1, %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: aesdec (%rdi), %xmm0 # sched: [13:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_aesdec:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: aesdec %xmm1, %xmm0 # sched: [8:5.00]
; SLM-NEXT: aesdec (%rdi), %xmm0 # sched: [8:5.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_aesdec:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaesdec (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_aesdec:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: vaesdec (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_aesdec:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaesdec (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_aesdec:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vaesdec (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_aesdec:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaesdec (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aesdec:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vaesdec (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -67,49 +67,49 @@ declare <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64>, <2 x i64>)
define <2 x i64> @test_aesdeclast(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_aesdeclast:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: aesdeclast %xmm1, %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: aesdeclast (%rdi), %xmm0 # sched: [13:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_aesdeclast:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: aesdeclast %xmm1, %xmm0 # sched: [8:5.00]
; SLM-NEXT: aesdeclast (%rdi), %xmm0 # sched: [8:5.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_aesdeclast:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_aesdeclast:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_aesdeclast:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_aesdeclast:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_aesdeclast:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aesdeclast:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -122,49 +122,49 @@ declare <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64>, <2 x i64>)
define <2 x i64> @test_aesenc(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_aesenc:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: aesenc %xmm1, %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: aesenc (%rdi), %xmm0 # sched: [13:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_aesenc:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: aesenc %xmm1, %xmm0 # sched: [8:5.00]
; SLM-NEXT: aesenc (%rdi), %xmm0 # sched: [8:5.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_aesenc:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaesenc (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_aesenc:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: vaesenc (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_aesenc:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaesenc (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_aesenc:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vaesenc (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_aesenc:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaesenc (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aesenc:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vaesenc (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -177,49 +177,49 @@ declare <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64>, <2 x i64>)
define <2 x i64> @test_aesenclast(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_aesenclast:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: aesenclast %xmm1, %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: aesenclast (%rdi), %xmm0 # sched: [13:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_aesenclast:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: aesenclast %xmm1, %xmm0 # sched: [8:5.00]
; SLM-NEXT: aesenclast (%rdi), %xmm0 # sched: [8:5.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_aesenclast:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaesenclast (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_aesenclast:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: vaesenclast (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_aesenclast:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaesenclast (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_aesenclast:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vaesenclast (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_aesenclast:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaesenclast (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aesenclast:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vaesenclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -232,56 +232,56 @@ declare <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64>, <2 x i64>)
define <2 x i64> @test_aesimc(<2 x i64> %a0, <2 x i64> *%a1) {
; GENERIC-LABEL: test_aesimc:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: aesimc %xmm0, %xmm1 # sched: [12:2.00]
; GENERIC-NEXT: aesimc (%rdi), %xmm0 # sched: [18:2.00]
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_aesimc:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: aesimc %xmm0, %xmm1 # sched: [8:5.00]
; SLM-NEXT: aesimc (%rdi), %xmm0 # sched: [8:5.00]
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_aesimc:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaesimc %xmm0, %xmm0 # sched: [12:2.00]
; SANDY-NEXT: vaesimc (%rdi), %xmm1 # sched: [18:2.00]
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_aesimc:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaesimc %xmm0, %xmm0 # sched: [14:2.00]
; HASWELL-NEXT: vaesimc (%rdi), %xmm1 # sched: [14:2.00]
; HASWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_aesimc:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaesimc %xmm0, %xmm0 # sched: [14:2.00]
; BROADWELL-NEXT: vaesimc (%rdi), %xmm1 # sched: [19:2.00]
; BROADWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_aesimc:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaesimc %xmm0, %xmm0 # sched: [8:2.00]
; SKYLAKE-NEXT: vaesimc (%rdi), %xmm1 # sched: [14:2.00]
; SKYLAKE-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_aesimc:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaesimc (%rdi), %xmm1 # sched: [7:1.00]
; BTVER2-NEXT: vaesimc %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aesimc:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaesimc (%rdi), %xmm1 # sched: [11:0.50]
; ZNVER1-NEXT: vaesimc %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -296,56 +296,56 @@ declare <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64>)
define <2 x i64> @test_aeskeygenassist(<2 x i64> %a0, <2 x i64> *%a1) {
; GENERIC-LABEL: test_aeskeygenassist:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: aeskeygenassist $7, %xmm0, %xmm1 # sched: [8:3.67]
; GENERIC-NEXT: aeskeygenassist $7, (%rdi), %xmm0 # sched: [8:3.33]
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_aeskeygenassist:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: aeskeygenassist $7, %xmm0, %xmm1 # sched: [8:5.00]
; SLM-NEXT: aeskeygenassist $7, (%rdi), %xmm0 # sched: [8:5.00]
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_aeskeygenassist:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # sched: [8:3.67]
; SANDY-NEXT: vaeskeygenassist $7, (%rdi), %xmm1 # sched: [8:3.33]
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_aeskeygenassist:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # sched: [29:7.00]
; HASWELL-NEXT: vaeskeygenassist $7, (%rdi), %xmm1 # sched: [28:7.00]
; HASWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_aeskeygenassist:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # sched: [29:7.00]
; BROADWELL-NEXT: vaeskeygenassist $7, (%rdi), %xmm1 # sched: [33:7.00]
; BROADWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_aeskeygenassist:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # sched: [20:6.00]
; SKYLAKE-NEXT: vaeskeygenassist $7, (%rdi), %xmm1 # sched: [25:6.00]
; SKYLAKE-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_aeskeygenassist:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaeskeygenassist $7, (%rdi), %xmm1 # sched: [7:1.00]
; BTVER2-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aeskeygenassist:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaeskeygenassist $7, (%rdi), %xmm1 # sched: [11:0.50]
; ZNVER1-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
diff --git a/test/CodeGen/X86/aes_intrinsics.ll b/test/CodeGen/X86/aes_intrinsics.ll
index ac31fd832ec..442feca3fc1 100644
--- a/test/CodeGen/X86/aes_intrinsics.ll
+++ b/test/CodeGen/X86/aes_intrinsics.ll
@@ -6,12 +6,12 @@
define <2 x i64> @test_x86_aesni_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_aesni_aesdec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: aesdec %xmm1, %xmm0 # encoding: [0x66,0x0f,0x38,0xde,0xc1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_x86_aesni_aesdec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xde,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -22,12 +22,12 @@ declare <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_x86_aesni_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_aesni_aesdeclast:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: aesdeclast %xmm1, %xmm0 # encoding: [0x66,0x0f,0x38,0xdf,0xc1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_x86_aesni_aesdeclast:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xdf,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -38,12 +38,12 @@ declare <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64>, <2 x i64>) nounwind read
define <2 x i64> @test_x86_aesni_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_aesni_aesenc:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: aesenc %xmm1, %xmm0 # encoding: [0x66,0x0f,0x38,0xdc,0xc1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_x86_aesni_aesenc:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xdc,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -54,12 +54,12 @@ declare <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_x86_aesni_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_aesni_aesenclast:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: aesenclast %xmm1, %xmm0 # encoding: [0x66,0x0f,0x38,0xdd,0xc1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_x86_aesni_aesenclast:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xdd,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -70,12 +70,12 @@ declare <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64>, <2 x i64>) nounwind read
define <2 x i64> @test_x86_aesni_aesimc(<2 x i64> %a0) {
; SSE-LABEL: test_x86_aesni_aesimc:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: aesimc %xmm0, %xmm0 # encoding: [0x66,0x0f,0x38,0xdb,0xc0]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_x86_aesni_aesimc:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaesimc %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xdb,0xc0]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0) ; <<2 x i64>> [#uses=1]
@@ -86,12 +86,12 @@ declare <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64>) nounwind readnone
define <2 x i64> @test_x86_aesni_aeskeygenassist(<2 x i64> %a0) {
; SSE-LABEL: test_x86_aesni_aeskeygenassist:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: aeskeygenassist $7, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x3a,0xdf,0xc0,0x07]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_x86_aesni_aeskeygenassist:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0xdf,0xc0,0x07]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7) ; <<2 x i64>> [#uses=1]
diff --git a/test/CodeGen/X86/all-ones-vector.ll b/test/CodeGen/X86/all-ones-vector.ll
index f5d41ae6db2..d64b3d7e29b 100644
--- a/test/CodeGen/X86/all-ones-vector.ll
+++ b/test/CodeGen/X86/all-ones-vector.ll
@@ -12,22 +12,22 @@
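; Note: comparing a register with itself via pcmpeqd makes every lane equal,
; producing an all-ones vector without loading a constant from memory.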
define <16 x i8> @allones_v16i8() nounwind {
; X32-SSE-LABEL: allones_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: allones_v16i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: allones_v16i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: allones_v16i8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
ret <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -35,22 +35,22 @@ define <16 x i8> @allones_v16i8() nounwind {
define <8 x i16> @allones_v8i16() nounwind {
; X32-SSE-LABEL: allones_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: allones_v8i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: allones_v8i16:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: allones_v8i16:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
ret <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -58,22 +58,22 @@ define <8 x i16> @allones_v8i16() nounwind {
define <4 x i32> @allones_v4i32() nounwind {
; X32-SSE-LABEL: allones_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: allones_v4i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: allones_v4i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: allones_v4i32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -81,22 +81,22 @@ define <4 x i32> @allones_v4i32() nounwind {
define <2 x i64> @allones_v2i64() nounwind {
; X32-SSE-LABEL: allones_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: allones_v2i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: allones_v2i64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: allones_v2i64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
ret <2 x i64> <i64 -1, i64 -1>
@@ -104,22 +104,22 @@ define <2 x i64> @allones_v2i64() nounwind {
define <2 x double> @allones_v2f64() nounwind {
; X32-SSE-LABEL: allones_v2f64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: allones_v2f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: allones_v2f64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: allones_v2f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
ret <2 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff>
@@ -127,22 +127,22 @@ define <2 x double> @allones_v2f64() nounwind {
define <4 x float> @allones_v4f32() nounwind {
; X32-SSE-LABEL: allones_v4f32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: allones_v4f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: allones_v4f32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: allones_v4f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: retq
ret <4 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
@@ -150,36 +150,36 @@ define <4 x float> @allones_v4f32() nounwind {
define <32 x i8> @allones_v32i8() nounwind {
; X32-SSE-LABEL: allones_v32i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v32i8:
-; X32-AVX256: # BB#0:
+; X32-AVX256: # %bb.0:
; X32-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX256-NEXT: retl
;
; X64-SSE-LABEL: allones_v32i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v32i8:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v32i8:
-; X64-AVX256: # BB#0:
+; X64-AVX256: # %bb.0:
; X64-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX256-NEXT: retq
ret <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -187,36 +187,36 @@ define <32 x i8> @allones_v32i8() nounwind {
define <16 x i16> @allones_v16i16() nounwind {
; X32-SSE-LABEL: allones_v16i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v16i16:
-; X32-AVX256: # BB#0:
+; X32-AVX256: # %bb.0:
; X32-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX256-NEXT: retl
;
; X64-SSE-LABEL: allones_v16i16:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v16i16:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v16i16:
-; X64-AVX256: # BB#0:
+; X64-AVX256: # %bb.0:
; X64-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX256-NEXT: retq
ret <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -224,36 +224,36 @@ define <16 x i16> @allones_v16i16() nounwind {
define <8 x i32> @allones_v8i32() nounwind {
; X32-SSE-LABEL: allones_v8i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v8i32:
-; X32-AVX256: # BB#0:
+; X32-AVX256: # %bb.0:
; X32-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX256-NEXT: retl
;
; X64-SSE-LABEL: allones_v8i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v8i32:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v8i32:
-; X64-AVX256: # BB#0:
+; X64-AVX256: # %bb.0:
; X64-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX256-NEXT: retq
ret <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -261,36 +261,36 @@ define <8 x i32> @allones_v8i32() nounwind {
define <4 x i64> @allones_v4i64() nounwind {
; X32-SSE-LABEL: allones_v4i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v4i64:
-; X32-AVX256: # BB#0:
+; X32-AVX256: # %bb.0:
; X32-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX256-NEXT: retl
;
; X64-SSE-LABEL: allones_v4i64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v4i64:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v4i64:
-; X64-AVX256: # BB#0:
+; X64-AVX256: # %bb.0:
; X64-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX256-NEXT: retq
ret <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -298,36 +298,36 @@ define <4 x i64> @allones_v4i64() nounwind {
define <4 x double> @allones_v4f64() nounwind {
; X32-SSE-LABEL: allones_v4f64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v4f64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v4f64:
-; X32-AVX256: # BB#0:
+; X32-AVX256: # %bb.0:
; X32-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX256-NEXT: retl
;
; X64-SSE-LABEL: allones_v4f64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v4f64:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v4f64:
-; X64-AVX256: # BB#0:
+; X64-AVX256: # %bb.0:
; X64-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX256-NEXT: retq
ret <4 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff>
@@ -335,36 +335,36 @@ define <4 x double> @allones_v4f64() nounwind {
define <4 x double> @allones_v4f64_optsize() nounwind optsize {
; X32-SSE-LABEL: allones_v4f64_optsize:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v4f64_optsize:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v4f64_optsize:
-; X32-AVX256: # BB#0:
+; X32-AVX256: # %bb.0:
; X32-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX256-NEXT: retl
;
; X64-SSE-LABEL: allones_v4f64_optsize:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v4f64_optsize:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v4f64_optsize:
-; X64-AVX256: # BB#0:
+; X64-AVX256: # %bb.0:
; X64-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX256-NEXT: retq
ret <4 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff>
@@ -372,36 +372,36 @@ define <4 x double> @allones_v4f64_optsize() nounwind optsize {
define <8 x float> @allones_v8f32() nounwind {
; X32-SSE-LABEL: allones_v8f32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v8f32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v8f32:
-; X32-AVX256: # BB#0:
+; X32-AVX256: # %bb.0:
; X32-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX256-NEXT: retl
;
; X64-SSE-LABEL: allones_v8f32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v8f32:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v8f32:
-; X64-AVX256: # BB#0:
+; X64-AVX256: # %bb.0:
; X64-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX256-NEXT: retq
ret <8 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
@@ -409,36 +409,36 @@ define <8 x float> @allones_v8f32() nounwind {
define <8 x float> @allones_v8f32_optsize() nounwind optsize {
; X32-SSE-LABEL: allones_v8f32_optsize:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v8f32_optsize:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX256-LABEL: allones_v8f32_optsize:
-; X32-AVX256: # BB#0:
+; X32-AVX256: # %bb.0:
; X32-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX256-NEXT: retl
;
; X64-SSE-LABEL: allones_v8f32_optsize:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v8f32_optsize:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: retq
;
; X64-AVX256-LABEL: allones_v8f32_optsize:
-; X64-AVX256: # BB#0:
+; X64-AVX256: # %bb.0:
; X64-AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX256-NEXT: retq
ret <8 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
@@ -446,7 +446,7 @@ define <8 x float> @allones_v8f32_optsize() nounwind optsize {
define <64 x i8> @allones_v64i8() nounwind {
; X32-SSE-LABEL: allones_v64i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -454,31 +454,31 @@ define <64 x i8> @allones_v64i8() nounwind {
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v64i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: allones_v64i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-KNL-LABEL: allones_v64i8:
-; X32-KNL: # BB#0:
+; X32-KNL: # %bb.0:
; X32-KNL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-KNL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-KNL-NEXT: retl
;
; X32-SKX-LABEL: allones_v64i8:
-; X32-SKX: # BB#0:
+; X32-SKX: # %bb.0:
; X32-SKX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X32-SKX-NEXT: retl
;
; X64-SSE-LABEL: allones_v64i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -486,26 +486,26 @@ define <64 x i8> @allones_v64i8() nounwind {
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v64i8:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: allones_v64i8:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-KNL-LABEL: allones_v64i8:
-; X64-KNL: # BB#0:
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-KNL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-KNL-NEXT: retq
;
; X64-SKX-LABEL: allones_v64i8:
-; X64-SKX: # BB#0:
+; X64-SKX: # %bb.0:
; X64-SKX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X64-SKX-NEXT: retq
ret <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -513,7 +513,7 @@ define <64 x i8> @allones_v64i8() nounwind {
define <32 x i16> @allones_v32i16() nounwind {
; X32-SSE-LABEL: allones_v32i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -521,31 +521,31 @@ define <32 x i16> @allones_v32i16() nounwind {
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v32i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: allones_v32i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-KNL-LABEL: allones_v32i16:
-; X32-KNL: # BB#0:
+; X32-KNL: # %bb.0:
; X32-KNL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-KNL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-KNL-NEXT: retl
;
; X32-SKX-LABEL: allones_v32i16:
-; X32-SKX: # BB#0:
+; X32-SKX: # %bb.0:
; X32-SKX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X32-SKX-NEXT: retl
;
; X64-SSE-LABEL: allones_v32i16:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -553,26 +553,26 @@ define <32 x i16> @allones_v32i16() nounwind {
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v32i16:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: allones_v32i16:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-KNL-LABEL: allones_v32i16:
-; X64-KNL: # BB#0:
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-KNL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-KNL-NEXT: retq
;
; X64-SKX-LABEL: allones_v32i16:
-; X64-SKX: # BB#0:
+; X64-SKX: # %bb.0:
; X64-SKX-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X64-SKX-NEXT: retq
ret <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -580,7 +580,7 @@ define <32 x i16> @allones_v32i16() nounwind {
define <16 x i32> @allones_v16i32() nounwind {
; X32-SSE-LABEL: allones_v16i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -588,25 +588,25 @@ define <16 x i32> @allones_v16i32() nounwind {
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v16i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: allones_v16i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: allones_v16i32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-SSE-LABEL: allones_v16i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -614,20 +614,20 @@ define <16 x i32> @allones_v16i32() nounwind {
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v16i32:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: allones_v16i32:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: allones_v16i32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
ret <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -635,7 +635,7 @@ define <16 x i32> @allones_v16i32() nounwind {
define <8 x i64> @allones_v8i64() nounwind {
; X32-SSE-LABEL: allones_v8i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -643,25 +643,25 @@ define <8 x i64> @allones_v8i64() nounwind {
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v8i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: allones_v8i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: allones_v8i64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-SSE-LABEL: allones_v8i64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -669,20 +669,20 @@ define <8 x i64> @allones_v8i64() nounwind {
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v8i64:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: allones_v8i64:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: allones_v8i64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
ret <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
@@ -690,7 +690,7 @@ define <8 x i64> @allones_v8i64() nounwind {
define <8 x double> @allones_v8f64() nounwind {
; X32-SSE-LABEL: allones_v8f64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -698,25 +698,25 @@ define <8 x double> @allones_v8f64() nounwind {
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v8f64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: allones_v8f64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: allones_v8f64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-SSE-LABEL: allones_v8f64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -724,20 +724,20 @@ define <8 x double> @allones_v8f64() nounwind {
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v8f64:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: allones_v8f64:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: allones_v8f64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
ret <8 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff>
@@ -745,7 +745,7 @@ define <8 x double> @allones_v8f64() nounwind {
define <16 x float> @allones_v16f32() nounwind {
; X32-SSE-LABEL: allones_v16f32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X32-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -753,25 +753,25 @@ define <16 x float> @allones_v16f32() nounwind {
; X32-SSE-NEXT: retl
;
; X32-AVX1-LABEL: allones_v16f32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X32-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: allones_v16f32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: allones_v16f32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-SSE-LABEL: allones_v16f32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; X64-SSE-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE-NEXT: pcmpeqd %xmm2, %xmm2
@@ -779,20 +779,20 @@ define <16 x float> @allones_v16f32() nounwind {
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: allones_v16f32:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: allones_v16f32:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: allones_v16f32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
ret <16 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
diff --git a/test/CodeGen/X86/and-sink.ll b/test/CodeGen/X86/and-sink.ll
index 0f877e778c7..6d23d6cfb70 100644
--- a/test/CodeGen/X86/and-sink.ll
+++ b/test/CodeGen/X86/and-sink.ll
@@ -9,15 +9,15 @@
; Test that 'and' is sunk into bb0.
define i32 @and_sink1(i32 %a, i1 %c) {
; CHECK-LABEL: and_sink1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB0_3
-; CHECK-NEXT: # BB#1: # %bb0
+; CHECK-NEXT: # %bb.1: # %bb0
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl $0, A
; CHECK-NEXT: testb $4, %al
; CHECK-NEXT: jne .LBB0_3
-; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: # %bb.2: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB0_3: # %bb2
@@ -46,11 +46,11 @@ bb2:
; Test that both 'and' and cmp get sunk to bb1.
define i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
; CHECK-LABEL: and_sink2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $0, A
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB1_5
-; CHECK-NEXT: # BB#1: # %bb0.preheader
+; CHECK-NEXT: # %bb.1: # %bb0.preheader
; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: .p2align 4, 0x90
@@ -59,12 +59,12 @@ define i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
; CHECK-NEXT: movl $0, B
; CHECK-NEXT: testb $1, %al
; CHECK-NEXT: je .LBB1_5
-; CHECK-NEXT: # BB#3: # %bb1
+; CHECK-NEXT: # %bb.3: # %bb1
; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1
; CHECK-NEXT: movl $0, C
; CHECK-NEXT: testb $4, %cl
; CHECK-NEXT: jne .LBB1_2
-; CHECK-NEXT: # BB#4: # %bb2
+; CHECK-NEXT: # %bb.4: # %bb2
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB1_5: # %bb3
@@ -100,10 +100,10 @@ bb3:
; Test that CodeGenPrepare doesn't get stuck in a loop sinking and hoisting a masked load.
define i32 @and_sink3(i1 %c, i32* %p) {
; CHECK-LABEL: and_sink3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB2_3
-; CHECK-NEXT: # BB#1: # %bb0
+; CHECK-NEXT: # %bb.1: # %bb0
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl (%eax), %eax
; CHECK-NEXT: testl %eax, %eax
@@ -138,16 +138,16 @@ bb2:
; Test that CodeGenPrepare sinks/duplicates non-immediate 'and'.
define i32 @and_sink4(i32 %a, i32 %b, i1 %c) {
; CHECK-LABEL: and_sink4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB3_4
-; CHECK-NEXT: # BB#1: # %bb0
+; CHECK-NEXT: # %bb.1: # %bb0
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: testl %eax, %ecx
; CHECK-NEXT: movl $0, A
; CHECK-NEXT: jne .LBB3_4
-; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: # %bb.2: # %bb1
; CHECK-NEXT: leal (%ecx,%eax), %edx
; CHECK-NEXT: testl %eax, %ecx
; CHECK-NEXT: movl %edx, B
@@ -189,15 +189,15 @@ bb3:
; when it would increase register pressure.
define i32 @and_sink5(i32 %a, i32 %b, i32 %a2, i32 %b2, i1 %c) {
; CHECK-LABEL: and_sink5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB4_4
-; CHECK-NEXT: # BB#1: # %bb0
+; CHECK-NEXT: # %bb.1: # %bb0
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: andl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl $0, A
; CHECK-NEXT: jne .LBB4_4
-; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: # %bb.2: # %bb1
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: testl %eax, %eax
diff --git a/test/CodeGen/X86/anyext.ll b/test/CodeGen/X86/anyext.ll
index 00adcd625cb..4baea69af9c 100644
--- a/test/CodeGen/X86/anyext.ll
+++ b/test/CodeGen/X86/anyext.ll
@@ -6,7 +6,7 @@
define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
; X32-LABEL: foo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
@@ -15,7 +15,7 @@ define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
@@ -31,7 +31,7 @@ define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
define i32 @bar(i32 %p, i16 zeroext %x) nounwind {
; X32-LABEL: bar:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: divw {{[0-9]+}}(%esp)
@@ -40,7 +40,7 @@ define i32 @bar(i32 %p, i16 zeroext %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: bar:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: divw %si
diff --git a/test/CodeGen/X86/atom-fixup-lea2.ll b/test/CodeGen/X86/atom-fixup-lea2.ll
index 68b376ea5cc..9b0b472be0f 100644
--- a/test/CodeGen/X86/atom-fixup-lea2.ll
+++ b/test/CodeGen/X86/atom-fixup-lea2.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s
; RUN: llc < %s -mcpu=goldmont -mtriple=i686-linux | FileCheck %s
-; CHECK:BB#5
+; CHECK:%bb.5
; CHECK-NEXT:leal
; CHECK-NEXT:leal
; CHECK-NEXT:leal
diff --git a/test/CodeGen/X86/atomic-eflags-reuse.ll b/test/CodeGen/X86/atomic-eflags-reuse.ll
index 260680eebf5..df4b00ddbe8 100644
--- a/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -4,14 +4,14 @@
define i32 @test_add_1_cmov_slt(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_cmov_slt:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock incq (%rdi)
; FASTINCDEC-NEXT: cmovgl %edx, %esi
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_add_1_cmov_slt:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $1, (%rdi)
; SLOWINCDEC-NEXT: cmovgl %edx, %esi
; SLOWINCDEC-NEXT: movl %esi, %eax
@@ -25,14 +25,14 @@ entry:
define i32 @test_add_1_cmov_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_cmov_sge:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock incq (%rdi)
; FASTINCDEC-NEXT: cmovlel %edx, %esi
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_add_1_cmov_sge:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $1, (%rdi)
; SLOWINCDEC-NEXT: cmovlel %edx, %esi
; SLOWINCDEC-NEXT: movl %esi, %eax
@@ -46,14 +46,14 @@ entry:
define i32 @test_sub_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmov_sle:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: cmovgel %edx, %esi
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmov_sle:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $-1, (%rdi)
; SLOWINCDEC-NEXT: cmovgel %edx, %esi
; SLOWINCDEC-NEXT: movl %esi, %eax
@@ -67,14 +67,14 @@ entry:
define i32 @test_sub_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmov_sgt:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: cmovll %edx, %esi
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmov_sgt:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $-1, (%rdi)
; SLOWINCDEC-NEXT: cmovll %edx, %esi
; SLOWINCDEC-NEXT: movl %esi, %eax
@@ -89,7 +89,7 @@ entry:
; FIXME: (setcc slt x, 0) gets combined into shr early.
define i8 @test_add_1_setcc_slt(i64* %p) #0 {
; CHECK-LABEL: test_add_1_setcc_slt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: shrq $63, %rax
@@ -104,13 +104,13 @@ entry:
define i8 @test_sub_1_setcc_sgt(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_setcc_sgt:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: setge %al
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_setcc_sgt:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $-1, (%rdi)
; SLOWINCDEC-NEXT: setge %al
; SLOWINCDEC-NEXT: retq
@@ -123,10 +123,10 @@ entry:
define i32 @test_add_1_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_brcond_sge:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock incq (%rdi)
; FASTINCDEC-NEXT: jle .LBB6_2
-; FASTINCDEC-NEXT: # BB#1: # %t
+; FASTINCDEC-NEXT: # %bb.1: # %t
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
; FASTINCDEC-NEXT: .LBB6_2: # %f
@@ -134,10 +134,10 @@ define i32 @test_add_1_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_add_1_brcond_sge:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $1, (%rdi)
; SLOWINCDEC-NEXT: jle .LBB6_2
-; SLOWINCDEC-NEXT: # BB#1: # %t
+; SLOWINCDEC-NEXT: # %bb.1: # %t
; SLOWINCDEC-NEXT: movl %esi, %eax
; SLOWINCDEC-NEXT: retq
; SLOWINCDEC-NEXT: .LBB6_2: # %f
@@ -158,7 +158,7 @@ f:
define i32 @test_add_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
; CHECK-LABEL: test_add_1_cmov_sle:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: testq %rax, %rax
@@ -174,7 +174,7 @@ entry:
define i32 @test_add_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
; CHECK-LABEL: test_add_1_cmov_sgt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: testq %rax, %rax
@@ -192,7 +192,7 @@ entry:
define i8 @test_add_1_setcc_sgt_reuse(i64* %p, i64* %p2) #0 {
; CHECK-LABEL: test_add_1_setcc_sgt_reuse:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: lock xaddq %rcx, (%rdi)
; CHECK-NEXT: testq %rcx, %rcx
@@ -209,7 +209,7 @@ entry:
define i8 @test_sub_2_setcc_sgt(i64* %p) #0 {
; CHECK-LABEL: test_sub_2_setcc_sgt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-2, %rax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: testq %rax, %rax
@@ -225,7 +225,7 @@ entry:
define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
; TODO: It's possible to use "lock inc" here, but both cmovs need to be updated.
; CHECK-LABEL: test_add_1_cmov_cmov:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: testq %rax, %rax
@@ -240,13 +240,13 @@ entry:
define i8 @test_sub_1_cmp_1_setcc_eq(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_eq:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: sete %al
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_eq:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock subq $1, (%rdi)
; SLOWINCDEC-NEXT: sete %al
; SLOWINCDEC-NEXT: retq
@@ -259,13 +259,13 @@ entry:
define i8 @test_sub_1_cmp_1_setcc_ne(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_ne:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: setne %al
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_ne:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock subq $1, (%rdi)
; SLOWINCDEC-NEXT: setne %al
; SLOWINCDEC-NEXT: retq
@@ -278,7 +278,7 @@ entry:
define i8 @test_sub_1_cmp_1_setcc_ugt(i64* %p) #0 {
; CHECK-LABEL: test_sub_1_cmp_1_setcc_ugt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lock subq $1, (%rdi)
; CHECK-NEXT: seta %al
; CHECK-NEXT: retq
@@ -293,7 +293,7 @@ entry:
; comparison can be folded into the atomic subtract.
define i8 @test_sub_1_cmp_1_setcc_sle(i64* %p) #0 {
; CHECK-LABEL: test_sub_1_cmp_1_setcc_sle:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-1, %rax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: cmpq $2, %rax
@@ -308,7 +308,7 @@ entry:
define i8 @test_sub_3_cmp_3_setcc_eq(i64* %p) #0 {
; CHECK-LABEL: test_sub_3_cmp_3_setcc_eq:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lock subq $3, (%rdi)
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
@@ -323,7 +323,7 @@ entry:
; comparison can be folded into the atomic subtract.
define i8 @test_sub_3_cmp_3_setcc_uge(i64* %p) #0 {
; CHECK-LABEL: test_sub_3_cmp_3_setcc_uge:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-3, %rax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: cmpq $2, %rax
diff --git a/test/CodeGen/X86/atomic-minmax-i6432.ll b/test/CodeGen/X86/atomic-minmax-i6432.ll
index 9a1b8d38cbe..fec740f591f 100644
--- a/test/CodeGen/X86/atomic-minmax-i6432.ll
+++ b/test/CodeGen/X86/atomic-minmax-i6432.ll
@@ -6,7 +6,7 @@
define i64 @atomic_max_i64() nounwind {
; LINUX-LABEL: atomic_max_i64:
-; LINUX: # BB#0: # %entry
+; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: pushl %esi
; LINUX-NEXT: movl sc64+4, %edx
@@ -24,13 +24,13 @@ define i64 @atomic_max_i64() nounwind {
; LINUX-NEXT: cmovll %eax, %ebx
; LINUX-NEXT: lock cmpxchg8b sc64
; LINUX-NEXT: jne .LBB0_1
-; LINUX-NEXT: # BB#2: # %atomicrmw.end
+; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: popl %esi
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: atomic_max_i64:
-; PIC: ## BB#0: ## %entry
+; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %edi
; PIC-NEXT: pushl %esi
@@ -53,7 +53,7 @@ define i64 @atomic_max_i64() nounwind {
; PIC-NEXT: cmovll %eax, %ebx
; PIC-NEXT: lock cmpxchg8b (%esi)
; PIC-NEXT: jne LBB0_1
-; PIC-NEXT: ## BB#2: ## %atomicrmw.end
+; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %edi
; PIC-NEXT: popl %ebx
@@ -66,7 +66,7 @@ entry:
define i64 @atomic_min_i64() nounwind {
; LINUX-LABEL: atomic_min_i64:
-; LINUX: # BB#0: # %entry
+; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: movl sc64+4, %edx
; LINUX-NEXT: movl sc64, %eax
@@ -82,12 +82,12 @@ define i64 @atomic_min_i64() nounwind {
; LINUX-NEXT: cmovll %eax, %ebx
; LINUX-NEXT: lock cmpxchg8b sc64
; LINUX-NEXT: jne .LBB1_1
-; LINUX-NEXT: # BB#2: # %atomicrmw.end
+; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: atomic_min_i64:
-; PIC: ## BB#0: ## %entry
+; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %esi
; PIC-NEXT: calll L1$pb
@@ -108,7 +108,7 @@ define i64 @atomic_min_i64() nounwind {
; PIC-NEXT: cmovll %eax, %ebx
; PIC-NEXT: lock cmpxchg8b (%esi)
; PIC-NEXT: jne LBB1_1
-; PIC-NEXT: ## BB#2: ## %atomicrmw.end
+; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %ebx
; PIC-NEXT: retl
@@ -120,7 +120,7 @@ entry:
define i64 @atomic_umax_i64() nounwind {
; LINUX-LABEL: atomic_umax_i64:
-; LINUX: # BB#0: # %entry
+; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: pushl %esi
; LINUX-NEXT: movl sc64+4, %edx
@@ -138,13 +138,13 @@ define i64 @atomic_umax_i64() nounwind {
; LINUX-NEXT: cmovbl %eax, %ebx
; LINUX-NEXT: lock cmpxchg8b sc64
; LINUX-NEXT: jne .LBB2_1
-; LINUX-NEXT: # BB#2: # %atomicrmw.end
+; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: popl %esi
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: atomic_umax_i64:
-; PIC: ## BB#0: ## %entry
+; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %edi
; PIC-NEXT: pushl %esi
@@ -167,7 +167,7 @@ define i64 @atomic_umax_i64() nounwind {
; PIC-NEXT: cmovbl %eax, %ebx
; PIC-NEXT: lock cmpxchg8b (%esi)
; PIC-NEXT: jne LBB2_1
-; PIC-NEXT: ## BB#2: ## %atomicrmw.end
+; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %edi
; PIC-NEXT: popl %ebx
@@ -180,7 +180,7 @@ entry:
define i64 @atomic_umin_i64() nounwind {
; LINUX-LABEL: atomic_umin_i64:
-; LINUX: # BB#0: # %entry
+; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: movl sc64+4, %edx
; LINUX-NEXT: movl sc64, %eax
@@ -196,12 +196,12 @@ define i64 @atomic_umin_i64() nounwind {
; LINUX-NEXT: cmovbl %eax, %ebx
; LINUX-NEXT: lock cmpxchg8b sc64
; LINUX-NEXT: jne .LBB3_1
-; LINUX-NEXT: # BB#2: # %atomicrmw.end
+; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: atomic_umin_i64:
-; PIC: ## BB#0: ## %entry
+; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %esi
; PIC-NEXT: calll L3$pb
@@ -222,7 +222,7 @@ define i64 @atomic_umin_i64() nounwind {
; PIC-NEXT: cmovbl %eax, %ebx
; PIC-NEXT: lock cmpxchg8b (%esi)
; PIC-NEXT: jne LBB3_1
-; PIC-NEXT: ## BB#2: ## %atomicrmw.end
+; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %ebx
; PIC-NEXT: retl
@@ -236,7 +236,7 @@ entry:
define void @tf_bug(i8* %ptr) nounwind {
; LINUX-LABEL: tf_bug:
-; LINUX: # BB#0: # %entry
+; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: pushl %esi
; LINUX-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -251,7 +251,7 @@ define void @tf_bug(i8* %ptr) nounwind {
; LINUX-NEXT: adcl $0, %ecx
; LINUX-NEXT: lock cmpxchg8b id
; LINUX-NEXT: jne .LBB4_1
-; LINUX-NEXT: # BB#2: # %atomicrmw.end
+; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: addl $1, %eax
; LINUX-NEXT: adcl $0, %edx
; LINUX-NEXT: movl %eax, (%esi)
@@ -261,7 +261,7 @@ define void @tf_bug(i8* %ptr) nounwind {
; LINUX-NEXT: retl
;
; PIC-LABEL: tf_bug:
-; PIC: ## BB#0: ## %entry
+; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %edi
; PIC-NEXT: pushl %esi
@@ -280,7 +280,7 @@ define void @tf_bug(i8* %ptr) nounwind {
; PIC-NEXT: adcl $0, %ecx
; PIC-NEXT: lock cmpxchg8b _id-L4$pb(%edi)
; PIC-NEXT: jne LBB4_1
-; PIC-NEXT: ## BB#2: ## %atomicrmw.end
+; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: addl $1, %eax
; PIC-NEXT: adcl $0, %edx
; PIC-NEXT: movl %eax, (%esi)
diff --git a/test/CodeGen/X86/atomic128.ll b/test/CodeGen/X86/atomic128.ll
index 4ce0f89841c..896b6d25cf3 100644
--- a/test/CodeGen/X86/atomic128.ll
+++ b/test/CodeGen/X86/atomic128.ll
@@ -8,7 +8,7 @@
; register live-ranges, we end up with a useless copy.
define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
; CHECK-LABEL: val_compare_and_swap:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -26,7 +26,7 @@ define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
define void @fetch_and_nand(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_nand:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -44,7 +44,7 @@ define void @fetch_and_nand(i128* %p, i128 %bits) {
; CHECK-NEXT: notq %rcx
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB1_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -56,7 +56,7 @@ define void @fetch_and_nand(i128* %p, i128 %bits) {
define void @fetch_and_or(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_or:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -72,7 +72,7 @@ define void @fetch_and_or(i128* %p, i128 %bits) {
; CHECK-NEXT: orq %r8, %rcx
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB2_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -84,7 +84,7 @@ define void @fetch_and_or(i128* %p, i128 %bits) {
define void @fetch_and_add(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_add:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -100,7 +100,7 @@ define void @fetch_and_add(i128* %p, i128 %bits) {
; CHECK-NEXT: adcq %r8, %rcx
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB3_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -112,7 +112,7 @@ define void @fetch_and_add(i128* %p, i128 %bits) {
define void @fetch_and_sub(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_sub:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -128,7 +128,7 @@ define void @fetch_and_sub(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %r8, %rcx
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB4_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -140,7 +140,7 @@ define void @fetch_and_sub(i128* %p, i128 %bits) {
define void @fetch_and_min(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_min:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -159,7 +159,7 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
; CHECK-NEXT: cmovgeq %rax, %rbx
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB5_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -171,7 +171,7 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
define void @fetch_and_max(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_max:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -190,7 +190,7 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
; CHECK-NEXT: cmovgeq %rax, %rbx
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB6_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -202,7 +202,7 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
define void @fetch_and_umin(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_umin:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -221,7 +221,7 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
; CHECK-NEXT: cmovaeq %rax, %rbx
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB7_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -233,7 +233,7 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
define void @fetch_and_umax(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_umax:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -252,7 +252,7 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
; CHECK-NEXT: cmovbq %rax, %rbx
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB8_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -264,7 +264,7 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
define i128 @atomic_load_seq_cst(i128* %p) {
; CHECK-LABEL: atomic_load_seq_cst:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -281,7 +281,7 @@ define i128 @atomic_load_seq_cst(i128* %p) {
define i128 @atomic_load_relaxed(i128* %p) {
; CHECK-LABEL: atomic_load_relaxed:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -298,7 +298,7 @@ define i128 @atomic_load_relaxed(i128* %p) {
define void @atomic_store_seq_cst(i128* %p, i128 %in) {
; CHECK-LABEL: atomic_store_seq_cst:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -311,7 +311,7 @@ define void @atomic_store_seq_cst(i128* %p, i128 %in) {
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB11_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: retq
store atomic i128 %in, i128* %p seq_cst, align 16
@@ -320,7 +320,7 @@ define void @atomic_store_seq_cst(i128* %p, i128 %in) {
define void @atomic_store_release(i128* %p, i128 %in) {
; CHECK-LABEL: atomic_store_release:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -333,7 +333,7 @@ define void @atomic_store_release(i128* %p, i128 %in) {
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB12_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: retq
store atomic i128 %in, i128* %p release, align 16
@@ -342,7 +342,7 @@ define void @atomic_store_release(i128* %p, i128 %in) {
define void @atomic_store_relaxed(i128* %p, i128 %in) {
; CHECK-LABEL: atomic_store_relaxed:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbx, -16
@@ -355,7 +355,7 @@ define void @atomic_store_relaxed(i128* %p, i128 %in) {
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB13_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## %bb.2: ## %atomicrmw.end
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: retq
store atomic i128 %in, i128* %p unordered, align 16
diff --git a/test/CodeGen/X86/avg-mask.ll b/test/CodeGen/X86/avg-mask.ll
index f5944949b68..4eacbdd4ccb 100644
--- a/test/CodeGen/X86/avg-mask.ll
+++ b/test/CodeGen/X86/avg-mask.ll
@@ -4,7 +4,7 @@
define <16 x i8> @avg_v16i8_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %src, i16 %mask) nounwind {
; AVX512F-LABEL: avg_v16i8_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgb %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -14,7 +14,7 @@ define <16 x i8> @avg_v16i8_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %src, i16
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v16i8_mask:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgb %xmm1, %xmm0, %xmm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa %xmm2, %xmm0
@@ -32,7 +32,7 @@ define <16 x i8> @avg_v16i8_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %src, i16
define <16 x i8> @avg_v16i8_maskz(<16 x i8> %a, <16 x i8> %b, i16 %mask) nounwind {
; AVX512F-LABEL: avg_v16i8_maskz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgb %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -42,7 +42,7 @@ define <16 x i8> @avg_v16i8_maskz(<16 x i8> %a, <16 x i8> %b, i16 %mask) nounwin
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v16i8_maskz:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgb %xmm1, %xmm0, %xmm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
@@ -59,7 +59,7 @@ define <16 x i8> @avg_v16i8_maskz(<16 x i8> %a, <16 x i8> %b, i16 %mask) nounwin
define <32 x i8> @avg_v32i8_mask(<32 x i8> %a, <32 x i8> %b, <32 x i8> %src, i32 %mask) nounwind {
; AVX512F-LABEL: avg_v32i8_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -79,7 +79,7 @@ define <32 x i8> @avg_v32i8_mask(<32 x i8> %a, <32 x i8> %b, <32 x i8> %src, i32
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v32i8_mask:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgb %ymm1, %ymm0, %ymm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa %ymm2, %ymm0
@@ -97,7 +97,7 @@ define <32 x i8> @avg_v32i8_mask(<32 x i8> %a, <32 x i8> %b, <32 x i8> %src, i32
define <32 x i8> @avg_v32i8_maskz(<32 x i8> %a, <32 x i8> %b, i32 %mask) nounwind {
; AVX512F-LABEL: avg_v32i8_maskz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -117,7 +117,7 @@ define <32 x i8> @avg_v32i8_maskz(<32 x i8> %a, <32 x i8> %b, i32 %mask) nounwin
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v32i8_maskz:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgb %ymm1, %ymm0, %ymm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
@@ -134,7 +134,7 @@ define <32 x i8> @avg_v32i8_maskz(<32 x i8> %a, <32 x i8> %b, i32 %mask) nounwin
define <64 x i8> @avg_v64i8_mask(<64 x i8> %a, <64 x i8> %b, <64 x i8> %src, i64 %mask) nounwind {
; AVX512F-LABEL: avg_v64i8_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -174,7 +174,7 @@ define <64 x i8> @avg_v64i8_mask(<64 x i8> %a, <64 x i8> %b, <64 x i8> %src, i64
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v64i8_mask:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovq %rdi, %k1
; AVX512BWVL-NEXT: vpavgb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -192,7 +192,7 @@ define <64 x i8> @avg_v64i8_mask(<64 x i8> %a, <64 x i8> %b, <64 x i8> %src, i64
define <64 x i8> @avg_v64i8_maskz(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind {
; AVX512F-LABEL: avg_v64i8_maskz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -232,7 +232,7 @@ define <64 x i8> @avg_v64i8_maskz(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwin
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v64i8_maskz:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovq %rdi, %k1
; AVX512BWVL-NEXT: vpavgb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
@@ -249,7 +249,7 @@ define <64 x i8> @avg_v64i8_maskz(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwin
define <8 x i16> @avg_v8i16_mask(<8 x i16> %a, <8 x i16> %b, <8 x i16> %src, i8 %mask) nounwind {
; AVX512F-LABEL: avg_v8i16_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -259,7 +259,7 @@ define <8 x i16> @avg_v8i16_mask(<8 x i16> %a, <8 x i16> %b, <8 x i16> %src, i8
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v8i16_mask:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %xmm1, %xmm0, %xmm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa %xmm2, %xmm0
@@ -277,7 +277,7 @@ define <8 x i16> @avg_v8i16_mask(<8 x i16> %a, <8 x i16> %b, <8 x i16> %src, i8
define <8 x i16> @avg_v8i16_maskz(<8 x i16> %a, <8 x i16> %b, i8 %mask) nounwind {
; AVX512F-LABEL: avg_v8i16_maskz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -287,7 +287,7 @@ define <8 x i16> @avg_v8i16_maskz(<8 x i16> %a, <8 x i16> %b, i8 %mask) nounwind
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v8i16_maskz:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %xmm1, %xmm0, %xmm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
@@ -304,7 +304,7 @@ define <8 x i16> @avg_v8i16_maskz(<8 x i16> %a, <8 x i16> %b, i8 %mask) nounwind
define <16 x i16> @avg_v16i16_mask(<16 x i16> %a, <16 x i16> %b, <16 x i16> %src, i16 %mask) nounwind {
; AVX512F-LABEL: avg_v16i16_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -313,7 +313,7 @@ define <16 x i16> @avg_v16i16_mask(<16 x i16> %a, <16 x i16> %b, <16 x i16> %src
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v16i16_mask:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %ymm1, %ymm0, %ymm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa %ymm2, %ymm0
@@ -331,7 +331,7 @@ define <16 x i16> @avg_v16i16_mask(<16 x i16> %a, <16 x i16> %b, <16 x i16> %src
define <16 x i16> @avg_v16i16_maskz(<16 x i16> %a, <16 x i16> %b, i16 %mask) nounwind {
; AVX512F-LABEL: avg_v16i16_maskz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -340,7 +340,7 @@ define <16 x i16> @avg_v16i16_maskz(<16 x i16> %a, <16 x i16> %b, i16 %mask) nou
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v16i16_maskz:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %ymm1, %ymm0, %ymm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
@@ -357,7 +357,7 @@ define <16 x i16> @avg_v16i16_maskz(<16 x i16> %a, <16 x i16> %b, i16 %mask) nou
define <32 x i16> @avg_v32i16_mask(<32 x i16> %a, <32 x i16> %b, <32 x i16> %src, i32 %mask) nounwind {
; AVX512F-LABEL: avg_v32i16_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -384,7 +384,7 @@ define <32 x i16> @avg_v32i16_mask(<32 x i16> %a, <32 x i16> %b, <32 x i16> %src
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v32i16_mask:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -402,7 +402,7 @@ define <32 x i16> @avg_v32i16_mask(<32 x i16> %a, <32 x i16> %b, <32 x i16> %src
define <32 x i16> @avg_v32i16_maskz(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind {
; AVX512F-LABEL: avg_v32i16_maskz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -429,7 +429,7 @@ define <32 x i16> @avg_v32i16_maskz(<32 x i16> %a, <32 x i16> %b, i32 %mask) nou
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v32i16_maskz:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll
index 14494779f10..d1e26b787f4 100644
--- a/test/CodeGen/X86/avg.ll
+++ b/test/CodeGen/X86/avg.ll
@@ -7,7 +7,7 @@
define void @avg_v4i8(<4 x i8>* %a, <4 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v4i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: pavgb %xmm0, %xmm1
@@ -15,7 +15,7 @@ define void @avg_v4i8(<4 x i8>* %a, <4 x i8>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v4i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0
@@ -35,7 +35,7 @@ define void @avg_v4i8(<4 x i8>* %a, <4 x i8>* %b) nounwind {
define void @avg_v8i8(<8 x i8>* %a, <8 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v8i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pavgb %xmm0, %xmm1
@@ -43,7 +43,7 @@ define void @avg_v8i8(<8 x i8>* %a, <8 x i8>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v8i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0
@@ -63,14 +63,14 @@ define void @avg_v8i8(<8 x i8>* %a, <8 x i8>* %b) nounwind {
define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: pavgb (%rdi), %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rsi), %xmm0
; AVX-NEXT: vpavgb (%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqu %xmm0, (%rax)
@@ -89,7 +89,7 @@ define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm3
; SSE2-NEXT: movdqa 16(%rdi), %xmm8
; SSE2-NEXT: movdqa (%rsi), %xmm0
@@ -176,7 +176,7 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -239,7 +239,7 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rsi), %ymm0
; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
@@ -247,7 +247,7 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: avg_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rsi), %ymm0
; AVX512-NEXT: vpavgb (%rdi), %ymm0, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, (%rax)
@@ -267,7 +267,7 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind {
define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v64i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm6
; SSE2-NEXT: movdqa 16(%rdi), %xmm2
; SSE2-NEXT: movdqa 32(%rdi), %xmm1
@@ -448,7 +448,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: subq $24, %rsp
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -593,7 +593,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -673,7 +673,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: avg_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
@@ -707,7 +707,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgb (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
@@ -727,7 +727,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
define void @avg_v4i16(<4 x i16>* %a, <4 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v4i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pavgw %xmm0, %xmm1
@@ -735,7 +735,7 @@ define void @avg_v4i16(<4 x i16>* %a, <4 x i16>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v4i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpavgw %xmm0, %xmm1, %xmm0
@@ -755,14 +755,14 @@ define void @avg_v4i16(<4 x i16>* %a, <4 x i16>* %b) nounwind {
define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: pavgw (%rdi), %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rsi), %xmm0
; AVX-NEXT: vpavgw (%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqu %xmm0, (%rax)
@@ -781,7 +781,7 @@ define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm2
; SSE2-NEXT: movdqa 16(%rdi), %xmm4
; SSE2-NEXT: movdqa (%rsi), %xmm0
@@ -827,7 +827,7 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -862,7 +862,7 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rsi), %ymm0
; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
@@ -870,7 +870,7 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: avg_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rsi), %ymm0
; AVX512-NEXT: vpavgw (%rdi), %ymm0, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, (%rax)
@@ -890,7 +890,7 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind {
define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v32i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm4
; SSE2-NEXT: movdqa 16(%rdi), %xmm11
; SSE2-NEXT: movdqa 32(%rdi), %xmm10
@@ -976,7 +976,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1039,7 +1039,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -1078,7 +1078,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: avg_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -1096,7 +1096,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
@@ -1116,7 +1116,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
define void @avg_v4i8_2(<4 x i8>* %a, <4 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v4i8_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: pavgb %xmm0, %xmm1
@@ -1124,7 +1124,7 @@ define void @avg_v4i8_2(<4 x i8>* %a, <4 x i8>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v4i8_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
@@ -1144,7 +1144,7 @@ define void @avg_v4i8_2(<4 x i8>* %a, <4 x i8>* %b) nounwind {
define void @avg_v8i8_2(<8 x i8>* %a, <8 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v8i8_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pavgb %xmm0, %xmm1
@@ -1152,7 +1152,7 @@ define void @avg_v8i8_2(<8 x i8>* %a, <8 x i8>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v8i8_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
@@ -1172,14 +1172,14 @@ define void @avg_v8i8_2(<8 x i8>* %a, <8 x i8>* %b) nounwind {
define void @avg_v16i8_2(<16 x i8>* %a, <16 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v16i8_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pavgb (%rsi), %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v16i8_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpavgb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vmovdqu %xmm0, (%rax)
@@ -1198,7 +1198,7 @@ define void @avg_v16i8_2(<16 x i8>* %a, <16 x i8>* %b) nounwind {
define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v32i8_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm3
; SSE2-NEXT: movdqa 16(%rdi), %xmm8
; SSE2-NEXT: movdqa (%rsi), %xmm0
@@ -1285,7 +1285,7 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i8_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -1348,7 +1348,7 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v32i8_2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
@@ -1356,7 +1356,7 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: avg_v32i8_2:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, (%rax)
@@ -1376,7 +1376,7 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) nounwind {
define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v64i8_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rsi), %xmm14
; SSE2-NEXT: movdqa 16(%rsi), %xmm12
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
@@ -1510,7 +1510,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -1627,7 +1627,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v64i8_2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -1699,7 +1699,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: avg_v64i8_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
@@ -1729,7 +1729,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v64i8_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgb %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
@@ -1750,7 +1750,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind {
define void @avg_v4i16_2(<4 x i16>* %a, <4 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v4i16_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pavgw %xmm0, %xmm1
@@ -1758,7 +1758,7 @@ define void @avg_v4i16_2(<4 x i16>* %a, <4 x i16>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v4i16_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
@@ -1778,14 +1778,14 @@ define void @avg_v4i16_2(<4 x i16>* %a, <4 x i16>* %b) nounwind {
define void @avg_v8i16_2(<8 x i16>* %a, <8 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v8i16_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pavgw (%rsi), %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v8i16_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpavgw (%rsi), %xmm0, %xmm0
; AVX-NEXT: vmovdqu %xmm0, (%rax)
@@ -1804,7 +1804,7 @@ define void @avg_v8i16_2(<8 x i16>* %a, <8 x i16>* %b) nounwind {
define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v16i16_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm2
; SSE2-NEXT: movdqa 16(%rdi), %xmm4
; SSE2-NEXT: movdqa (%rsi), %xmm0
@@ -1850,7 +1850,7 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v16i16_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1885,7 +1885,7 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v16i16_2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
@@ -1893,7 +1893,7 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: avg_v16i16_2:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vpavgw (%rsi), %ymm0, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, (%rax)
@@ -1913,7 +1913,7 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) nounwind {
define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v32i16_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm4
; SSE2-NEXT: movdqa 16(%rdi), %xmm11
; SSE2-NEXT: movdqa 32(%rdi), %xmm10
@@ -1999,7 +1999,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i16_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -2062,7 +2062,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v32i16_2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -2101,7 +2101,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: avg_v32i16_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -2119,7 +2119,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i16_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
@@ -2139,14 +2139,14 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind {
define void @avg_v4i8_const(<4 x i8>* %a) nounwind {
; SSE2-LABEL: avg_v4i8_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pavgb {{.*}}(%rip), %xmm0
; SSE2-NEXT: movd %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v4i8_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, (%rax)
@@ -2162,14 +2162,14 @@ define void @avg_v4i8_const(<4 x i8>* %a) nounwind {
define void @avg_v8i8_const(<8 x i8>* %a) nounwind {
; SSE2-LABEL: avg_v8i8_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pavgb {{.*}}(%rip), %xmm0
; SSE2-NEXT: movq %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v8i8_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, (%rax)
@@ -2185,14 +2185,14 @@ define void @avg_v8i8_const(<8 x i8>* %a) nounwind {
define void @avg_v16i8_const(<16 x i8>* %a) nounwind {
; SSE2-LABEL: avg_v16i8_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pavgb {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v16i8_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovdqu %xmm0, (%rax)
@@ -2208,7 +2208,7 @@ define void @avg_v16i8_const(<16 x i8>* %a) nounwind {
define void @avg_v32i8_const(<32 x i8>* %a) nounwind {
; SSE2-LABEL: avg_v32i8_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: movdqa 16(%rdi), %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -2259,7 +2259,7 @@ define void @avg_v32i8_const(<32 x i8>* %a) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i8_const:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -2298,7 +2298,7 @@ define void @avg_v32i8_const(<32 x i8>* %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v32i8_const:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
@@ -2306,7 +2306,7 @@ define void @avg_v32i8_const(<32 x i8>* %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: avg_v32i8_const:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, (%rax)
@@ -2323,7 +2323,7 @@ define void @avg_v32i8_const(<32 x i8>* %a) nounwind {
define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
; SSE2-LABEL: avg_v64i8_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm5
; SSE2-NEXT: movdqa 16(%rdi), %xmm6
; SSE2-NEXT: movdqa 32(%rdi), %xmm15
@@ -2442,7 +2442,7 @@ define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8_const:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
@@ -2535,7 +2535,7 @@ define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v64i8_const:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -2589,7 +2589,7 @@ define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: avg_v64i8_const:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
@@ -2616,7 +2616,7 @@ define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v64i8_const:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
@@ -2633,14 +2633,14 @@ define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
define void @avg_v4i16_const(<4 x i16>* %a) nounwind {
; SSE2-LABEL: avg_v4i16_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pavgw {{.*}}(%rip), %xmm0
; SSE2-NEXT: movq %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v4i16_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, (%rax)
@@ -2656,14 +2656,14 @@ define void @avg_v4i16_const(<4 x i16>* %a) nounwind {
define void @avg_v8i16_const(<8 x i16>* %a) nounwind {
; SSE2-LABEL: avg_v8i16_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pavgw {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v8i16_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovdqu %xmm0, (%rax)
@@ -2679,7 +2679,7 @@ define void @avg_v8i16_const(<8 x i16>* %a) nounwind {
define void @avg_v16i16_const(<16 x i16>* %a) nounwind {
; SSE2-LABEL: avg_v16i16_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm3
; SSE2-NEXT: movdqa 16(%rdi), %xmm0
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -2714,7 +2714,7 @@ define void @avg_v16i16_const(<16 x i16>* %a) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v16i16_const:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -2737,7 +2737,7 @@ define void @avg_v16i16_const(<16 x i16>* %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v16i16_const:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
@@ -2745,7 +2745,7 @@ define void @avg_v16i16_const(<16 x i16>* %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: avg_v16i16_const:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, (%rax)
@@ -2762,7 +2762,7 @@ define void @avg_v16i16_const(<16 x i16>* %a) nounwind {
define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
; SSE2-LABEL: avg_v32i16_const:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm7
; SSE2-NEXT: movdqa 16(%rdi), %xmm6
; SSE2-NEXT: movdqa 32(%rdi), %xmm4
@@ -2825,7 +2825,7 @@ define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i16_const:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -2864,7 +2864,7 @@ define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v32i16_const:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -2894,7 +2894,7 @@ define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: avg_v32i16_const:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [1,2,3,4,5,6,7,8,1,2,3,4,5,6,7,8]
@@ -2909,7 +2909,7 @@ define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i16_const:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
@@ -2926,12 +2926,12 @@ define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
define <16 x i8> @avg_v16i8_3(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: avg_v16i8_3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pavgb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v16i8_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%za = zext <16 x i8> %a to <16 x i16>
@@ -2945,7 +2945,7 @@ define <16 x i8> @avg_v16i8_3(<16 x i8> %a, <16 x i8> %b) nounwind {
define <32 x i8> @avg_v32i8_3(<32 x i8> %a, <32 x i8> %b) nounwind {
; SSE2-LABEL: avg_v32i8_3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
@@ -2982,7 +2982,7 @@ define <32 x i8> @avg_v32i8_3(<32 x i8> %a, <32 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i8_3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
@@ -3021,12 +3021,12 @@ define <32 x i8> @avg_v32i8_3(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v32i8_3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: avg_v32i8_3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%za = zext <32 x i8> %a to <32 x i16>
@@ -3040,7 +3040,7 @@ define <32 x i8> @avg_v32i8_3(<32 x i8> %a, <32 x i8> %b) nounwind {
define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind {
; SSE2-LABEL: avg_v64i8_3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm9, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
@@ -3107,7 +3107,7 @@ define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8_3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
@@ -3179,7 +3179,7 @@ define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v64i8_3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -3227,7 +3227,7 @@ define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: avg_v64i8_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX512F-NEXT: vextracti128 $1, %ymm3, %xmm6
@@ -3241,7 +3241,7 @@ define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v64i8_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpavgb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%za = zext <64 x i8> %a to <64 x i16>
diff --git a/test/CodeGen/X86/avx-arith.ll b/test/CodeGen/X86/avx-arith.ll
index 82d890a08cf..6f535038bb2 100644
--- a/test/CodeGen/X86/avx-arith.ll
+++ b/test/CodeGen/X86/avx-arith.ll
@@ -3,7 +3,7 @@
define <4 x double> @addpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: addpd256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -13,7 +13,7 @@ entry:
define <4 x double> @addpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: addpd256fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -23,7 +23,7 @@ entry:
define <8 x float> @addps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: addps256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -33,7 +33,7 @@ entry:
define <8 x float> @addps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: addps256fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -43,7 +43,7 @@ entry:
define <4 x double> @subpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: subpd256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vsubpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -53,7 +53,7 @@ entry:
define <4 x double> @subpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
; CHECK-LABEL: subpd256fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vsubpd (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -64,7 +64,7 @@ entry:
define <8 x float> @subps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: subps256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vsubps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -74,7 +74,7 @@ entry:
define <8 x float> @subps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
; CHECK-LABEL: subps256fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vsubps (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -85,7 +85,7 @@ entry:
define <4 x double> @mulpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: mulpd256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vmulpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -95,7 +95,7 @@ entry:
define <4 x double> @mulpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: mulpd256fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -105,7 +105,7 @@ entry:
define <8 x float> @mulps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: mulps256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vmulps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -115,7 +115,7 @@ entry:
define <8 x float> @mulps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: mulps256fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -125,7 +125,7 @@ entry:
define <4 x double> @divpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: divpd256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vdivpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -135,7 +135,7 @@ entry:
define <4 x double> @divpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: divpd256fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vdivpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -145,7 +145,7 @@ entry:
define <8 x float> @divps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: divps256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vdivps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -155,7 +155,7 @@ entry:
define <8 x float> @divps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: divps256fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vdivps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -165,7 +165,7 @@ entry:
define float @sqrtA(float %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtA:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -177,7 +177,7 @@ declare double @sqrt(double) readnone
define double @sqrtB(double %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtB:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -190,7 +190,7 @@ declare float @sqrtf(float) readnone
define <4 x i64> @vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm2
@@ -203,7 +203,7 @@ define <4 x i64> @vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpaddd %xmm2, %xmm3, %xmm2
@@ -216,7 +216,7 @@ define <8 x i32> @vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; CHECK-LABEL: vpaddw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpaddw %xmm2, %xmm3, %xmm2
@@ -229,7 +229,7 @@ define <16 x i16> @vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <32 x i8> @vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; CHECK-LABEL: vpaddb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpaddb %xmm2, %xmm3, %xmm2
@@ -242,7 +242,7 @@ define <32 x i8> @vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <4 x i64> @vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpsubq %xmm2, %xmm3, %xmm2
@@ -255,7 +255,7 @@ define <4 x i64> @vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpsubd %xmm2, %xmm3, %xmm2
@@ -268,7 +268,7 @@ define <8 x i32> @vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; CHECK-LABEL: vpsubw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
@@ -281,7 +281,7 @@ define <16 x i16> @vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <32 x i8> @vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; CHECK-LABEL: vpsubb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
@@ -294,7 +294,7 @@ define <32 x i8> @vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <8 x i32> @vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpmulld:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpmulld %xmm2, %xmm3, %xmm2
@@ -307,7 +307,7 @@ define <8 x i32> @vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; CHECK-LABEL: vpmullw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpmullw %xmm2, %xmm3, %xmm2
@@ -320,7 +320,7 @@ define <16 x i16> @vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK-LABEL: mul_v4i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpsrlq $32, %xmm3, %xmm4
@@ -349,7 +349,7 @@ declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
define <4 x float> @int_sqrt_ss() {
; CHECK-LABEL: int_sqrt_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -361,7 +361,7 @@ define <4 x float> @int_sqrt_ss() {
define <2 x double> @vector_sqrt_scalar_load(double* %a0) optsize {
; CHECK-LABEL: vector_sqrt_scalar_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vsqrtpd %xmm0, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx-basic.ll b/test/CodeGen/X86/avx-basic.ll
index dc386415934..d27a641203f 100644
--- a/test/CodeGen/X86/avx-basic.ll
+++ b/test/CodeGen/X86/avx-basic.ll
@@ -7,7 +7,7 @@
define void @zero128() nounwind ssp {
; CHECK-LABEL: zero128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: movq _z@{{.*}}(%rip), %rax
; CHECK-NEXT: vmovaps %xmm0, (%rax)
@@ -18,7 +18,7 @@ define void @zero128() nounwind ssp {
define void @zero256() nounwind ssp {
; CHECK-LABEL: zero256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq _x@{{.*}}(%rip), %rax
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovaps %ymm0, (%rax)
@@ -33,7 +33,7 @@ define void @zero256() nounwind ssp {
define void @ones([0 x float]* nocapture %RET, [0 x float]* nocapture %aFOO) nounwind {
; CHECK-LABEL: ones:
-; CHECK: ## BB#0: ## %allocas
+; CHECK: ## %bb.0: ## %allocas
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; CHECK-NEXT: vmovaps %ymm0, (%rdi)
@@ -50,7 +50,7 @@ float>* %ptr2vec615, align 32
define void @ones2([0 x i32]* nocapture %RET, [0 x i32]* nocapture %aFOO) nounwind {
; CHECK-LABEL: ones2:
-; CHECK: ## BB#0: ## %allocas
+; CHECK: ## %bb.0: ## %allocas
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vcmptrueps %ymm0, %ymm0, %ymm0
; CHECK-NEXT: vmovaps %ymm0, (%rdi)
@@ -65,7 +65,7 @@ allocas:
;;; Just make sure this doesn't crash
define <4 x i64> @ISelCrash(<4 x i64> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: ISelCrash:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4>
@@ -75,7 +75,7 @@ define <4 x i64> @ISelCrash(<4 x i64> %a) nounwind uwtable readnone ssp {
;;; Don't crash on movd
define <8 x i32> @VMOVZQI2PQI([0 x float]* nocapture %aFOO) nounwind {
; CHECK-LABEL: VMOVZQI2PQI:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; CHECK-NEXT: retq
@@ -92,7 +92,7 @@ define <8 x i32> @VMOVZQI2PQI([0 x float]* nocapture %aFOO) nounwind {
; rdar://10566486
define <16 x float> @fneg(<16 x float> %a) nounwind {
; CHECK-LABEL: fneg:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
; CHECK-NEXT: vxorps %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vxorps %ymm2, %ymm1, %ymm1
@@ -104,7 +104,7 @@ define <16 x float> @fneg(<16 x float> %a) nounwind {
;;; Don't crash on build vector
define <16 x i16> @build_vec_16x16(i16 %a) nounwind readonly {
; CHECK-LABEL: build_vec_16x16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movzwl %di, %eax
; CHECK-NEXT: vmovd %eax, %xmm0
; CHECK-NEXT: retq
@@ -116,7 +116,7 @@ define <16 x i16> @build_vec_16x16(i16 %a) nounwind readonly {
;;; an incorrect mnemonic of "movd" was printed for this instruction.
define i64 @VMOVPQIto64rr(<2 x i64> %a) {
; CHECK-LABEL: VMOVPQIto64rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: retq
%vecext.i = extractelement <2 x i64> %a, i32 0
@@ -126,7 +126,7 @@ define i64 @VMOVPQIto64rr(<2 x i64> %a) {
; PR22685
define <8 x float> @mov00_8f32(float* %ptr) {
; CHECK-LABEL: mov00_8f32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq
%val = load float, float* %ptr
diff --git a/test/CodeGen/X86/avx-bitcast.ll b/test/CodeGen/X86/avx-bitcast.ll
index e34c20fcbd7..150c7ccfa0c 100644
--- a/test/CodeGen/X86/avx-bitcast.ll
+++ b/test/CodeGen/X86/avx-bitcast.ll
@@ -2,7 +2,7 @@
define i64 @bitcasti64tof64() {
; CHECK-LABEL: bitcasti64tof64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx-cast.ll b/test/CodeGen/X86/avx-cast.ll
index a3d42ef051d..2f332592506 100644
--- a/test/CodeGen/X86/avx-cast.ll
+++ b/test/CodeGen/X86/avx-cast.ll
@@ -8,7 +8,7 @@
define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castA:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
@@ -19,7 +19,7 @@ define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castB:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
@@ -32,14 +32,14 @@ define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castC:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: castC:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
@@ -53,7 +53,7 @@ define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castD:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -63,7 +63,7 @@ define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castE:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -73,7 +73,7 @@ define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castF:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/avx-cmp.ll b/test/CodeGen/X86/avx-cmp.ll
index be46b1bb3a3..637101a2c77 100644
--- a/test/CodeGen/X86/avx-cmp.ll
+++ b/test/CodeGen/X86/avx-cmp.ll
@@ -3,7 +3,7 @@
define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind {
; CHECK-LABEL: cmp00:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%bincmp = fcmp olt <8 x float> %a, %b
@@ -13,7 +13,7 @@ define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind {
define <4 x i64> @cmp01(<4 x double> %a, <4 x double> %b) nounwind {
; CHECK-LABEL: cmp01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpltpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%bincmp = fcmp olt <4 x double> %a, %b
@@ -25,12 +25,12 @@ declare void @scale() nounwind
define void @render() nounwind {
; CHECK-LABEL: render:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB2_6
-; CHECK-NEXT: # BB#1: # %for.cond5.preheader
+; CHECK-NEXT: # %bb.1: # %for.cond5.preheader
; CHECK-NEXT: xorl %ebx, %ebx
; CHECK-NEXT: jmp .LBB2_2
; CHECK-NEXT: .p2align 4, 0x90
@@ -41,11 +41,11 @@ define void @render() nounwind {
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: testb %bl, %bl
; CHECK-NEXT: jne .LBB2_2
-; CHECK-NEXT: # BB#3: # %for.cond5
+; CHECK-NEXT: # %bb.3: # %for.cond5
; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
; CHECK-NEXT: testb %bl, %bl
; CHECK-NEXT: je .LBB2_2
-; CHECK-NEXT: # BB#4: # %for.body33
+; CHECK-NEXT: # %bb.4: # %for.body33
; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
; CHECK-NEXT: vucomisd {{\.LCPI.*}}, %xmm0
; CHECK-NEXT: jne .LBB2_5
@@ -78,7 +78,7 @@ for.end52:
define <8 x i32> @int256_cmp(<8 x i32> %i, <8 x i32> %j) nounwind {
; CHECK-LABEL: int256_cmp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
; CHECK-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -92,7 +92,7 @@ define <8 x i32> @int256_cmp(<8 x i32> %i, <8 x i32> %j) nounwind {
define <4 x i64> @v4i64_cmp(<4 x i64> %i, <4 x i64> %j) nounwind {
; CHECK-LABEL: v4i64_cmp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
; CHECK-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -106,7 +106,7 @@ define <4 x i64> @v4i64_cmp(<4 x i64> %i, <4 x i64> %j) nounwind {
define <16 x i16> @v16i16_cmp(<16 x i16> %i, <16 x i16> %j) nounwind {
; CHECK-LABEL: v16i16_cmp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
; CHECK-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -120,7 +120,7 @@ define <16 x i16> @v16i16_cmp(<16 x i16> %i, <16 x i16> %j) nounwind {
define <32 x i8> @v32i8_cmp(<32 x i8> %i, <32 x i8> %j) nounwind {
; CHECK-LABEL: v32i8_cmp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
; CHECK-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -134,7 +134,7 @@ define <32 x i8> @v32i8_cmp(<32 x i8> %i, <32 x i8> %j) nounwind {
define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind {
; CHECK-LABEL: int256_cmpeq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm3, %xmm2
@@ -148,7 +148,7 @@ define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind {
define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind {
; CHECK-LABEL: v4i64_cmpeq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpcmpeqq %xmm2, %xmm3, %xmm2
@@ -162,7 +162,7 @@ define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind {
define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind {
; CHECK-LABEL: v16i16_cmpeq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm3, %xmm2
@@ -176,7 +176,7 @@ define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind {
define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind {
; CHECK-LABEL: v32i8_cmpeq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm3, %xmm2
@@ -192,7 +192,7 @@ define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind {
define i32 @scalarcmpA() uwtable ssp {
; CHECK-LABEL: scalarcmpA:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vcmpeqsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovq %xmm0, %rax
@@ -206,7 +206,7 @@ define i32 @scalarcmpA() uwtable ssp {
define i32 @scalarcmpB() uwtable ssp {
; CHECK-LABEL: scalarcmpB:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vcmpeqss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %eax
diff --git a/test/CodeGen/X86/avx-cvt-2.ll b/test/CodeGen/X86/avx-cvt-2.ll
index f38127960bf..7c2df3e9962 100644
--- a/test/CodeGen/X86/avx-cvt-2.ll
+++ b/test/CodeGen/X86/avx-cvt-2.ll
@@ -9,7 +9,7 @@
define void @fptoui16(%f32vec_t %a, %i16vec_t *%p) {
; CHECK-LABEL: fptoui16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
@@ -23,7 +23,7 @@ define void @fptoui16(%f32vec_t %a, %i16vec_t *%p) {
define void @fptosi16(%f32vec_t %a, %i16vec_t *%p) {
; CHECK-LABEL: fptosi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -37,7 +37,7 @@ define void @fptosi16(%f32vec_t %a, %i16vec_t *%p) {
define void @fptoui8(%f32vec_t %a, %i8vec_t *%p) {
; CHECK-LABEL: fptoui8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
@@ -52,7 +52,7 @@ define void @fptoui8(%f32vec_t %a, %i8vec_t *%p) {
define void @fptosi8(%f32vec_t %a, %i8vec_t *%p) {
; CHECK-LABEL: fptosi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/avx-cvt-3.ll b/test/CodeGen/X86/avx-cvt-3.ll
index e4f29b5d448..ac99684ab3a 100644
--- a/test/CodeGen/X86/avx-cvt-3.ll
+++ b/test/CodeGen/X86/avx-cvt-3.ll
@@ -6,14 +6,14 @@
define <8 x float> @sitofp_insert_zero_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_insert_zero_v8i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: sitofp_insert_zero_v8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
@@ -28,14 +28,14 @@ define <8 x float> @sitofp_insert_zero_v8i32(<8 x i32> %a0) {
define <8 x float> @sitofp_shuffle_zero_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_shuffle_zero_v8i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: sitofp_shuffle_zero_v8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
@@ -47,7 +47,7 @@ define <8 x float> @sitofp_shuffle_zero_v8i32(<8 x i32> %a0) {
define <8 x float> @sitofp_insert_allbits_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_insert_allbits_v8i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
@@ -55,7 +55,7 @@ define <8 x float> @sitofp_insert_allbits_v8i32(<8 x i32> %a0) {
; X86-NEXT: retl
;
; X64-LABEL: sitofp_insert_allbits_v8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
@@ -71,7 +71,7 @@ define <8 x float> @sitofp_insert_allbits_v8i32(<8 x i32> %a0) {
define <8 x float> @sitofp_shuffle_allbits_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_shuffle_allbits_v8i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
@@ -79,7 +79,7 @@ define <8 x float> @sitofp_shuffle_allbits_v8i32(<8 x i32> %a0) {
; X86-NEXT: retl
;
; X64-LABEL: sitofp_shuffle_allbits_v8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
@@ -92,7 +92,7 @@ define <8 x float> @sitofp_shuffle_allbits_v8i32(<8 x i32> %a0) {
define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_insert_constants_v8i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
@@ -107,7 +107,7 @@ define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
; X86-NEXT: retl
;
; X64-LABEL: sitofp_insert_constants_v8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
@@ -130,13 +130,13 @@ define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
define <8 x float> @sitofp_shuffle_constants_v8i32(<8 x i32> %a0) {
; X86-LABEL: sitofp_shuffle_constants_v8i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vblendps {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7]
; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: sitofp_shuffle_constants_v8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vblendps {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7]
; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/avx-cvt.ll b/test/CodeGen/X86/avx-cvt.ll
index f2900dba938..0a6ba2f84fa 100644
--- a/test/CodeGen/X86/avx-cvt.ll
+++ b/test/CodeGen/X86/avx-cvt.ll
@@ -3,7 +3,7 @@
define <8 x float> @sitofp00(<8 x i32> %a) nounwind {
; CHECK-LABEL: sitofp00:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
; CHECK-NEXT: retq
%b = sitofp <8 x i32> %a to <8 x float>
@@ -12,7 +12,7 @@ define <8 x float> @sitofp00(<8 x i32> %a) nounwind {
define <8 x i32> @fptosi00(<8 x float> %a) nounwind {
; CHECK-LABEL: fptosi00:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
; CHECK-NEXT: retq
%b = fptosi <8 x float> %a to <8 x i32>
@@ -21,7 +21,7 @@ define <8 x i32> @fptosi00(<8 x float> %a) nounwind {
define <4 x double> @sitofp01(<4 x i32> %a) {
; CHECK-LABEL: sitofp01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm0
; CHECK-NEXT: retq
%b = sitofp <4 x i32> %a to <4 x double>
@@ -30,7 +30,7 @@ define <4 x double> @sitofp01(<4 x i32> %a) {
define <8 x float> @sitofp02(<8 x i16> %a) {
; CHECK-LABEL: sitofp02:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxwd %xmm0, %xmm1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: vpmovsxwd %xmm0, %xmm0
@@ -43,7 +43,7 @@ define <8 x float> @sitofp02(<8 x i16> %a) {
define <4 x i32> @fptosi01(<4 x double> %a) {
; CHECK-LABEL: fptosi01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvttpd2dq %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -53,7 +53,7 @@ define <4 x i32> @fptosi01(<4 x double> %a) {
define <8 x float> @fptrunc00(<8 x double> %b) nounwind {
; CHECK-LABEL: fptrunc00:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtpd2ps %ymm0, %xmm0
; CHECK-NEXT: vcvtpd2ps %ymm1, %xmm1
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -64,7 +64,7 @@ define <8 x float> @fptrunc00(<8 x double> %b) nounwind {
define <4 x float> @fptrunc01(<2 x double> %a0, <4 x float> %a1) nounwind {
; CHECK-LABEL: fptrunc01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtsd2ss %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%ext = extractelement <2 x double> %a0, i32 0
@@ -75,7 +75,7 @@ define <4 x float> @fptrunc01(<2 x double> %a0, <4 x float> %a1) nounwind {
define <4 x double> @fpext00(<4 x float> %b) nounwind {
; CHECK-LABEL: fpext00:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtps2pd %xmm0, %ymm0
; CHECK-NEXT: retq
%a = fpext <4 x float> %b to <4 x double>
@@ -84,7 +84,7 @@ define <4 x double> @fpext00(<4 x float> %b) nounwind {
define <2 x double> @fpext01(<2 x double> %a0, <4 x float> %a1) nounwind {
; CHECK-LABEL: fpext01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%ext = extractelement <4 x float> %a1, i32 0
@@ -95,7 +95,7 @@ define <2 x double> @fpext01(<2 x double> %a0, <4 x float> %a1) nounwind {
define double @funcA(i64* nocapture %e) nounwind uwtable readonly ssp {
; CHECK-LABEL: funcA:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtsi2sdq (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%tmp1 = load i64, i64* %e, align 8
@@ -105,7 +105,7 @@ define double @funcA(i64* nocapture %e) nounwind uwtable readonly ssp {
define double @funcB(i32* nocapture %e) nounwind uwtable readonly ssp {
; CHECK-LABEL: funcB:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtsi2sdl (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%tmp1 = load i32, i32* %e, align 4
@@ -115,7 +115,7 @@ define double @funcB(i32* nocapture %e) nounwind uwtable readonly ssp {
define float @funcC(i32* nocapture %e) nounwind uwtable readonly ssp {
; CHECK-LABEL: funcC:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtsi2ssl (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%tmp1 = load i32, i32* %e, align 4
@@ -125,7 +125,7 @@ define float @funcC(i32* nocapture %e) nounwind uwtable readonly ssp {
define float @funcD(i64* nocapture %e) nounwind uwtable readonly ssp {
; CHECK-LABEL: funcD:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtsi2ssq (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%tmp1 = load i64, i64* %e, align 8
@@ -135,7 +135,7 @@ define float @funcD(i64* nocapture %e) nounwind uwtable readonly ssp {
define void @fpext() nounwind uwtable {
; CHECK-LABEL: fpext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
@@ -150,7 +150,7 @@ define void @fpext() nounwind uwtable {
define double @nearbyint_f64(double %a) {
; CHECK-LABEL: nearbyint_f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call double @llvm.nearbyint.f64(double %a)
@@ -160,7 +160,7 @@ declare double @llvm.nearbyint.f64(double %p)
define float @floor_f32(float %a) {
; CHECK-LABEL: floor_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call float @llvm.floor.f32(float %a)
diff --git a/test/CodeGen/X86/avx-gfni-intrinsics.ll b/test/CodeGen/X86/avx-gfni-intrinsics.ll
index b10d508b8cf..a59cfcccad2 100644
--- a/test/CodeGen/X86/avx-gfni-intrinsics.ll
+++ b/test/CodeGen/X86/avx-gfni-intrinsics.ll
@@ -4,7 +4,7 @@
declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: test_vgf2p8affineinvqb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgf2p8affineinvqb $11, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0xf9,0xcf,0xc1,0x0b]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
@@ -14,7 +14,7 @@ define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2) {
declare <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i8)
define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2) {
; CHECK-LABEL: test_vgf2p8affineinvqb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgf2p8affineinvqb $11, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0xfd,0xcf,0xc1,0x0b]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8> %src1, <32 x i8> %src2, i8 11)
@@ -24,7 +24,7 @@ define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2) {
declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
define <16 x i8> @test_vgf2p8affineqb(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: test_vgf2p8affineqb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgf2p8affineqb $11, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0xf9,0xce,0xc1,0x0b]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
@@ -34,7 +34,7 @@ define <16 x i8> @test_vgf2p8affineqb(<16 x i8> %src1, <16 x i8> %src2) {
declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2) {
; CHECK-LABEL: test_vgf2p8affineqb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgf2p8affineqb $11, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0xfd,0xce,0xc1,0x0b]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> %src1, <32 x i8> %src2, i8 11)
@@ -44,7 +44,7 @@ define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2) {
declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: test_vgf2p8mulb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgf2p8mulb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xcf,0xc1]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> %src2)
@@ -54,7 +54,7 @@ define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) {
declare <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8>, <32 x i8>)
define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2) {
; CHECK-LABEL: test_vgf2p8mulb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgf2p8mulb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0xcf,0xc1]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8> %src1, <32 x i8> %src2)
diff --git a/test/CodeGen/X86/avx-insertelt.ll b/test/CodeGen/X86/avx-insertelt.ll
index c159d689451..284a6d71e2d 100644
--- a/test/CodeGen/X86/avx-insertelt.ll
+++ b/test/CodeGen/X86/avx-insertelt.ll
@@ -19,13 +19,13 @@ define <4 x double> @insert_f64(<4 x double> %y, double %f, <4 x double> %x) {
define <32 x i8> @insert_i8(<32 x i8> %y, i8 %f, <32 x i8> %x) {
; AVX-LABEL: insert_i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrb $0, %edi, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: retq
;
; AVX2-LABEL: insert_i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpinsrb $0, %edi, %xmm0, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
@@ -35,13 +35,13 @@ define <32 x i8> @insert_i8(<32 x i8> %y, i8 %f, <32 x i8> %x) {
define <16 x i16> @insert_i16(<16 x i16> %y, i16 %f, <16 x i16> %x) {
; AVX-LABEL: insert_i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: retq
;
; AVX2-LABEL: insert_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
@@ -51,13 +51,13 @@ define <16 x i16> @insert_i16(<16 x i16> %y, i16 %f, <16 x i16> %x) {
define <8 x i32> @insert_i32(<8 x i32> %y, i32 %f, <8 x i32> %x) {
; AVX-LABEL: insert_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: retq
;
; AVX2-LABEL: insert_i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; AVX2-NEXT: retq
@@ -67,13 +67,13 @@ define <8 x i32> @insert_i32(<8 x i32> %y, i32 %f, <8 x i32> %x) {
define <4 x i64> @insert_i64(<4 x i64> %y, i64 %f, <4 x i64> %x) {
; AVX-LABEL: insert_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: retq
;
; AVX2-LABEL: insert_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index 7e962801133..9658c8e5ae3 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define <4 x double> @test_mm256_add_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_add_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_add_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = fadd <4 x double> %a0, %a1
@@ -20,12 +20,12 @@ define <4 x double> @test_mm256_add_pd(<4 x double> %a0, <4 x double> %a1) nounw
define <8 x float> @test_mm256_add_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_add_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_add_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = fadd <8 x float> %a0, %a1
@@ -34,12 +34,12 @@ define <8 x float> @test_mm256_add_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define <4 x double> @test_mm256_addsub_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_addsub_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_addsub_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -49,12 +49,12 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou
define <8 x float> @test_mm256_addsub_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_addsub_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vaddsubps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_addsub_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vaddsubps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -64,12 +64,12 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi
define <4 x double> @test_mm256_and_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_and_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_and_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%1 = bitcast <4 x double> %a0 to <4 x i64>
@@ -81,12 +81,12 @@ define <4 x double> @test_mm256_and_pd(<4 x double> %a0, <4 x double> %a1) nounw
define <8 x float> @test_mm256_and_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_and_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_and_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%1 = bitcast <8 x float> %a0 to <8 x i32>
@@ -98,7 +98,7 @@ define <8 x float> @test_mm256_and_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define <4 x double> @test_mm256_andnot_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_andnot_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT: vcmptrueps %ymm2, %ymm2, %ymm2
; X32-NEXT: vxorps %ymm2, %ymm0, %ymm0
@@ -106,7 +106,7 @@ define <4 x double> @test_mm256_andnot_pd(<4 x double> %a0, <4 x double> %a1) no
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_andnot_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vcmptrueps %ymm2, %ymm2, %ymm2
; X64-NEXT: vxorps %ymm2, %ymm0, %ymm0
@@ -122,12 +122,12 @@ define <4 x double> @test_mm256_andnot_pd(<4 x double> %a0, <4 x double> %a1) no
define <8 x float> @test_mm256_andnot_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_andnot_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandnps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_andnot_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandnps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%1 = bitcast <8 x float> %a0 to <8 x i32>
@@ -140,12 +140,12 @@ define <8 x float> @test_mm256_andnot_ps(<8 x float> %a0, <8 x float> %a1) nounw
define <4 x double> @test_mm256_blend_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_blend_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_blend_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -154,12 +154,12 @@ define <4 x double> @test_mm256_blend_pd(<4 x double> %a0, <4 x double> %a1) nou
define <8 x float> @test_mm256_blend_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_blend_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6],ymm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_blend_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6],ymm1[7]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
@@ -168,12 +168,12 @@ define <8 x float> @test_mm256_blend_ps(<8 x float> %a0, <8 x float> %a1) nounwi
define <4 x double> @test_mm256_blendv_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) nounwind {
; X32-LABEL: test_mm256_blendv_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_blendv_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -183,12 +183,12 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
define <8 x float> @test_mm256_blendv_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) nounwind {
; X32-LABEL: test_mm256_blendv_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_blendv_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -198,13 +198,13 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x double> @test_mm256_broadcast_pd(<2 x double>* %a0) nounwind {
; X32-LABEL: test_mm256_broadcast_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcast_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%ld = load <2 x double>, <2 x double>* %a0
@@ -214,13 +214,13 @@ define <4 x double> @test_mm256_broadcast_pd(<2 x double>* %a0) nounwind {
define <8 x float> @test_mm256_broadcast_ps(<4 x float>* %a0) nounwind {
; X32-LABEL: test_mm256_broadcast_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcast_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%ld = load <4 x float>, <4 x float>* %a0
@@ -230,13 +230,13 @@ define <8 x float> @test_mm256_broadcast_ps(<4 x float>* %a0) nounwind {
define <4 x double> @test_mm256_broadcast_sd(double* %a0) nounwind {
; X32-LABEL: test_mm256_broadcast_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcast_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
%ld = load double, double* %a0
@@ -249,13 +249,13 @@ define <4 x double> @test_mm256_broadcast_sd(double* %a0) nounwind {
define <4 x float> @test_mm_broadcast_ss(float* %a0) nounwind {
; X32-LABEL: test_mm_broadcast_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_broadcast_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss (%rdi), %xmm0
; X64-NEXT: retq
%ld = load float, float* %a0
@@ -268,13 +268,13 @@ define <4 x float> @test_mm_broadcast_ss(float* %a0) nounwind {
define <8 x float> @test_mm256_broadcast_ss(float* %a0) nounwind {
; X32-LABEL: test_mm256_broadcast_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcast_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
%ld = load float, float* %a0
@@ -291,11 +291,11 @@ define <8 x float> @test_mm256_broadcast_ss(float* %a0) nounwind {
define <8 x float> @test_mm256_castpd_ps(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <4 x double> %a0 to <8 x float>
ret <8 x float> %res
@@ -303,11 +303,11 @@ define <8 x float> @test_mm256_castpd_ps(<4 x double> %a0) nounwind {
define <4 x i64> @test_mm256_castpd_si256(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <4 x double> %a0 to <4 x i64>
ret <4 x i64> %res
@@ -315,12 +315,12 @@ define <4 x i64> @test_mm256_castpd_si256(<4 x double> %a0) nounwind {
define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd128_pd256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd128_pd256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -329,13 +329,13 @@ define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind {
define <2 x double> @test_mm256_castpd256_pd128(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd256_pd128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd256_pd128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -345,11 +345,11 @@ define <2 x double> @test_mm256_castpd256_pd128(<4 x double> %a0) nounwind {
define <4 x double> @test_mm256_castps_pd(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <8 x float> %a0 to <4 x double>
ret <4 x double> %res
@@ -357,11 +357,11 @@ define <4 x double> @test_mm256_castps_pd(<8 x float> %a0) nounwind {
define <4 x i64> @test_mm256_castps_si256(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <8 x float> %a0 to <4 x i64>
ret <4 x i64> %res
@@ -369,12 +369,12 @@ define <4 x i64> @test_mm256_castps_si256(<8 x float> %a0) nounwind {
define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps128_ps256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps128_ps256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -383,13 +383,13 @@ define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind {
define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps256_ps128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps256_ps128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -399,12 +399,12 @@ define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind {
define <4 x i64> @test_mm256_castsi128_si256(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi128_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi128_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -413,11 +413,11 @@ define <4 x i64> @test_mm256_castsi128_si256(<2 x i64> %a0) nounwind {
define <4 x double> @test_mm256_castsi256_pd(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi256_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi256_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <4 x i64> %a0 to <4 x double>
ret <4 x double> %res
@@ -425,11 +425,11 @@ define <4 x double> @test_mm256_castsi256_pd(<4 x i64> %a0) nounwind {
define <8 x float> @test_mm256_castsi256_ps(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi256_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi256_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <4 x i64> %a0 to <8 x float>
ret <8 x float> %res
@@ -437,13 +437,13 @@ define <8 x float> @test_mm256_castsi256_ps(<4 x i64> %a0) nounwind {
define <2 x i64> @test_mm256_castsi256_si128(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi256_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi256_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -453,12 +453,12 @@ define <2 x i64> @test_mm256_castsi256_si128(<4 x i64> %a0) nounwind {
define <4 x double> @test_mm256_ceil_pd(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_ceil_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vroundpd $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_ceil_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vroundpd $2, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 2)
@@ -468,12 +468,12 @@ declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind read
define <8 x float> @test_mm256_ceil_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_ceil_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vroundps $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_ceil_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vroundps $2, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 2)
@@ -483,12 +483,12 @@ declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readno
define <2 x double> @test_mm_cmp_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmp_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcmpgepd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmp_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcmpgepd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 13)
@@ -498,12 +498,12 @@ declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounw
define <4 x double> @test_mm256_cmp_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_cmp_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcmpgepd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cmp_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcmpgepd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 13)
@@ -513,12 +513,12 @@ declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) no
define <4 x float> @test_mm_cmp_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmp_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcmpgeps %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmp_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcmpgeps %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 13)
@@ -528,12 +528,12 @@ declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind
define <8 x float> @test_mm256_cmp_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_cmp_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcmpgeps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cmp_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcmpgeps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 13)
@@ -543,12 +543,12 @@ declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounw
define <2 x double> @test_mm_cmp_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmp_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcmpgesd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmp_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcmpgesd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 13)
@@ -558,12 +558,12 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw
define <4 x float> @test_mm_cmp_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmp_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcmpgess %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmp_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcmpgess %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 13)
@@ -573,12 +573,12 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
define <4 x double> @test_mm256_cvtepi32_pd(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_cvtepi32_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtdq2pd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvtepi32_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtdq2pd %xmm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -588,12 +588,12 @@ define <4 x double> @test_mm256_cvtepi32_pd(<2 x i64> %a0) nounwind {
define <8 x float> @test_mm256_cvtepi32_ps(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_cvtepi32_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtdq2ps %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvtepi32_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -604,13 +604,13 @@ declare <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32>) nounwind readnone
define <2 x i64> @test_mm256_cvtpd_epi32(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_cvtpd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtpd2dq %ymm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvtpd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtpd2dq %ymm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -622,13 +622,13 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>) nounwind readnone
define <4 x float> @test_mm256_cvtpd_ps(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_cvtpd_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtpd2ps %ymm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvtpd_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtpd2ps %ymm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -639,12 +639,12 @@ declare <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double>) nounwind readnone
define <4 x i64> @test_mm256_cvtps_epi32(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_cvtps_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtps2dq %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvtps_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtps2dq %ymm0, %ymm0
; X64-NEXT: retq
%cvt = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0)
@@ -655,12 +655,12 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float>) nounwind readnone
define <4 x double> @test_mm256_cvtps_pd(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm256_cvtps_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtps2pd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvtps_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtps2pd %xmm0, %ymm0
; X64-NEXT: retq
%res = fpext <4 x float> %a0 to <4 x double>
@@ -669,13 +669,13 @@ define <4 x double> @test_mm256_cvtps_pd(<4 x float> %a0) nounwind {
define <2 x i64> @test_mm256_cvttpd_epi32(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_cvttpd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvttpd2dq %ymm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvttpd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvttpd2dq %ymm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -687,12 +687,12 @@ declare <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double>) nounwind readnone
define <4 x i64> @test_mm256_cvttps_epi32(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_cvttps_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvttps2dq %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvttps_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvttps2dq %ymm0, %ymm0
; X64-NEXT: retq
%cvt = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %a0)
@@ -703,12 +703,12 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>) nounwind readnone
define <4 x double> @test_mm256_div_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_div_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vdivpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_div_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vdivpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = fdiv <4 x double> %a0, %a1
@@ -717,12 +717,12 @@ define <4 x double> @test_mm256_div_pd(<4 x double> %a0, <4 x double> %a1) nounw
define <8 x float> @test_mm256_div_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_div_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vdivps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_div_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vdivps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = fdiv <8 x float> %a0, %a1
@@ -731,12 +731,12 @@ define <8 x float> @test_mm256_div_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define <8 x float> @test_mm256_dp_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_dp_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_dp_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
@@ -746,7 +746,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi
define i32 @test_mm256_extract_epi8(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_extract_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-NEXT: vpextrb $15, %xmm0, %eax
; X32-NEXT: movzbl %al, %eax
@@ -754,7 +754,7 @@ define i32 @test_mm256_extract_epi8(<4 x i64> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_extract_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vpextrb $15, %xmm0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -768,7 +768,7 @@ define i32 @test_mm256_extract_epi8(<4 x i64> %a0) nounwind {
define i32 @test_mm256_extract_epi16(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_extract_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-NEXT: vpextrw $3, %xmm0, %eax
; X32-NEXT: movzwl %ax, %eax
@@ -776,7 +776,7 @@ define i32 @test_mm256_extract_epi16(<4 x i64> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_extract_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vpextrw $3, %xmm0, %eax
; X64-NEXT: movzwl %ax, %eax
@@ -790,14 +790,14 @@ define i32 @test_mm256_extract_epi16(<4 x i64> %a0) nounwind {
define i32 @test_mm256_extract_epi32(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_extract_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-NEXT: vextractps $1, %xmm0, %eax
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_extract_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vextractps $1, %xmm0, %eax
; X64-NEXT: vzeroupper
@@ -809,7 +809,7 @@ define i32 @test_mm256_extract_epi32(<4 x i64> %a0) nounwind {
define i64 @test_mm256_extract_epi64(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_extract_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-NEXT: vextractps $2, %xmm0, %eax
; X32-NEXT: vextractps $3, %xmm0, %edx
@@ -817,7 +817,7 @@ define i64 @test_mm256_extract_epi64(<4 x i64> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_extract_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vpextrq $1, %xmm0, %rax
; X64-NEXT: vzeroupper
@@ -828,13 +828,13 @@ define i64 @test_mm256_extract_epi64(<4 x i64> %a0) nounwind {
define <2 x double> @test_mm256_extractf128_pd(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_extractf128_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_extractf128_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -844,13 +844,13 @@ define <2 x double> @test_mm256_extractf128_pd(<4 x double> %a0) nounwind {
define <4 x float> @test_mm256_extractf128_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_extractf128_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_extractf128_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -860,13 +860,13 @@ define <4 x float> @test_mm256_extractf128_ps(<8 x float> %a0) nounwind {
define <2 x i64> @test_mm256_extractf128_si256(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_extractf128_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_extractf128_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -876,12 +876,12 @@ define <2 x i64> @test_mm256_extractf128_si256(<4 x i64> %a0) nounwind {
define <4 x double> @test_mm256_floor_pd(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_floor_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vroundpd $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_floor_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vroundpd $1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 1)
@@ -890,12 +890,12 @@ define <4 x double> @test_mm256_floor_pd(<4 x double> %a0) nounwind {
define <8 x float> @test_mm256_floor_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_floor_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vroundps $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_floor_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vroundps $1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 1)
@@ -904,12 +904,12 @@ define <8 x float> @test_mm256_floor_ps(<8 x float> %a0) nounwind {
define <4 x double> @test_mm256_hadd_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_hadd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_hadd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -919,12 +919,12 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_mm256_hadd_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_hadd_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vhaddps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_hadd_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vhaddps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -934,12 +934,12 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_mm256_hsub_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_hsub_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vhsubpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_hsub_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vhsubpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -949,12 +949,12 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_mm256_hsub_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_hsub_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vhsubps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_hsub_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vhsubps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -964,14 +964,14 @@ declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x i64> @test_mm256_insert_epi8(<4 x i64> %a0, i8 %a1) nounwind {
; X32-LABEL: test_mm256_insert_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpinsrb $4, %eax, %xmm0, %xmm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insert_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: vpinsrb $4, %eax, %xmm0, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
@@ -984,7 +984,7 @@ define <4 x i64> @test_mm256_insert_epi8(<4 x i64> %a0, i8 %a1) nounwind {
define <4 x i64> @test_mm256_insert_epi16(<4 x i64> %a0, i16 %a1) nounwind {
; X32-LABEL: test_mm256_insert_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
@@ -992,7 +992,7 @@ define <4 x i64> @test_mm256_insert_epi16(<4 x i64> %a0, i16 %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insert_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-NEXT: vpinsrw $6, %edi, %xmm1, %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1005,13 +1005,13 @@ define <4 x i64> @test_mm256_insert_epi16(<4 x i64> %a0, i16 %a1) nounwind {
define <4 x i64> @test_mm256_insert_epi32(<4 x i64> %a0, i32 %a1) nounwind {
; X32-LABEL: test_mm256_insert_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insert_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpinsrd $3, %edi, %xmm0, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X64-NEXT: retq
@@ -1023,7 +1023,7 @@ define <4 x i64> @test_mm256_insert_epi32(<4 x i64> %a0, i32 %a1) nounwind {
define <4 x i64> @test_mm256_insert_epi64(<4 x i64> %a0, i64 %a1) nounwind {
; X32-LABEL: test_mm256_insert_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; X32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
@@ -1031,7 +1031,7 @@ define <4 x i64> @test_mm256_insert_epi64(<4 x i64> %a0, i64 %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insert_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1042,13 +1042,13 @@ define <4 x i64> @test_mm256_insert_epi64(<4 x i64> %a0, i64 %a1) nounwind {
define <4 x double> @test_mm256_insertf128_pd(<4 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X64-NEXT: retq
@@ -1059,12 +1059,12 @@ define <4 x double> @test_mm256_insertf128_pd(<4 x double> %a0, <2 x double> %a1
define <8 x float> @test_mm256_insertf128_ps(<8 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%ext = shufflevector <4 x float> %a1, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1074,13 +1074,13 @@ define <8 x float> @test_mm256_insertf128_ps(<8 x float> %a0, <4 x float> %a1) n
define <4 x i64> @test_mm256_insertf128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X64-NEXT: retq
@@ -1091,13 +1091,13 @@ define <4 x i64> @test_mm256_insertf128_si256(<4 x i64> %a0, <2 x i64> %a1) noun
define <4 x i64> @test_mm256_lddqu_si256(<4 x i64>* %a0) nounwind {
; X32-LABEL: test_mm256_lddqu_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vlddqu (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_lddqu_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vlddqu (%rdi), %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64>* %a0 to i8*
@@ -1109,13 +1109,13 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readnone
define <4 x double> @test_mm256_load_pd(double* %a0) nounwind {
; X32-LABEL: test_mm256_load_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_load_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to <4 x double>*
@@ -1125,13 +1125,13 @@ define <4 x double> @test_mm256_load_pd(double* %a0) nounwind {
define <8 x float> @test_mm256_load_ps(float* %a0) nounwind {
; X32-LABEL: test_mm256_load_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_load_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to <8 x float>*
@@ -1141,13 +1141,13 @@ define <8 x float> @test_mm256_load_ps(float* %a0) nounwind {
define <4 x i64> @test_mm256_load_si256(<4 x i64>* %a0) nounwind {
; X32-LABEL: test_mm256_load_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_load_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: retq
%res = load <4 x i64>, <4 x i64>* %a0, align 32
@@ -1156,13 +1156,13 @@ define <4 x i64> @test_mm256_load_si256(<4 x i64>* %a0) nounwind {
define <4 x double> @test_mm256_loadu_pd(double* %a0) nounwind {
; X32-LABEL: test_mm256_loadu_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovups (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_loadu_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups (%rdi), %ymm0
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to <4 x double>*
@@ -1172,13 +1172,13 @@ define <4 x double> @test_mm256_loadu_pd(double* %a0) nounwind {
define <8 x float> @test_mm256_loadu_ps(float* %a0) nounwind {
; X32-LABEL: test_mm256_loadu_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovups (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_loadu_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups (%rdi), %ymm0
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to <8 x float>*
@@ -1188,13 +1188,13 @@ define <8 x float> @test_mm256_loadu_ps(float* %a0) nounwind {
define <4 x i64> @test_mm256_loadu_si256(<4 x i64>* %a0) nounwind {
; X32-LABEL: test_mm256_loadu_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovups (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_loadu_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups (%rdi), %ymm0
; X64-NEXT: retq
%res = load <4 x i64>, <4 x i64>* %a0, align 1
@@ -1203,7 +1203,7 @@ define <4 x i64> @test_mm256_loadu_si256(<4 x i64>* %a0) nounwind {
define <8 x float> @test_mm256_loadu2_m128(float* %a0, float* %a1) nounwind {
; X32-LABEL: test_mm256_loadu2_m128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups (%eax), %xmm0
@@ -1211,7 +1211,7 @@ define <8 x float> @test_mm256_loadu2_m128(float* %a0, float* %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_loadu2_m128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups (%rsi), %xmm0
; X64-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
@@ -1227,7 +1227,7 @@ define <8 x float> @test_mm256_loadu2_m128(float* %a0, float* %a1) nounwind {
define <4 x double> @test_mm256_loadu2_m128d(double* %a0, double* %a1) nounwind {
; X32-LABEL: test_mm256_loadu2_m128d:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups (%eax), %xmm0
@@ -1235,7 +1235,7 @@ define <4 x double> @test_mm256_loadu2_m128d(double* %a0, double* %a1) nounwind
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_loadu2_m128d:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups (%rsi), %xmm0
; X64-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
@@ -1251,7 +1251,7 @@ define <4 x double> @test_mm256_loadu2_m128d(double* %a0, double* %a1) nounwind
define <4 x i64> @test_mm256_loadu2_m128i(i64* %a0, i64* %a1) nounwind {
; X32-LABEL: test_mm256_loadu2_m128i:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups (%eax), %xmm0
@@ -1259,7 +1259,7 @@ define <4 x i64> @test_mm256_loadu2_m128i(i64* %a0, i64* %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_loadu2_m128i:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups (%rsi), %xmm0
; X64-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
@@ -1275,13 +1275,13 @@ define <4 x i64> @test_mm256_loadu2_m128i(i64* %a0, i64* %a1) nounwind {
define <2 x double> @test_mm_maskload_pd(double* %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_maskload_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmaskmovpd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskload_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to i8*
@@ -1292,13 +1292,13 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>) nounwind readnone
define <4 x double> @test_mm256_maskload_pd(double* %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_maskload_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmaskmovpd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskload_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to i8*
@@ -1309,13 +1309,13 @@ declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>) nounwind read
define <4 x float> @test_mm_maskload_ps(float* %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_maskload_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmaskmovps (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskload_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovps (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to i8*
@@ -1327,13 +1327,13 @@ declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>) nounwind readnone
define <8 x float> @test_mm256_maskload_ps(float* %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_maskload_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmaskmovps (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskload_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to i8*
@@ -1345,13 +1345,13 @@ declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>) nounwind readn
define void @test_mm_maskstore_pd(double* %a0, <2 x i64> %a1, <2 x double> %a2) nounwind {
; X32-LABEL: test_mm_maskstore_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmaskmovpd %xmm1, %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskstore_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to i8*
@@ -1362,14 +1362,14 @@ declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind r
define void @test_mm256_maskstore_pd(double* %a0, <4 x i64> %a1, <4 x double> %a2) nounwind {
; X32-LABEL: test_mm256_maskstore_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskstore_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -1381,13 +1381,13 @@ declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>) nounwi
define void @test_mm_maskstore_ps(float* %a0, <2 x i64> %a1, <4 x float> %a2) nounwind {
; X32-LABEL: test_mm_maskstore_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmaskmovps %xmm1, %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskstore_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to i8*
@@ -1399,14 +1399,14 @@ declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind re
define void @test_mm256_maskstore_ps(float* %a0, <4 x i64> %a1, <8 x float> %a2) nounwind {
; X32-LABEL: test_mm256_maskstore_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmaskmovps %ymm1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskstore_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -1419,12 +1419,12 @@ declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwin
define <4 x double> @test_mm256_max_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_max_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_max_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -1434,12 +1434,12 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_mm256_max_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_max_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmaxps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_max_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaxps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -1449,12 +1449,12 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_mm256_min_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_min_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vminpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_min_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vminpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -1464,12 +1464,12 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_mm256_min_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_min_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vminps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_min_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vminps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -1479,12 +1479,12 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_mm256_movedup_pd(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_movedup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_movedup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -1493,12 +1493,12 @@ define <4 x double> @test_mm256_movedup_pd(<4 x double> %a0) nounwind {
define <8 x float> @test_mm256_movehdup_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -1507,12 +1507,12 @@ define <8 x float> @test_mm256_movehdup_ps(<8 x float> %a0) nounwind {
define <8 x float> @test_mm256_moveldup_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -1521,13 +1521,13 @@ define <8 x float> @test_mm256_moveldup_ps(<8 x float> %a0) nounwind {
define i32 @test_mm256_movemask_pd(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_movemask_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovmskpd %ymm0, %eax
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_movemask_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovmskpd %ymm0, %eax
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -1538,13 +1538,13 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone
define i32 @test_mm256_movemask_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_movemask_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovmskps %ymm0, %eax
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_movemask_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovmskps %ymm0, %eax
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -1555,12 +1555,12 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_mm256_mul_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_mul_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mul_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = fmul <4 x double> %a0, %a1
@@ -1569,12 +1569,12 @@ define <4 x double> @test_mm256_mul_pd(<4 x double> %a0, <4 x double> %a1) nounw
define <8 x float> @test_mm256_mul_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_mul_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmulps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mul_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmulps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = fmul <8 x float> %a0, %a1
@@ -1583,12 +1583,12 @@ define <8 x float> @test_mm256_mul_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define <4 x double> @test_mm256_or_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_or_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vorps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_or_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vorps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%1 = bitcast <4 x double> %a0 to <4 x i64>
@@ -1600,12 +1600,12 @@ define <4 x double> @test_mm256_or_pd(<4 x double> %a0, <4 x double> %a1) nounwi
define <8 x float> @test_mm256_or_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_or_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vorps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_or_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vorps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%1 = bitcast <8 x float> %a0 to <8 x i32>
@@ -1617,12 +1617,12 @@ define <8 x float> @test_mm256_or_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define <2 x double> @test_mm_permute_pd(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_permute_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_permute_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a0, <2 x i32> <i32 1, i32 0>
@@ -1631,12 +1631,12 @@ define <2 x double> @test_mm_permute_pd(<2 x double> %a0) nounwind {
define <4 x double> @test_mm256_permute_pd(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_permute_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permute_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a0, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -1645,12 +1645,12 @@ define <4 x double> @test_mm256_permute_pd(<4 x double> %a0) nounwind {
define <4 x float> @test_mm_permute_ps(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_permute_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_permute_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -1659,12 +1659,12 @@ define <4 x float> @test_mm_permute_ps(<4 x float> %a0) nounwind {
define <4 x float> @test2_mm_permute_ps(<4 x float> %a0) nounwind {
; X32-LABEL: test2_mm_permute_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test2_mm_permute_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,2,3]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 2, i32 1, i32 2, i32 3>
@@ -1673,12 +1673,12 @@ define <4 x float> @test2_mm_permute_ps(<4 x float> %a0) nounwind {
define <8 x float> @test_mm256_permute_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_permute_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permute_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -1687,12 +1687,12 @@ define <8 x float> @test_mm256_permute_ps(<8 x float> %a0) nounwind {
define <4 x double> @test_mm256_permute2f128_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_permute2f128_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm1[0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permute2f128_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm1[0,1]
; X64-NEXT: retq
%res = shufflevector <4 x double> zeroinitializer, <4 x double> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1703,12 +1703,12 @@ declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>,
; PR26667
define <8 x float> @test_mm256_permute2f128_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_permute2f128_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permute2f128_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm1, %ymm0
; X64-NEXT: retq
%res = shufflevector <8 x float> %a1, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
@@ -1718,12 +1718,12 @@ declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8
define <4 x i64> @test_mm256_permute2f128_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_permute2f128_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permute2f128_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
; X64-NEXT: retq
%1 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1736,12 +1736,12 @@ declare <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32>, <8 x i32>, i8) noun
define <2 x double> @test_mm_permutevar_pd(<2 x double> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_permutevar_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_permutevar_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
@@ -1751,12 +1751,12 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi
define <4 x double> @test_mm256_permutevar_pd(<4 x double> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_permutevar_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permutevar_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
@@ -1766,12 +1766,12 @@ declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) no
define <4 x float> @test_mm_permutevar_ps(<4 x float> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_permutevar_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_permutevar_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
@@ -1782,12 +1782,12 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind
define <8 x float> @test_mm256_permutevar_ps(<8 x float> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_permutevar_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permutevar_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
@@ -1798,12 +1798,12 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun
define <8 x float> @test_mm256_rcp_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_rcp_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vrcpps %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_rcp_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vrcpps %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
@@ -1813,12 +1813,12 @@ declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_mm256_round_pd(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_round_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vroundpd $4, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_round_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vroundpd $4, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 4)
@@ -1827,12 +1827,12 @@ define <4 x double> @test_mm256_round_pd(<4 x double> %a0) nounwind {
define <8 x float> @test_mm256_round_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_round_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vroundps $4, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_round_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vroundps $4, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 4)
@@ -1841,12 +1841,12 @@ define <8 x float> @test_mm256_round_ps(<8 x float> %a0) nounwind {
define <8 x float> @test_mm256_rsqrt_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_rsqrt_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vrsqrtps %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_rsqrt_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vrsqrtps %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
@@ -1856,7 +1856,7 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
define <4 x i64> @test_mm256_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) nounwind {
; X32-LABEL: test_mm256_set_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovd %ecx, %xmm0
@@ -1925,7 +1925,7 @@ define <4 x i64> @test_mm256_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; X64-NEXT: vmovd %eax, %xmm0
@@ -2030,7 +2030,7 @@ define <4 x i64> @test_mm256_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8
define <4 x i64> @test_mm256_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) nounwind {
; X32-LABEL: test_mm256_set_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovd %eax, %xmm0
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
@@ -2067,7 +2067,7 @@ define <4 x i64> @test_mm256_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; X64-NEXT: vmovd %eax, %xmm0
; X64-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
@@ -2118,7 +2118,7 @@ define <4 x i64> @test_mm256_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %
define <4 x i64> @test_mm256_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) nounwind {
; X32-LABEL: test_mm256_set_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -2131,7 +2131,7 @@ define <4 x i64> @test_mm256_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovd %ecx, %xmm0
; X64-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0
; X64-NEXT: vpinsrd $2, %esi, %xmm0, %xmm0
@@ -2156,7 +2156,7 @@ define <4 x i64> @test_mm256_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %
define <4 x i64> @test_mm256_set_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) nounwind {
; X32-LABEL: test_mm256_set_epi64x:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -2169,7 +2169,7 @@ define <4 x i64> @test_mm256_set_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) noun
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_epi64x:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovq %rdi, %xmm0
; X64-NEXT: vmovq %rsi, %xmm1
; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -2187,13 +2187,13 @@ define <4 x i64> @test_mm256_set_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) noun
define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -2203,13 +2203,13 @@ define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwi
define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128d:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128d:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -2222,13 +2222,13 @@ define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) no
define <4 x i64> @test_mm256_set_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128i:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128i:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -2241,7 +2241,7 @@ define <4 x i64> @test_mm256_set_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <4 x double> @test_mm256_set_pd(double %a0, double %a1, double %a2, double %a3) nounwind {
; X32-LABEL: test_mm256_set_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
@@ -2252,7 +2252,7 @@ define <4 x double> @test_mm256_set_pd(double %a0, double %a1, double %a2, doubl
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -2266,7 +2266,7 @@ define <4 x double> @test_mm256_set_pd(double %a0, double %a1, double %a2, doubl
define <8 x float> @test_mm256_set_ps(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) nounwind {
; X32-LABEL: test_mm256_set_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -2285,7 +2285,7 @@ define <8 x float> @test_mm256_set_ps(float %a0, float %a1, float %a2, float %a3
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; X64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; X64-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
@@ -2307,7 +2307,7 @@ define <8 x float> @test_mm256_set_ps(float %a0, float %a1, float %a2, float %a3
define <4 x i64> @test_mm256_set1_epi8(i8 %a0) nounwind {
; X32-LABEL: test_mm256_set1_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovd %eax, %xmm0
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -2316,7 +2316,7 @@ define <4 x i64> @test_mm256_set1_epi8(i8 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set1_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: vmovd %eax, %xmm0
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -2361,7 +2361,7 @@ define <4 x i64> @test_mm256_set1_epi8(i8 %a0) nounwind {
define <4 x i64> @test_mm256_set1_epi16(i16 %a0) nounwind {
; X32-LABEL: test_mm256_set1_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovd %eax, %xmm0
; X32-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -2370,7 +2370,7 @@ define <4 x i64> @test_mm256_set1_epi16(i16 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set1_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2398,14 +2398,14 @@ define <4 x i64> @test_mm256_set1_epi16(i16 %a0) nounwind {
define <4 x i64> @test_mm256_set1_epi32(i32 %a0) nounwind {
; X32-LABEL: test_mm256_set1_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set1_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2424,7 +2424,7 @@ define <4 x i64> @test_mm256_set1_epi32(i32 %a0) nounwind {
define <4 x i64> @test_mm256_set1_epi64x(i64 %a0) nounwind {
; X32-LABEL: test_mm256_set1_epi64x:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovd %ecx, %xmm0
@@ -2435,7 +2435,7 @@ define <4 x i64> @test_mm256_set1_epi64x(i64 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set1_epi64x:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovq %rdi, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2449,14 +2449,14 @@ define <4 x i64> @test_mm256_set1_epi64x(i64 %a0) nounwind {
define <4 x double> @test_mm256_set1_pd(double %a0) nounwind {
; X32-LABEL: test_mm256_set1_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set1_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
@@ -2469,14 +2469,14 @@ define <4 x double> @test_mm256_set1_pd(double %a0) nounwind {
define <8 x float> @test_mm256_set1_ps(float %a0) nounwind {
; X32-LABEL: test_mm256_set1_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set1_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
@@ -2493,7 +2493,7 @@ define <8 x float> @test_mm256_set1_ps(float %a0) nounwind {
define <4 x i64> @test_mm256_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) nounwind {
; X32-LABEL: test_mm256_setr_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovd %ecx, %xmm0
@@ -2562,7 +2562,7 @@ define <4 x i64> @test_mm256_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; X64-NEXT: vmovd %eax, %xmm0
@@ -2667,7 +2667,7 @@ define <4 x i64> @test_mm256_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i
define <4 x i64> @test_mm256_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) nounwind {
; X32-LABEL: test_mm256_setr_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovd %eax, %xmm0
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
@@ -2704,7 +2704,7 @@ define <4 x i64> @test_mm256_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; X64-NEXT: vmovd %eax, %xmm0
; X64-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
@@ -2755,7 +2755,7 @@ define <4 x i64> @test_mm256_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16
define <4 x i64> @test_mm256_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) nounwind {
; X32-LABEL: test_mm256_setr_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -2768,7 +2768,7 @@ define <4 x i64> @test_mm256_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovd %r8d, %xmm0
; X64-NEXT: vpinsrd $1, %r9d, %xmm0, %xmm0
; X64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
@@ -2793,7 +2793,7 @@ define <4 x i64> @test_mm256_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32
define <4 x i64> @test_mm256_setr_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) nounwind {
; X32-LABEL: test_mm256_setr_epi64x:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -2806,7 +2806,7 @@ define <4 x i64> @test_mm256_setr_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) nou
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_epi64x:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovq %rcx, %xmm0
; X64-NEXT: vmovq %rdx, %xmm1
; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -2824,13 +2824,13 @@ define <4 x i64> @test_mm256_setr_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) nou
define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
@@ -2840,13 +2840,13 @@ define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounw
define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128d:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128d:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
@@ -2859,13 +2859,13 @@ define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) n
define <4 x i64> @test_mm256_setr_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128i:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128i:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
@@ -2878,7 +2878,7 @@ define <4 x i64> @test_mm256_setr_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <4 x double> @test_mm256_setr_pd(double %a0, double %a1, double %a2, double %a3) nounwind {
; X32-LABEL: test_mm256_setr_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
@@ -2889,7 +2889,7 @@ define <4 x double> @test_mm256_setr_pd(double %a0, double %a1, double %a2, doub
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X64-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -2903,7 +2903,7 @@ define <4 x double> @test_mm256_setr_pd(double %a0, double %a1, double %a2, doub
define <8 x float> @test_mm256_setr_ps(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) nounwind {
; X32-LABEL: test_mm256_setr_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -2922,7 +2922,7 @@ define <8 x float> @test_mm256_setr_ps(float %a0, float %a1, float %a2, float %a
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
; X64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
; X64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
@@ -2944,12 +2944,12 @@ define <8 x float> @test_mm256_setr_ps(float %a0, float %a1, float %a2, float %a
define <4 x double> @test_mm256_setzero_pd() nounwind {
; X32-LABEL: test_mm256_setzero_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setzero_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
ret <4 x double> zeroinitializer
@@ -2957,12 +2957,12 @@ define <4 x double> @test_mm256_setzero_pd() nounwind {
define <8 x float> @test_mm256_setzero_ps() nounwind {
; X32-LABEL: test_mm256_setzero_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setzero_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
ret <8 x float> zeroinitializer
@@ -2970,12 +2970,12 @@ define <8 x float> @test_mm256_setzero_ps() nounwind {
define <4 x i64> @test_mm256_setzero_si256() nounwind {
; X32-LABEL: test_mm256_setzero_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setzero_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
ret <4 x i64> zeroinitializer
@@ -2983,12 +2983,12 @@ define <4 x i64> @test_mm256_setzero_si256() nounwind {
define <4 x double> @test_mm256_shuffle_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -2997,12 +2997,12 @@ define <4 x double> @test_mm256_shuffle_pd(<4 x double> %a0, <4 x double> %a1) n
define <8 x float> @test_mm256_shuffle_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_shuffle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_shuffle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 0, i32 8, i32 8, i32 4, i32 4, i32 12, i32 12>
@@ -3011,12 +3011,12 @@ define <8 x float> @test_mm256_shuffle_ps(<8 x float> %a0, <8 x float> %a1) noun
define <4 x double> @test_mm256_sqrt_pd(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_sqrt_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vsqrtpd %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_sqrt_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vsqrtpd %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
@@ -3026,12 +3026,12 @@ declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
define <8 x float> @test_mm256_sqrt_ps(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_sqrt_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vsqrtps %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_sqrt_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vsqrtps %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
@@ -3041,14 +3041,14 @@ declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
define void @test_mm256_store_pd(double* %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_store_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_store_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3059,14 +3059,14 @@ define void @test_mm256_store_pd(double* %a0, <4 x double> %a1) nounwind {
define void @test_mm256_store_ps(float* %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_store_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_store_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3077,14 +3077,14 @@ define void @test_mm256_store_ps(float* %a0, <8 x float> %a1) nounwind {
define void @test_mm256_store_si256(<4 x i64>* %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_store_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_store_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3094,14 +3094,14 @@ define void @test_mm256_store_si256(<4 x i64>* %a0, <4 x i64> %a1) nounwind {
define void @test_mm256_storeu_pd(double* %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_storeu_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovups %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_storeu_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3112,14 +3112,14 @@ define void @test_mm256_storeu_pd(double* %a0, <4 x double> %a1) nounwind {
define void @test_mm256_storeu_ps(float* %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_storeu_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovups %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_storeu_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3130,14 +3130,14 @@ define void @test_mm256_storeu_ps(float* %a0, <8 x float> %a1) nounwind {
define void @test_mm256_storeu_si256(<4 x i64>* %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_storeu_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovups %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_storeu_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3147,7 +3147,7 @@ define void @test_mm256_storeu_si256(<4 x i64>* %a0, <4 x i64> %a1) nounwind {
define void @test_mm256_storeu2_m128(float* %a0, float* %a1, <8 x float> %a2) nounwind {
; X32-LABEL: test_mm256_storeu2_m128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups %xmm0, (%ecx)
@@ -3157,7 +3157,7 @@ define void @test_mm256_storeu2_m128(float* %a0, float* %a1, <8 x float> %a2) no
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_storeu2_m128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups %xmm0, (%rdi)
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vmovups %xmm0, (%rsi)
@@ -3174,7 +3174,7 @@ define void @test_mm256_storeu2_m128(float* %a0, float* %a1, <8 x float> %a2) no
define void @test_mm256_storeu2_m128d(double* %a0, double* %a1, <4 x double> %a2) nounwind {
; X32-LABEL: test_mm256_storeu2_m128d:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups %xmm0, (%ecx)
@@ -3184,7 +3184,7 @@ define void @test_mm256_storeu2_m128d(double* %a0, double* %a1, <4 x double> %a2
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_storeu2_m128d:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups %xmm0, (%rdi)
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vmovups %xmm0, (%rsi)
@@ -3201,7 +3201,7 @@ define void @test_mm256_storeu2_m128d(double* %a0, double* %a1, <4 x double> %a2
define void @test_mm256_storeu2_m128i(<2 x i64>* %a0, <2 x i64>* %a1, <4 x i64> %a2) nounwind {
; X32-LABEL: test_mm256_storeu2_m128i:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups %xmm0, (%ecx)
@@ -3211,7 +3211,7 @@ define void @test_mm256_storeu2_m128i(<2 x i64>* %a0, <2 x i64>* %a1, <4 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_storeu2_m128i:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups %xmm0, (%rdi)
; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
; X64-NEXT: vmovups %xmm0, (%rsi)
@@ -3228,14 +3228,14 @@ define void @test_mm256_storeu2_m128i(<2 x i64>* %a0, <2 x i64>* %a1, <4 x i64>
define void @test_mm256_stream_pd(double *%a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_stream_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovntps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_stream_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovntps %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3246,14 +3246,14 @@ define void @test_mm256_stream_pd(double *%a0, <4 x double> %a1) nounwind {
define void @test_mm256_stream_ps(float *%a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_stream_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovntps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_stream_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovntps %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3264,14 +3264,14 @@ define void @test_mm256_stream_ps(float *%a0, <8 x float> %a1) nounwind {
define void @test_mm256_stream_si256(<4 x i64> *%a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_stream_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovntps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_stream_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovntps %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -3281,12 +3281,12 @@ define void @test_mm256_stream_si256(<4 x i64> *%a0, <4 x i64> %a1) nounwind {
define <4 x double> @test_mm256_sub_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_sub_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vsubpd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_sub_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vsubpd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = fsub <4 x double> %a0, %a1
@@ -3295,12 +3295,12 @@ define <4 x double> @test_mm256_sub_pd(<4 x double> %a0, <4 x double> %a1) nounw
define <8 x float> @test_mm256_sub_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_sub_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vsubps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_sub_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vsubps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = fsub <8 x float> %a0, %a1
@@ -3309,14 +3309,14 @@ define <8 x float> @test_mm256_sub_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define i32 @test_mm_testc_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_testc_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestpd %xmm1, %xmm0
; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testc_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestpd %xmm1, %xmm0
; X64-NEXT: setb %al
@@ -3328,7 +3328,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_mm256_testc_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_testc_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestpd %ymm1, %ymm0
; X32-NEXT: setb %al
@@ -3336,7 +3336,7 @@ define i32 @test_mm256_testc_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testc_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestpd %ymm1, %ymm0
; X64-NEXT: setb %al
@@ -3349,14 +3349,14 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_mm_testc_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_testc_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestps %xmm1, %xmm0
; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testc_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestps %xmm1, %xmm0
; X64-NEXT: setb %al
@@ -3368,7 +3368,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm256_testc_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_testc_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestps %ymm1, %ymm0
; X32-NEXT: setb %al
@@ -3376,7 +3376,7 @@ define i32 @test_mm256_testc_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testc_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestps %ymm1, %ymm0
; X64-NEXT: setb %al
@@ -3389,7 +3389,7 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn
define i32 @test_mm256_testc_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_testc_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vptest %ymm1, %ymm0
; X32-NEXT: setb %al
@@ -3397,7 +3397,7 @@ define i32 @test_mm256_testc_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testc_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vptest %ymm1, %ymm0
; X64-NEXT: setb %al
@@ -3410,14 +3410,14 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_mm_testnzc_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_testnzc_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestpd %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testnzc_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestpd %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -3429,7 +3429,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_mm256_testnzc_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_testnzc_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestpd %ymm1, %ymm0
; X32-NEXT: seta %al
@@ -3437,7 +3437,7 @@ define i32 @test_mm256_testnzc_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testnzc_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestpd %ymm1, %ymm0
; X64-NEXT: seta %al
@@ -3450,14 +3450,14 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind r
define i32 @test_mm_testnzc_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_testnzc_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestps %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testnzc_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestps %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -3469,7 +3469,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon
define i32 @test_mm256_testnzc_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_testnzc_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestps %ymm1, %ymm0
; X32-NEXT: seta %al
@@ -3477,7 +3477,7 @@ define i32 @test_mm256_testnzc_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testnzc_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestps %ymm1, %ymm0
; X64-NEXT: seta %al
@@ -3490,7 +3490,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind rea
define i32 @test_mm256_testnzc_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_testnzc_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vptest %ymm1, %ymm0
; X32-NEXT: seta %al
@@ -3498,7 +3498,7 @@ define i32 @test_mm256_testnzc_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testnzc_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vptest %ymm1, %ymm0
; X64-NEXT: seta %al
@@ -3511,14 +3511,14 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_mm_testz_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_testz_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestpd %xmm1, %xmm0
; X32-NEXT: sete %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testz_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestpd %xmm1, %xmm0
; X64-NEXT: sete %al
@@ -3530,7 +3530,7 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_mm256_testz_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_testz_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestpd %ymm1, %ymm0
; X32-NEXT: sete %al
@@ -3538,7 +3538,7 @@ define i32 @test_mm256_testz_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testz_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestpd %ymm1, %ymm0
; X64-NEXT: sete %al
@@ -3551,14 +3551,14 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_mm_testz_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_testz_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestps %xmm1, %xmm0
; X32-NEXT: sete %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testz_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestps %xmm1, %xmm0
; X64-NEXT: sete %al
@@ -3570,7 +3570,7 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm256_testz_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_testz_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestps %ymm1, %ymm0
; X32-NEXT: sete %al
@@ -3578,7 +3578,7 @@ define i32 @test_mm256_testz_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testz_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestps %ymm1, %ymm0
; X64-NEXT: sete %al
@@ -3591,7 +3591,7 @@ declare i32 @llvm.x86.avx.vtestz.ps.256(<8 x float>, <8 x float>) nounwind readn
define i32 @test_mm256_testz_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_testz_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vptest %ymm1, %ymm0
; X32-NEXT: sete %al
@@ -3599,7 +3599,7 @@ define i32 @test_mm256_testz_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testz_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vptest %ymm1, %ymm0
; X64-NEXT: sete %al
@@ -3612,56 +3612,56 @@ declare i32 @llvm.x86.avx.ptestz.256(<4 x i64>, <4 x i64>) nounwind readnone
define <2 x double> @test_mm_undefined_pd() nounwind {
; X32-LABEL: test_mm_undefined_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_undefined_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
ret <2 x double> undef
}
define <4 x double> @test_mm256_undefined_pd() nounwind {
; X32-LABEL: test_mm256_undefined_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_undefined_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
ret <4 x double> undef
}
define <8 x float> @test_mm256_undefined_ps() nounwind {
; X32-LABEL: test_mm256_undefined_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_undefined_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
ret <8 x float> undef
}
define <4 x i64> @test_mm256_undefined_si256() nounwind {
; X32-LABEL: test_mm256_undefined_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_undefined_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
ret <4 x i64> undef
}
define <4 x double> @test_mm256_unpackhi_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_unpackhi_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpackhi_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -3670,12 +3670,12 @@ define <4 x double> @test_mm256_unpackhi_pd(<4 x double> %a0, <4 x double> %a1)
define <8 x float> @test_mm256_unpackhi_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_unpackhi_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpackhi_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -3684,12 +3684,12 @@ define <8 x float> @test_mm256_unpackhi_ps(<8 x float> %a0, <8 x float> %a1) nou
define <4 x double> @test_mm256_unpacklo_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_unpacklo_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpacklo_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -3698,12 +3698,12 @@ define <4 x double> @test_mm256_unpacklo_pd(<4 x double> %a0, <4 x double> %a1)
define <8 x float> @test_mm256_unpacklo_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_unpacklo_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpacklo_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -3712,12 +3712,12 @@ define <8 x float> @test_mm256_unpacklo_ps(<8 x float> %a0, <8 x float> %a1) nou
define <4 x double> @test_mm256_xor_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_xor_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_xor_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%1 = bitcast <4 x double> %a0 to <4 x i64>
@@ -3729,12 +3729,12 @@ define <4 x double> @test_mm256_xor_pd(<4 x double> %a0, <4 x double> %a1) nounw
define <8 x float> @test_mm256_xor_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_xor_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_xor_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%1 = bitcast <8 x float> %a0 to <8 x i32>
@@ -3746,12 +3746,12 @@ define <8 x float> @test_mm256_xor_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define void @test_mm256_zeroall() nounwind {
; X32-LABEL: test_mm256_zeroall:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vzeroall
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_zeroall:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vzeroall
; X64-NEXT: retq
call void @llvm.x86.avx.vzeroall()
@@ -3761,12 +3761,12 @@ declare void @llvm.x86.avx.vzeroall() nounwind readnone
define void @test_mm256_zeroupper() nounwind {
; X32-LABEL: test_mm256_zeroupper:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_zeroupper:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vzeroupper
; X64-NEXT: retq
call void @llvm.x86.avx.vzeroupper()
@@ -3776,12 +3776,12 @@ declare void @llvm.x86.avx.vzeroupper() nounwind readnone
define <4 x double> @test_mm256_zextpd128_pd256(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm256_zextpd128_pd256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_zextpd128_pd256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, %xmm0
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3790,12 +3790,12 @@ define <4 x double> @test_mm256_zextpd128_pd256(<2 x double> %a0) nounwind {
define <8 x float> @test_mm256_zextps128_ps256(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm256_zextps128_ps256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_zextps128_ps256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, %xmm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -3804,12 +3804,12 @@ define <8 x float> @test_mm256_zextps128_ps256(<4 x float> %a0) nounwind {
define <4 x i64> @test_mm256_zextsi128_si256(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_zextsi128_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_zextsi128_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, %xmm0
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index 0bd58b34fa8..e9b568316f6 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -6,7 +6,7 @@
define <4 x double> @test_x86_avx_vinsertf128_pd_256_1(<4 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vinsertf128_pd_256_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %a0, <2 x double> %a1, i8 1)
@@ -16,7 +16,7 @@ declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>
define <8 x float> @test_x86_avx_vinsertf128_ps_256_1(<8 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vinsertf128_ps_256_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %a0, <4 x float> %a1, i8 1)
@@ -26,7 +26,7 @@ declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i
define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 1)
@@ -38,7 +38,7 @@ define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1
; not a vinsertf128 $1.
define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; CHECK-NEXT: ret{{[l|q]}}
@@ -51,7 +51,7 @@ declare <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32>, <4 x i32>, i8) nou
define <2 x double> @test_x86_avx_vextractf128_pd_256_1(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_vextractf128_pd_256_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -62,7 +62,7 @@ declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwin
define <4 x float> @test_x86_avx_vextractf128_ps_256_1(<8 x float> %a0) {
; CHECK-LABEL: test_x86_avx_vextractf128_ps_256_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -73,7 +73,7 @@ declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind
define <4 x i32> @test_x86_avx_vextractf128_si_256_1(<8 x i32> %a0) {
; CHECK-LABEL: test_x86_avx_vextractf128_si_256_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -87,7 +87,7 @@ declare <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32>, i8) nounwind read
; not a vextractf128 of any kind.
define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -98,13 +98,13 @@ define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
; X86-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
@@ -115,13 +115,13 @@ declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
; X86-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
@@ -132,7 +132,7 @@ declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_blend_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 7) ; <<4 x double>> [#uses=1]
@@ -143,7 +143,7 @@ declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32)
define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_blend_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
@@ -154,7 +154,7 @@ declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32) no
define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_dp_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
@@ -165,7 +165,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i32) nounw
define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_sse2_psll_dq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
@@ -176,7 +176,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_sse2_psrl_dq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
@@ -187,7 +187,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_sse41_blendpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i8 2) ; <<2 x double>> [#uses=1]
@@ -198,7 +198,7 @@ declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nou
define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse41_blendps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -209,7 +209,7 @@ declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwi
define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_x86_sse41_pblendw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i8 7) ; <<8 x i16>> [#uses=1]
@@ -220,7 +220,7 @@ declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind rea
define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxbd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
@@ -231,7 +231,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxbq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
@@ -242,7 +242,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxbw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbw %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
@@ -253,7 +253,7 @@ declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxdq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxdq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
@@ -264,7 +264,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>) nounwind readnone
define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxwd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxwd %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
@@ -275,7 +275,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxwq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
@@ -286,7 +286,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxbd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
@@ -297,7 +297,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxbq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
@@ -308,7 +308,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxbw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
@@ -319,7 +319,7 @@ declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxdq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
@@ -330,7 +330,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone
define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxwd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
@@ -341,7 +341,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxwq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
@@ -352,7 +352,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_sse2_cvtdq2pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtdq2pd %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1]
@@ -363,7 +363,7 @@ declare <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32>) nounwind readnone
define <4 x double> @test_x86_avx_cvtdq2_pd_256(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx_cvtdq2_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32> %a0) ; <<4 x double>> [#uses=1]
@@ -374,7 +374,7 @@ declare <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32>) nounwind readnone
define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse2_cvtps2pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtps2pd %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1]
@@ -385,7 +385,7 @@ declare <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float>) nounwind readnone
define <4 x double> @test_x86_avx_cvt_ps2_pd_256(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx_cvt_ps2_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtps2pd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float> %a0) ; <<4 x double>> [#uses=1]
@@ -397,7 +397,7 @@ declare <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float>) nounwind readnone
define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
; add operation forces the execution domain.
; X86-LABEL: test_x86_sse2_storeu_dq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; X86-NEXT: vpsubb %xmm1, %xmm0, %xmm0
@@ -405,7 +405,7 @@ define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_sse2_storeu_dq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; X64-NEXT: vmovdqu %xmm0, (%rdi)
@@ -420,7 +420,7 @@ declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; fadd operation forces the execution domain.
; X86-LABEL: test_x86_sse2_storeu_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
@@ -429,7 +429,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_sse2_storeu_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; X64-NEXT: vaddpd %xmm1, %xmm0, %xmm0
@@ -444,13 +444,13 @@ declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
define void @test_x86_sse_storeu_ps(i8* %a0, <4 x float> %a1) {
; X86-LABEL: test_x86_sse_storeu_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovups %xmm0, (%eax)
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_sse_storeu_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups %xmm0, (%rdi)
; X64-NEXT: ret{{[l|q]}}
call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
@@ -463,7 +463,7 @@ define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
; FIXME: unfortunately the execution domain fix pass changes this to vmovups and it's hard to force with no 256-bit integer instructions
; add operation forces the execution domain.
; X86-LABEL: test_x86_avx_storeu_dq_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -475,7 +475,7 @@ define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx_storeu_dq_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpsubb %xmm2, %xmm1, %xmm1
@@ -494,7 +494,7 @@ declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
define void @test_x86_avx_storeu_pd_256(i8* %a0, <4 x double> %a1) {
; add operation forces the execution domain.
; X86-LABEL: test_x86_avx_storeu_pd_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -503,7 +503,7 @@ define void @test_x86_avx_storeu_pd_256(i8* %a0, <4 x double> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx_storeu_pd_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X64-NEXT: vmovupd %ymm0, (%rdi)
@@ -518,14 +518,14 @@ declare void @llvm.x86.avx.storeu.pd.256(i8*, <4 x double>) nounwind
define void @test_x86_avx_storeu_ps_256(i8* %a0, <8 x float> %a1) {
; X86-LABEL: test_x86_avx_storeu_ps_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovups %ymm0, (%eax)
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx_storeu_ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovups %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -537,7 +537,7 @@ declare void @llvm.x86.avx.storeu.ps.256(i8*, <8 x float>) nounwind
define <2 x double> @test_x86_avx_vpermil_pd(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx_vpermil_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double> %a0, i8 1) ; <<2 x double>> [#uses=1]
@@ -548,7 +548,7 @@ declare <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double>, i8) nounwind readnon
define <4 x double> @test_x86_avx_vpermil_pd_256(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_vpermil_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double> %a0, i8 7) ; <<4 x double>> [#uses=1]
@@ -559,7 +559,7 @@ declare <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double>, i8) nounwind rea
define <4 x float> @test_x86_avx_vpermil_ps(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx_vpermil_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float> %a0, i8 7) ; <<4 x float>> [#uses=1]
@@ -570,7 +570,7 @@ declare <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float>, i8) nounwind readnone
define <8 x float> @test_x86_avx_vpermil_ps_256(<8 x float> %a0) {
; CHECK-LABEL: test_x86_avx_vpermil_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float> %a0, i8 7) ; <<8 x float>> [#uses=1]
@@ -581,7 +581,7 @@ declare <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float>, i8) nounwind readn
define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vperm2f128_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 3) ; <<4 x double>> [#uses=1]
@@ -592,7 +592,7 @@ declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>,
define <8 x float> @test_x86_avx_vperm2f128_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vperm2f128_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float> %a0, <8 x float> %a1, i8 3) ; <<8 x float>> [#uses=1]
@@ -603,7 +603,7 @@ declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8
define <8 x i32> @test_x86_avx_vperm2f128_si_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx_vperm2f128_si_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32> %a0, <8 x i32> %a1, i8 3) ; <<8 x i32>> [#uses=1]
diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index 39ebfe2f1a8..748dd6804dd 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -6,7 +6,7 @@
define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_addsub_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd0,0xc1]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -17,7 +17,7 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou
define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_addsub_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0xd0,0xc1]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -28,7 +28,7 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi
define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; CHECK-LABEL: test_x86_avx_blendv_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4b,0xc1,0x20]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) ; <<4 x double>> [#uses=1]
@@ -39,7 +39,7 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; CHECK-LABEL: test_x86_avx_blendv_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4a,0xc1,0x20]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) ; <<8 x float>> [#uses=1]
@@ -50,7 +50,7 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_cmp_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpordpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xc2,0xc1,0x07]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 7) ; <<4 x double>> [#uses=1]
@@ -61,7 +61,7 @@ declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) no
define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_cmp_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpordps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0xc2,0xc1,0x07]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1]
@@ -70,7 +70,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) {
define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_cmp_ps_256_pseudo_op:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x00]
; CHECK-NEXT: vcmpltps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x01]
; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x02]
@@ -143,13 +143,13 @@ declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounw
define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) {
; AVX-LABEL: test_x86_avx_cvt_pd2_ps_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtpd2ps %ymm0, %xmm0 # encoding: [0xc5,0xfd,0x5a,0xc0]
; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_cvt_pd2_ps_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtpd2ps %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5a,0xc0]
; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -161,13 +161,13 @@ declare <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double>) nounwind readnone
define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) {
; AVX-LABEL: test_x86_avx_cvt_pd2dq_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtpd2dq %ymm0, %xmm0 # encoding: [0xc5,0xff,0xe6,0xc0]
; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_cvt_pd2dq_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtpd2dq %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xff,0xe6,0xc0]
; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -179,7 +179,7 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>) nounwind readnone
define <8 x i32> @test_x86_avx_cvt_ps2dq_256(<8 x float> %a0) {
; CHECK-LABEL: test_x86_avx_cvt_ps2dq_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcvtps2dq %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5b,0xc0]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1]
@@ -190,12 +190,12 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float>) nounwind readnone
define <8 x float> @test_x86_avx_cvtdq2_ps_256(<8 x i32> %a0) {
; AVX-LABEL: test_x86_avx_cvtdq2_ps_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5b,0xc0]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_cvtdq2_ps_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5b,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32> %a0) ; <<8 x float>> [#uses=1]
@@ -206,13 +206,13 @@ declare <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32>) nounwind readnone
define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) {
; AVX-LABEL: test_x86_avx_cvtt_pd2dq_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0 # encoding: [0xc5,0xfd,0xe6,0xc0]
; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_cvtt_pd2dq_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttpd2dq %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe6,0xc0]
; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -224,12 +224,12 @@ declare <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double>) nounwind readnone
define <8 x i32> @test_x86_avx_cvtt_ps2dq_256(<8 x float> %a0) {
; AVX-LABEL: test_x86_avx_cvtt_ps2dq_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %ymm0, %ymm0 # encoding: [0xc5,0xfe,0x5b,0xc0]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_cvtt_ps2dq_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttps2dq %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfe,0x5b,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1]
@@ -240,7 +240,7 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>) nounwind readnone
define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_dp_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x40,0xc1,0x07]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1]
@@ -251,7 +251,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi
define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_hadd_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x7c,0xc1]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -262,7 +262,7 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_hadd_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0x7c,0xc1]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -273,7 +273,7 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_hsub_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x7d,0xc1]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -284,7 +284,7 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_hsub_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0x7d,0xc1]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -295,13 +295,13 @@ declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind
define <32 x i8> @test_x86_avx_ldu_dq_256(i8* %a0) {
; X86-LABEL: test_x86_avx_ldu_dq_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vlddqu (%eax), %ymm0 # encoding: [0xc5,0xff,0xf0,0x00]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_ldu_dq_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vlddqu (%rdi), %ymm0 # encoding: [0xc5,0xff,0xf0,0x07]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0) ; <<32 x i8>> [#uses=1]
@@ -312,13 +312,13 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readonly
define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x i64> %mask) {
; X86-LABEL: test_x86_avx_maskload_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vmaskmovpd (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2d,0x00]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_maskload_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2d,0x07]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %mask) ; <<2 x double>> [#uses=1]
@@ -329,13 +329,13 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>) nounwind readonly
define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x i64> %mask) {
; X86-LABEL: test_x86_avx_maskload_pd_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vmaskmovpd (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2d,0x00]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_maskload_pd_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2d,0x07]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %mask) ; <<4 x double>> [#uses=1]
@@ -346,13 +346,13 @@ declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>) nounwind read
define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx_maskload_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vmaskmovps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2c,0x00]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_maskload_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovps (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2c,0x07]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %mask) ; <<4 x float>> [#uses=1]
@@ -363,13 +363,13 @@ declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>) nounwind readonly
define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x i32> %mask) {
; X86-LABEL: test_x86_avx_maskload_ps_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vmaskmovps (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2c,0x00]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_maskload_ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovps (%rdi), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2c,0x07]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %mask) ; <<8 x float>> [#uses=1]
@@ -380,13 +380,13 @@ declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>) nounwind reado
define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2) {
; X86-LABEL: test_x86_avx_maskstore_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vmaskmovpd %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2f,0x08]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_maskstore_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # encoding: [0xc4,0xe2,0x79,0x2f,0x0f]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2)
@@ -397,14 +397,14 @@ declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind
define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x double> %a2) {
; X86-LABEL: test_x86_avx_maskstore_pd_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2f,0x08]
; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_maskstore_pd_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # encoding: [0xc4,0xe2,0x7d,0x2f,0x0f]
; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -416,13 +416,13 @@ declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>) nounwi
define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2) {
; X86-LABEL: test_x86_avx_maskstore_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vmaskmovps %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2e,0x08]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_maskstore_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # encoding: [0xc4,0xe2,0x79,0x2e,0x0f]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2)
@@ -433,14 +433,14 @@ declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind
define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x float> %a2) {
; X86-LABEL: test_x86_avx_maskstore_ps_256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vmaskmovps %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2e,0x08]
; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx_maskstore_ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # encoding: [0xc4,0xe2,0x7d,0x2e,0x0f]
; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -452,12 +452,12 @@ declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwin
define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) {
; AVX-LABEL: test_x86_avx_max_pd_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5f,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_max_pd_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5f,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -468,12 +468,12 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) {
; AVX-LABEL: test_x86_avx_max_ps_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5f,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_max_ps_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -484,12 +484,12 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1) {
; AVX-LABEL: test_x86_avx_min_pd_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vminpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5d,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_min_pd_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vminpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5d,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -500,12 +500,12 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) {
; AVX-LABEL: test_x86_avx_min_ps_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vminps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5d,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_min_ps_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vminps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -516,7 +516,7 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind
define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_movmsk_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovmskpd %ymm0, %eax # encoding: [0xc5,0xfd,0x50,0xc0]
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -528,7 +528,7 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone
define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) {
; CHECK-LABEL: test_x86_avx_movmsk_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovmskps %ymm0, %eax # encoding: [0xc5,0xfc,0x50,0xc0]
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -540,7 +540,7 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_x86_avx_ptestc_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
@@ -554,7 +554,7 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_x86_avx_ptestnzc_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
@@ -568,7 +568,7 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_x86_avx_ptestz_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
@@ -582,7 +582,7 @@ declare i32 @llvm.x86.avx.ptestz.256(<4 x i64>, <4 x i64>) nounwind readnone
define <8 x float> @test_x86_avx_rcp_ps_256(<8 x float> %a0) {
; CHECK-LABEL: test_x86_avx_rcp_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrcpps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x53,0xc0]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
@@ -593,12 +593,12 @@ declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_x86_avx_round_pd_256(<4 x double> %a0) {
; AVX-LABEL: test_x86_avx_round_pd_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vroundpd $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x09,0xc0,0x07]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_round_pd_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vrndscalepd $7, %ymm0, %ymm0 # encoding: [0x62,0xf3,0xfd,0x28,0x09,0xc0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7) ; <<4 x double>> [#uses=1]
@@ -609,12 +609,12 @@ declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind read
define <8 x float> @test_x86_avx_round_ps_256(<8 x float> %a0) {
; AVX-LABEL: test_x86_avx_round_ps_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vroundps $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x08,0xc0,0x07]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_round_ps_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vrndscaleps $7, %ymm0, %ymm0 # encoding: [0x62,0xf3,0x7d,0x28,0x08,0xc0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7) ; <<8 x float>> [#uses=1]
@@ -625,7 +625,7 @@ declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readno
define <8 x float> @test_x86_avx_rsqrt_ps_256(<8 x float> %a0) {
; CHECK-LABEL: test_x86_avx_rsqrt_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrsqrtps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x52,0xc0]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
@@ -636,12 +636,12 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_x86_avx_sqrt_pd_256(<4 x double> %a0) {
; AVX-LABEL: test_x86_avx_sqrt_pd_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtpd %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x51,0xc0]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_sqrt_pd_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vsqrtpd %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x51,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0) ; <<4 x double>> [#uses=1]
@@ -652,12 +652,12 @@ declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
define <8 x float> @test_x86_avx_sqrt_ps_256(<8 x float> %a0) {
; AVX-LABEL: test_x86_avx_sqrt_ps_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x51,0xc0]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_sqrt_ps_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vsqrtps %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x51,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
@@ -668,12 +668,12 @@ declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1) {
; AVX-LABEL: test_x86_avx_vpermilvar_pd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0d,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0d,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1) ; <<2 x double>> [#uses=1]
@@ -684,12 +684,12 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi
define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) {
; AVX-LABEL: test_x86_avx_vpermilvar_pd_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0d,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0d,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1) ; <<4 x double>> [#uses=1]
@@ -699,13 +699,13 @@ declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) no
define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) {
; AVX-LABEL: test_x86_avx_vpermilvar_pd_256_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd $9, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09]
; AVX-NEXT: # ymm0 = ymm0[1,0,2,3]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd_256_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilpd $9, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09]
; AVX512VL-NEXT: # ymm0 = ymm0[1,0,2,3]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -715,12 +715,12 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) {
define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) {
; AVX-LABEL: test_x86_avx_vpermilvar_ps:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1) ; <<4 x float>> [#uses=1]
@@ -728,24 +728,24 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) {
}
define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>* %a1) {
; X86-AVX-LABEL: test_x86_avx_vpermilvar_ps_load:
-; X86-AVX: # BB#0:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vpermilps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0x00]
; X86-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_load:
-; X86-AVX512VL: # BB#0:
+; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vpermilps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0x00]
; X86-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx_vpermilvar_ps_load:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0x07]
; X64-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_load:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0x07]
; X64-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%a2 = load <4 x i32>, <4 x i32>* %a1
@@ -757,12 +757,12 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind
define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) {
; AVX-LABEL: test_x86_avx_vpermilvar_ps_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0c,0xc1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0c,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
@@ -773,7 +773,7 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun
define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vtestc_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
@@ -786,7 +786,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vtestc_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
@@ -800,7 +800,7 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vtestc_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
@@ -813,7 +813,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vtestc_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0]
@@ -827,7 +827,7 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn
define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vtestnzc_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
@@ -840,7 +840,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vtestnzc_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
@@ -854,7 +854,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind r
define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vtestnzc_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
@@ -867,7 +867,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon
define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vtestnzc_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0]
@@ -881,7 +881,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind rea
define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vtestz_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
@@ -894,7 +894,7 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vtestz_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
@@ -908,7 +908,7 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vtestz_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
@@ -921,7 +921,7 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vtestz_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
@@ -935,7 +935,7 @@ declare i32 @llvm.x86.avx.vtestz.ps.256(<8 x float>, <8 x float>) nounwind readn
define void @test_x86_avx_vzeroall() {
; CHECK-LABEL: test_x86_avx_vzeroall:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vzeroall # encoding: [0xc5,0xfc,0x77]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
call void @llvm.x86.avx.vzeroall()
@@ -946,7 +946,7 @@ declare void @llvm.x86.avx.vzeroall() nounwind
define void @test_x86_avx_vzeroupper() {
; CHECK-LABEL: test_x86_avx_vzeroupper:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
call void @llvm.x86.avx.vzeroupper()
@@ -956,7 +956,7 @@ declare void @llvm.x86.avx.vzeroupper() nounwind
define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; X86-AVX-LABEL: movnt_dq:
-; X86-AVX: # BB#0:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
; X86-AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
@@ -965,7 +965,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; X86-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: movnt_dq:
-; X86-AVX512VL: # BB#0:
+; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
; X86-AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
@@ -974,7 +974,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; X86-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-AVX-LABEL: movnt_dq:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
; X64-AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
; X64-AVX-NEXT: vmovntdq %ymm0, (%rdi) # encoding: [0xc5,0xfd,0xe7,0x07]
@@ -982,7 +982,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; X64-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: movnt_dq:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
; X64-AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
; X64-AVX512VL-NEXT: vmovntdq %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x07]
@@ -997,27 +997,27 @@ declare void @llvm.x86.avx.movnt.dq.256(i8*, <4 x i64>) nounwind
define void @movnt_ps(i8* %p, <8 x float> %a) nounwind {
; X86-AVX-LABEL: movnt_ps:
-; X86-AVX: # BB#0:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vmovntps %ymm0, (%eax) # encoding: [0xc5,0xfc,0x2b,0x00]
; X86-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X86-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: movnt_ps:
-; X86-AVX512VL: # BB#0:
+; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vmovntps %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x00]
; X86-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X86-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-AVX-LABEL: movnt_ps:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovntps %ymm0, (%rdi) # encoding: [0xc5,0xfc,0x2b,0x07]
; X64-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: movnt_ps:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovntps %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x07]
; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
@@ -1029,7 +1029,7 @@ declare void @llvm.x86.avx.movnt.ps.256(i8*, <8 x float>) nounwind
define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind {
; add operation forces the execution domain.
; X86-AVX-LABEL: movnt_pd:
-; X86-AVX: # BB#0:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
; X86-AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
@@ -1038,7 +1038,7 @@ define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind {
; X86-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: movnt_pd:
-; X86-AVX512VL: # BB#0:
+; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
; X86-AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
@@ -1047,7 +1047,7 @@ define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind {
; X86-AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-AVX-LABEL: movnt_pd:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
; X64-AVX-NEXT: vmovntpd %ymm0, (%rdi) # encoding: [0xc5,0xfd,0x2b,0x07]
@@ -1055,7 +1055,7 @@ define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind {
; X64-AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: movnt_pd:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; X64-AVX512VL-NEXT: vmovntpd %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x07]
@@ -1071,7 +1071,7 @@ declare void @llvm.x86.avx.movnt.pd.256(i8*, <4 x double>) nounwind
; Check for pclmulqdq
define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_pclmulqdq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x44,0xc1,0x00]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0) ; <<2 x i64>> [#uses=1]
diff --git a/test/CodeGen/X86/avx-intrinsics-x86_64.ll b/test/CodeGen/X86/avx-intrinsics-x86_64.ll
index 11f560a5c44..c7039dca27a 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86_64.ll
@@ -4,7 +4,7 @@
define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
; AVX-LABEL: test_x86_avx_vzeroall:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX-NEXT: vzeroall
@@ -12,7 +12,7 @@ define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
; AVX-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: test_x86_avx_vzeroall:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16
; AVX512VL-NEXT: vzeroall
; AVX512VL-NEXT: vmovapd %ymm16, %ymm0
@@ -25,7 +25,7 @@ declare void @llvm.x86.avx.vzeroall() nounwind
define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) {
; AVX-LABEL: test_x86_avx_vzeroupper:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX-NEXT: vzeroupper
@@ -33,7 +33,7 @@ define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) {
; AVX-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: test_x86_avx_vzeroupper:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: vmovapd %ymm16, %ymm0
diff --git a/test/CodeGen/X86/avx-load-store.ll b/test/CodeGen/X86/avx-load-store.ll
index 402c4ca7f40..5a64db04357 100644
--- a/test/CodeGen/X86/avx-load-store.ll
+++ b/test/CodeGen/X86/avx-load-store.ll
@@ -4,7 +4,7 @@
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind {
; CHECK-LABEL: test_256_load:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %r15
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
@@ -33,7 +33,7 @@ define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>*
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: test_256_load:
-; CHECK_O0: # BB#0: # %entry
+; CHECK_O0: # %bb.0: # %entry
; CHECK_O0-NEXT: subq $152, %rsp
; CHECK_O0-NEXT: vmovapd (%rdi), %ymm0
; CHECK_O0-NEXT: vmovaps (%rsi), %ymm1
@@ -78,12 +78,12 @@ declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)
define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
; CHECK-LABEL: mov00:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: mov00:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK_O0-NEXT: # implicit-def: %ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
@@ -97,12 +97,12 @@ define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
; CHECK-LABEL: mov01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: mov01:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK_O0-NEXT: # implicit-def: %ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
@@ -116,11 +116,11 @@ define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
define void @storev16i16(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 32
@@ -129,12 +129,12 @@ define void @storev16i16(<16 x i16> %a) nounwind {
define void @storev16i16_01(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16_01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT: vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16_01:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 4
@@ -143,11 +143,11 @@ define void @storev16i16_01(<16 x i16> %a) nounwind {
define void @storev32i8(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 32
@@ -156,12 +156,12 @@ define void @storev32i8(<32 x i8> %a) nounwind {
define void @storev32i8_01(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8_01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT: vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8_01:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 4
@@ -172,13 +172,13 @@ define void @storev32i8_01(<32 x i8> %a) nounwind {
; example, after making an integer operation.
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
; CHECK-LABEL: double_save:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT: vmovaps %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: double_save:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
@@ -194,23 +194,23 @@ declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwin
define void @f_f() nounwind {
; CHECK-LABEL: f_f:
-; CHECK: # BB#0: # %allocas
+; CHECK: # %bb.0: # %allocas
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB8_2
-; CHECK-NEXT: # BB#1: # %cif_mask_all
+; CHECK-NEXT: # %bb.1: # %cif_mask_all
; CHECK-NEXT: .LBB8_2: # %cif_mask_mixed
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB8_4
-; CHECK-NEXT: # BB#3: # %cif_mixed_test_all
+; CHECK-NEXT: # %bb.3: # %cif_mixed_test_all
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: vmovd %eax, %xmm0
; CHECK-NEXT: vmaskmovps %ymm0, %ymm0, (%rax)
; CHECK-NEXT: .LBB8_4: # %cif_mixed_test_any_check
;
; CHECK_O0-LABEL: f_f:
-; CHECK_O0: # BB#0: # %allocas
+; CHECK_O0: # %bb.0: # %allocas
; CHECK_O0-NEXT: # implicit-def: %al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_1
@@ -248,7 +248,7 @@ cif_mixed_test_any_check:
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
; CHECK-LABEL: add8i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovups (%rsi), %xmm0
; CHECK-NEXT: vmovups 16(%rsi), %xmm1
; CHECK-NEXT: vmovups %xmm1, 16(%rdi)
@@ -256,7 +256,7 @@ define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add8i32:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
; CHECK_O0-NEXT: # implicit-def: %ymm2
@@ -273,14 +273,14 @@ define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-LABEL: add4i64a64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rsi), %ymm0
; CHECK-NEXT: vmovaps %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add4i64a64:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovaps (%rsi), %ymm0
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rdi)
; CHECK_O0-NEXT: vzeroupper
@@ -293,7 +293,7 @@ define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-LABEL: add4i64a16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rsi), %xmm0
; CHECK-NEXT: vmovaps 16(%rsi), %xmm1
; CHECK-NEXT: vmovaps %xmm1, 16(%rdi)
@@ -301,7 +301,7 @@ define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-NEXT: retq
;
; CHECK_O0-LABEL: add4i64a16:
-; CHECK_O0: # BB#0:
+; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
; CHECK_O0-NEXT: # implicit-def: %ymm2
diff --git a/test/CodeGen/X86/avx-logic.ll b/test/CodeGen/X86/avx-logic.ll
index c2f89593c97..ad7ceda9b1f 100644
--- a/test/CodeGen/X86/avx-logic.ll
+++ b/test/CodeGen/X86/avx-logic.ll
@@ -4,7 +4,7 @@
define <4 x double> @andpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andpd256:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vandpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -21,7 +21,7 @@ entry:
define <4 x double> @andpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: andpd256fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vandpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -37,7 +37,7 @@ entry:
define <8 x float> @andps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andps256:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vandps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -50,7 +50,7 @@ entry:
define <8 x float> @andps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: andps256fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -62,7 +62,7 @@ entry:
define <4 x double> @xorpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: xorpd256:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -79,7 +79,7 @@ entry:
define <4 x double> @xorpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: xorpd256fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -95,7 +95,7 @@ entry:
define <8 x float> @xorps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: xorps256:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -108,7 +108,7 @@ entry:
define <8 x float> @xorps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: xorps256fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -120,7 +120,7 @@ entry:
define <4 x double> @orpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: orpd256:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vorpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -137,7 +137,7 @@ entry:
define <4 x double> @orpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: orpd256fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vorpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -153,7 +153,7 @@ entry:
define <8 x float> @orps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: orps256:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vorps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -166,7 +166,7 @@ entry:
define <8 x float> @orps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: orps256fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -178,7 +178,7 @@ entry:
define <4 x double> @andnotpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andnotpd256:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vandnpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -196,7 +196,7 @@ entry:
define <4 x double> @andnotpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
; CHECK-LABEL: andnotpd256fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vandnpd (%rdi), %ymm0, %ymm0
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -215,7 +215,7 @@ entry:
define <8 x float> @andnotps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andnotps256:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vandnps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -229,7 +229,7 @@ entry:
define <8 x float> @andnotps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
; CHECK-LABEL: andnotps256fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -246,7 +246,7 @@ entry:
define <2 x i64> @vpandn(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandn:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpsubq %xmm1, %xmm0, %xmm1
; CHECK-NEXT: vpandn %xmm0, %xmm1, %xmm0
@@ -261,7 +261,7 @@ entry:
define <2 x i64> @vpand(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpand:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; CHECK-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -275,12 +275,12 @@ entry:
define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind {
; AVX-LABEL: and_xor_splat1_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandnps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: and_xor_splat1_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; AVX512-NEXT: vandnps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -291,12 +291,12 @@ define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind {
define <4 x i64> @and_xor_splat1_v4i64(<4 x i64> %x) nounwind {
; AVX-LABEL: and_xor_splat1_v4i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: and_xor_splat1_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; AVX512-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
diff --git a/test/CodeGen/X86/avx-schedule.ll b/test/CodeGen/X86/avx-schedule.ll
index b9119e3fea1..8c0d2811635 100644
--- a/test/CodeGen/X86/avx-schedule.ll
+++ b/test/CodeGen/X86/avx-schedule.ll
@@ -11,49 +11,49 @@
define <4 x double> @test_addpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_addpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_addpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -65,49 +65,49 @@ define <4 x double> @test_addpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_addps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_addps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_addps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -119,49 +119,49 @@ define <8 x float> @test_addps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define <4 x double> @test_addsubpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_addsubpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_addsubpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsubpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addsubpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addsubpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addsubpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addsubpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addsubpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -174,49 +174,49 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou
define <8 x float> @test_addsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_addsubps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_addsubps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsubps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addsubps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addsubps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addsubps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addsubps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addsubps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -229,56 +229,56 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi
define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_andnotpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_andnotpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andnotpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andnotpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andnotpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_andnotpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andnotpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andnotpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -298,56 +298,56 @@ define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_andnotps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_andnotps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andnotps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andnotps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andnotps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_andnotps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andnotps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andnotps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -367,56 +367,56 @@ define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float>
define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_andpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_andpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_andpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -434,56 +434,56 @@ define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_andps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_andps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_andps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -501,56 +501,56 @@ define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_blendpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
; GENERIC-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_blendpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.33]
; HASWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blendpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.33]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blendpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.33]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_blendpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.33]
; SKX-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blendpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
@@ -564,49 +564,49 @@ define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x doubl
define <8 x float> @test_blendps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_blendps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
; GENERIC-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_blendps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
; SANDY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.33]
; HASWELL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blendps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.33]
; BROADWELL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blendps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.33]
; SKYLAKE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_blendps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.33]
; SKX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blendps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
; BTVER2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -618,49 +618,49 @@ define <8 x float> @test_blendps(<8 x float> %a0, <8 x float> %a1, <8 x float> *
define <4 x double> @test_blendvpd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
; GENERIC-LABEL: test_blendvpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
; GENERIC-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_blendvpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
; SANDY-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendvpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blendvpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BROADWELL-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blendvpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
; SKYLAKE-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_blendvpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
; SKX-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blendvpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
; BTVER2-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendvpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -673,49 +673,49 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
define <8 x float> @test_blendvps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
; GENERIC-LABEL: test_blendvps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
; GENERIC-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_blendvps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
; SANDY-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendvps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blendvps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BROADWELL-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blendvps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
; SKYLAKE-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_blendvps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
; SKX-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blendvps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
; BTVER2-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendvps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -728,42 +728,42 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f
define <8 x float> @test_broadcastf128(<4 x float> *%a0) {
; GENERIC-LABEL: test_broadcastf128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_broadcastf128:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastf128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastf128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastf128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastf128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_broadcastf128:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_broadcastf128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 32
@@ -773,42 +773,42 @@ define <8 x float> @test_broadcastf128(<4 x float> *%a0) {
define <4 x double> @test_broadcastsd_ymm(double *%a0) {
; GENERIC-LABEL: test_broadcastsd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_broadcastsd_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastsd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastsd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastsd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastsd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_broadcastsd_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_broadcastsd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load double, double *%a0, align 8
@@ -819,42 +819,42 @@ define <4 x double> @test_broadcastsd_ymm(double *%a0) {
define <4 x float> @test_broadcastss(float *%a0) {
; GENERIC-LABEL: test_broadcastss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_broadcastss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_broadcastss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_broadcastss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load float, float *%a0, align 4
@@ -865,42 +865,42 @@ define <4 x float> @test_broadcastss(float *%a0) {
define <8 x float> @test_broadcastss_ymm(float *%a0) {
; GENERIC-LABEL: test_broadcastss_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_broadcastss_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastss_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastss_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastss_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastss_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_broadcastss_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_broadcastss_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load float, float *%a0, align 4
@@ -911,42 +911,42 @@ define <8 x float> @test_broadcastss_ymm(float *%a0) {
define <4 x double> @test_cmppd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_cmppd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; GENERIC-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cmppd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SANDY-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmppd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cmppd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cmppd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [4:0.33]
; SKYLAKE-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cmppd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vcmpeqpd (%rdi), %ymm0, %k1 # sched: [10:1.00]
; SKX-NEXT: vpmovm2q %k0, %ymm0
@@ -955,14 +955,14 @@ define <4 x double> @test_cmppd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cmppd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [2:2.00]
; BTVER2-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cmppd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -979,42 +979,42 @@ define <4 x double> @test_cmppd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_cmpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_cmpps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; GENERIC-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cmpps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SANDY-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmpps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cmpps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cmpps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [4:0.33]
; SKYLAKE-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cmpps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vcmpeqps (%rdi), %ymm0, %k1 # sched: [10:1.00]
; SKX-NEXT: vpmovm2d %k0, %ymm0
@@ -1023,14 +1023,14 @@ define <8 x float> @test_cmpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cmpps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [2:2.00]
; BTVER2-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cmpps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -1047,56 +1047,56 @@ define <8 x float> @test_cmpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_cvtdq2pd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [4:1.00]
; GENERIC-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [10:1.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cvtdq2pd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [4:1.00]
; SANDY-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [10:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtdq2pd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [6:1.00]
; HASWELL-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [6:1.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtdq2pd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [6:1.00]
; BROADWELL-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [11:1.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtdq2pd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [7:1.00]
; SKYLAKE-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [13:1.00]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtdq2pd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [13:1.00]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtdq2pd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:2.00]
; BTVER2-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtdq2pd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -1110,14 +1110,14 @@ define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
; GENERIC-LABEL: test_cvtdq2ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [10:1.00]
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cvtdq2ps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmovaps (%rdi), %xmm1 # sched: [6:0.50]
; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm1, %ymm1 # sched: [7:0.50]
@@ -1126,42 +1126,42 @@ define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtdq2ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtdq2ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [9:1.00]
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtdq2ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [11:0.50]
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtdq2ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [11:0.50]
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtdq2ps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:2.00]
; BTVER2-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtdq2ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -1175,56 +1175,56 @@ define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
define <8 x i32> @test_cvtpd2dq(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_cvtpd2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtpd2dq %ymm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: vcvtpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
; GENERIC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cvtpd2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtpd2dq %ymm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvtpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtpd2dq %ymm0, %xmm0 # sched: [6:1.00]
; HASWELL-NEXT: vcvtpd2dqy (%rdi), %xmm1 # sched: [7:1.00]
; HASWELL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtpd2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtpd2dq %ymm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: vcvtpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtpd2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtpd2dq %ymm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: vcvtpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtpd2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtpd2dq %ymm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: vcvtpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
; SKX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtpd2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtpd2dqy (%rdi), %xmm1 # sched: [11:2.00]
; BTVER2-NEXT: vcvtpd2dq %ymm0, %xmm0 # sched: [6:2.00]
; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtpd2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtpd2dqy (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtpd2dq %ymm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
@@ -1239,56 +1239,56 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>) nounwind readnone
define <8 x i32> @test_cvttpd2dq(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_cvttpd2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
; GENERIC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cvttpd2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttpd2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [6:1.00]
; HASWELL-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [7:1.00]
; HASWELL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttpd2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttpd2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttpd2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
; SKX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttpd2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [11:2.00]
; BTVER2-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [6:2.00]
; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttpd2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
@@ -1302,56 +1302,56 @@ define <8 x i32> @test_cvttpd2dq(<4 x double> %a0, <4 x double> *%a1) {
define <8 x float> @test_cvtpd2ps(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_cvtpd2ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
; GENERIC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cvtpd2ps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [6:1.00]
; HASWELL-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [7:1.00]
; HASWELL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtpd2ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtpd2ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtpd2ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
; SKX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtpd2ps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:2.00]
; BTVER2-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [6:2.00]
; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtpd2ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
@@ -1365,56 +1365,56 @@ define <8 x float> @test_cvtpd2ps(<4 x double> %a0, <4 x double> *%a1) {
define <8 x i32> @test_cvtps2dq(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_cvtps2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtps2dq %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vcvtps2dq (%rdi), %ymm1 # sched: [10:1.00]
; GENERIC-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cvtps2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtps2dq %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vcvtps2dq (%rdi), %ymm1 # sched: [10:1.00]
; SANDY-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtps2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtps2dq %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vcvtps2dq (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtps2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtps2dq %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vcvtps2dq (%rdi), %ymm1 # sched: [9:1.00]
; BROADWELL-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtps2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtps2dq %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vcvtps2dq (%rdi), %ymm1 # sched: [11:0.50]
; SKYLAKE-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtps2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtps2dq %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vcvtps2dq (%rdi), %ymm1 # sched: [11:0.50]
; SKX-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtps2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtps2dq (%rdi), %ymm1 # sched: [8:2.00]
; BTVER2-NEXT: vcvtps2dq %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtps2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtps2dq (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtps2dq %ymm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1429,56 +1429,56 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float>) nounwind readnone
define <8 x i32> @test_cvttps2dq(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_cvttps2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [10:1.00]
; GENERIC-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_cvttps2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [10:1.00]
; SANDY-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttps2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttps2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [9:1.00]
; BROADWELL-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttps2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [11:0.50]
; SKYLAKE-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttps2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [11:0.50]
; SKX-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttps2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [8:2.00]
; BTVER2-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttps2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1492,49 +1492,49 @@ define <8 x i32> @test_cvttps2dq(<8 x float> %a0, <8 x float> *%a1) {
define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_divpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [45:2.00]
; GENERIC-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [52:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_divpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [45:2.00]
; SANDY-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [52:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [35:2.00]
; HASWELL-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [35:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_divpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [23:2.00]
; BROADWELL-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [29:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_divpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [14:1.00]
; SKYLAKE-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [21:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_divpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [14:1.00]
; SKX-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [21:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_divpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
; BTVER2-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_divpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:15.00]
; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:22.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1546,49 +1546,49 @@ define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_divps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [29:2.00]
; GENERIC-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [36:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_divps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [29:2.00]
; SANDY-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [36:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [21:2.00]
; HASWELL-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [21:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_divps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [17:2.00]
; BROADWELL-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [23:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_divps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [11:1.00]
; SKYLAKE-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [18:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_divps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [11:1.00]
; SKX-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [18:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_divps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
; BTVER2-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_divps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [12:12.00]
; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [19:19.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1600,49 +1600,49 @@ define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define <8 x float> @test_dpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_dpps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:2.00]
; GENERIC-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_dpps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:2.00]
; SANDY-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_dpps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [14:2.00]
; HASWELL-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [14:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_dpps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [14:2.00]
; BROADWELL-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [20:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_dpps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [13:1.33]
; SKYLAKE-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [20:1.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_dpps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [13:1.33]
; SKX-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [20:1.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_dpps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:6.00]
; BTVER2-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [17:6.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_dpps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1655,55 +1655,55 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi
define <4 x float> @test_extractf128(<8 x float> %a0, <8 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_extractf128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_extractf128:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: vzeroupper
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_extractf128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_extractf128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_extractf128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_extractf128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_extractf128:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_extractf128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.33]
; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [8:0.50]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
@@ -1716,49 +1716,49 @@ define <4 x float> @test_extractf128(<8 x float> %a0, <8 x float> %a1, <4 x floa
define <4 x double> @test_haddpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_haddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; GENERIC-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_haddpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; SANDY-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_haddpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; HASWELL-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [5:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_haddpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; BROADWELL-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_haddpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_haddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
; SKX-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_haddpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_haddpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1771,49 +1771,49 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_haddps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_haddps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; GENERIC-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_haddps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; SANDY-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_haddps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; HASWELL-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [5:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_haddps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; BROADWELL-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_haddps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_haddps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
; SKX-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_haddps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_haddps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1826,49 +1826,49 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_hsubpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_hsubpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; GENERIC-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_hsubpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; SANDY-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_hsubpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; HASWELL-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [5:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_hsubpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; BROADWELL-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_hsubpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_hsubpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
; SKX-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_hsubpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_hsubpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1881,49 +1881,49 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_hsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_hsubps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; GENERIC-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_hsubps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; SANDY-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_hsubps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; HASWELL-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [5:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_hsubps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; BROADWELL-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_hsubps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_hsubps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
; SKX-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_hsubps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_hsubps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1936,56 +1936,56 @@ declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind
define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_insertf128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_insertf128:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00]
; SANDY-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_insertf128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_insertf128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_insertf128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_insertf128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKX-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SKX-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_insertf128:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
; BTVER2-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_insertf128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.67]
; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -2001,42 +2001,42 @@ define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float
define <32 x i8> @test_lddqu(i8* %a0) {
; GENERIC-LABEL: test_lddqu:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vlddqu (%rdi), %ymm0 # sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_lddqu:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vlddqu (%rdi), %ymm0 # sched: [6:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lddqu:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vlddqu (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lddqu:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vlddqu (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lddqu:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vlddqu (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_lddqu:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vlddqu (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lddqu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vlddqu (%rdi), %ymm0 # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lddqu:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vlddqu (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0)
@@ -2046,56 +2046,56 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readonly
define <2 x double> @test_maskmovpd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) {
; GENERIC-LABEL: test_maskmovpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
; GENERIC-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_maskmovpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
; SANDY-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; HASWELL-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maskmovpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [7:2.00]
; BROADWELL-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maskmovpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKYLAKE-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maskmovpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKX-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maskmovpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [6:1.00]
; BTVER2-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [6:2.00]
; BTVER2-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maskmovpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50]
@@ -2109,56 +2109,56 @@ declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind
define <4 x double> @test_maskmovpd_ymm(i8* %a0, <4 x i64> %a1, <4 x double> %a2) {
; GENERIC-LABEL: test_maskmovpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [9:1.00]
; GENERIC-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_maskmovpd_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [9:1.00]
; SANDY-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; HASWELL-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maskmovpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:2.00]
; BROADWELL-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maskmovpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKYLAKE-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maskmovpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKX-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maskmovpd_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [6:2.00]
; BTVER2-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [6:2.00]
; BTVER2-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maskmovpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50]
@@ -2172,56 +2172,56 @@ declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>) nounwi
define <4 x float> @test_maskmovps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) {
; GENERIC-LABEL: test_maskmovps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
; GENERIC-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_maskmovps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
; SANDY-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; HASWELL-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maskmovps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [7:2.00]
; BROADWELL-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maskmovps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKYLAKE-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maskmovps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKX-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maskmovps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [6:1.00]
; BTVER2-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [6:2.00]
; BTVER2-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maskmovps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
@@ -2235,56 +2235,56 @@ declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind
define <8 x float> @test_maskmovps_ymm(i8* %a0, <8 x i32> %a1, <8 x float> %a2) {
; GENERIC-LABEL: test_maskmovps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [9:1.00]
; GENERIC-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_maskmovps_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [9:1.00]
; SANDY-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; HASWELL-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maskmovps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:2.00]
; BROADWELL-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maskmovps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKYLAKE-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maskmovps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKX-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maskmovps_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [6:2.00]
; BTVER2-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [6:2.00]
; BTVER2-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maskmovps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50]
@@ -2298,49 +2298,49 @@ declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwin
define <4 x double> @test_maxpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_maxpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_maxpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maxpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maxpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maxpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maxpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maxpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2353,49 +2353,49 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_maxps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_maxps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_maxps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maxps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maxps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maxps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maxps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maxps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2408,49 +2408,49 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_minpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_minpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_minpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_minpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_minpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_minpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_minpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_minpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2463,49 +2463,49 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_minps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_minps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_minps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_minps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_minps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_minps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_minps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_minps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2518,56 +2518,56 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_movapd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd (%rdi), %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vmovapd %ymm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movapd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovapd (%rdi), %ymm0 # sched: [7:0.50]
; SANDY-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmovapd %ymm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movapd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovapd (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movapd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovapd (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movapd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovapd (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movapd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movapd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovapd (%rdi), %ymm0 # sched: [5:1.00]
; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movapd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:0.50]
@@ -2580,56 +2580,56 @@ define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) {
define <8 x float> @test_movaps(<8 x float> *%a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_movaps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps (%rdi), %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vmovaps %ymm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movaps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovaps (%rdi), %ymm0 # sched: [7:0.50]
; SANDY-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmovaps %ymm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movaps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovaps (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movaps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovaps (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movaps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovaps (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movaps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movaps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps (%rdi), %ymm0 # sched: [5:1.00]
; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movaps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:0.50]
@@ -2642,56 +2642,56 @@ define <8 x float> @test_movaps(<8 x float> *%a0, <8 x float> *%a1) {
define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_movddup:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
; GENERIC-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movddup:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
; SANDY-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movddup:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
; HASWELL-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [1:0.50]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movddup:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
; BROADWELL-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [6:0.50]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movddup:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
; SKYLAKE-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movddup:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
; SKX-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movddup:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00]
; BTVER2-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movddup:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [8:0.50]
; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -2705,48 +2705,48 @@ define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) {
define i32 @test_movmskpd(<4 x double> %a0) {
; GENERIC-LABEL: test_movmskpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovmskpd %ymm0, %eax # sched: [2:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movmskpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovmskpd %ymm0, %eax # sched: [2:1.00]
; SANDY-NEXT: vzeroupper
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movmskpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovmskpd %ymm0, %eax # sched: [3:1.00]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movmskpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovmskpd %ymm0, %eax # sched: [3:1.00]
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movmskpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovmskpd %ymm0, %eax # sched: [2:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movmskpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovmskpd %ymm0, %eax # sched: [2:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movmskpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovmskpd %ymm0, %eax # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movmskpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:1.00]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2757,48 +2757,48 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone
define i32 @test_movmskps(<8 x float> %a0) {
; GENERIC-LABEL: test_movmskps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovmskps %ymm0, %eax # sched: [2:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movmskps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovmskps %ymm0, %eax # sched: [2:1.00]
; SANDY-NEXT: vzeroupper
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movmskps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovmskps %ymm0, %eax # sched: [3:1.00]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movmskps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovmskps %ymm0, %eax # sched: [3:1.00]
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movmskps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovmskps %ymm0, %eax # sched: [2:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movmskps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovmskps %ymm0, %eax # sched: [2:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movmskps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovmskps %ymm0, %eax # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movmskps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:1.00]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2809,7 +2809,7 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
; GENERIC-LABEL: test_movntdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: vmovntdq %ymm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: #NO_APP
@@ -2817,7 +2817,7 @@ define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movntdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: vmovntdq %ymm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: #NO_APP
@@ -2825,7 +2825,7 @@ define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: vmovntdq %ymm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
@@ -2833,7 +2833,7 @@ define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: vmovntdq %ymm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: #NO_APP
@@ -2841,7 +2841,7 @@ define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: vmovntdq %ymm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: #NO_APP
@@ -2849,7 +2849,7 @@ define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: vmovntdq %ymm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: #NO_APP
@@ -2857,14 +2857,14 @@ define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movntdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: vmovntdq %ymm0, (%rdi) # sched: [3:2.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: vmovntdq %ymm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: #NO_APP
@@ -2876,49 +2876,49 @@ define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
define <4 x double> @test_movntpd(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_movntpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vmovntpd %ymm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movntpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmovntpd %ymm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movntpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovntpd %ymm0, (%rdi) # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2929,49 +2929,49 @@ define <4 x double> @test_movntpd(<4 x double> %a0, <4 x double> *%a1) {
define <8 x float> @test_movntps(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_movntps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vmovntps %ymm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movntps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vmovntps %ymm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movntps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovntps %ymm0, (%rdi) # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2982,56 +2982,56 @@ define <8 x float> @test_movntps(<8 x float> %a0, <8 x float> *%a1) {
define <8 x float> @test_movshdup(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_movshdup:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
; GENERIC-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movshdup:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
; SANDY-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movshdup:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
; HASWELL-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [1:0.50]
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movshdup:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
; BROADWELL-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [6:0.50]
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movshdup:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
; SKYLAKE-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movshdup:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
; SKX-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movshdup:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00]
; BTVER2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movshdup:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [8:0.50]
; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3045,56 +3045,56 @@ define <8 x float> @test_movshdup(<8 x float> %a0, <8 x float> *%a1) {
define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_movsldup:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
; GENERIC-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movsldup:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
; SANDY-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movsldup:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
; HASWELL-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [1:0.50]
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movsldup:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
; BROADWELL-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [6:0.50]
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movsldup:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
; SKYLAKE-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movsldup:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
; SKX-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movsldup:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00]
; BTVER2-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movsldup:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [8:0.50]
; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3108,14 +3108,14 @@ define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) {
define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_movupd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovupd (%rdi), %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vmovupd %ymm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movupd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SANDY-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3124,42 +3124,42 @@ define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movupd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovupd (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movupd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovupd (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movupd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovupd (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movupd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovupd (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movupd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovupd (%rdi), %ymm0 # sched: [5:1.00]
; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movupd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:0.50]
@@ -3172,14 +3172,14 @@ define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_movups:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups (%rdi), %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vmovups %ymm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_movups:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SANDY-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3188,42 +3188,42 @@ define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movups:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovups (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movups:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovups (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movups:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovups (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movups:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movups:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovups (%rdi), %ymm0 # sched: [5:1.00]
; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movups:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:0.50]
@@ -3236,49 +3236,49 @@ define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) {
define <4 x double> @test_mulpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_mulpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_mulpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mulpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [3:0.50]
; BROADWELL-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mulpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mulpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mulpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00]
; BTVER2-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mulpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3290,49 +3290,49 @@ define <4 x double> @test_mulpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_mulps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_mulps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mulps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [3:0.50]
; BROADWELL-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mulps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mulps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mulps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mulps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3344,56 +3344,56 @@ define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: orpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: orpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: orpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: orpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: orpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: orpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: orpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: orpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -3411,56 +3411,56 @@ define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2)
define <8 x float> @test_orps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_orps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_orps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_orps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_orps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_orps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_orps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_orps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_orps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -3478,56 +3478,56 @@ define <8 x float> @test_orps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2
define <4 x double> @test_perm2f128(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_perm2f128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_perm2f128:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; SANDY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_perm2f128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; HASWELL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [3:1.00]
; HASWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_perm2f128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; BROADWELL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [9:1.00]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_perm2f128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKYLAKE-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_perm2f128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
; SKX-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_perm2f128:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:0.50]
; BTVER2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_perm2f128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [100:?]
; ZNVER1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [100:?]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -3541,56 +3541,56 @@ define <4 x double> @test_perm2f128(<4 x double> %a0, <4 x double> %a1, <4 x dou
define <2 x double> @test_permilpd(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_permilpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
; GENERIC-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
; GENERIC-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_permilpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
; SANDY-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
; HASWELL-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permilpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
; BROADWELL-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permilpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
; SKYLAKE-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permilpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
; SKX-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_permilpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [6:1.00]
; BTVER2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [8:0.50]
; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -3604,56 +3604,56 @@ define <2 x double> @test_permilpd(<2 x double> %a0, <2 x double> *%a1) {
define <4 x double> @test_permilpd_ymm(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_permilpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
; GENERIC-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_permilpd_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
; SANDY-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
; HASWELL-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permilpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
; BROADWELL-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [7:1.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permilpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
; SKYLAKE-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:1.00]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permilpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
; SKX-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:1.00]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_permilpd_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00]
; BTVER2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:0.50]
; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3667,56 +3667,56 @@ define <4 x double> @test_permilpd_ymm(<4 x double> %a0, <4 x double> *%a1) {
define <4 x float> @test_permilps(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_permilps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
; GENERIC-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
; GENERIC-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_permilps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
; SANDY-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
; HASWELL-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [1:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permilps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
; BROADWELL-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permilps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
; SKYLAKE-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permilps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
; SKX-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_permilps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00]
; BTVER2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
; ZNVER1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -3730,56 +3730,56 @@ define <4 x float> @test_permilps(<4 x float> %a0, <4 x float> *%a1) {
define <8 x float> @test_permilps_ymm(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_permilps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; GENERIC-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:1.00]
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_permilps_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; SANDY-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:1.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; HASWELL-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [1:1.00]
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permilps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; BROADWELL-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [7:1.00]
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permilps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; SKYLAKE-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:1.00]
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permilps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; SKX-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:1.00]
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_permilps_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00]
; BTVER2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:0.50]
; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3793,49 +3793,49 @@ define <8 x float> @test_permilps_ymm(<8 x float> %a0, <8 x float> *%a1) {
define <2 x double> @test_permilvarpd(<2 x double> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_permilvarpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_permilvarpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilvarpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permilvarpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permilvarpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permilvarpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_permilvarpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilvarpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3848,49 +3848,49 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi
define <4 x double> @test_permilvarpd_ymm(<4 x double> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_permilvarpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_permilvarpd_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilvarpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permilvarpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permilvarpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permilvarpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_permilvarpd_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
; BTVER2-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilvarpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3903,49 +3903,49 @@ declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) no
define <4 x float> @test_permilvarps(<4 x float> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_permilvarps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_permilvarps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilvarps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permilvarps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permilvarps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permilvarps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_permilvarps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilvarps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3958,49 +3958,49 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind
define <8 x float> @test_permilvarps_ymm(<8 x float> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_permilvarps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_permilvarps_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permilvarps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permilvarps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permilvarps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permilvarps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_permilvarps_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
; BTVER2-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilvarps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4013,56 +4013,56 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun
define <8 x float> @test_rcpps(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_rcpps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vrcpps (%rdi), %ymm1 # sched: [14:2.00]
; GENERIC-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_rcpps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps (%rdi), %ymm1 # sched: [14:2.00]
; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rcpps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps (%rdi), %ymm1 # sched: [11:2.00]
; HASWELL-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00]
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_rcpps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vrcpps (%rdi), %ymm1 # sched: [17:2.00]
; BROADWELL-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00]
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_rcpps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vrcpps %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vrcpps (%rdi), %ymm1 # sched: [11:1.00]
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_rcpps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vrcpps (%rdi), %ymm1 # sched: [11:1.00]
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_rcpps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:2.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rcpps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4077,56 +4077,56 @@ declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_roundpd(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_roundpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_roundpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [5:1.25]
; HASWELL-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [6:2.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_roundpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [12:2.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_roundpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [8:0.67]
; SKYLAKE-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [15:0.67]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_roundpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrndscalepd $7, %ymm0, %ymm0 # sched: [8:0.67]
; SKX-NEXT: vrndscalepd $7, (%rdi), %ymm1 # sched: [15:0.67]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_roundpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [8:2.00]
; BTVER2-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [11:1.00]
; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4141,56 +4141,56 @@ declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind read
define <8 x float> @test_roundps(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_roundps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [10:1.00]
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_roundps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [10:1.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [5:1.25]
; HASWELL-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [6:2.00]
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_roundps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [12:2.00]
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_roundps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [8:0.67]
; SKYLAKE-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [15:0.67]
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_roundps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrndscaleps $7, %ymm0, %ymm0 # sched: [8:0.67]
; SKX-NEXT: vrndscaleps $7, (%rdi), %ymm1 # sched: [15:0.67]
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_roundps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [8:2.00]
; BTVER2-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [11:1.00]
; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4205,56 +4205,56 @@ declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readno
define <8 x float> @test_rsqrtps(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_rsqrtps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [14:2.00]
; GENERIC-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [7:2.00]
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_rsqrtps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [14:2.00]
; SANDY-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [7:2.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rsqrtps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [11:2.00]
; HASWELL-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [11:2.00]
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_rsqrtps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [17:2.00]
; BROADWELL-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [11:2.00]
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_rsqrtps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [11:1.00]
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_rsqrtps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [11:1.00]
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_rsqrtps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:2.00]
; BTVER2-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rsqrtps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4269,56 +4269,56 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_shufpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_shufpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
; GENERIC-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_shufpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
; SANDY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shufpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
; HASWELL-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shufpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
; BROADWELL-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [7:1.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shufpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_shufpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
; SKX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_shufpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
; BTVER2-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shufpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4332,49 +4332,49 @@ define <4 x double> @test_shufpd(<4 x double> %a0, <4 x double> %a1, <4 x double
define <8 x float> @test_shufps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) nounwind {
; GENERIC-LABEL: test_shufps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
; GENERIC-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_shufps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
; SANDY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shufps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
; HASWELL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shufps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
; BROADWELL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shufps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
; SKYLAKE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_shufps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
; SKX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_shufps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
; BTVER2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shufps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4386,56 +4386,56 @@ define <8 x float> @test_shufps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
define <4 x double> @test_sqrtpd(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_sqrtpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [52:2.00]
; GENERIC-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [45:2.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_sqrtpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [52:2.00]
; SANDY-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [45:2.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [35:2.00]
; HASWELL-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [35:2.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sqrtpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [40:2.00]
; BROADWELL-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [34:2.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sqrtpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [18:1.00]
; SKYLAKE-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [25:1.00]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_sqrtpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [18:1.00]
; SKX-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [25:1.00]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_sqrtpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [59:54.00]
; BTVER2-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [54:54.00]
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [47:47.00]
; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [40:40.00]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4450,56 +4450,56 @@ declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
define <8 x float> @test_sqrtps(<8 x float> %a0, <8 x float> *%a1) {
; GENERIC-LABEL: test_sqrtps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vsqrtps (%rdi), %ymm1 # sched: [36:2.00]
; GENERIC-NEXT: vsqrtps %ymm0, %ymm0 # sched: [29:2.00]
; GENERIC-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_sqrtps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsqrtps (%rdi), %ymm1 # sched: [36:2.00]
; SANDY-NEXT: vsqrtps %ymm0, %ymm0 # sched: [29:2.00]
; SANDY-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsqrtps (%rdi), %ymm1 # sched: [21:2.00]
; HASWELL-NEXT: vsqrtps %ymm0, %ymm0 # sched: [21:2.00]
; HASWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sqrtps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsqrtps (%rdi), %ymm1 # sched: [27:2.00]
; BROADWELL-NEXT: vsqrtps %ymm0, %ymm0 # sched: [21:2.00]
; BROADWELL-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sqrtps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsqrtps %ymm0, %ymm0 # sched: [12:1.00]
; SKYLAKE-NEXT: vsqrtps (%rdi), %ymm1 # sched: [19:1.00]
; SKYLAKE-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_sqrtps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtps %ymm0, %ymm0 # sched: [12:1.00]
; SKX-NEXT: vsqrtps (%rdi), %ymm1 # sched: [19:1.00]
; SKX-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_sqrtps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsqrtps (%rdi), %ymm1 # sched: [47:42.00]
; BTVER2-NEXT: vsqrtps %ymm0, %ymm0 # sched: [42:42.00]
; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [35:35.00]
; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [28:28.00]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4514,49 +4514,49 @@ declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_subpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_subpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_subpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_subpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_subpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_subpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_subpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4568,49 +4568,49 @@ define <4 x double> @test_subpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_subps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_subps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_subps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_subps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_subps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_subps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_subps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4622,7 +4622,7 @@ define <8 x float> @test_subps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_testpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: xorl %eax, %eax # sched: [1:0.33]
; GENERIC-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: setb %al # sched: [1:0.50]
@@ -4631,7 +4631,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_testpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: xorl %eax, %eax # sched: [1:0.33]
; SANDY-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: setb %al # sched: [1:0.50]
@@ -4640,7 +4640,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_testpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: xorl %eax, %eax # sched: [1:0.25]
; HASWELL-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: setb %al # sched: [1:0.50]
@@ -4649,7 +4649,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_testpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: xorl %eax, %eax # sched: [1:0.25]
; BROADWELL-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: setb %al # sched: [1:0.50]
@@ -4658,7 +4658,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_testpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: vtestpd %xmm1, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: setb %al # sched: [1:0.50]
@@ -4667,7 +4667,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_testpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKX-NEXT: vtestpd %xmm1, %xmm0 # sched: [2:1.00]
; SKX-NEXT: setb %al # sched: [1:0.50]
@@ -4676,7 +4676,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_testpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50]
; BTVER2-NEXT: vtestpd %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
@@ -4685,7 +4685,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_testpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
@@ -4702,7 +4702,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_testpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: xorl %eax, %eax # sched: [1:0.33]
; GENERIC-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: setb %al # sched: [1:0.50]
@@ -4712,7 +4712,7 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_testpd_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: xorl %eax, %eax # sched: [1:0.33]
; SANDY-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: setb %al # sched: [1:0.50]
@@ -4722,7 +4722,7 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_testpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: xorl %eax, %eax # sched: [1:0.25]
; HASWELL-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: setb %al # sched: [1:0.50]
@@ -4732,7 +4732,7 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_testpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: xorl %eax, %eax # sched: [1:0.25]
; BROADWELL-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: setb %al # sched: [1:0.50]
@@ -4742,7 +4742,7 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_testpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: vtestpd %ymm1, %ymm0 # sched: [2:1.00]
; SKYLAKE-NEXT: setb %al # sched: [1:0.50]
@@ -4752,7 +4752,7 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_testpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKX-NEXT: vtestpd %ymm1, %ymm0 # sched: [2:1.00]
; SKX-NEXT: setb %al # sched: [1:0.50]
@@ -4762,7 +4762,7 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_testpd_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50]
; BTVER2-NEXT: vtestpd %ymm1, %ymm0 # sched: [4:2.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
@@ -4771,7 +4771,7 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_testpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
@@ -4789,7 +4789,7 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_testps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: xorl %eax, %eax # sched: [1:0.33]
; GENERIC-NEXT: vtestps %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: setb %al # sched: [1:0.50]
@@ -4798,7 +4798,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_testps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: xorl %eax, %eax # sched: [1:0.33]
; SANDY-NEXT: vtestps %xmm1, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: setb %al # sched: [1:0.50]
@@ -4807,7 +4807,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_testps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: xorl %eax, %eax # sched: [1:0.25]
; HASWELL-NEXT: vtestps %xmm1, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: setb %al # sched: [1:0.50]
@@ -4816,7 +4816,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_testps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: xorl %eax, %eax # sched: [1:0.25]
; BROADWELL-NEXT: vtestps %xmm1, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: setb %al # sched: [1:0.50]
@@ -4825,7 +4825,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_testps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: vtestps %xmm1, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: setb %al # sched: [1:0.50]
@@ -4834,7 +4834,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_testps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKX-NEXT: vtestps %xmm1, %xmm0 # sched: [2:1.00]
; SKX-NEXT: setb %al # sched: [1:0.50]
@@ -4843,7 +4843,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_testps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50]
; BTVER2-NEXT: vtestps %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
@@ -4852,7 +4852,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_testps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vtestps %xmm1, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
@@ -4869,7 +4869,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_testps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: xorl %eax, %eax # sched: [1:0.33]
; GENERIC-NEXT: vtestps %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: setb %al # sched: [1:0.50]
@@ -4879,7 +4879,7 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_testps_ymm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: xorl %eax, %eax # sched: [1:0.33]
; SANDY-NEXT: vtestps %ymm1, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: setb %al # sched: [1:0.50]
@@ -4889,7 +4889,7 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_testps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: xorl %eax, %eax # sched: [1:0.25]
; HASWELL-NEXT: vtestps %ymm1, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: setb %al # sched: [1:0.50]
@@ -4899,7 +4899,7 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_testps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: xorl %eax, %eax # sched: [1:0.25]
; BROADWELL-NEXT: vtestps %ymm1, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: setb %al # sched: [1:0.50]
@@ -4909,7 +4909,7 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_testps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: vtestps %ymm1, %ymm0 # sched: [2:1.00]
; SKYLAKE-NEXT: setb %al # sched: [1:0.50]
@@ -4919,7 +4919,7 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_testps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKX-NEXT: vtestps %ymm1, %ymm0 # sched: [2:1.00]
; SKX-NEXT: setb %al # sched: [1:0.50]
@@ -4929,7 +4929,7 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_testps_ymm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50]
; BTVER2-NEXT: vtestps %ymm1, %ymm0 # sched: [4:2.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
@@ -4938,7 +4938,7 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_testps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vtestps %ymm1, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
@@ -4956,56 +4956,56 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn
define <4 x double> @test_unpckhpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_unpckhpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_unpckhpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SANDY-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpckhpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; HASWELL-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_unpckhpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; BROADWELL-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [7:1.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_unpckhpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:1.00]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_unpckhpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:1.00]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_unpckhpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
; BTVER2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpckhpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -5019,49 +5019,49 @@ define <4 x double> @test_unpckhpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
define <8 x float> @test_unpckhps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) nounwind {
; GENERIC-LABEL: test_unpckhps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_unpckhps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SANDY-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpckhps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; HASWELL-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_unpckhps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; BROADWELL-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_unpckhps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SKYLAKE-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_unpckhps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_unpckhps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50]
; BTVER2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpckhps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5073,56 +5073,56 @@ define <8 x float> @test_unpckhps(<8 x float> %a0, <8 x float> %a1, <8 x float>
define <4 x double> @test_unpcklpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_unpcklpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_unpcklpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SANDY-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpcklpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; HASWELL-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_unpcklpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; BROADWELL-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [7:1.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_unpcklpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SKYLAKE-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_unpcklpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_unpcklpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
; BTVER2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpcklpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -5136,49 +5136,49 @@ define <4 x double> @test_unpcklpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
define <8 x float> @test_unpcklps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) nounwind {
; GENERIC-LABEL: test_unpcklps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_unpcklps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SANDY-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpcklps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; HASWELL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_unpcklps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; BROADWELL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_unpcklps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SKYLAKE-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_unpcklps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_unpcklps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50]
; BTVER2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpcklps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5190,56 +5190,56 @@ define <8 x float> @test_unpcklps(<8 x float> %a0, <8 x float> %a1, <8 x float>
define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
; GENERIC-LABEL: test_xorpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_xorpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_xorpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_xorpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_xorpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_xorpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_xorpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_xorpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -5257,56 +5257,56 @@ define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
define <8 x float> @test_xorps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_xorps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_xorps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SANDY-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_xorps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_xorps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_xorps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_xorps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_xorps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BTVER2-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_xorps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -5324,42 +5324,42 @@ define <8 x float> @test_xorps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
define void @test_zeroall() {
; GENERIC-LABEL: test_zeroall:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vzeroall
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_zeroall:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vzeroall
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_zeroall:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vzeroall # sched: [16:16.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_zeroall:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vzeroall # sched: [16:16.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_zeroall:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vzeroall # sched: [16:4.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_zeroall:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vzeroall # sched: [16:4.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_zeroall:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vzeroall # sched: [90:?]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_zeroall:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vzeroall # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.avx.vzeroall()
@@ -5369,42 +5369,42 @@ declare void @llvm.x86.avx.vzeroall() nounwind
define void @test_zeroupper() {
; GENERIC-LABEL: test_zeroupper:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SANDY-LABEL: test_zeroupper:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vzeroupper
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_zeroupper:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_zeroupper:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_zeroupper:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_zeroupper:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_zeroupper:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vzeroupper # sched: [46:?]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_zeroupper:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.avx.vzeroupper()
diff --git a/test/CodeGen/X86/avx-select.ll b/test/CodeGen/X86/avx-select.ll
index f5ab0cab17f..ea64973eb9d 100644
--- a/test/CodeGen/X86/avx-select.ll
+++ b/test/CodeGen/X86/avx-select.ll
@@ -4,22 +4,22 @@
define <8 x i32> @select00(i32 %a, <8 x i32> %b) nounwind {
; X86-LABEL: select00:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: cmpl $255, {{[0-9]+}}(%esp)
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: je .LBB0_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: vmovaps %ymm0, %ymm1
; X86-NEXT: .LBB0_2:
; X86-NEXT: vxorps %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: select00:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl $255, %edi
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: je .LBB0_2
-; X64-NEXT: # BB#1:
+; X64-NEXT: # %bb.1:
; X64-NEXT: vmovaps %ymm0, %ymm1
; X64-NEXT: .LBB0_2:
; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0
@@ -32,22 +32,22 @@ define <8 x i32> @select00(i32 %a, <8 x i32> %b) nounwind {
define <4 x i64> @select01(i32 %a, <4 x i64> %b) nounwind {
; X86-LABEL: select01:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: cmpl $255, {{[0-9]+}}(%esp)
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: je .LBB1_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: vmovaps %ymm0, %ymm1
; X86-NEXT: .LBB1_2:
; X86-NEXT: vxorps %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: select01:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl $255, %edi
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: je .LBB1_2
-; X64-NEXT: # BB#1:
+; X64-NEXT: # %bb.1:
; X64-NEXT: vmovaps %ymm0, %ymm1
; X64-NEXT: .LBB1_2:
; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/avx-shift.ll b/test/CodeGen/X86/avx-shift.ll
index b65412d99eb..ee6ca2224ea 100644
--- a/test/CodeGen/X86/avx-shift.ll
+++ b/test/CodeGen/X86/avx-shift.ll
@@ -4,7 +4,7 @@
;;; Shift left
define <8 x i32> @vshift00(<8 x i32> %a) {
; CHECK-LABEL: vshift00:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $2, %xmm0, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpslld $2, %xmm0, %xmm0
@@ -16,7 +16,7 @@ define <8 x i32> @vshift00(<8 x i32> %a) {
define <16 x i16> @vshift01(<16 x i16> %a) {
; CHECK-LABEL: vshift01:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllw $2, %xmm0, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpsllw $2, %xmm0, %xmm0
@@ -28,7 +28,7 @@ define <16 x i16> @vshift01(<16 x i16> %a) {
define <4 x i64> @vshift02(<4 x i64> %a) {
; CHECK-LABEL: vshift02:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $2, %xmm0, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpsllq $2, %xmm0, %xmm0
@@ -41,7 +41,7 @@ define <4 x i64> @vshift02(<4 x i64> %a) {
;;; Logical Shift right
define <8 x i32> @vshift03(<8 x i32> %a) {
; CHECK-LABEL: vshift03:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrld $2, %xmm0, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpsrld $2, %xmm0, %xmm0
@@ -53,7 +53,7 @@ define <8 x i32> @vshift03(<8 x i32> %a) {
define <16 x i16> @vshift04(<16 x i16> %a) {
; CHECK-LABEL: vshift04:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlw $2, %xmm0, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpsrlw $2, %xmm0, %xmm0
@@ -65,7 +65,7 @@ define <16 x i16> @vshift04(<16 x i16> %a) {
define <4 x i64> @vshift05(<4 x i64> %a) {
; CHECK-LABEL: vshift05:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlq $2, %xmm0, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpsrlq $2, %xmm0, %xmm0
@@ -78,7 +78,7 @@ define <4 x i64> @vshift05(<4 x i64> %a) {
;;; Arithmetic Shift right
define <8 x i32> @vshift06(<8 x i32> %a) {
; CHECK-LABEL: vshift06:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrad $2, %xmm0, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpsrad $2, %xmm0, %xmm0
@@ -90,7 +90,7 @@ define <8 x i32> @vshift06(<8 x i32> %a) {
define <16 x i16> @vshift07(<16 x i16> %a) {
; CHECK-LABEL: vshift07:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsraw $2, %xmm0, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpsraw $2, %xmm0, %xmm0
@@ -102,7 +102,7 @@ define <16 x i16> @vshift07(<16 x i16> %a) {
define <32 x i8> @vshift09(<32 x i8> %a) {
; CHECK-LABEL: vshift09:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpsrlw $2, %xmm1, %xmm1
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
@@ -122,7 +122,7 @@ define <32 x i8> @vshift09(<32 x i8> %a) {
define <32 x i8> @vshift10(<32 x i8> %a) {
; CHECK-LABEL: vshift10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
@@ -135,7 +135,7 @@ define <32 x i8> @vshift10(<32 x i8> %a) {
define <32 x i8> @vshift11(<32 x i8> %a) {
; CHECK-LABEL: vshift11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpsrlw $2, %xmm1, %xmm1
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
@@ -150,7 +150,7 @@ define <32 x i8> @vshift11(<32 x i8> %a) {
define <32 x i8> @vshift12(<32 x i8> %a) {
; CHECK-LABEL: vshift12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpsllw $2, %xmm1, %xmm1
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
@@ -166,7 +166,7 @@ define <32 x i8> @vshift12(<32 x i8> %a) {
;;; Support variable shifts
define <8 x i32> @vshift08(<8 x i32> %a) {
; CHECK-LABEL: vshift08:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $23, %xmm0, %xmm1
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [1065353216,1065353216,1065353216,1065353216]
; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -184,7 +184,7 @@ define <8 x i32> @vshift08(<8 x i32> %a) {
; PR15141
define <4 x i32> @vshift13(<4 x i32> %in) {
; CHECK-LABEL: vshift13:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%T = shl <4 x i32> %in, <i32 0, i32 1, i32 2, i32 4>
@@ -194,7 +194,7 @@ define <4 x i32> @vshift13(<4 x i32> %in) {
;;; Uses shifts for sign extension
define <16 x i16> @sext_v16i16(<16 x i16> %a) {
; CHECK-LABEL: sext_v16i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllw $8, %xmm0, %xmm1
; CHECK-NEXT: vpsraw $8, %xmm1, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -209,7 +209,7 @@ define <16 x i16> @sext_v16i16(<16 x i16> %a) {
define <8 x i32> @sext_v8i32(<8 x i32> %a) {
; CHECK-LABEL: sext_v8i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $16, %xmm0, %xmm1
; CHECK-NEXT: vpsrad $16, %xmm1, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
diff --git a/test/CodeGen/X86/avx-shuffle-x86_32.ll b/test/CodeGen/X86/avx-shuffle-x86_32.ll
index c95ac5694b1..8c01c440d75 100644
--- a/test/CodeGen/X86/avx-shuffle-x86_32.ll
+++ b/test/CodeGen/X86/avx-shuffle-x86_32.ll
@@ -4,7 +4,7 @@
; Avoid unnecessary vinsertf128
define <4 x i64> @test1(<4 x i64> %a) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; CHECK-NEXT: retl
@@ -14,7 +14,7 @@ define <4 x i64> @test1(<4 x i64> %a) nounwind {
define <8 x i16> @test2(<4 x i16>* %v) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/avx-splat.ll b/test/CodeGen/X86/avx-splat.ll
index c6d0c5337c5..da547397c6c 100644
--- a/test/CodeGen/X86/avx-splat.ll
+++ b/test/CodeGen/X86/avx-splat.ll
@@ -3,7 +3,7 @@
define <32 x i8> @funcA(<32 x i8> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcA:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -14,7 +14,7 @@ entry:
define <16 x i16> @funcB(<16 x i16> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcB:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -26,7 +26,7 @@ entry:
define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcC:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovq %rdi, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -41,7 +41,7 @@ entry:
define <4 x double> @funcD(double %q) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcD:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -58,12 +58,12 @@ entry:
;
define <8 x float> @funcE() nounwind {
; CHECK-LABEL: funcE:
-; CHECK: # BB#0: # %for_exit499
+; CHECK: # %bb.0: # %for_exit499
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: # implicit-def: %ymm0
; CHECK-NEXT: jne .LBB4_2
-; CHECK-NEXT: # BB#1: # %load.i1247
+; CHECK-NEXT: # %bb.1: # %load.i1247
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-32, %rsp
@@ -99,7 +99,7 @@ __load_and_broadcast_32.exit1249: ; preds = %load.i1247, %for_ex
define <8 x float> @funcF(i32 %val) nounwind {
; CHECK-LABEL: funcF:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovd %edi, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,0]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -112,7 +112,7 @@ define <8 x float> @funcF(i32 %val) nounwind {
define <8 x float> @funcG(<8 x float> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcG:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -123,7 +123,7 @@ entry:
define <8 x float> @funcH(<8 x float> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcH:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,1,1,5,5,5,5]
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; CHECK-NEXT: retq
@@ -134,7 +134,7 @@ entry:
define <2 x double> @splat_load_2f64_11(<2 x double>* %ptr) {
; CHECK-LABEL: splat_load_2f64_11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; CHECK-NEXT: retq
%x = load <2 x double>, <2 x double>* %ptr
@@ -144,7 +144,7 @@ define <2 x double> @splat_load_2f64_11(<2 x double>* %ptr) {
define <4 x double> @splat_load_4f64_2222(<4 x double>* %ptr) {
; CHECK-LABEL: splat_load_4f64_2222:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd 16(%rdi), %ymm0
; CHECK-NEXT: retq
%x = load <4 x double>, <4 x double>* %ptr
@@ -154,7 +154,7 @@ define <4 x double> @splat_load_4f64_2222(<4 x double>* %ptr) {
define <4 x float> @splat_load_4f32_0000(<4 x float>* %ptr) {
; CHECK-LABEL: splat_load_4f32_0000:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
; CHECK-NEXT: retq
%x = load <4 x float>, <4 x float>* %ptr
@@ -164,7 +164,7 @@ define <4 x float> @splat_load_4f32_0000(<4 x float>* %ptr) {
define <8 x float> @splat_load_8f32_77777777(<8 x float>* %ptr) {
; CHECK-LABEL: splat_load_8f32_77777777:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss 28(%rdi), %ymm0
; CHECK-NEXT: retq
%x = load <8 x float>, <8 x float>* %ptr
diff --git a/test/CodeGen/X86/avx-trunc.ll b/test/CodeGen/X86/avx-trunc.ll
index 1a9acd00777..f1af384ce47 100644
--- a/test/CodeGen/X86/avx-trunc.ll
+++ b/test/CodeGen/X86/avx-trunc.ll
@@ -3,7 +3,7 @@
define <4 x i32> @trunc_64_32(<4 x i64> %A) nounwind uwtable readnone ssp{
; CHECK-LABEL: trunc_64_32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; CHECK-NEXT: vzeroupper
@@ -14,7 +14,7 @@ define <4 x i32> @trunc_64_32(<4 x i64> %A) nounwind uwtable readnone ssp{
define <8 x i16> @trunc_32_16(<8 x i32> %A) nounwind uwtable readnone ssp{
; CHECK-LABEL: trunc_32_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; CHECK-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -28,7 +28,7 @@ define <8 x i16> @trunc_32_16(<8 x i32> %A) nounwind uwtable readnone ssp{
define <16 x i8> @trunc_16_8(<16 x i16> %A) nounwind uwtable readnone ssp{
; CHECK-LABEL: trunc_16_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpshufb %xmm2, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/avx-unpack.ll b/test/CodeGen/X86/avx-unpack.ll
index 801a0ceac0d..8c709809756 100644
--- a/test/CodeGen/X86/avx-unpack.ll
+++ b/test/CodeGen/X86/avx-unpack.ll
@@ -3,7 +3,7 @@
define <8 x float> @unpackhips(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhips:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -12,7 +12,7 @@ define <8 x float> @unpackhips(<8 x float> %src1, <8 x float> %src2) nounwind uw
define <4 x double> @unpackhipd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhipd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -21,7 +21,7 @@ define <4 x double> @unpackhipd(<4 x double> %src1, <4 x double> %src2) nounwind
define <8 x float> @unpacklops(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklops:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -30,7 +30,7 @@ define <8 x float> @unpacklops(<8 x float> %src1, <8 x float> %src2) nounwind uw
define <4 x double> @unpacklopd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklopd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -39,7 +39,7 @@ define <4 x double> @unpacklopd(<4 x double> %src1, <4 x double> %src2) nounwind
define <8 x float> @unpacklops_not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklops_not:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -50,7 +50,7 @@ define <8 x float> @unpacklops_not(<8 x float> %src1, <8 x float> %src2) nounwin
define <4 x double> @unpacklopd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklopd_not:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
; CHECK-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -61,7 +61,7 @@ define <4 x double> @unpacklopd_not(<4 x double> %src1, <4 x double> %src2) noun
define <8 x float> @unpackhips_not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhips_not:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,2,u,3,u,4,u,5]
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,u,3,u,4,u,5,u]
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
@@ -72,7 +72,7 @@ define <8 x float> @unpackhips_not(<8 x float> %src1, <8 x float> %src2) nounwin
define <4 x double> @unpackhipd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhipd_not:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
@@ -89,7 +89,7 @@ define <4 x double> @unpackhipd_not(<4 x double> %src1, <4 x double> %src2) noun
define <8 x i32> @unpackhips1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhips1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -98,7 +98,7 @@ define <8 x i32> @unpackhips1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable
define <8 x i32> @unpackhips2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhips2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
; CHECK-NEXT: retq
@@ -110,7 +110,7 @@ define <8 x i32> @unpackhips2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtab
define <4 x i64> @unpackhipd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhipd1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -119,7 +119,7 @@ define <4 x i64> @unpackhipd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable
define <4 x i64> @unpackhipd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhipd2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
; CHECK-NEXT: retq
@@ -131,7 +131,7 @@ define <4 x i64> @unpackhipd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtab
define <8 x i32> @unpacklops1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklops1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; CHECK-NEXT: retq
%shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -140,7 +140,7 @@ define <8 x i32> @unpacklops1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable
define <8 x i32> @unpacklops2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklops2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; CHECK-NEXT: retq
@@ -152,7 +152,7 @@ define <8 x i32> @unpacklops2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtab
define <4 x i64> @unpacklopd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklopd1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: retq
%shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -161,7 +161,7 @@ define <4 x i64> @unpacklopd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable
define <4 x i64> @unpacklopd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklopd2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; CHECK-NEXT: retq
@@ -173,7 +173,7 @@ define <4 x i64> @unpacklopd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtab
define <16 x i16> @unpackhwd_undef(<16 x i16> %src1) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhwd_undef:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
@@ -185,7 +185,7 @@ define <16 x i16> @unpackhwd_undef(<16 x i16> %src1) nounwind uwtable readnone s
define <16 x i16> @unpacklwd_undef(<16 x i16> %src1) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklwd_undef:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3]
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -197,7 +197,7 @@ define <16 x i16> @unpacklwd_undef(<16 x i16> %src1) nounwind uwtable readnone s
define <32 x i8> @unpackhbw_undef(<32 x i8> %src1, <32 x i8> %src2) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpackhbw_undef:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
@@ -209,7 +209,7 @@ define <32 x i8> @unpackhbw_undef(<32 x i8> %src1, <32 x i8> %src2) nounwind uwt
define <32 x i8> @unpacklbw_undef(<32 x i8> %src1) nounwind uwtable readnone ssp {
; CHECK-LABEL: unpacklbw_undef:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
diff --git a/test/CodeGen/X86/avx-vbroadcast.ll b/test/CodeGen/X86/avx-vbroadcast.ll
index 5dcc5a70529..5a9f23007d8 100644
--- a/test/CodeGen/X86/avx-vbroadcast.ll
+++ b/test/CodeGen/X86/avx-vbroadcast.ll
@@ -4,7 +4,7 @@
define <4 x i64> @A(i64* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: A:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %ecx
; X32-NEXT: movl 4(%eax), %eax
@@ -16,7 +16,7 @@ define <4 x i64> @A(i64* %ptr) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: A:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -30,7 +30,7 @@ entry:
define <4 x i64> @A2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
; X32-LABEL: A2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl (%ecx), %edx
@@ -45,7 +45,7 @@ define <4 x i64> @A2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: A2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: vmovq %rax, %xmm0
; X64-NEXT: movq %rax, (%rsi)
@@ -64,13 +64,13 @@ entry:
define <8 x i32> @B(i32* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: B:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: B:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -84,13 +84,13 @@ entry:
define <8 x i32> @B2(i32* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: B2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: B2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -108,7 +108,7 @@ entry:
define <8 x i32> @B3(i32* %ptr, i32* %ptr2) nounwind uwtable readnone ssp {
; X32-LABEL: B3:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl (%ecx), %ecx
@@ -119,7 +119,7 @@ define <8 x i32> @B3(i32* %ptr, i32* %ptr2) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: B3:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: vmovd %eax, %xmm0
; X64-NEXT: movl %eax, (%rsi)
@@ -142,13 +142,13 @@ entry:
define <4 x double> @C(double* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: C:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: C:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -162,7 +162,7 @@ entry:
define <4 x double> @C2(double* %ptr, double* %ptr2) nounwind uwtable readnone ssp {
; X32-LABEL: C2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -172,7 +172,7 @@ define <4 x double> @C2(double* %ptr, double* %ptr2) nounwind uwtable readnone s
; X32-NEXT: retl
;
; X64-LABEL: C2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vmovsd %xmm0, (%rsi)
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
@@ -190,13 +190,13 @@ entry:
define <8 x float> @D(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: D:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: D:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -210,13 +210,13 @@ entry:
define <8 x float> @D2(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: D2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: D2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -234,7 +234,7 @@ entry:
define <8 x float> @D3(float* %ptr, float* %ptr2) nounwind uwtable readnone ssp {
; X32-LABEL: D3:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -244,7 +244,7 @@ define <8 x float> @D3(float* %ptr, float* %ptr2) nounwind uwtable readnone ssp
; X32-NEXT: retl
;
; X64-LABEL: D3:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vmovss %xmm0, (%rsi)
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
@@ -268,13 +268,13 @@ entry:
define <4 x float> @e(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: e:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: e:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -288,7 +288,7 @@ entry:
define <4 x float> @e2(float* %ptr, float* %ptr2) nounwind uwtable readnone ssp {
; X32-LABEL: e2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -297,7 +297,7 @@ define <4 x float> @e2(float* %ptr, float* %ptr2) nounwind uwtable readnone ssp
; X32-NEXT: retl
;
; X64-LABEL: e2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vmovss %xmm0, (%rsi)
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
@@ -315,12 +315,12 @@ entry:
; Don't broadcast constants on pre-AVX2 hardware.
define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: _e2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [-7.812500e-03,-7.812500e-03,-7.812500e-03,-7.812500e-03]
; X32-NEXT: retl
;
; X64-LABEL: _e2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [-7.812500e-03,-7.812500e-03,-7.812500e-03,-7.812500e-03]
; X64-NEXT: retq
entry:
@@ -334,13 +334,13 @@ entry:
define <4 x i32> @F(i32* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: F:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: F:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -354,7 +354,7 @@ entry:
define <4 x i32> @F2(i32* %ptr, i32* %ptr2) nounwind uwtable readnone ssp {
; X32-LABEL: F2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl (%ecx), %ecx
@@ -364,7 +364,7 @@ define <4 x i32> @F2(i32* %ptr, i32* %ptr2) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: F2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl %eax, (%rsi)
; X64-NEXT: vmovd %eax, %xmm0
@@ -384,13 +384,13 @@ entry:
define <4 x i32> @load_splat_4i32_4i32_1111(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i32_4i32_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,1,1,1]
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i32_4i32_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,1,1,1]
; X64-NEXT: retq
entry:
@@ -401,13 +401,13 @@ entry:
define <8 x i32> @load_splat_8i32_4i32_33333333(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8i32_4i32_33333333:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 12(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8i32_4i32_33333333:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -418,13 +418,13 @@ entry:
define <8 x i32> @load_splat_8i32_8i32_55555555(<8 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8i32_8i32_55555555:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 20(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8i32_8i32_55555555:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -435,13 +435,13 @@ entry:
define <4 x float> @load_splat_4f32_4f32_1111(<4 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f32_4f32_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 4(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f32_4f32_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 4(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -452,13 +452,13 @@ entry:
define <8 x float> @load_splat_8f32_4f32_33333333(<4 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8f32_4f32_33333333:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 12(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8f32_4f32_33333333:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -469,13 +469,13 @@ entry:
define <8 x float> @load_splat_8f32_8f32_55555555(<8 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8f32_8f32_55555555:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 20(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8f32_8f32_55555555:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -486,13 +486,13 @@ entry:
define <2 x i64> @load_splat_2i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_2i64_2i64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; X32-NEXT: retl
;
; X64-LABEL: load_splat_2i64_2i64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; X64-NEXT: retq
entry:
@@ -503,13 +503,13 @@ entry:
define <4 x i64> @load_splat_4i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i64_2i64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i64_2i64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -520,13 +520,13 @@ entry:
define <4 x i64> @load_splat_4i64_4i64_2222(<4 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i64_4i64_2222:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i64_4i64_2222:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -537,13 +537,13 @@ entry:
define <2 x double> @load_splat_2f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_2f64_2f64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: load_splat_2f64_2f64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
@@ -554,13 +554,13 @@ entry:
define <4 x double> @load_splat_4f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f64_2f64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f64_2f64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -571,13 +571,13 @@ entry:
define <4 x double> @load_splat_4f64_4f64_2222(<4 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f64_4f64_2222:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f64_4f64_2222:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -590,7 +590,7 @@ entry:
define <2 x i64> @G(i64* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: G:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %ecx
; X32-NEXT: movl 4(%eax), %eax
@@ -601,7 +601,7 @@ define <2 x i64> @G(i64* %ptr) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: G:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; X64-NEXT: retq
@@ -614,7 +614,7 @@ entry:
define <2 x i64> @G2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
; X32-LABEL: G2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl (%ecx), %edx
@@ -628,7 +628,7 @@ define <2 x i64> @G2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: G2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movq %rax, (%rsi)
; X64-NEXT: vmovq %rax, %xmm0
@@ -644,12 +644,12 @@ entry:
define <4 x i32> @H(<4 x i32> %a) {
; X32-LABEL: H:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: H:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X64-NEXT: retq
entry:
@@ -659,13 +659,13 @@ entry:
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: I:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: I:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
@@ -677,7 +677,7 @@ entry:
define <2 x double> @I2(double* %ptr, double* %ptr2) nounwind uwtable readnone ssp {
; X32-LABEL: I2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -686,7 +686,7 @@ define <2 x double> @I2(double* %ptr, double* %ptr2) nounwind uwtable readnone s
; X32-NEXT: retl
;
; X64-LABEL: I2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vmovsd %xmm0, (%rsi)
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
@@ -701,7 +701,7 @@ entry:
define <4 x float> @_RR(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
; X32-LABEL: _RR:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcastss (%ecx), %xmm0
@@ -710,7 +710,7 @@ define <4 x float> @_RR(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: _RR:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %xmm0
; X64-NEXT: movl (%rsi), %eax
; X64-NEXT: movl %eax, (%rax)
@@ -729,13 +729,13 @@ entry:
define <4 x float> @_RR2(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
; X32-LABEL: _RR2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _RR2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -751,13 +751,13 @@ entry:
define <8 x float> @splat_concat1(float* %p) {
; X32-LABEL: splat_concat1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
%1 = load float, float* %p, align 4
@@ -771,13 +771,13 @@ define <8 x float> @splat_concat1(float* %p) {
define <8 x float> @splat_concat2(float* %p) {
; X32-LABEL: splat_concat2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
%1 = load float, float* %p, align 4
@@ -795,13 +795,13 @@ define <8 x float> @splat_concat2(float* %p) {
define <4 x double> @splat_concat3(double* %p) {
; X32-LABEL: splat_concat3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
%1 = load double, double* %p, align 8
@@ -813,13 +813,13 @@ define <4 x double> @splat_concat3(double* %p) {
define <4 x double> @splat_concat4(double* %p) {
; X32-LABEL: splat_concat4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
%1 = load double, double* %p, align 8
@@ -834,13 +834,13 @@ define <4 x double> @splat_concat4(double* %p) {
; PR34041
define <4 x double> @broadcast_shuffle_1000(double* %p) {
; X32-LABEL: broadcast_shuffle_1000:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: broadcast_shuffle_1000:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
%1 = load double, double* %p
@@ -851,13 +851,13 @@ define <4 x double> @broadcast_shuffle_1000(double* %p) {
define <4 x double> @broadcast_shuffle1032(double* %p) {
; X32-LABEL: broadcast_shuffle1032:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: broadcast_shuffle1032:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
%1 = load double, double* %p
@@ -872,7 +872,7 @@ define <4 x double> @broadcast_shuffle1032(double* %p) {
;
define float @broadcast_lifetime() nounwind {
; X32-LABEL: broadcast_lifetime:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: subl $56, %esp
; X32-NEXT: leal {{[0-9]+}}(%esp), %esi
@@ -894,7 +894,7 @@ define float @broadcast_lifetime() nounwind {
; X32-NEXT: retl
;
; X64-LABEL: broadcast_lifetime:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: subq $40, %rsp
; X64-NEXT: movq %rsp, %rdi
; X64-NEXT: callq _gfunc
diff --git a/test/CodeGen/X86/avx-vbroadcastf128.ll b/test/CodeGen/X86/avx-vbroadcastf128.ll
index c4512d863f8..7fdbf31a993 100644
--- a/test/CodeGen/X86/avx-vbroadcastf128.ll
+++ b/test/CodeGen/X86/avx-vbroadcastf128.ll
@@ -4,13 +4,13 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-LABEL: test_broadcast_2f64_4f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -20,13 +20,13 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-LABEL: test_broadcast_2i64_4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -36,13 +36,13 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32-LABEL: test_broadcast_4f32_8f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -52,13 +52,13 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-LABEL: test_broadcast_4i32_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -68,13 +68,13 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
@@ -84,13 +84,13 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
@@ -100,7 +100,7 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x double>* %p1) {
; X32-LABEL: test_broadcast_2f64_4f64_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm1
@@ -109,7 +109,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vmovaps %xmm1, (%rsi)
@@ -122,7 +122,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub
define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1) {
; X32-LABEL: test_broadcast_2i64_4i64_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm1
@@ -131,7 +131,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vmovaps %xmm1, (%rsi)
@@ -144,7 +144,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1)
define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>* %p1) {
; X32-LABEL: test_broadcast_4f32_8f32_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm1
@@ -153,7 +153,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vmovaps %xmm1, (%rsi)
@@ -166,7 +166,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>
define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1) {
; X32-LABEL: test_broadcast_4i32_8i32_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm1
@@ -175,7 +175,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vmovaps %xmm1, (%rsi)
@@ -188,7 +188,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1)
define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p1) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm1
@@ -197,7 +197,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vmovaps %xmm1, (%rsi)
@@ -210,7 +210,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p
define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm1
@@ -219,7 +219,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vmovaps %xmm1, (%rsi)
@@ -232,7 +232,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1)
define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
; X32-LABEL: PR29088:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -242,7 +242,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
; X32-NEXT: retl
;
; X64-LABEL: PR29088:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vmovaps %ymm1, (%rsi)
diff --git a/test/CodeGen/X86/avx-vextractf128.ll b/test/CodeGen/X86/avx-vextractf128.ll
index d7a6d61ba0a..924e510338e 100644
--- a/test/CodeGen/X86/avx-vextractf128.ll
+++ b/test/CodeGen/X86/avx-vextractf128.ll
@@ -3,7 +3,7 @@
define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: A:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -13,7 +13,7 @@ entry:
define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: B:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -23,7 +23,7 @@ entry:
define void @t0(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; CHECK-LABEL: t0:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -36,7 +36,7 @@ entry:
define void @t2(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; CHECK-LABEL: t2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -49,7 +49,7 @@ entry:
define void @t4(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; CHECK-LABEL: t4:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -63,7 +63,7 @@ entry:
define void @t5(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; CHECK-LABEL: t5:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vmovaps %xmm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -76,7 +76,7 @@ entry:
define void @t6(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; CHECK-LABEL: t6:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vmovaps %xmm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -89,7 +89,7 @@ entry:
define void @t7(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; CHECK-LABEL: t7:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vmovaps %xmm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -103,7 +103,7 @@ entry:
define void @t8(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; CHECK-LABEL: t8:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vmovups %xmm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -118,7 +118,7 @@ entry:
; PR15462
define void @t9(i64* %p) {
; CHECK-LABEL: t9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovups %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx-vinsertf128.ll b/test/CodeGen/X86/avx-vinsertf128.ll
index 2028e9c50aa..6ae43d93e64 100644
--- a/test/CodeGen/X86/avx-vinsertf128.ll
+++ b/test/CodeGen/X86/avx-vinsertf128.ll
@@ -3,7 +3,7 @@
define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: A:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
@@ -12,7 +12,7 @@ define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: B:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 1>
@@ -24,7 +24,7 @@ declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind
define void @insert_crash() nounwind {
; CHECK-LABEL: insert_crash:
-; CHECK: # BB#0: # %allocas
+; CHECK: # %bb.0: # %allocas
; CHECK-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vminpd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vminsd %xmm0, %xmm0, %xmm0
@@ -49,7 +49,7 @@ allocas:
define <4 x i32> @DAGCombineA(<4 x i32> %v1) nounwind readonly {
; CHECK-LABEL: DAGCombineA:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%t1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%t2 = shufflevector <8 x i32> %t1, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -58,7 +58,7 @@ define <4 x i32> @DAGCombineA(<4 x i32> %v1) nounwind readonly {
define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly {
; CHECK-LABEL: DAGCombineB:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm2
@@ -74,7 +74,7 @@ define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly {
define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: insert_undef_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -85,7 +85,7 @@ declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>
define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: insert_undef_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -96,7 +96,7 @@ declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i
define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: insert_undef_si:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -108,7 +108,7 @@ declare <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32>, <4 x i32>, i8) nou
; rdar://10643481
define <8 x float> @vinsertf128_combine(float* nocapture %f) nounwind uwtable readonly ssp {
; CHECK-LABEL: vinsertf128_combine:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
%add.ptr = getelementptr inbounds float, float* %f, i64 4
@@ -121,7 +121,7 @@ define <8 x float> @vinsertf128_combine(float* nocapture %f) nounwind uwtable re
; rdar://11076953
define <8 x float> @vinsertf128_ucombine(float* nocapture %f) nounwind uwtable readonly ssp {
; CHECK-LABEL: vinsertf128_ucombine:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
%add.ptr = getelementptr inbounds float, float* %f, i64 4
diff --git a/test/CodeGen/X86/avx-vpclmulqdq.ll b/test/CodeGen/X86/avx-vpclmulqdq.ll
index abf18fe5b0a..2447ff0907c 100644
--- a/test/CodeGen/X86/avx-vpclmulqdq.ll
+++ b/test/CodeGen/X86/avx-vpclmulqdq.ll
@@ -3,7 +3,7 @@
; Check for vpclmulqdq
define <4 x i64> @test_x86_pclmulqdq(<4 x i64> %a0, <4 x i64> %a1) {
; AVX_VPCLMULQDQ-LABEL: test_x86_pclmulqdq:
-; AVX_VPCLMULQDQ: # BB#0:
+; AVX_VPCLMULQDQ: # %bb.0:
; AVX_VPCLMULQDQ-NEXT: vpclmulqdq $17, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x44,0xc1,0x11]
; AVX_VPCLMULQDQ-NEXT: retl # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a0, <4 x i64> %a1, i8 17)
diff --git a/test/CodeGen/X86/avx-vperm2x128.ll b/test/CodeGen/X86/avx-vperm2x128.ll
index 8d05e11337b..42bc6b39369 100644
--- a/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/test/CodeGen/X86/avx-vperm2x128.ll
@@ -4,12 +4,12 @@
define <8 x float> @shuffle_v8f32_45670123(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_45670123:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_45670123:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT: retq
entry:
@@ -19,12 +19,12 @@ entry:
define <8 x float> @shuffle_v8f32_45670123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_45670123_mem:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_45670123_mem:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = mem[2,3,0,1]
; AVX2-NEXT: retq
entry:
@@ -36,7 +36,7 @@ entry:
define <8 x float> @shuffle_v8f32_0123cdef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_0123cdef:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
entry:
@@ -46,12 +46,12 @@ entry:
define <8 x float> @shuffle_v8f32_01230123(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_01230123:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_01230123:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2-NEXT: retq
entry:
@@ -61,12 +61,12 @@ entry:
define <8 x float> @shuffle_v8f32_01230123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_01230123_mem:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_01230123_mem:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX2-NEXT: retq
entry:
@@ -78,12 +78,12 @@ entry:
define <8 x float> @shuffle_v8f32_45674567(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_45674567:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_45674567:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: retq
entry:
@@ -93,12 +93,12 @@ entry:
define <8 x float> @shuffle_v8f32_45674567_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_45674567_mem:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_45674567_mem:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = mem[2,3,2,3]
; AVX2-NEXT: retq
entry:
@@ -110,12 +110,12 @@ entry:
define <32 x i8> @shuffle_v32i8_2323(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v32i8_2323:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_2323:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: retq
entry:
@@ -125,7 +125,7 @@ entry:
define <32 x i8> @shuffle_v32i8_2323_domain(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v32i8_2323_domain:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
@@ -134,7 +134,7 @@ define <32 x i8> @shuffle_v32i8_2323_domain(<32 x i8> %a, <32 x i8> %b) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_2323_domain:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
@@ -148,7 +148,7 @@ entry:
define <4 x i64> @shuffle_v4i64_6701(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v4i64_6701:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; ALL-NEXT: retq
entry:
@@ -158,14 +158,14 @@ entry:
define <4 x i64> @shuffle_v4i64_6701_domain(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v4i64_6701_domain:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_6701_domain:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
@@ -179,7 +179,7 @@ entry:
define <8 x i32> @shuffle_v8i32_u5u7cdef(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8i32_u5u7cdef:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0
@@ -188,7 +188,7 @@ define <8 x i32> @shuffle_v8i32_u5u7cdef(<8 x i32> %a, <8 x i32> %b) nounwind uw
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_u5u7cdef:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
@@ -202,14 +202,14 @@ entry:
define <16 x i16> @shuffle_v16i16_4501(<16 x i16> %a, <16 x i16> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v16i16_4501:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_4501:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpsubw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
@@ -223,7 +223,7 @@ entry:
define <16 x i16> @shuffle_v16i16_4501_mem(<16 x i16>* %a, <16 x i16>* %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v16i16_4501_mem:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
@@ -231,7 +231,7 @@ define <16 x i16> @shuffle_v16i16_4501_mem(<16 x i16>* %a, <16 x i16>* %b) nounw
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_4501_mem:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
@@ -249,7 +249,7 @@ entry:
define <8 x float> @shuffle_v8f32_uu67u9ub(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67u9ub:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT: retq
entry:
@@ -259,12 +259,12 @@ entry:
define <8 x float> @shuffle_v8f32_uu67uu67(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_uu67uu67:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_uu67uu67:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-NEXT: retq
entry:
@@ -274,7 +274,7 @@ entry:
define <8 x float> @shuffle_v8f32_uu67uuab(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67uuab:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT: retq
entry:
@@ -284,7 +284,7 @@ entry:
define <8 x float> @shuffle_v8f32_uu67uuef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67uuef:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: retq
entry:
@@ -294,12 +294,12 @@ entry:
define <8 x float> @shuffle_v8f32_uu674567(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_uu674567:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_uu674567:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-NEXT: retq
entry:
@@ -309,7 +309,7 @@ entry:
define <8 x float> @shuffle_v8f32_uu6789ab(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu6789ab:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT: retq
entry:
@@ -319,12 +319,12 @@ entry:
define <8 x float> @shuffle_v8f32_4567uu67(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_4567uu67:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_4567uu67:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: retq
entry:
@@ -334,7 +334,7 @@ entry:
define <8 x float> @shuffle_v8f32_4567uuef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_4567uuef:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: retq
entry:
@@ -346,7 +346,7 @@ entry:
define <8 x float> @shuffle_v8f32_uu67ucuf(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67ucuf:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,4,4,6,7]
; ALL-NEXT: retq
@@ -362,7 +362,7 @@ entry:
define <4 x double> @shuffle_v4f64_zz01(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz01:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
%s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
@@ -370,7 +370,7 @@ define <4 x double> @shuffle_v4f64_zz01(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_zz01_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz01_optsize:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
%s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
@@ -379,7 +379,7 @@ define <4 x double> @shuffle_v4f64_zz01_optsize(<4 x double> %a) optsize {
define <4 x double> @shuffle_v4f64_zz23(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz23:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: retq
@@ -388,7 +388,7 @@ define <4 x double> @shuffle_v4f64_zz23(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_zz23_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz23_optsize:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: retq
@@ -398,7 +398,7 @@ define <4 x double> @shuffle_v4f64_zz23_optsize(<4 x double> %a) optsize {
define <4 x double> @shuffle_v4f64_zz45(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz45:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
%s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -406,7 +406,7 @@ define <4 x double> @shuffle_v4f64_zz45(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_zz45_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz45_optsize:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
%s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -415,7 +415,7 @@ define <4 x double> @shuffle_v4f64_zz45_optsize(<4 x double> %a) optsize {
define <4 x double> @shuffle_v4f64_zz67(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz67:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: retq
@@ -424,7 +424,7 @@ define <4 x double> @shuffle_v4f64_zz67(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_zz67_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz67_optsize:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: retq
@@ -434,7 +434,7 @@ define <4 x double> @shuffle_v4f64_zz67_optsize(<4 x double> %a) optsize {
define <4 x double> @shuffle_v4f64_01zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_01zz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
@@ -443,7 +443,7 @@ define <4 x double> @shuffle_v4f64_01zz(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_01zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_01zz_optsize:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
@@ -453,7 +453,7 @@ define <4 x double> @shuffle_v4f64_01zz_optsize(<4 x double> %a) optsize {
define <4 x double> @shuffle_v4f64_23zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_23zz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT: retq
%s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -461,7 +461,7 @@ define <4 x double> @shuffle_v4f64_23zz(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_23zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_23zz_optsize:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT: retq
%s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -470,7 +470,7 @@ define <4 x double> @shuffle_v4f64_23zz_optsize(<4 x double> %a) optsize {
define <4 x double> @shuffle_v4f64_45zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_45zz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
@@ -479,7 +479,7 @@ define <4 x double> @shuffle_v4f64_45zz(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_45zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_45zz_optsize:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
@@ -489,7 +489,7 @@ define <4 x double> @shuffle_v4f64_45zz_optsize(<4 x double> %a) optsize {
define <4 x double> @shuffle_v4f64_67zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_67zz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT: retq
%s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
@@ -497,7 +497,7 @@ define <4 x double> @shuffle_v4f64_67zz(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_67zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_67zz_optsize:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT: retq
%s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
@@ -508,14 +508,14 @@ define <4 x double> @shuffle_v4f64_67zz_optsize(<4 x double> %a) optsize {
define <4 x i64> @shuffle_v4i64_67zz(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_67zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_67zz:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
@@ -528,13 +528,13 @@ define <4 x i64> @shuffle_v4i64_67zz(<4 x i64> %a, <4 x i64> %b) {
define <4 x double> @ld0_hi0_lo1_4f64(<4 x double> * %pa, <4 x double> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_4f64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld0_hi0_lo1_4f64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -548,13 +548,13 @@ entry:
define <4 x double> @ld1_hi0_hi1_4f64(<4 x double> %a, <4 x double> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_4f64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld1_hi0_hi1_4f64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -568,13 +568,13 @@ entry:
define <8 x float> @ld0_hi0_lo1_8f32(<8 x float> * %pa, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_8f32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld0_hi0_lo1_8f32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -588,13 +588,13 @@ entry:
define <8 x float> @ld1_hi0_hi1_8f32(<8 x float> %a, <8 x float> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_8f32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld1_hi0_hi1_8f32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -608,7 +608,7 @@ entry:
define <4 x i64> @ld0_hi0_lo1_4i64(<4 x i64> * %pa, <4 x i64> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -617,7 +617,7 @@ define <4 x i64> @ld0_hi0_lo1_4i64(<4 x i64> * %pa, <4 x i64> %b) nounwind uwtab
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld0_hi0_lo1_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -630,7 +630,7 @@ entry:
define <4 x i64> @ld1_hi0_hi1_4i64(<4 x i64> %a, <4 x i64> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -639,7 +639,7 @@ define <4 x i64> @ld1_hi0_hi1_4i64(<4 x i64> %a, <4 x i64> * %pb) nounwind uwtab
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld1_hi0_hi1_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -652,7 +652,7 @@ entry:
define <8 x i32> @ld0_hi0_lo1_8i32(<8 x i32> * %pa, <8 x i32> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
@@ -662,7 +662,7 @@ define <8 x i32> @ld0_hi0_lo1_8i32(<8 x i32> * %pa, <8 x i32> %b) nounwind uwtab
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld0_hi0_lo1_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -675,7 +675,7 @@ entry:
define <8 x i32> @ld1_hi0_hi1_8i32(<8 x i32> %a, <8 x i32> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
@@ -685,7 +685,7 @@ define <8 x i32> @ld1_hi0_hi1_8i32(<8 x i32> %a, <8 x i32> * %pb) nounwind uwtab
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld1_hi0_hi1_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/avx-vzeroupper.ll b/test/CodeGen/X86/avx-vzeroupper.ll
index 244fb962af5..4b077221c14 100644
--- a/test/CodeGen/X86/avx-vzeroupper.ll
+++ b/test/CodeGen/X86/avx-vzeroupper.ll
@@ -15,7 +15,7 @@ declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind
define <4 x float> @test00(<4 x float> %a, <4 x float> %b) nounwind {
; ALL-LABEL: test00:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rax
; ALL-NEXT: vaddps %xmm1, %xmm0, %xmm0
; ALL-NEXT: callq do_sse
@@ -30,7 +30,7 @@ define <4 x float> @test00(<4 x float> %a, <4 x float> %b) nounwind {
define <8 x float> @test01(<4 x float> %a, <4 x float> %b, <8 x float> %c) nounwind {
; VZ-LABEL: test01:
-; VZ: # BB#0:
+; VZ: # %bb.0:
; VZ-NEXT: subq $56, %rsp
; VZ-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
; VZ-NEXT: vmovaps {{.*}}(%rip), %xmm0
@@ -44,7 +44,7 @@ define <8 x float> @test01(<4 x float> %a, <4 x float> %b, <8 x float> %c) nounw
; VZ-NEXT: retq
;
; FAST-YMM-ZMM-LABEL: test01:
-; FAST-YMM-ZMM: # BB#0:
+; FAST-YMM-ZMM: # %bb.0:
; FAST-YMM-ZMM-NEXT: subq $56, %rsp
; FAST-YMM-ZMM-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
; FAST-YMM-ZMM-NEXT: vmovaps {{.*}}(%rip), %xmm0
@@ -57,7 +57,7 @@ define <8 x float> @test01(<4 x float> %a, <4 x float> %b, <8 x float> %c) nounw
; FAST-YMM-ZMM-NEXT: retq
;
; BTVER2-LABEL: test01:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: subq $56, %rsp
; BTVER2-NEXT: vmovaps {{.*}}(%rip), %xmm0
; BTVER2-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
@@ -80,14 +80,14 @@ define <8 x float> @test01(<4 x float> %a, <4 x float> %b, <8 x float> %c) nounw
define <4 x float> @test02(<8 x float> %a, <8 x float> %b) nounwind {
; VZ-LABEL: test02:
-; VZ: # BB#0:
+; VZ: # %bb.0:
; VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
; VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; VZ-NEXT: vzeroupper
; VZ-NEXT: jmp do_sse # TAILCALL
;
; NO-VZ-LABEL: test02:
-; NO-VZ: # BB#0:
+; NO-VZ: # %bb.0:
; NO-VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
; NO-VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; NO-VZ-NEXT: jmp do_sse # TAILCALL
@@ -102,7 +102,7 @@ define <4 x float> @test02(<8 x float> %a, <8 x float> %b) nounwind {
define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
; VZ-LABEL: test03:
-; VZ: # BB#0: # %entry
+; VZ: # %bb.0: # %entry
; VZ-NEXT: pushq %rbx
; VZ-NEXT: subq $16, %rsp
; VZ-NEXT: vaddps %xmm1, %xmm0, %xmm0
@@ -113,7 +113,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
; VZ-NEXT: callq foo
; VZ-NEXT: testl %eax, %eax
; VZ-NEXT: jne .LBB3_1
-; VZ-NEXT: # BB#2: # %for.body.preheader
+; VZ-NEXT: # %bb.2: # %for.body.preheader
; VZ-NEXT: movl $4, %ebx
; VZ-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; VZ-NEXT: .p2align 4, 0x90
@@ -127,13 +127,13 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
; VZ-NEXT: callq do_sse
; VZ-NEXT: decl %ebx
; VZ-NEXT: jne .LBB3_3
-; VZ-NEXT: # BB#4: # %for.end
+; VZ-NEXT: # %bb.4: # %for.end
; VZ-NEXT: addq $16, %rsp
; VZ-NEXT: popq %rbx
; VZ-NEXT: retq
;
; FAST-YMM-ZMM-LABEL: test03:
-; FAST-YMM-ZMM: # BB#0: # %entry
+; FAST-YMM-ZMM: # %bb.0: # %entry
; FAST-YMM-ZMM-NEXT: pushq %rbx
; FAST-YMM-ZMM-NEXT: subq $16, %rsp
; FAST-YMM-ZMM-NEXT: vaddps %xmm1, %xmm0, %xmm0
@@ -144,7 +144,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
; FAST-YMM-ZMM-NEXT: callq foo
; FAST-YMM-ZMM-NEXT: testl %eax, %eax
; FAST-YMM-ZMM-NEXT: jne .LBB3_1
-; FAST-YMM-ZMM-NEXT: # BB#2: # %for.body.preheader
+; FAST-YMM-ZMM-NEXT: # %bb.2: # %for.body.preheader
; FAST-YMM-ZMM-NEXT: movl $4, %ebx
; FAST-YMM-ZMM-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; FAST-YMM-ZMM-NEXT: .p2align 4, 0x90
@@ -157,13 +157,13 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
; FAST-YMM-ZMM-NEXT: callq do_sse
; FAST-YMM-ZMM-NEXT: decl %ebx
; FAST-YMM-ZMM-NEXT: jne .LBB3_3
-; FAST-YMM-ZMM-NEXT: # BB#4: # %for.end
+; FAST-YMM-ZMM-NEXT: # %bb.4: # %for.end
; FAST-YMM-ZMM-NEXT: addq $16, %rsp
; FAST-YMM-ZMM-NEXT: popq %rbx
; FAST-YMM-ZMM-NEXT: retq
;
; BTVER2-LABEL: test03:
-; BTVER2: # BB#0: # %entry
+; BTVER2: # %bb.0: # %entry
; BTVER2-NEXT: pushq %rbx
; BTVER2-NEXT: subq $16, %rsp
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0
@@ -174,7 +174,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
; BTVER2-NEXT: callq foo
; BTVER2-NEXT: testl %eax, %eax
; BTVER2-NEXT: jne .LBB3_1
-; BTVER2-NEXT: # BB#2: # %for.body.preheader
+; BTVER2-NEXT: # %bb.2: # %for.body.preheader
; BTVER2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; BTVER2-NEXT: movl $4, %ebx
; BTVER2-NEXT: .p2align 4, 0x90
@@ -187,7 +187,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
; BTVER2-NEXT: callq do_sse
; BTVER2-NEXT: decl %ebx
; BTVER2-NEXT: jne .LBB3_3
-; BTVER2-NEXT: # BB#4: # %for.end
+; BTVER2-NEXT: # %bb.4: # %for.end
; BTVER2-NEXT: addq $16, %rsp
; BTVER2-NEXT: popq %rbx
; BTVER2-NEXT: retq
@@ -220,7 +220,7 @@ for.end:
define <4 x float> @test04(<4 x float> %a, <4 x float> %b) nounwind {
; VZ-LABEL: test04:
-; VZ: # BB#0:
+; VZ: # %bb.0:
; VZ-NEXT: pushq %rax
; VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -231,7 +231,7 @@ define <4 x float> @test04(<4 x float> %a, <4 x float> %b) nounwind {
; VZ-NEXT: retq
;
; NO-VZ-LABEL: test04:
-; NO-VZ: # BB#0:
+; NO-VZ: # %bb.0:
; NO-VZ-NEXT: pushq %rax
; NO-VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; NO-VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/avx1-logical-load-folding.ll b/test/CodeGen/X86/avx1-logical-load-folding.ll
index 7073eb22476..88521dedc1c 100644
--- a/test/CodeGen/X86/avx1-logical-load-folding.ll
+++ b/test/CodeGen/X86/avx1-logical-load-folding.ll
@@ -5,7 +5,7 @@
; Function Attrs: nounwind ssp uwtable
define void @test1(float* %A, float* %C) #0 {
; X86-LABEL: test1:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovaps (%ecx), %ymm0
@@ -15,7 +15,7 @@ define void @test1(float* %A, float* %C) #0 {
; X86-NEXT: retl
;
; X64-LABEL: test1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovss %xmm0, (%rsi)
@@ -34,7 +34,7 @@ define void @test1(float* %A, float* %C) #0 {
; Function Attrs: nounwind ssp uwtable
define void @test2(float* %A, float* %C) #0 {
; X86-LABEL: test2:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovaps (%ecx), %ymm0
@@ -44,7 +44,7 @@ define void @test2(float* %A, float* %C) #0 {
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovss %xmm0, (%rsi)
@@ -63,7 +63,7 @@ define void @test2(float* %A, float* %C) #0 {
; Function Attrs: nounwind ssp uwtable
define void @test3(float* %A, float* %C) #0 {
; X86-LABEL: test3:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovaps (%ecx), %ymm0
@@ -73,7 +73,7 @@ define void @test3(float* %A, float* %C) #0 {
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovss %xmm0, (%rsi)
@@ -91,7 +91,7 @@ define void @test3(float* %A, float* %C) #0 {
define void @test4(float* %A, float* %C) #0 {
; X86-LABEL: test4:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovaps (%ecx), %ymm0
@@ -101,7 +101,7 @@ define void @test4(float* %A, float* %C) #0 {
; X86-NEXT: retl
;
; X64-LABEL: test4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovss %xmm0, (%rsi)
diff --git a/test/CodeGen/X86/avx2-arith.ll b/test/CodeGen/X86/avx2-arith.ll
index 9e471171bae..aa625be4ded 100644
--- a/test/CodeGen/X86/avx2-arith.ll
+++ b/test/CodeGen/X86/avx2-arith.ll
@@ -4,12 +4,12 @@
define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: test_vpaddq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <4 x i64> %i, %j
@@ -18,12 +18,12 @@ define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpaddd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <8 x i32> %i, %j
@@ -32,12 +32,12 @@ define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpaddw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <16 x i16> %i, %j
@@ -46,12 +46,12 @@ define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: test_vpaddb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <32 x i8> %i, %j
@@ -60,12 +60,12 @@ define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: test_vpsubq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <4 x i64> %i, %j
@@ -74,12 +74,12 @@ define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpsubd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <8 x i32> %i, %j
@@ -88,12 +88,12 @@ define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpsubw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <16 x i16> %i, %j
@@ -102,12 +102,12 @@ define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: test_vpsubb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <32 x i8> %i, %j
@@ -116,12 +116,12 @@ define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpmulld:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpmulld:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = mul <8 x i32> %i, %j
@@ -130,12 +130,12 @@ define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpmullw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpmullw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = mul <16 x i16> %i, %j
@@ -144,7 +144,7 @@ define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-LABEL: mul_v16i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxbw %xmm1, %ymm1
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -157,7 +157,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v16i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbw %xmm1, %ymm1
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -174,7 +174,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: mul_v32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextracti128 $1, %ymm1, %xmm2
; X32-NEXT: vpmovsxbw %xmm2, %ymm2
; X32-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -196,7 +196,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextracti128 $1, %ymm1, %xmm2
; X64-NEXT: vpmovsxbw %xmm2, %ymm2
; X64-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -222,7 +222,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: mul_v4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlq $32, %ymm0, %ymm2
; X32-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; X32-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -234,7 +234,7 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $32, %ymm0, %ymm2
; X64-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; X64-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -250,12 +250,12 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @mul_const1(<8 x i32> %x) {
; X32-LABEL: mul_const1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -264,12 +264,12 @@ define <8 x i32> @mul_const1(<8 x i32> %x) {
define <4 x i64> @mul_const2(<4 x i64> %x) {
; X32-LABEL: mul_const2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <4 x i64> %x, <i64 4, i64 4, i64 4, i64 4>
@@ -278,12 +278,12 @@ define <4 x i64> @mul_const2(<4 x i64> %x) {
define <16 x i16> @mul_const3(<16 x i16> %x) {
; X32-LABEL: mul_const3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <16 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -292,13 +292,13 @@ define <16 x i16> @mul_const3(<16 x i16> %x) {
define <4 x i64> @mul_const4(<4 x i64> %x) {
; X32-LABEL: mul_const4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -308,12 +308,12 @@ define <4 x i64> @mul_const4(<4 x i64> %x) {
define <8 x i32> @mul_const5(<8 x i32> %x) {
; X32-LABEL: mul_const5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -322,12 +322,12 @@ define <8 x i32> @mul_const5(<8 x i32> %x) {
define <8 x i32> @mul_const6(<8 x i32> %x) {
; X32-LABEL: mul_const6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmulld {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 2, i32 0, i32 2, i32 0, i32 0>
@@ -336,13 +336,13 @@ define <8 x i32> @mul_const6(<8 x i32> %x) {
define <8 x i64> @mul_const7(<8 x i64> %x) {
; X32-LABEL: mul_const7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X32-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; X32-NEXT: retl
;
; X64-LABEL: mul_const7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X64-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; X64-NEXT: retq
@@ -352,12 +352,12 @@ define <8 x i64> @mul_const7(<8 x i64> %x) {
define <8 x i16> @mul_const8(<8 x i16> %x) {
; X32-LABEL: mul_const8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $3, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $3, %xmm0, %xmm0
; X64-NEXT: retq
%y = mul <8 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -366,14 +366,14 @@ define <8 x i16> @mul_const8(<8 x i16> %x) {
define <8 x i32> @mul_const9(<8 x i32> %x) {
; X32-LABEL: mul_const9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $2, %eax
; X32-NEXT: vmovd %eax, %xmm1
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $2, %eax
; X64-NEXT: vmovd %eax, %xmm1
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
@@ -385,13 +385,13 @@ define <8 x i32> @mul_const9(<8 x i32> %x) {
; %x * 0x01010101
define <4 x i32> @mul_const10(<4 x i32> %x) {
; X32-LABEL: mul_const10:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const10:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
@@ -402,13 +402,13 @@ define <4 x i32> @mul_const10(<4 x i32> %x) {
; %x * 0x80808080
define <4 x i32> @mul_const11(<4 x i32> %x) {
; X32-LABEL: mul_const11:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const11:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/avx2-cmp.ll b/test/CodeGen/X86/avx2-cmp.ll
index 2369aa5ac9a..2d710e40daf 100644
--- a/test/CodeGen/X86/avx2-cmp.ll
+++ b/test/CodeGen/X86/avx2-cmp.ll
@@ -4,12 +4,12 @@
define <8 x i32> @v8i32_cmpgt(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: v8i32_cmpgt:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v8i32_cmpgt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <8 x i32> %i, %j
@@ -19,12 +19,12 @@ define <8 x i32> @v8i32_cmpgt(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <4 x i64> @v4i64_cmpgt(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: v4i64_cmpgt:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v4i64_cmpgt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <4 x i64> %i, %j
@@ -34,12 +34,12 @@ define <4 x i64> @v4i64_cmpgt(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <16 x i16> @v16i16_cmpgt(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: v16i16_cmpgt:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v16i16_cmpgt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <16 x i16> %i, %j
@@ -49,12 +49,12 @@ define <16 x i16> @v16i16_cmpgt(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <32 x i8> @v32i8_cmpgt(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: v32i8_cmpgt:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v32i8_cmpgt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <32 x i8> %i, %j
@@ -64,12 +64,12 @@ define <32 x i8> @v32i8_cmpgt(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: int256_cmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: int256_cmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <8 x i32> %i, %j
@@ -79,12 +79,12 @@ define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: v4i64_cmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v4i64_cmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <4 x i64> %i, %j
@@ -94,12 +94,12 @@ define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: v16i16_cmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v16i16_cmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <16 x i16> %i, %j
@@ -109,12 +109,12 @@ define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: v32i8_cmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v32i8_cmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <32 x i8> %i, %j
diff --git a/test/CodeGen/X86/avx2-conversions.ll b/test/CodeGen/X86/avx2-conversions.ll
index 3a6c65b0bce..cafb3e69558 100644
--- a/test/CodeGen/X86/avx2-conversions.ll
+++ b/test/CodeGen/X86/avx2-conversions.ll
@@ -4,7 +4,7 @@
define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X32-LABEL: trunc4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -12,7 +12,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: trunc4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -24,7 +24,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32-LABEL: trunc8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -32,7 +32,7 @@ define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: trunc8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -44,12 +44,12 @@ define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
define <4 x i64> @sext4(<4 x i32> %A) nounwind {
; X32-LABEL: sext4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxdq %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: retq
%B = sext <4 x i32> %A to <4 x i64>
@@ -58,12 +58,12 @@ define <4 x i64> @sext4(<4 x i32> %A) nounwind {
define <8 x i32> @sext8(<8 x i16> %A) nounwind {
; X32-LABEL: sext8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: retq
%B = sext <8 x i16> %A to <8 x i32>
@@ -72,12 +72,12 @@ define <8 x i32> @sext8(<8 x i16> %A) nounwind {
define <4 x i64> @zext4(<4 x i32> %A) nounwind {
; X32-LABEL: zext4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: zext4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: retq
%B = zext <4 x i32> %A to <4 x i64>
@@ -86,12 +86,12 @@ define <4 x i64> @zext4(<4 x i32> %A) nounwind {
define <8 x i32> @zext8(<8 x i16> %A) nounwind {
; X32-LABEL: zext8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
%B = zext <8 x i16> %A to <8 x i32>
@@ -100,13 +100,13 @@ define <8 x i32> @zext8(<8 x i16> %A) nounwind {
define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
; X32-LABEL: zext_8i8_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_8i8_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
@@ -116,12 +116,12 @@ define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: zext_16i8_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_16i8_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT: retq
%t = zext <16 x i8> %z to <16 x i16>
@@ -130,12 +130,12 @@ define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: sext_16i8_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_16i8_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: retq
%t = sext <16 x i8> %z to <16 x i16>
@@ -144,7 +144,7 @@ define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X32-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -154,7 +154,7 @@ define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-NEXT: retl
;
; X64-LABEL: trunc_16i16_16i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X64-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -168,13 +168,13 @@ define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
; X32-LABEL: load_sext_test1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxdq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxdq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i32>, <4 x i32>* %ptr
@@ -184,13 +184,13 @@ define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
; X32-LABEL: load_sext_test2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i8>, <4 x i8>* %ptr
@@ -200,13 +200,13 @@ define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
; X32-LABEL: load_sext_test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxwq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i16>, <4 x i16>* %ptr
@@ -216,13 +216,13 @@ define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
; X32-LABEL: load_sext_test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxwd (%rdi), %ymm0
; X64-NEXT: retq
%X = load <8 x i16>, <8 x i16>* %ptr
@@ -232,13 +232,13 @@ define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
; X32-LABEL: load_sext_test5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbd (%rdi), %ymm0
; X64-NEXT: retq
%X = load <8 x i8>, <8 x i8>* %ptr
diff --git a/test/CodeGen/X86/avx2-fma-fneg-combine.ll b/test/CodeGen/X86/avx2-fma-fneg-combine.ll
index 019593cc0f8..212a3ac4a93 100644
--- a/test/CodeGen/X86/avx2-fma-fneg-combine.ll
+++ b/test/CodeGen/X86/avx2-fma-fneg-combine.ll
@@ -6,12 +6,12 @@
define <8 x float> @test1(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -24,12 +24,12 @@ declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
entry:
@@ -42,14 +42,14 @@ declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a, <4 x float> %b, <4 x
define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; X32-LABEL: test3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; X32-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm1
; X32-NEXT: vxorps %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; X64-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; X64-NEXT: vxorps %xmm1, %xmm0, %xmm0
@@ -64,12 +64,12 @@ declare <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float> %a, <4 x float> %b, <4
define <8 x float> @test4(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -80,14 +80,14 @@ entry:
define <8 x float> @test5(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test5:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vbroadcastss {{\.LCPI.*}}, %ymm3
; X32-NEXT: vxorps %ymm3, %ymm2, %ymm2
; X32-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vbroadcastss {{.*}}(%rip), %ymm3
; X64-NEXT: vxorps %ymm3, %ymm2, %ymm2
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
@@ -103,12 +103,12 @@ declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x f
define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; X32-LABEL: test6:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test6:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx2-gather.ll b/test/CodeGen/X86/avx2-gather.ll
index 4dc17a3ee99..a7da2fcc91d 100644
--- a/test/CodeGen/X86/avx2-gather.ll
+++ b/test/CodeGen/X86/avx2-gather.ll
@@ -7,7 +7,7 @@ declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*,
define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
@@ -15,7 +15,7 @@ define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x floa
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdps %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0
@@ -30,7 +30,7 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
@@ -38,7 +38,7 @@ define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x dou
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdpd %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovapd %xmm2, %xmm0
@@ -53,7 +53,7 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*,
define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
@@ -61,7 +61,7 @@ define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdps %ymm1, (%rdi,%ymm0,4), %ymm2
; X64-NEXT: vmovaps %ymm2, %ymm0
@@ -76,7 +76,7 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*,
define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
@@ -84,7 +84,7 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdpd %ymm1, (%rdi,%xmm0,8), %ymm2
; X64-NEXT: vmovapd %ymm2, %ymm0
@@ -96,7 +96,7 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x
define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -105,7 +105,7 @@ define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -122,7 +122,7 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>
define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -131,7 +131,7 @@ define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
diff --git a/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
index 5cda99e0077..1114b56268f 100644
--- a/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -6,7 +6,7 @@
define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_abs_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpabsb %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg = bitcast <4 x i64> %a0 to <32 x i8>
@@ -20,7 +20,7 @@ declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_abs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpabsw %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg = bitcast <4 x i64> %a0 to <16 x i16>
@@ -34,7 +34,7 @@ declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_abs_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpabsd %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg = bitcast <4 x i64> %a0 to <8 x i32>
@@ -48,7 +48,7 @@ declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
define <4 x i64> @test_mm256_add_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_add_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -60,7 +60,7 @@ define <4 x i64> @test_mm256_add_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_add_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_add_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -72,7 +72,7 @@ define <4 x i64> @test_mm256_add_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_add_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_add_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -84,7 +84,7 @@ define <4 x i64> @test_mm256_add_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_add_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_add_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = add <4 x i64> %a0, %a1
@@ -93,7 +93,7 @@ define <4 x i64> @test_mm256_add_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_adds_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -106,7 +106,7 @@ declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
define <4 x i64> @test_mm256_adds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -119,7 +119,7 @@ declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_adds_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddusb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -132,7 +132,7 @@ declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnon
define <4 x i64> @test_mm256_adds_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -145,7 +145,7 @@ declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind read
define <4 x i64> @test_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_alignr_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1],ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -157,7 +157,7 @@ define <4 x i64> @test_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test2_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test2_mm256_alignr_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0],ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -169,7 +169,7 @@ define <4 x i64> @test2_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_and_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_and_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = and <4 x i64> %a0, %a1
@@ -178,7 +178,7 @@ define <4 x i64> @test_mm256_and_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_andnot_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_andnot_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpxor %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -190,7 +190,7 @@ define <4 x i64> @test_mm256_andnot_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind
define <4 x i64> @test_mm256_avg_epu8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_avg_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -207,7 +207,7 @@ define <4 x i64> @test_mm256_avg_epu8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_avg_epu16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_avg_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -224,7 +224,7 @@ define <4 x i64> @test_mm256_avg_epu16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_blend_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_blend_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -236,7 +236,7 @@ define <4 x i64> @test_mm256_blend_epi16(<4 x i64> %a0, <4 x i64> %a1) {
define <2 x i64> @test_mm_blend_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_blend_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -248,7 +248,7 @@ define <2 x i64> @test_mm_blend_epi32(<2 x i64> %a0, <2 x i64> %a1) {
define <4 x i64> @test_mm256_blend_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_blend_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -260,7 +260,7 @@ define <4 x i64> @test_mm256_blend_epi32(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_blendv_epi8(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_mm256_blendv_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -274,7 +274,7 @@ declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounw
define <2 x i64> @test_mm_broadcastb_epi8(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastb_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -285,7 +285,7 @@ define <2 x i64> @test_mm_broadcastb_epi8(<2 x i64> %a0) {
define <4 x i64> @test_mm256_broadcastb_epi8(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastb_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -296,7 +296,7 @@ define <4 x i64> @test_mm256_broadcastb_epi8(<4 x i64> %a0) {
define <2 x i64> @test_mm_broadcastd_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastd_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -307,7 +307,7 @@ define <2 x i64> @test_mm_broadcastd_epi32(<2 x i64> %a0) {
define <4 x i64> @test_mm256_broadcastd_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastd_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -318,7 +318,7 @@ define <4 x i64> @test_mm256_broadcastd_epi32(<4 x i64> %a0) {
define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastq_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -327,7 +327,7 @@ define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
define <4 x i64> @test_mm256_broadcastq_epi64(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastq_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -336,7 +336,7 @@ define <4 x i64> @test_mm256_broadcastq_epi64(<4 x i64> %a0) {
define <2 x double> @test_mm_broadcastsd_pd(<2 x double> %a0) {
; CHECK-LABEL: test_mm_broadcastsd_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
@@ -345,7 +345,7 @@ define <2 x double> @test_mm_broadcastsd_pd(<2 x double> %a0) {
define <4 x double> @test_mm256_broadcastsd_pd(<4 x double> %a0) {
; CHECK-LABEL: test_mm256_broadcastsd_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> zeroinitializer
@@ -354,7 +354,7 @@ define <4 x double> @test_mm256_broadcastsd_pd(<4 x double> %a0) {
define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastsi128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
@@ -364,13 +364,13 @@ define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) {
define <4 x i64> @test_mm256_broadcastsi128_si256_mem(<2 x i64>* %p0) {
; X86-LABEL: test_mm256_broadcastsi128_si256_mem:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_broadcastsi128_si256_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: ret{{[l|q]}}
%a0 = load <2 x i64>, <2 x i64>* %p0
@@ -380,7 +380,7 @@ define <4 x i64> @test_mm256_broadcastsi128_si256_mem(<2 x i64>* %p0) {
define <4 x float> @test_mm_broadcastss_ps(<4 x float> %a0) {
; CHECK-LABEL: test_mm_broadcastss_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> zeroinitializer
@@ -389,7 +389,7 @@ define <4 x float> @test_mm_broadcastss_ps(<4 x float> %a0) {
define <8 x float> @test_mm256_broadcastss_ps(<8 x float> %a0) {
; CHECK-LABEL: test_mm256_broadcastss_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> zeroinitializer
@@ -398,7 +398,7 @@ define <8 x float> @test_mm256_broadcastss_ps(<8 x float> %a0) {
define <2 x i64> @test_mm_broadcastw_epi16(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastw_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -409,7 +409,7 @@ define <2 x i64> @test_mm_broadcastw_epi16(<2 x i64> %a0) {
define <4 x i64> @test_mm256_broadcastw_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastw_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -420,7 +420,7 @@ define <4 x i64> @test_mm256_broadcastw_epi16(<4 x i64> %a0) {
define <4 x i64> @test_mm256_bslli_epi128(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_bslli_epi128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -431,7 +431,7 @@ define <4 x i64> @test_mm256_bslli_epi128(<4 x i64> %a0) {
define <4 x i64> @test_mm256_bsrli_epi128(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_bsrli_epi128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -442,7 +442,7 @@ define <4 x i64> @test_mm256_bsrli_epi128(<4 x i64> %a0) {
define <4 x i64> @test_mm256_cmpeq_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpeq_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -455,7 +455,7 @@ define <4 x i64> @test_mm256_cmpeq_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_cmpeq_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpeq_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -468,7 +468,7 @@ define <4 x i64> @test_mm256_cmpeq_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind
define <4 x i64> @test_mm256_cmpeq_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpeq_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -481,7 +481,7 @@ define <4 x i64> @test_mm256_cmpeq_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind
define <4 x i64> @test_mm256_cmpeq_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpeq_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%cmp = icmp eq <4 x i64> %a0, %a1
@@ -491,7 +491,7 @@ define <4 x i64> @test_mm256_cmpeq_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind
define <4 x i64> @test_mm256_cmpgt_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpgt_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -504,7 +504,7 @@ define <4 x i64> @test_mm256_cmpgt_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_cmpgt_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpgt_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -517,7 +517,7 @@ define <4 x i64> @test_mm256_cmpgt_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind
define <4 x i64> @test_mm256_cmpgt_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpgt_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -530,7 +530,7 @@ define <4 x i64> @test_mm256_cmpgt_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind
define <4 x i64> @test_mm256_cmpgt_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpgt_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%cmp = icmp sgt <4 x i64> %a0, %a1
@@ -540,7 +540,7 @@ define <4 x i64> @test_mm256_cmpgt_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind
define <4 x i64> @test_mm256_cvtepi8_epi16(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi8_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -551,7 +551,7 @@ define <4 x i64> @test_mm256_cvtepi8_epi16(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepi8_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi8_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -563,7 +563,7 @@ define <4 x i64> @test_mm256_cvtepi8_epi32(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepi8_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi8_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -574,7 +574,7 @@ define <4 x i64> @test_mm256_cvtepi8_epi64(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepi16_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi16_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxwd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -585,7 +585,7 @@ define <4 x i64> @test_mm256_cvtepi16_epi32(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepi16_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi16_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -596,7 +596,7 @@ define <4 x i64> @test_mm256_cvtepi16_epi64(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepi32_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi32_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -606,7 +606,7 @@ define <4 x i64> @test_mm256_cvtepi32_epi64(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepu8_epi16(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu8_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -617,7 +617,7 @@ define <4 x i64> @test_mm256_cvtepu8_epi16(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepu8_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu8_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -629,7 +629,7 @@ define <4 x i64> @test_mm256_cvtepu8_epi32(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepu8_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu8_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -640,7 +640,7 @@ define <4 x i64> @test_mm256_cvtepu8_epi64(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepu16_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu16_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -651,7 +651,7 @@ define <4 x i64> @test_mm256_cvtepu16_epi32(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepu16_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu16_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -662,7 +662,7 @@ define <4 x i64> @test_mm256_cvtepu16_epi64(<2 x i64> %a0) {
define <4 x i64> @test_mm256_cvtepu32_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu32_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -672,7 +672,7 @@ define <4 x i64> @test_mm256_cvtepu32_epi64(<2 x i64> %a0) {
define <2 x i64> @test_mm256_extracti128_si256(<4 x i64> %a0) nounwind {
; CHECK-LABEL: test_mm256_extracti128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -682,7 +682,7 @@ define <2 x i64> @test_mm256_extracti128_si256(<4 x i64> %a0) nounwind {
define <4 x i64> @test_mm256_hadd_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hadd_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -695,7 +695,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_hadd_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hadd_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -708,7 +708,7 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
define <4 x i64> @test_mm256_hadds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hadds_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -721,7 +721,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind read
define <4 x i64> @test_mm256_hsub_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hsub_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -734,7 +734,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_hsub_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hsub_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -747,7 +747,7 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
define <4 x i64> @test_mm256_hsubs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hsubs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -760,7 +760,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind read
define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -769,7 +769,7 @@ define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -786,13 +786,13 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>
define <2 x i64> @test_mm_mask_i32gather_epi32(<2 x i64> %a0, i32 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm_mask_i32gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -806,7 +806,7 @@ define <2 x i64> @test_mm_mask_i32gather_epi32(<2 x i64> %a0, i32 *%a1, <2 x i64
define <4 x i64> @test_mm256_i32gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i32gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -815,7 +815,7 @@ define <4 x i64> @test_mm256_i32gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm0,2), %ymm1
@@ -832,13 +832,13 @@ declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*, <8 x i32>, <8 x
define <4 x i64> @test_mm256_mask_i32gather_epi32(<4 x i64> %a0, i32 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
; X86-LABEL: test_mm256_mask_i32gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -852,7 +852,7 @@ define <4 x i64> @test_mm256_mask_i32gather_epi32(<4 x i64> %a0, i32 *%a1, <4 x
define <2 x i64> @test_mm_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -861,7 +861,7 @@ define <2 x i64> @test_mm_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i32gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -876,13 +876,13 @@ declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*, <4 x i32>, <2 x i64>
define <2 x i64> @test_mm_mask_i32gather_epi64(<2 x i64> %a0, i64 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm_mask_i32gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i32gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -893,7 +893,7 @@ define <2 x i64> @test_mm_mask_i32gather_epi64(<2 x i64> %a0, i64 *%a1, <2 x i64
define <4 x i64> @test_mm256_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm256_i32gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -902,7 +902,7 @@ define <4 x i64> @test_mm256_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i32gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm0,2), %ymm1
@@ -917,13 +917,13 @@ declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*, <4 x i32>, <4 x
define <4 x i64> @test_mm256_mask_i32gather_epi64(<4 x i64> %a0, i64 *%a1, <2 x i64> %a2, <4 x i64> %a3) {
; X86-LABEL: test_mm256_mask_i32gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i32gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -934,7 +934,7 @@ define <4 x i64> @test_mm256_mask_i32gather_epi64(<4 x i64> %a0, i64 *%a1, <2 x
define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -943,7 +943,7 @@ define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -961,13 +961,13 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*, <4 x i32>, <2
define <2 x double> @test_mm_mask_i32gather_pd(<2 x double> %a0, double *%a1, <2 x i64> %a2, <2 x double> %a3) {
; X86-LABEL: test_mm_mask_i32gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast double *%a1 to i8*
@@ -978,7 +978,7 @@ define <2 x double> @test_mm_mask_i32gather_pd(<2 x double> %a0, double *%a1, <2
define <4 x double> @test_mm256_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm256_i32gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
@@ -987,7 +987,7 @@ define <4 x double> @test_mm256_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
; X64-NEXT: vgatherdpd %ymm2, (%rdi,%xmm0,2), %ymm1
@@ -1003,13 +1003,13 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*, <4 x i32>
define <4 x double> @test_mm256_mask_i32gather_pd(<4 x double> %a0, double *%a1, <2 x i64> %a2, <4 x double> %a3) {
; X86-LABEL: test_mm256_mask_i32gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast double *%a1 to i8*
@@ -1020,7 +1020,7 @@ define <4 x double> @test_mm256_mask_i32gather_pd(<4 x double> %a0, double *%a1,
define <4 x float> @test_mm_i32gather_ps(float *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -1029,7 +1029,7 @@ define <4 x float> @test_mm_i32gather_ps(float *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i32gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1047,13 +1047,13 @@ declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x
define <4 x float> @test_mm_mask_i32gather_ps(<4 x float> %a0, float *%a1, <2 x i64> %a2, <4 x float> %a3) {
; X86-LABEL: test_mm_mask_i32gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i32gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast float *%a1 to i8*
@@ -1064,7 +1064,7 @@ define <4 x float> @test_mm_mask_i32gather_ps(<4 x float> %a0, float *%a1, <2 x
define <8 x float> @test_mm256_i32gather_ps(float *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i32gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmpeqps %ymm1, %ymm1, %ymm2
@@ -1073,7 +1073,7 @@ define <8 x float> @test_mm256_i32gather_ps(float *%a0, <4 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i32gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqps %ymm1, %ymm1, %ymm2
; X64-NEXT: vgatherdps %ymm2, (%rdi,%ymm0,2), %ymm1
@@ -1089,13 +1089,13 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>,
define <8 x float> @test_mm256_mask_i32gather_ps(<8 x float> %a0, float *%a1, <4 x i64> %a2, <8 x float> %a3) {
; X86-LABEL: test_mm256_mask_i32gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i32gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast float *%a1 to i8*
@@ -1106,7 +1106,7 @@ define <8 x float> @test_mm256_mask_i32gather_ps(<8 x float> %a0, float *%a1, <4
define <2 x i64> @test_mm_i64gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i64gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1115,7 +1115,7 @@ define <2 x i64> @test_mm_i64gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i64gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1131,13 +1131,13 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*, <2 x i64>, <4 x i32>
define <2 x i64> @test_mm_mask_i64gather_epi32(<2 x i64> %a0, i32 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm_mask_i64gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i64gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1150,7 +1150,7 @@ define <2 x i64> @test_mm_mask_i64gather_epi32(<2 x i64> %a0, i32 *%a1, <2 x i64
define <2 x i64> @test_mm256_i64gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i64gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1160,7 +1160,7 @@ define <2 x i64> @test_mm256_i64gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i64gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm0,2), %xmm1
@@ -1177,14 +1177,14 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*, <4 x i64>, <4 x
define <2 x i64> @test_mm256_mask_i64gather_epi32(<2 x i64> %a0, i32 *%a1, <4 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm256_mask_i64gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i64gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -1198,7 +1198,7 @@ define <2 x i64> @test_mm256_mask_i64gather_epi32(<2 x i64> %a0, i32 *%a1, <4 x
define <2 x i64> @test_mm_i64gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i64gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1207,7 +1207,7 @@ define <2 x i64> @test_mm_i64gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i64gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1221,13 +1221,13 @@ declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*, <2 x i64>, <2 x i64>
define <2 x i64> @test_mm_mask_i64gather_epi64(<2 x i64> %a0, i64 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm_mask_i64gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i64gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -1237,7 +1237,7 @@ define <2 x i64> @test_mm_mask_i64gather_epi64(<2 x i64> %a0, i64 *%a1, <2 x i64
define <4 x i64> @test_mm256_i64gather_epi64(i64 *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i64gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1246,7 +1246,7 @@ define <4 x i64> @test_mm256_i64gather_epi64(i64 *%a0, <4 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i64gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm0,2), %ymm1
@@ -1260,13 +1260,13 @@ declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*, <4 x i64>, <4 x
define <4 x i64> @test_mm256_mask_i64gather_epi64(<4 x i64> %a0, i64 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
; X86-LABEL: test_mm256_mask_i64gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i64gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -1276,7 +1276,7 @@ define <4 x i64> @test_mm256_mask_i64gather_epi64(<4 x i64> %a0, i64 *%a1, <4 x
define <2 x double> @test_mm_i64gather_pd(double *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i64gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -1285,7 +1285,7 @@ define <2 x double> @test_mm_i64gather_pd(double *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i64gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1302,13 +1302,13 @@ declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*, <2 x i64>, <2
define <2 x double> @test_mm_mask_i64gather_pd(<2 x double> %a0, double *%a1, <2 x i64> %a2, <2 x double> %a3) {
; X86-LABEL: test_mm_mask_i64gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i64gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast double *%a1 to i8*
@@ -1318,7 +1318,7 @@ define <2 x double> @test_mm_mask_i64gather_pd(<2 x double> %a0, double *%a1, <2
define <4 x double> @test_mm256_i64gather_pd(double *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i64gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
@@ -1327,7 +1327,7 @@ define <4 x double> @test_mm256_i64gather_pd(double *%a0, <4 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i64gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm0,2), %ymm1
@@ -1342,13 +1342,13 @@ declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*, <4 x i64>
define <4 x double> @test_mm256_mask_i64gather_pd(<4 x double> %a0, i64 *%a1, <4 x i64> %a2, <4 x double> %a3) {
; X86-LABEL: test_mm256_mask_i64gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i64gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -1358,7 +1358,7 @@ define <4 x double> @test_mm256_mask_i64gather_pd(<4 x double> %a0, i64 *%a1, <4
define <4 x float> @test_mm_i64gather_ps(float *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i64gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -1367,7 +1367,7 @@ define <4 x float> @test_mm_i64gather_ps(float *%a0, <2 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i64gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1384,13 +1384,13 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*, <2 x i64>, <4 x
define <4 x float> @test_mm_mask_i64gather_ps(<4 x float> %a0, float *%a1, <2 x i64> %a2, <4 x float> %a3) {
; X86-LABEL: test_mm_mask_i64gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i64gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast float *%a1 to i8*
@@ -1400,7 +1400,7 @@ define <4 x float> @test_mm_mask_i64gather_ps(<4 x float> %a0, float *%a1, <2 x
define <4 x float> @test_mm256_i64gather_ps(float *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i64gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -1410,7 +1410,7 @@ define <4 x float> @test_mm256_i64gather_ps(float *%a0, <4 x i64> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i64gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm0,2), %xmm1
@@ -1428,14 +1428,14 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*, <4 x i64>,
define <4 x float> @test_mm256_mask_i64gather_ps(<4 x float> %a0, float *%a1, <4 x i64> %a2, <4 x float> %a3) {
; X86-LABEL: test_mm256_mask_i64gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i64gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -1446,7 +1446,7 @@ define <4 x float> @test_mm256_mask_i64gather_ps(<4 x float> %a0, float *%a1, <4
define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; CHECK-LABEL: test0_mm256_inserti128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
@@ -1457,7 +1457,7 @@ define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nou
define <4 x i64> @test1_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; CHECK-LABEL: test1_mm256_inserti128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -1467,7 +1467,7 @@ define <4 x i64> @test1_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nou
define <4 x i64> @test_mm256_madd_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_madd_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1480,7 +1480,7 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_maddubs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_maddubs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1493,13 +1493,13 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind rea
define <2 x i64> @test_mm_maskload_epi32(i32* %a0, <2 x i64> %a1) nounwind {
; X86-LABEL: test_mm_maskload_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovd (%eax), %xmm0, %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_maskload_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i32* %a0 to i8*
@@ -1512,13 +1512,13 @@ declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>) nounwind readonly
define <4 x i64> @test_mm256_maskload_epi32(i32* %a0, <4 x i64> %a1) nounwind {
; X86-LABEL: test_mm256_maskload_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovd (%eax), %ymm0, %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_maskload_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i32* %a0 to i8*
@@ -1531,13 +1531,13 @@ declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>) nounwind readonl
define <2 x i64> @test_mm_maskload_epi64(i64* %a0, <2 x i64> %a1) nounwind {
; X86-LABEL: test_mm_maskload_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovq (%eax), %xmm0, %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_maskload_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i64* %a0 to i8*
@@ -1548,13 +1548,13 @@ declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>) nounwind readonly
define <4 x i64> @test_mm256_maskload_epi64(i64* %a0, <4 x i64> %a1) nounwind {
; X86-LABEL: test_mm256_maskload_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovq (%eax), %ymm0, %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_maskload_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i64* %a0 to i8*
@@ -1565,13 +1565,13 @@ declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>) nounwind readonl
define void @test_mm_maskstore_epi32(float* %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X86-LABEL: test_mm_maskstore_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovd %xmm1, %xmm0, (%eax)
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_maskstore_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast float* %a0 to i8*
@@ -1584,14 +1584,14 @@ declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind read
define void @test_mm256_maskstore_epi32(float* %a0, <4 x i64> %a1, <4 x i64> %a2) nounwind {
; X86-LABEL: test_mm256_maskstore_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax)
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_maskstore_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -1605,13 +1605,13 @@ declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>) nounwind
define void @test_mm_maskstore_epi64(i64* %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X86-LABEL: test_mm_maskstore_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovq %xmm1, %xmm0, (%eax)
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_maskstore_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi)
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i64* %a0 to i8*
@@ -1622,14 +1622,14 @@ declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind read
define void @test_mm256_maskstore_epi64(i64* %a0, <4 x i64> %a1, <4 x i64> %a2) nounwind {
; X86-LABEL: test_mm256_maskstore_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax)
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_maskstore_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -1641,7 +1641,7 @@ declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>) nounwind
define <4 x i64> @test_mm256_max_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1654,7 +1654,7 @@ define <4 x i64> @test_mm256_max_epi8(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_max_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1667,7 +1667,7 @@ define <4 x i64> @test_mm256_max_epi16(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_max_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1680,7 +1680,7 @@ define <4 x i64> @test_mm256_max_epi32(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_max_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1693,7 +1693,7 @@ define <4 x i64> @test_mm256_max_epu8(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_max_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1706,7 +1706,7 @@ define <4 x i64> @test_mm256_max_epu16(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_max_epu32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epu32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1719,7 +1719,7 @@ define <4 x i64> @test_mm256_max_epu32(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_min_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1732,7 +1732,7 @@ define <4 x i64> @test_mm256_min_epi8(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_min_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1745,7 +1745,7 @@ define <4 x i64> @test_mm256_min_epi16(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_min_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1758,7 +1758,7 @@ define <4 x i64> @test_mm256_min_epi32(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_min_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1771,7 +1771,7 @@ define <4 x i64> @test_mm256_min_epu8(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_min_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1784,7 +1784,7 @@ define <4 x i64> @test_mm256_min_epu16(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_min_epu32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epu32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1797,7 +1797,7 @@ define <4 x i64> @test_mm256_min_epu32(<4 x i64> %a0, <4 x i64> %a1) {
define i32 @test_mm256_movemask_epi8(<4 x i64> %a0) nounwind {
; CHECK-LABEL: test_mm256_movemask_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovmskb %ymm0, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -1809,7 +1809,7 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone
define <4 x i64> @test_mm256_mpsadbw_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mpsadbw_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmpsadbw $3, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1822,7 +1822,7 @@ declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind rea
define <4 x i64> @test_mm256_mul_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mul_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1834,7 +1834,7 @@ declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
define <4 x i64> @test_mm256_mul_epu32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mul_epu32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1846,7 +1846,7 @@ declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnon
define <4 x i64> @test_mm256_mulhi_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mulhi_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulhw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1859,7 +1859,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_mulhi_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mulhi_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1872,7 +1872,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind read
define <4 x i64> @test_mm256_mulhrs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mulhrs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1885,7 +1885,7 @@ declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind re
define <4 x i64> @test_mm256_mullo_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mullo_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1897,7 +1897,7 @@ define <4 x i64> @test_mm256_mullo_epi16(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_mullo_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mullo_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1909,7 +1909,7 @@ define <4 x i64> @test_mm256_mullo_epi32(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_mm256_or_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_or_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = or <4 x i64> %a0, %a1
@@ -1918,7 +1918,7 @@ define <4 x i64> @test_mm256_or_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_packs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_packs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1931,7 +1931,7 @@ declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_packs_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_packs_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1944,7 +1944,7 @@ declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readno
define <4 x i64> @test_mm256_packus_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_packus_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1957,7 +1957,7 @@ declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_packus_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_packus_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1970,7 +1970,7 @@ declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readno
define <4 x i64> @test_mm256_permute2x128_si256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_permute2x128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
@@ -1980,7 +1980,7 @@ declare <4 x i64> @llvm.x86.avx2.vperm2i128(<4 x i64>, <4 x i64>, i8) nounwind r
define <4 x i64> @test_mm256_permute4x64_epi64(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_permute4x64_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,2,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 0>
@@ -1989,7 +1989,7 @@ define <4 x i64> @test_mm256_permute4x64_epi64(<4 x i64> %a0) {
define <4 x double> @test_mm256_permute4x64_pd(<4 x double> %a0) {
; CHECK-LABEL: test_mm256_permute4x64_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,1,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 2, i32 1, i32 0>
@@ -1998,7 +1998,7 @@ define <4 x double> @test_mm256_permute4x64_pd(<4 x double> %a0) {
define <4 x i64> @test_mm256_permutevar8x32_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_permutevar8x32_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2011,7 +2011,7 @@ declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
define <8 x float> @test_mm256_permutevar8x32_ps(<8 x float> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_permutevar8x32_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
@@ -2022,7 +2022,7 @@ declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind reado
define <4 x i64> @test_mm256_sad_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sad_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2034,7 +2034,7 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
define <4 x i64> @test_mm256_shuffle_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_shuffle_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,3,0,0,7,7,4,4]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2045,7 +2045,7 @@ define <4 x i64> @test_mm256_shuffle_epi32(<4 x i64> %a0) {
define <4 x i64> @test_mm256_shuffle_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_shuffle_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2058,7 +2058,7 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
define <4 x i64> @test_mm256_shufflehi_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_shufflehi_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,6,5,8,9,10,11,15,14,14,13]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2069,7 +2069,7 @@ define <4 x i64> @test_mm256_shufflehi_epi16(<4 x i64> %a0) {
define <4 x i64> @test_mm256_shufflelo_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_shufflelo_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,0,1,1,4,5,6,7,11,8,9,9,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2080,7 +2080,7 @@ define <4 x i64> @test_mm256_shufflelo_epi16(<4 x i64> %a0) {
define <4 x i64> @test_mm256_sign_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sign_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsignb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2093,7 +2093,7 @@ declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
define <4 x i64> @test_mm256_sign_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sign_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsignw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2106,7 +2106,7 @@ declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_sign_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sign_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsignd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2119,7 +2119,7 @@ declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
define <4 x i64> @test_mm256_sll_epi16(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sll_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2132,7 +2132,7 @@ declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnon
define <4 x i64> @test_mm256_sll_epi32(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sll_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2145,7 +2145,7 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_mm256_sll_epi64(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sll_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1)
@@ -2155,7 +2155,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_mm256_slli_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_slli_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllw $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2167,7 +2167,7 @@ declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone
define <4 x i64> @test_mm256_slli_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_slli_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2179,7 +2179,7 @@ declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) nounwind readnone
define <4 x i64> @test_mm256_slli_epi64(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_slli_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 3)
@@ -2189,7 +2189,7 @@ declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) nounwind readnone
define <4 x i64> @test_mm256_slli_si256(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_slli_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2200,7 +2200,7 @@ define <4 x i64> @test_mm256_slli_si256(<4 x i64> %a0) {
define <2 x i64> @test_mm_sllv_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_sllv_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2213,7 +2213,7 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_mm256_sllv_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sllv_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2226,7 +2226,7 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_mm_sllv_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_sllv_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1)
@@ -2236,7 +2236,7 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_mm256_sllv_epi64(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sllv_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -2246,7 +2246,7 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <4 x i64> @test_mm256_sra_epi16(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sra_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2259,7 +2259,7 @@ declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnon
define <4 x i64> @test_mm256_sra_epi32(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sra_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2272,7 +2272,7 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_mm256_srai_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srai_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsraw $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2284,7 +2284,7 @@ declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone
define <4 x i64> @test_mm256_srai_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srai_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrad $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2296,7 +2296,7 @@ declare <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32>, i32) nounwind readnone
define <2 x i64> @test_mm_srav_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_srav_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2309,7 +2309,7 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_mm256_srav_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_srav_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2322,7 +2322,7 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind read
define <4 x i64> @test_mm256_srl_epi16(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_srl_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2335,7 +2335,7 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnon
define <4 x i64> @test_mm256_srl_epi32(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_srl_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2348,7 +2348,7 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_mm256_srl_epi64(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_srl_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1)
@@ -2358,7 +2358,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_mm256_srli_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srli_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2370,7 +2370,7 @@ declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone
define <4 x i64> @test_mm256_srli_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srli_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2382,7 +2382,7 @@ declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) nounwind readnone
define <4 x i64> @test_mm256_srli_epi64(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srli_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 3)
@@ -2392,7 +2392,7 @@ declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) nounwind readnone
define <4 x i64> @test_mm256_srli_si256(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srli_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2403,7 +2403,7 @@ define <4 x i64> @test_mm256_srli_si256(<4 x i64> %a0) {
define <2 x i64> @test_mm_srlv_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_srlv_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2416,7 +2416,7 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_mm256_srlv_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_srlv_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2429,7 +2429,7 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_mm_srlv_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_srlv_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1)
@@ -2439,7 +2439,7 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_mm256_srlv_epi64(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_srlv_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -2449,13 +2449,13 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <4 x i64> @test_mm256_stream_load_si256(<4 x i64> *%a0) {
; X86-LABEL: test_mm256_stream_load_si256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovntdqa (%eax), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_stream_load_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovntdqa (%rdi), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> *%a0 to i8*
@@ -2466,7 +2466,7 @@ declare <4 x i64> @llvm.x86.avx2.movntdqa(i8*) nounwind readonly
define <4 x i64> @test_mm256_sub_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_sub_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2478,7 +2478,7 @@ define <4 x i64> @test_mm256_sub_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_sub_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_sub_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2490,7 +2490,7 @@ define <4 x i64> @test_mm256_sub_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_sub_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_sub_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2502,7 +2502,7 @@ define <4 x i64> @test_mm256_sub_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_sub_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_sub_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = sub <4 x i64> %a0, %a1
@@ -2511,7 +2511,7 @@ define <4 x i64> @test_mm256_sub_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_mm256_subs_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2524,7 +2524,7 @@ declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
define <4 x i64> @test_mm256_subs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2537,7 +2537,7 @@ declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i64> @test_mm256_subs_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2550,7 +2550,7 @@ declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnon
define <4 x i64> @test_mm256_subs_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2563,7 +2563,7 @@ declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind read
define <4 x i64> @test_mm256_unpackhi_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2575,7 +2575,7 @@ define <4 x i64> @test_mm256_unpackhi_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwin
define <4 x i64> @test_mm256_unpackhi_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2587,7 +2587,7 @@ define <4 x i64> @test_mm256_unpackhi_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwi
define <4 x i64> @test_mm256_unpackhi_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2599,7 +2599,7 @@ define <4 x i64> @test_mm256_unpackhi_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwi
define <4 x i64> @test_mm256_unpackhi_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -2608,7 +2608,7 @@ define <4 x i64> @test_mm256_unpackhi_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwi
define <4 x i64> @test_mm256_unpacklo_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpacklo_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2620,7 +2620,7 @@ define <4 x i64> @test_mm256_unpacklo_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwin
define <4 x i64> @test_mm256_unpacklo_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpacklo_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2632,7 +2632,7 @@ define <4 x i64> @test_mm256_unpacklo_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwi
define <4 x i64> @test_mm256_unpacklo_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpacklo_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2644,7 +2644,7 @@ define <4 x i64> @test_mm256_unpacklo_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwi
define <4 x i64> @test_mm256_unpacklo_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpacklo_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -2653,7 +2653,7 @@ define <4 x i64> @test_mm256_unpacklo_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwi
define <4 x i64> @test_mm256_xor_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_xor_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = xor <4 x i64> %a0, %a1
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
index 52cae06d84b..a761ec955fb 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
@@ -6,7 +6,7 @@
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 7) ; <<16 x i16>> [#uses=1]
@@ -17,7 +17,7 @@ declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i32) nounwind
define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 7) ; <<4 x i32>> [#uses=1]
@@ -28,7 +28,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i32) nounwind
define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 7) ; <<8 x i32>> [#uses=1]
@@ -39,13 +39,13 @@ declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i32) nounwind
define <4 x i64> @test_x86_avx2_movntdqa(i8* %a0) {
; X86-LABEL: test_x86_avx2_movntdqa:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovntdqa (%eax), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx2_movntdqa:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovntdqa (%rdi), %ymm0
; X64-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %a0) ; <<4 x i64>> [#uses=1]
@@ -56,7 +56,7 @@ declare <4 x i64> @llvm.x86.avx2.movntdqa(i8*) nounwind readonly
define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_mpsadbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i32 7) ; <<16 x i16>> [#uses=1]
@@ -67,7 +67,7 @@ declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i32) nounwind re
define <4 x i64> @test_x86_avx2_psll_dq_bs(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_psll_dq_bs:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8],zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psll.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -78,7 +78,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.dq.bs(<4 x i64>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_psrl_dq_bs(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_psrl_dq_bs:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,ymm0[23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -89,7 +89,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_psll_dq(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_psll_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} ymm0 = zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psll.dq(<4 x i64> %a0, i32 8) ; <<4 x i64>> [#uses=1]
@@ -100,7 +100,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.dq(<4 x i64>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_psrl_dq(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_psrl_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64> %a0, i32 8) ; <<4 x i64>> [#uses=1]
@@ -111,7 +111,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_avx2_vextracti128(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_vextracti128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -123,7 +123,7 @@ declare <2 x i64> @llvm.x86.avx2.vextracti128(<4 x i64>, i8) nounwind readnone
define <4 x i64> @test_x86_avx2_vinserti128(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx2_vinserti128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.vinserti128(<4 x i64> %a0, <2 x i64> %a1, i8 7)
@@ -134,7 +134,7 @@ declare <4 x i64> @llvm.x86.avx2.vinserti128(<4 x i64>, <2 x i64>, i8) nounwind
define <4 x double> @test_x86_avx2_vbroadcast_sd_pd_256(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx2_vbroadcast_sd_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx2.vbroadcast.sd.pd.256(<2 x double> %a0)
@@ -145,7 +145,7 @@ declare <4 x double> @llvm.x86.avx2.vbroadcast.sd.pd.256(<2 x double>) nounwind
define <4 x float> @test_x86_avx2_vbroadcast_ss_ps(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx2_vbroadcast_ss_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.x86.avx2.vbroadcast.ss.ps(<4 x float> %a0)
@@ -156,7 +156,7 @@ declare <4 x float> @llvm.x86.avx2.vbroadcast.ss.ps(<4 x float>) nounwind readon
define <8 x float> @test_x86_avx2_vbroadcast_ss_ps_256(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx2_vbroadcast_ss_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.x86.avx2.vbroadcast.ss.ps.256(<4 x float> %a0)
@@ -167,7 +167,7 @@ declare <8 x float> @llvm.x86.avx2.vbroadcast.ss.ps.256(<4 x float>) nounwind re
define <16 x i8> @test_x86_avx2_pbroadcastb_128(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i8> @llvm.x86.avx2.pbroadcastb.128(<16 x i8> %a0)
@@ -178,7 +178,7 @@ declare <16 x i8> @llvm.x86.avx2.pbroadcastb.128(<16 x i8>) nounwind readonly
define <32 x i8> @test_x86_avx2_pbroadcastb_256(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pbroadcastb.256(<16 x i8> %a0)
@@ -189,7 +189,7 @@ declare <32 x i8> @llvm.x86.avx2.pbroadcastb.256(<16 x i8>) nounwind readonly
define <8 x i16> @test_x86_avx2_pbroadcastw_128(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i16> @llvm.x86.avx2.pbroadcastw.128(<8 x i16> %a0)
@@ -200,7 +200,7 @@ declare <8 x i16> @llvm.x86.avx2.pbroadcastw.128(<8 x i16>) nounwind readonly
define <16 x i16> @test_x86_avx2_pbroadcastw_256(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pbroadcastw.256(<8 x i16> %a0)
@@ -211,7 +211,7 @@ declare <16 x i16> @llvm.x86.avx2.pbroadcastw.256(<8 x i16>) nounwind readonly
define <4 x i32> @test_x86_avx2_pbroadcastd_128(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i32> @llvm.x86.avx2.pbroadcastd.128(<4 x i32> %a0)
@@ -222,7 +222,7 @@ declare <4 x i32> @llvm.x86.avx2.pbroadcastd.128(<4 x i32>) nounwind readonly
define <8 x i32> @test_x86_avx2_pbroadcastd_256(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pbroadcastd.256(<4 x i32> %a0)
@@ -233,7 +233,7 @@ declare <8 x i32> @llvm.x86.avx2.pbroadcastd.256(<4 x i32>) nounwind readonly
define <2 x i64> @test_x86_avx2_pbroadcastq_128(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64> %a0)
@@ -244,7 +244,7 @@ declare <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64>) nounwind readonly
define <4 x i64> @test_x86_avx2_pbroadcastq_256(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pbroadcastq.256(<2 x i64> %a0)
@@ -255,7 +255,7 @@ declare <4 x i64> @llvm.x86.avx2.pbroadcastq.256(<2 x i64>) nounwind readonly
define <8 x i32> @test_x86_avx2_pmovsxbd(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxbd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %a0) ; <<8 x i32>> [#uses=1]
@@ -266,7 +266,7 @@ declare <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8>) nounwind readnone
define <4 x i64> @test_x86_avx2_pmovsxbq(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxbq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %a0) ; <<4 x i64>> [#uses=1]
@@ -277,7 +277,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_pmovsxbw(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
@@ -288,7 +288,7 @@ declare <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8>) nounwind readnone
define <4 x i64> @test_x86_avx2_pmovsxdq(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxdq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %a0) ; <<4 x i64>> [#uses=1]
@@ -299,7 +299,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_pmovsxwd(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxwd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %a0) ; <<8 x i32>> [#uses=1]
@@ -310,7 +310,7 @@ declare <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16>) nounwind readnone
define <4 x i64> @test_x86_avx2_pmovsxwq(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxwq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %a0) ; <<4 x i64>> [#uses=1]
@@ -321,7 +321,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16>) nounwind readnone
define <8 x i32> @test_x86_avx2_pmovzxbd(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxbd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %a0) ; <<8 x i32>> [#uses=1]
@@ -332,7 +332,7 @@ declare <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8>) nounwind readnone
define <4 x i64> @test_x86_avx2_pmovzxbq(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxbq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %a0) ; <<4 x i64>> [#uses=1]
@@ -343,7 +343,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_pmovzxbw(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %a0) ; <<16 x i16>> [#uses=1]
@@ -354,7 +354,7 @@ declare <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8>) nounwind readnone
define <4 x i64> @test_x86_avx2_pmovzxdq(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxdq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %a0) ; <<4 x i64>> [#uses=1]
@@ -365,7 +365,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_pmovzxwd(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxwd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %a0) ; <<8 x i32>> [#uses=1]
@@ -376,7 +376,7 @@ declare <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16>) nounwind readnone
define <4 x i64> @test_x86_avx2_pmovzxwq(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxwq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %a0) ; <<4 x i64>> [#uses=1]
@@ -388,7 +388,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16>) nounwind readnone
define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
; add operation forces the execution domain.
; X86-LABEL: test_x86_avx_storeu_dq_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X86-NEXT: vpsubb %ymm1, %ymm0, %ymm0
@@ -397,7 +397,7 @@ define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx_storeu_dq_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X64-NEXT: vmovdqu %ymm0, (%rdi)
@@ -411,7 +411,7 @@ declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
define <32 x i8> @mm256_max_epi8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_max_epi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1)
@@ -421,7 +421,7 @@ declare <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @mm256_max_epi16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_max_epi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -431,7 +431,7 @@ declare <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @mm256_max_epi32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: mm256_max_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -441,7 +441,7 @@ declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readnone
define <32 x i8> @mm256_max_epu8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_max_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1)
@@ -451,7 +451,7 @@ declare <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @mm256_max_epu16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_max_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -461,7 +461,7 @@ declare <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @mm256_max_epu32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: mm256_max_epu32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -471,7 +471,7 @@ declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readnone
define <32 x i8> @mm256_min_epi8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_min_epi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1)
@@ -481,7 +481,7 @@ declare <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @mm256_min_epi16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_min_epi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -491,7 +491,7 @@ declare <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @mm256_min_epi32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: mm256_min_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -501,7 +501,7 @@ declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readnone
define <32 x i8> @mm256_min_epu8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_min_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1)
@@ -511,7 +511,7 @@ declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @mm256_min_epu16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_min_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -521,7 +521,7 @@ declare <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @mm256_min_epu32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: mm256_min_epu32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -531,7 +531,7 @@ declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readnone
define <32 x i8> @mm256_avg_epu8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_avg_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -541,7 +541,7 @@ declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @mm256_avg_epu16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_avg_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -551,7 +551,7 @@ declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readno
define <32 x i8> @test_x86_avx2_pabs_b(<32 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pabs_b:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsb %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0) ; <<32 x i8>> [#uses=1]
@@ -561,7 +561,7 @@ declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_pabs_d(<8 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pabs_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsd %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0) ; <<8 x i32>> [#uses=1]
@@ -572,7 +572,7 @@ declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_pabs_w(<16 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pabs_w:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsw %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0) ; <<16 x i16>> [#uses=1]
@@ -583,7 +583,7 @@ declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
define <4 x i64> @test_x86_avx2_vperm2i128(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_x86_avx2_vperm2i128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.vperm2i128(<4 x i64> %a0, <4 x i64> %a1, i8 1) ; <<4 x i64>> [#uses=1]
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86.ll b/test/CodeGen/X86/avx2-intrinsics-x86.ll
index c11722f916d..20ebda5beb0 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -6,12 +6,12 @@
define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_packssdw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x6b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packssdw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
@@ -22,28 +22,28 @@ declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readno
define <16 x i16> @test_x86_avx2_packssdw_fold() {
; X86-AVX-LABEL: test_x86_avx2_packssdw_fold:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI1_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packssdw_fold:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
@@ -55,12 +55,12 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() {
define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_packsswb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x63,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packsswb:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
@@ -71,28 +71,28 @@ declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_packsswb_fold() {
; X86-AVX-LABEL: test_x86_avx2_packsswb_fold:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI3_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packsswb_fold:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
@@ -104,12 +104,12 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() {
define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_packuswb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x67,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packuswb:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
@@ -120,28 +120,28 @@ declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_packuswb_fold() {
; X86-AVX-LABEL: test_x86_avx2_packuswb_fold:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI5_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packuswb_fold:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
@@ -153,12 +153,12 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() {
define <32 x i8> @test_x86_avx2_padds_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_padds_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xec,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_padds_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -169,12 +169,12 @@ declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_padds_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_padds_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xed,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_padds_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -185,12 +185,12 @@ declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_paddus_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_paddus_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdc,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_paddus_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -201,12 +201,12 @@ declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnon
define <16 x i16> @test_x86_avx2_paddus_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_paddus_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdd,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_paddus_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -217,12 +217,12 @@ declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind read
define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmadd_wd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf5,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmadd_wd:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1) ; <<8 x i32>> [#uses=1]
@@ -233,12 +233,12 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_pmaxs_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxs_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xee,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxs_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xee,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -249,12 +249,12 @@ declare <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_pmaxu_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxu_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xde,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxu_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -265,12 +265,12 @@ declare <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_pmins_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmins_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xea,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmins_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xea,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -281,12 +281,12 @@ declare <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_pminu_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pminu_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xda,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminu_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -297,7 +297,7 @@ declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone
define i32 @test_x86_avx2_pmovmskb(<32 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovmskb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -309,12 +309,12 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmulh_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe5,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmulh_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -325,12 +325,12 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmulhu_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe4,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmulhu_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -341,12 +341,12 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind read
define <4 x i64> @test_x86_avx2_pmulu_dq(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pmulu_dq:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf4,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmulu_dq:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf4,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1) ; <<4 x i64>> [#uses=1]
@@ -357,12 +357,12 @@ declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnon
define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_psad_bw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf6,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psad_bw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf6,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1) ; <<4 x i64>> [#uses=1]
@@ -373,12 +373,12 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -389,12 +389,12 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf3,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -405,12 +405,12 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -421,12 +421,12 @@ declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnon
define <8 x i32> @test_x86_avx2_pslli_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpslld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -437,12 +437,12 @@ declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_pslli_q(<4 x i64> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -453,12 +453,12 @@ declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) nounwind readnone
define <16 x i16> @test_x86_avx2_pslli_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -469,12 +469,12 @@ declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone
define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psra_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psra_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -485,12 +485,12 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psra_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psra_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -501,12 +501,12 @@ declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnon
define <8 x i32> @test_x86_avx2_psrai_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_psrai_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrad $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xe0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrai_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrad $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xe0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -517,12 +517,12 @@ declare <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32>, i32) nounwind readnone
define <16 x i16> @test_x86_avx2_psrai_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_psrai_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsraw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xe0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrai_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsraw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xe0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -533,12 +533,12 @@ declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone
define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -549,12 +549,12 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd3,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -565,12 +565,12 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -581,12 +581,12 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnon
define <8 x i32> @test_x86_avx2_psrli_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -597,12 +597,12 @@ declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_psrli_q(<4 x i64> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -613,12 +613,12 @@ declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) nounwind readnone
define <16 x i16> @test_x86_avx2_psrli_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -629,12 +629,12 @@ declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone
define <32 x i8> @test_x86_avx2_psubs_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_psubs_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe8,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psubs_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -645,12 +645,12 @@ declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_psubs_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psubs_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe9,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psubs_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -661,12 +661,12 @@ declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_psubus_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_psubus_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd8,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psubus_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -677,12 +677,12 @@ declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnon
define <16 x i16> @test_x86_avx2_psubus_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psubus_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd9,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psubus_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -692,7 +692,7 @@ declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind read
define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x02,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -703,7 +703,7 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_sw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x03,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -714,7 +714,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_w:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x01,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -725,7 +725,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x06,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -736,7 +736,7 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_sw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x07,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -747,7 +747,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_w:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x05,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -758,12 +758,12 @@ declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmadd_ub_sw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1]
@@ -774,27 +774,27 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind rea
; Make sure we don't commute this operation: vpmaddubsw reads its first operand
; as unsigned bytes and its second as signed bytes, so the operands are not
; interchangeable (see the sketch after this test).
define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(<32 x i8>* %ptr, <32 x i8> %a1) {
; X86-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vmovdqa (%eax), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x08]
; X86-AVX-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vmovdqa (%eax), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x08]
; X86-AVX512VL-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa (%rdi), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x0f]
; X64-AVX-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa (%rdi), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x0f]
; X64-AVX512VL-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
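To illustrate the non-commutativity noted above (values and names here are illustrative, not from the commit): vpmaddubsw, exposed in C as _mm256_maddubs_epi16, multiplies unsigned bytes from its first operand by signed bytes from its second, so swapping the operands changes the result whenever a byte >= 0x80 is involved. A small sketch, assuming -mavx2:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i a = _mm256_set1_epi8((char)0x80); /* 128 as unsigned, -128 as signed */
  __m256i b = _mm256_set1_epi8(2);
  /* a unsigned * b signed: 128*2 + 128*2 = 512 per 16-bit lane */
  __m256i ab = _mm256_maddubs_epi16(a, b);
  /* b unsigned * a signed: 2*(-128) + 2*(-128) = -512 per 16-bit lane */
  __m256i ba = _mm256_maddubs_epi16(b, a);
  short lo_ab = (short)_mm256_extract_epi16(ab, 0);
  short lo_ba = (short)_mm256_extract_epi16(ba, 0);
  printf("%d vs %d\n", lo_ab, lo_ba);        /* prints: 512 vs -512 */
  return 0;
}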
@@ -805,12 +805,12 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(<32 x i8>* %ptr, <32 x i8>
define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmul_hr_sw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmul_hr_sw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -821,12 +821,12 @@ declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind re
define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pshuf_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pshuf_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -837,7 +837,7 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_b:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x08,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -848,7 +848,7 @@ declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0a,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -859,7 +859,7 @@ declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_w:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x09,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -870,7 +870,7 @@ declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_mpsadbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x42,0xc1,0x07]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) ; <<16 x i16>> [#uses=1]
@@ -881,12 +881,12 @@ declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind rea
define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_packusdw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packusdw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
@@ -897,28 +897,28 @@ declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readno
define <16 x i16> @test_x86_avx2_packusdw_fold() {
; X86-AVX-LABEL: test_x86_avx2_packusdw_fold:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI55_0, kind: FK_Data_4
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI55_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI55_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packusdw_fold:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI55_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI55_0-4, kind: reloc_riprel_4byte
@@ -930,7 +930,7 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() {
define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: test_x86_avx2_pblendvb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4c,0xc1,0x20]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) ; <<32 x i8>> [#uses=1]
@@ -941,7 +941,7 @@ declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounw
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0e,0xc1,0x07]
; CHECK-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -953,12 +953,12 @@ declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind r
define <32 x i8> @test_x86_avx2_pmaxsb(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxsb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxsb:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -969,12 +969,12 @@ declare <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_pmaxsd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxsd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxsd:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -985,12 +985,12 @@ declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_pmaxud(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxud:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxud:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1001,12 +1001,12 @@ declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_pmaxuw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxuw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxuw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -1017,12 +1017,12 @@ declare <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_pminsb(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pminsb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminsb:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -1033,12 +1033,12 @@ declare <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_pminsd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pminsd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminsd:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1049,12 +1049,12 @@ declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_pminud(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pminud:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminud:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1065,12 +1065,12 @@ declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_pminuw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pminuw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminuw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -1088,7 +1088,7 @@ declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08]
; CHECK-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1100,7 +1100,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind
define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07]
; CHECK-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1115,12 +1115,12 @@ declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind
; the instruction.
define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_permd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_permd:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1134,12 +1134,12 @@ declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
; the instruction.
define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_permps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_permps:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
@@ -1150,13 +1150,13 @@ declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind reado
define <2 x i64> @test_x86_avx2_maskload_q(i8* %a0, <2 x i64> %a1) {
; X86-LABEL: test_x86_avx2_maskload_q:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x00]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_q:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x07]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1167,13 +1167,13 @@ declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>) nounwind readonly
define <4 x i64> @test_x86_avx2_maskload_q_256(i8* %a0, <4 x i64> %a1) {
; X86-LABEL: test_x86_avx2_maskload_q_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x00]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_q_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x07]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -1184,13 +1184,13 @@ declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>) nounwind readonl
define <4 x i32> @test_x86_avx2_maskload_d(i8* %a0, <4 x i32> %a1) {
; X86-LABEL: test_x86_avx2_maskload_d:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x00]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_d:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x07]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1201,13 +1201,13 @@ declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>) nounwind readonly
define <8 x i32> @test_x86_avx2_maskload_d_256(i8* %a0, <8 x i32> %a1) {
; X86-LABEL: test_x86_avx2_maskload_d_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x00]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_d_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x07]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1218,13 +1218,13 @@ declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>) nounwind readonl
define void @test_x86_avx2_maskstore_q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_q:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x08]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_q:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x0f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
call void @llvm.x86.avx2.maskstore.q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2)
@@ -1235,14 +1235,14 @@ declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind
define void @test_x86_avx2_maskstore_q_256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_q_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x08]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_q_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x0f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1254,13 +1254,13 @@ declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>) nounwind
define void @test_x86_avx2_maskstore_d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_d:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0x79,0x8e,0x08]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_d:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0x79,0x8e,0x0f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
call void @llvm.x86.avx2.maskstore.d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2)
@@ -1271,14 +1271,14 @@ declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind
define void @test_x86_avx2_maskstore_d_256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_d_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x08]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_d_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x0f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1290,12 +1290,12 @@ declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>) nounwind
define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psllv_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x47,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psllv_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1306,12 +1306,12 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psllv_d_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psllv_d_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1322,12 +1322,12 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psllv_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psllv_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1338,12 +1338,12 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psllv_q_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psllv_q_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -1354,12 +1354,12 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1370,12 +1370,12 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_d_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_d_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1386,12 +1386,12 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1402,12 +1402,12 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_q_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_q_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -1418,12 +1418,12 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrav_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrav_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1432,7 +1432,7 @@ define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @test_x86_avx2_psrav_d_const(<4 x i32> %a0, <4 x i32> %a1) {
; X86-AVX-LABEL: test_x86_avx2_psrav_d_const:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X86-AVX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI88_0, kind: FK_Data_4
@@ -1441,7 +1441,7 @@ define <4 x i32> @test_x86_avx2_psrav_d_const(<4 x i32> %a0, <4 x i32> %a1) {
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovdqa LCPI88_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI88_0, kind: FK_Data_4
@@ -1450,7 +1450,7 @@ define <4 x i32> @test_x86_avx2_psrav_d_const(<4 x i32> %a0, <4 x i32> %a1) {
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X64-AVX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI88_0-4, kind: reloc_riprel_4byte
@@ -1459,7 +1459,7 @@ define <4 x i32> @test_x86_avx2_psrav_d_const(<4 x i32> %a0, <4 x i32> %a1) {
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI88_0-4, kind: reloc_riprel_4byte
@@ -1473,12 +1473,12 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrav_d_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrav_d_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1487,7 +1487,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) {
define <8 x i32> @test_x86_avx2_psrav_d_256_const(<8 x i32> %a0, <8 x i32> %a1) {
; X86-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI90_0, kind: FK_Data_4
@@ -1496,7 +1496,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_256_const(<8 x i32> %a0, <8 x i32> %a1)
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovdqa LCPI90_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI90_0, kind: FK_Data_4
@@ -1505,7 +1505,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_256_const(<8 x i32> %a0, <8 x i32> %a1)
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI90_0-4, kind: reloc_riprel_4byte
@@ -1514,7 +1514,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_256_const(<8 x i32> %a0, <8 x i32> %a1)
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI90_0-4, kind: reloc_riprel_4byte
@@ -1528,13 +1528,13 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_pd:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0,
@@ -1546,13 +1546,13 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_pd_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0,
@@ -1564,13 +1564,13 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*,
define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, i8* %a1, <2 x i64> %idx, <2 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_pd:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_pd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0,
@@ -1582,13 +1582,13 @@ declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*,
define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, i8* %a1, <4 x i64> %idx, <4 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_pd_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_pd_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0,
@@ -1600,13 +1600,13 @@ declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*,
define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_ps:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0,
@@ -1618,13 +1618,13 @@ declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*,
define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_ps_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0,
@@ -1636,13 +1636,13 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*,
define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, i8* %a1, <2 x i64> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_ps:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_ps:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0,
@@ -1654,14 +1654,14 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*,
define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, i8* %a1, <4 x i64> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_ps_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_ps_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x4f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1674,13 +1674,13 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*,
define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, i8* %a1, <4 x i32> %idx, <2 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_q:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_q:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0,
@@ -1692,13 +1692,13 @@ declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*,
define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, i8* %a1, <4 x i32> %idx, <4 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_q_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_q_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0,
@@ -1710,13 +1710,13 @@ declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*,
define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, i8* %a1, <2 x i64> %idx, <2 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_q:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_q:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0,
@@ -1728,13 +1728,13 @@ declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*,
define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, i8* %a1, <4 x i64> %idx, <4 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_q_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_q_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0,
@@ -1746,13 +1746,13 @@ declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*,
define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, i8* %a1, <4 x i32> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_d:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_d:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0,
@@ -1764,13 +1764,13 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*,
define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, i8* %a1, <8 x i32> %idx, <8 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_d_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_d_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0,
@@ -1782,13 +1782,13 @@ declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*,
define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, i8* %a1, <2 x i64> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_d:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_d:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0,
@@ -1800,14 +1800,14 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*,
define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, i8* %a1, <4 x i64> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_d_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_d_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x4f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1822,7 +1822,7 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*,
define <8 x float> @test_gather_mask(<8 x float> %a0, float* %a, <8 x i32> %idx, <8 x float> %mask, float* nocapture %out) {
;; gather with mask
; X86-AVX-LABEL: test_gather_mask:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; X86-AVX-NEXT: vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
@@ -1831,7 +1831,7 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, float* %a, <8 x i32> %idx
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_gather_mask:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; X86-AVX512VL-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
@@ -1840,14 +1840,14 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, float* %a, <8 x i32> %idx
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_gather_mask:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
; X64-AVX-NEXT: vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
; X64-AVX-NEXT: vmovups %ymm2, (%rsi) ## encoding: [0xc5,0xfc,0x11,0x16]
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_gather_mask:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
; X64-AVX512VL-NEXT: vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
; X64-AVX512VL-NEXT: vmovups %ymm2, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x16]
diff --git a/test/CodeGen/X86/avx2-logic.ll b/test/CodeGen/X86/avx2-logic.ll
index 68d486699cb..8f2207fafef 100644
--- a/test/CodeGen/X86/avx2-logic.ll
+++ b/test/CodeGen/X86/avx2-logic.ll
@@ -4,14 +4,14 @@
define <4 x i64> @vpandn(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpandn:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X32-NEXT: vpandn %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpandn:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X64-NEXT: vpandn %ymm0, %ymm1, %ymm0
@@ -26,14 +26,14 @@ entry:
define <4 x i64> @vpand(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpand:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpand %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpand:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -47,14 +47,14 @@ entry:
define <4 x i64> @vpor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpor:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpor:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
@@ -68,14 +68,14 @@ entry:
define <4 x i64> @vpxor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpxor:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpxor:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -89,14 +89,14 @@ entry:
define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
; X32-LABEL: vpblendvb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $7, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpblendvb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $7, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
@@ -107,12 +107,12 @@ define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
define <8 x i32> @allOnes() nounwind {
; X32-LABEL: allOnes:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: allOnes:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
ret <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -120,12 +120,12 @@ define <8 x i32> @allOnes() nounwind {
define <16 x i16> @allOnes2() nounwind {
; X32-LABEL: allOnes2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: allOnes2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
ret <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
diff --git a/test/CodeGen/X86/avx2-masked-gather.ll b/test/CodeGen/X86/avx2-masked-gather.ll
index e8f5d7114fc..eb482c24cc9 100644
--- a/test/CodeGen/X86/avx2-masked-gather.ll
+++ b/test/CodeGen/X86/avx2-masked-gather.ll
@@ -8,7 +8,7 @@ declare <2 x i32> @llvm.masked.gather.v2i32(<2 x i32*> %ptrs, i32 %align, <2 x i
define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i32> %passthro) {
; X86-LABEL: masked_gather_v2i32:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -19,7 +19,7 @@ define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i3
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2i32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -29,20 +29,20 @@ define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i3
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2i32:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB0_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB0_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB0_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: movl (%rax), %eax
; NOGATHER-NEXT: vpinsrq $1, %rax, %xmm2, %xmm2
@@ -58,7 +58,7 @@ entry:
define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i32> %passthro) {
; X86-LABEL: masked_gather_v2i32_concat:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -69,7 +69,7 @@ define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks,
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2i32_concat:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -79,20 +79,20 @@ define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks,
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2i32_concat:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB1_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB1_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB1_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: movl (%rax), %eax
; NOGATHER-NEXT: vpinsrq $1, %rax, %xmm2, %xmm2
@@ -112,7 +112,7 @@ declare <2 x float> @llvm.masked.gather.v2float(<2 x float*> %ptrs, i32 %align,
define <2 x float> @masked_gather_v2float(<2 x float*>* %ptr, <2 x i1> %masks, <2 x float> %passthro) {
; X86-LABEL: masked_gather_v2float:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -122,7 +122,7 @@ define <2 x float> @masked_gather_v2float(<2 x float*>* %ptr, <2 x i1> %masks, <
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2float:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovaps (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpslld $31, %xmm0, %xmm0
@@ -131,20 +131,20 @@ define <2 x float> @masked_gather_v2float(<2 x float*>* %ptr, <2 x i1> %masks, <
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2float:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB2_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB2_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB2_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
; NOGATHER-NEXT: .LBB2_4: # %else2
@@ -160,7 +160,7 @@ entry:
define <4 x float> @masked_gather_v2float_concat(<2 x float*>* %ptr, <2 x i1> %masks, <2 x float> %passthro) {
; X86-LABEL: masked_gather_v2float_concat:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -170,7 +170,7 @@ define <4 x float> @masked_gather_v2float_concat(<2 x float*>* %ptr, <2 x i1> %m
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2float_concat:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovaps (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpslld $31, %xmm0, %xmm0
@@ -179,20 +179,20 @@ define <4 x float> @masked_gather_v2float_concat(<2 x float*>* %ptr, <2 x i1> %m
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2float_concat:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB3_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB3_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB3_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
; NOGATHER-NEXT: .LBB3_4: # %else2
@@ -212,14 +212,14 @@ declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 %align, <4 x i
define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ptrs, <4 x i1> %masks, <4 x i32> %passthro) {
; X86-LABEL: masked_gather_v4i32:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpslld $31, %xmm1, %xmm1
; X86-NEXT: vpgatherdd %xmm1, (,%xmm0), %xmm2
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v4i32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm1, (,%ymm0), %xmm2
; X64-NEXT: vmovdqa %xmm2, %xmm0
@@ -227,26 +227,26 @@ define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ptrs, <4 x i1> %masks, <4 x i3
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v4i32:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
; NOGATHER-NEXT: # implicit-def: %xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm0, %rax
; NOGATHER-NEXT: vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB4_2: # %else
; NOGATHER-NEXT: vpextrb $4, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm0, %rax
; NOGATHER-NEXT: vpinsrd $1, (%rax), %xmm3, %xmm3
; NOGATHER-NEXT: .LBB4_4: # %else2
; NOGATHER-NEXT: vpextrb $8, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm0, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vpinsrd $2, (%rax), %xmm3, %xmm3
@@ -254,7 +254,7 @@ define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ptrs, <4 x i1> %masks, <4 x i3
; NOGATHER-NEXT: vpextrb $12, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm0, %xmm0
; NOGATHER-NEXT: vpextrq $1, %xmm0, %rax
; NOGATHER-NEXT: vpinsrd $3, (%rax), %xmm3, %xmm3
@@ -272,14 +272,14 @@ declare <4 x float> @llvm.masked.gather.v4float(<4 x float*> %ptrs, i32 %align,
define <4 x float> @masked_gather_v4float(<4 x float*> %ptrs, <4 x i1> %masks, <4 x float> %passthro) {
; X86-LABEL: masked_gather_v4float:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpslld $31, %xmm1, %xmm1
; X86-NEXT: vgatherdps %xmm1, (,%xmm0), %xmm2
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v4float:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm1, (,%ymm0), %xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0
@@ -287,26 +287,26 @@ define <4 x float> @masked_gather_v4float(<4 x float*> %ptrs, <4 x i1> %masks, <
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v4float:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
; NOGATHER-NEXT: # implicit-def: %xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm0, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB5_2: # %else
; NOGATHER-NEXT: vpextrb $4, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm0, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
; NOGATHER-NEXT: .LBB5_4: # %else2
; NOGATHER-NEXT: vpextrb $8, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm0, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],mem[0],xmm3[3]
@@ -314,7 +314,7 @@ define <4 x float> @masked_gather_v4float(<4 x float*> %ptrs, <4 x i1> %masks, <
; NOGATHER-NEXT: vpextrb $12, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm0, %xmm0
; NOGATHER-NEXT: vpextrq $1, %xmm0, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],mem[0]
@@ -332,7 +332,7 @@ declare <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %ptrs, i32 %align, <8 x i
define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i32> %passthro) {
; X86-LABEL: masked_gather_v8i32:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: vpslld $31, %ymm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -342,7 +342,7 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v8i32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpslld $31, %ymm0, %ymm0
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
@@ -356,21 +356,21 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v8i32:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB6_2: # %else
; NOGATHER-NEXT: vpextrb $2, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm4, %rax
; NOGATHER-NEXT: vpinsrd $1, (%rax), %xmm2, %xmm5
; NOGATHER-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
@@ -378,7 +378,7 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; NOGATHER-NEXT: vpextrb $4, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm4, %xmm5
; NOGATHER-NEXT: vmovq %xmm5, %rax
; NOGATHER-NEXT: vpinsrd $2, (%rax), %xmm2, %xmm5
@@ -387,7 +387,7 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; NOGATHER-NEXT: vpextrb $6, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm4, %xmm4
; NOGATHER-NEXT: vpextrq $1, %xmm4, %rax
; NOGATHER-NEXT: vpinsrd $3, (%rax), %xmm2, %xmm4
@@ -396,7 +396,7 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_10
-; NOGATHER-NEXT: # BB#9: # %cond.load10
+; NOGATHER-NEXT: # %bb.9: # %cond.load10
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
; NOGATHER-NEXT: vpinsrd $0, (%rax), %xmm4, %xmm4
@@ -405,7 +405,7 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; NOGATHER-NEXT: vpextrb $10, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_12
-; NOGATHER-NEXT: # BB#11: # %cond.load13
+; NOGATHER-NEXT: # %bb.11: # %cond.load13
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
; NOGATHER-NEXT: vpinsrd $1, (%rax), %xmm4, %xmm4
@@ -414,7 +414,7 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; NOGATHER-NEXT: vpextrb $12, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_14
-; NOGATHER-NEXT: # BB#13: # %cond.load16
+; NOGATHER-NEXT: # %bb.13: # %cond.load16
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
@@ -424,7 +424,7 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; NOGATHER-NEXT: vpextrb $14, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_16
-; NOGATHER-NEXT: # BB#15: # %cond.load19
+; NOGATHER-NEXT: # %bb.15: # %cond.load19
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm3
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -448,7 +448,7 @@ declare <8 x float> @llvm.masked.gather.v8float(<8 x float*> %ptrs, i32 %align,
define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <8 x float> %passthro) {
; X86-LABEL: masked_gather_v8float:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: vpslld $31, %ymm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -458,7 +458,7 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v8float:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpslld $31, %ymm0, %ymm0
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
@@ -472,21 +472,21 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v8float:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB7_2: # %else
; NOGATHER-NEXT: vpextrb $2, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm4, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm5 = xmm2[0],mem[0],xmm2[2,3]
; NOGATHER-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
@@ -494,7 +494,7 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; NOGATHER-NEXT: vpextrb $4, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm4, %xmm5
; NOGATHER-NEXT: vmovq %xmm5, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm5 = xmm2[0,1],mem[0],xmm2[3]
@@ -503,7 +503,7 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; NOGATHER-NEXT: vpextrb $6, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm4, %xmm4
; NOGATHER-NEXT: vpextrq $1, %xmm4, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm4 = xmm2[0,1,2],mem[0]
@@ -512,7 +512,7 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_10
-; NOGATHER-NEXT: # BB#9: # %cond.load10
+; NOGATHER-NEXT: # %bb.9: # %cond.load10
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm5
@@ -522,7 +522,7 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; NOGATHER-NEXT: vpextrb $10, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_12
-; NOGATHER-NEXT: # BB#11: # %cond.load13
+; NOGATHER-NEXT: # %bb.11: # %cond.load13
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],mem[0],xmm4[2,3]
@@ -531,7 +531,7 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; NOGATHER-NEXT: vpextrb $12, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_14
-; NOGATHER-NEXT: # BB#13: # %cond.load16
+; NOGATHER-NEXT: # %bb.13: # %cond.load16
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
@@ -541,7 +541,7 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; NOGATHER-NEXT: vpextrb $14, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_16
-; NOGATHER-NEXT: # BB#15: # %cond.load19
+; NOGATHER-NEXT: # %bb.15: # %cond.load19
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm3
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -565,7 +565,7 @@ declare <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*> %ptrs, i32 %align, <4 x i
define <4 x i64> @masked_gather_v4i64(<4 x i64*>* %ptr, <4 x i1> %masks, <4 x i64> %passthro) {
; X86-LABEL: masked_gather_v4i64:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: vpmovsxdq %xmm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -575,7 +575,7 @@ define <4 x i64> @masked_gather_v4i64(<4 x i64*>* %ptr, <4 x i1> %masks, <4 x i6
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v4i64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %xmm0, %xmm0
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovdqa (%rdi), %ymm2
@@ -584,20 +584,20 @@ define <4 x i64> @masked_gather_v4i64(<4 x i64*>* %ptr, <4 x i1> %masks, <4 x i6
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v4i64:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; NOGATHER-NEXT: .LBB8_2: # %else
; NOGATHER-NEXT: vpextrb $4, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vpinsrq $1, (%rax), %xmm2, %xmm4
; NOGATHER-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
@@ -605,7 +605,7 @@ define <4 x i64> @masked_gather_v4i64(<4 x i64*>* %ptr, <4 x i1> %masks, <4 x i6
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
@@ -615,7 +615,7 @@ define <4 x i64> @masked_gather_v4i64(<4 x i64*>* %ptr, <4 x i1> %masks, <4 x i6
; NOGATHER-NEXT: vpextrb $12, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm3
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -640,7 +640,7 @@ declare <4 x double> @llvm.masked.gather.v4double(<4 x double*> %ptrs, i32 %alig
define <4 x double> @masked_gather_v4double(<4 x double*>* %ptr, <4 x i1> %masks, <4 x double> %passthro) {
; X86-LABEL: masked_gather_v4double:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: vpmovsxdq %xmm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -650,7 +650,7 @@ define <4 x double> @masked_gather_v4double(<4 x double*>* %ptr, <4 x i1> %masks
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v4double:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %xmm0, %xmm0
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovapd (%rdi), %ymm2
@@ -659,20 +659,20 @@ define <4 x double> @masked_gather_v4double(<4 x double*>* %ptr, <4 x i1> %masks
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v4double:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; NOGATHER-NEXT: .LBB9_2: # %else
; NOGATHER-NEXT: vpextrb $4, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vmovhpd {{.*#+}} xmm4 = xmm2[0],mem[0]
; NOGATHER-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
@@ -680,7 +680,7 @@ define <4 x double> @masked_gather_v4double(<4 x double*>* %ptr, <4 x i1> %masks
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
@@ -690,7 +690,7 @@ define <4 x double> @masked_gather_v4double(<4 x double*>* %ptr, <4 x i1> %masks
; NOGATHER-NEXT: vpextrb $12, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm3
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -715,7 +715,7 @@ declare <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %ptrs, i32 %align, <2 x i
define <2 x i64> @masked_gather_v2i64(<2 x i64*>* %ptr, <2 x i1> %masks, <2 x i64> %passthro) {
; X86-LABEL: masked_gather_v2i64:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsxdq (%eax), %xmm2
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
@@ -724,7 +724,7 @@ define <2 x i64> @masked_gather_v2i64(<2 x i64*>* %ptr, <2 x i1> %masks, <2 x i6
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2i64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsllq $63, %xmm0, %xmm0
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpgatherqq %xmm0, (,%xmm2), %xmm1
@@ -732,20 +732,20 @@ define <2 x i64> @masked_gather_v2i64(<2 x i64*>* %ptr, <2 x i1> %masks, <2 x i6
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2i64:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB10_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; NOGATHER-NEXT: .LBB10_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB10_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vpinsrq $1, (%rax), %xmm2, %xmm2
; NOGATHER-NEXT: .LBB10_4: # %else2
@@ -762,7 +762,7 @@ declare <2 x double> @llvm.masked.gather.v2double(<2 x double*> %ptrs, i32 %alig
define <2 x double> @masked_gather_v2double(<2 x double*>* %ptr, <2 x i1> %masks, <2 x double> %passthro) {
; X86-LABEL: masked_gather_v2double:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsxdq (%eax), %xmm2
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
@@ -771,7 +771,7 @@ define <2 x double> @masked_gather_v2double(<2 x double*>* %ptr, <2 x i1> %masks
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2double:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsllq $63, %xmm0, %xmm0
; X64-NEXT: vmovapd (%rdi), %xmm2
; X64-NEXT: vgatherqpd %xmm0, (,%xmm2), %xmm1
@@ -779,20 +779,20 @@ define <2 x double> @masked_gather_v2double(<2 x double*>* %ptr, <2 x i1> %masks
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2double:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB11_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; NOGATHER-NEXT: .LBB11_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB11_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vmovhpd {{.*#+}} xmm2 = xmm2[0],mem[0]
; NOGATHER-NEXT: .LBB11_4: # %else2
diff --git a/test/CodeGen/X86/avx2-nontemporal.ll b/test/CodeGen/X86/avx2-nontemporal.ll
index 55c966f6f88..cb51df3065d 100644
--- a/test/CodeGen/X86/avx2-nontemporal.ll
+++ b/test/CodeGen/X86/avx2-nontemporal.ll
@@ -4,7 +4,7 @@
define void @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H) nounwind {
; X32-LABEL: f:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-32, %esp
@@ -31,7 +31,7 @@ define void @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32>
; X32-NEXT: retl
;
; X64-LABEL: f:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovntps %ymm0, (%rdi)
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm2, %ymm0
diff --git a/test/CodeGen/X86/avx2-phaddsub.ll b/test/CodeGen/X86/avx2-phaddsub.ll
index 232a3326fa1..67ea37575ab 100644
--- a/test/CodeGen/X86/avx2-phaddsub.ll
+++ b/test/CodeGen/X86/avx2-phaddsub.ll
@@ -4,12 +4,12 @@
define <16 x i16> @phaddw1(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phaddw1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddw1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
@@ -20,12 +20,12 @@ define <16 x i16> @phaddw1(<16 x i16> %x, <16 x i16> %y) {
define <16 x i16> @phaddw2(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phaddw2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddw2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31>
@@ -36,12 +36,12 @@ define <16 x i16> @phaddw2(<16 x i16> %x, <16 x i16> %y) {
define <8 x i32> @phaddd1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phaddd1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -52,12 +52,12 @@ define <8 x i32> @phaddd1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phaddd2(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phaddd2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 5, i32 6, i32 13, i32 14>
@@ -68,12 +68,12 @@ define <8 x i32> @phaddd2(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phaddd3(<8 x i32> %x) {
; X32-LABEL: phaddd3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
@@ -84,12 +84,12 @@ define <8 x i32> @phaddd3(<8 x i32> %x) {
define <16 x i16> @phsubw1(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phsubw1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubw1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
@@ -100,12 +100,12 @@ define <16 x i16> @phsubw1(<16 x i16> %x, <16 x i16> %y) {
define <8 x i32> @phsubd1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phsubd1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubd1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -116,12 +116,12 @@ define <8 x i32> @phsubd1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phsubd2(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phsubd2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubd2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 undef, i32 8, i32 undef, i32 4, i32 6, i32 12, i32 14>
diff --git a/test/CodeGen/X86/avx2-pmovxrm.ll b/test/CodeGen/X86/avx2-pmovxrm.ll
index 7ba7ae52738..67f33b2a14c 100644
--- a/test/CodeGen/X86/avx2-pmovxrm.ll
+++ b/test/CodeGen/X86/avx2-pmovxrm.ll
@@ -6,13 +6,13 @@
define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxbw:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbw (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxbw:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbw (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -22,13 +22,13 @@ define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxbd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxbd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbd (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -39,13 +39,13 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxbq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxbq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbq (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -56,13 +56,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxwd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxwd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxwd (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -72,13 +72,13 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxwq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxwq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxwq (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -89,13 +89,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxdq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxdq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxdq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxdq (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a, align 1
@@ -105,13 +105,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxbw:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxbw:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -121,13 +121,13 @@ define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxbd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxbd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -138,13 +138,13 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxbq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxbq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -155,13 +155,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxwd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxwd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -171,13 +171,13 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxwq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxwq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -188,13 +188,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxdq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxdq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a, align 1
diff --git a/test/CodeGen/X86/avx2-schedule.ll b/test/CodeGen/X86/avx2-schedule.ll
index cec8ca94409..1c750350204 100644
--- a/test/CodeGen/X86/avx2-schedule.ll
+++ b/test/CodeGen/X86/avx2-schedule.ll
@@ -8,37 +8,37 @@
define <8 x i32> @test_broadcasti128(<8 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_broadcasti128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [4:0.50]
; GENERIC-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcasti128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [1:0.50]
; HASWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcasti128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [6:0.50]
; BROADWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcasti128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [7:0.50]
; SKYLAKE-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcasti128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [7:0.50]
; SKX-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_broadcasti128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [8:0.50]
; ZNVER1-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -50,37 +50,37 @@ define <8 x i32> @test_broadcasti128(<8 x i32> %a0, <4 x i32> *%a1) {
define <4 x double> @test_broadcastsd_ymm(<2 x double> %a0) {
; GENERIC-LABEL: test_broadcastsd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastsd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastsd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastsd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastsd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_broadcastsd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [100:0.25]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -91,37 +91,37 @@ define <4 x double> @test_broadcastsd_ymm(<2 x double> %a0) {
define <4 x float> @test_broadcastss(<4 x float> %a0) {
; GENERIC-LABEL: test_broadcastss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_broadcastss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -132,37 +132,37 @@ define <4 x float> @test_broadcastss(<4 x float> %a0) {
define <8 x float> @test_broadcastss_ymm(<4 x float> %a0) {
; GENERIC-LABEL: test_broadcastss_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastss_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastss_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastss_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastss_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_broadcastss_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [100:0.25]
; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -173,7 +173,7 @@ define <8 x float> @test_broadcastss_ymm(<4 x float> %a0) {
define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_extracti128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [3:1.00]
; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [1:1.00]
@@ -182,7 +182,7 @@ define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_extracti128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.50]
; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
@@ -191,7 +191,7 @@ define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2)
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_extracti128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
@@ -200,7 +200,7 @@ define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2)
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_extracti128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
@@ -209,7 +209,7 @@ define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2)
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_extracti128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.33]
; SKX-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
@@ -218,7 +218,7 @@ define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2)
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_extracti128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [2:0.25]
@@ -235,32 +235,32 @@ define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2)
define <2 x double> @test_gatherdpd(<2 x double> %a0, i8* %a1, <4 x i32> %a2, <2 x double> %a3) {
; GENERIC-LABEL: test_gatherdpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherdpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherdpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [25:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherdpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherdpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherdpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0, i8* %a1, <4 x i32> %a2, <2 x double> %a3, i8 2)
@@ -270,32 +270,32 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*, <4 x i32>, <2
define <4 x double> @test_gatherdpd_ymm(<4 x double> %a0, i8* %a1, <4 x i32> %a2, <4 x double> %a3) {
; GENERIC-LABEL: test_gatherdpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherdpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherdpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [26:5.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherdpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherdpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherdpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0, i8* %a1, <4 x i32> %a2, <4 x double> %a3, i8 8)
@@ -305,32 +305,32 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*, <4 x i32>
define <4 x float> @test_gatherdps(<4 x float> %a0, i8* %a1, <4 x i32> %a2, <4 x float> %a3) {
; GENERIC-LABEL: test_gatherdps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherdps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherdps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [25:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherdps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherdps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherdps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0, i8* %a1, <4 x i32> %a2, <4 x float> %a3, i8 2)
@@ -340,32 +340,32 @@ declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x
define <8 x float> @test_gatherdps_ymm(<8 x float> %a0, i8* %a1, <8 x i32> %a2, <8 x float> %a3) {
; GENERIC-LABEL: test_gatherdps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherdps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherdps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [26:4.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherdps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherdps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherdps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0, i8* %a1, <8 x i32> %a2, <8 x float> %a3, i8 4)
@@ -375,32 +375,32 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>,
define <2 x double> @test_gatherqpd(<2 x double> %a0, i8* %a1, <2 x i64> %a2, <2 x double> %a3) {
; GENERIC-LABEL: test_gatherqpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherqpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherqpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherqpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherqpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherqpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0, i8* %a1, <2 x i64> %a2, <2 x double> %a3, i8 2)
@@ -410,32 +410,32 @@ declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*, <2 x i64>, <2
define <4 x double> @test_gatherqpd_ymm(<4 x double> %a0, i8* %a1, <4 x i64> %a2, <4 x double> %a3) {
; GENERIC-LABEL: test_gatherqpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherqpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherqpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [23:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherqpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherqpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherqpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0, i8* %a1, <4 x i64> %a2, <4 x double> %a3, i8 8)
@@ -445,32 +445,32 @@ declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*, <4 x i64>
define <4 x float> @test_gatherqps(<4 x float> %a0, i8* %a1, <2 x i64> %a2, <4 x float> %a3) {
; GENERIC-LABEL: test_gatherqps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherqps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherqps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [27:5.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherqps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherqps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherqps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0, i8* %a1, <2 x i64> %a2, <4 x float> %a3, i8 2)
@@ -480,37 +480,37 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*, <2 x i64>, <4 x
define <4 x float> @test_gatherqps_ymm(<4 x float> %a0, i8* %a1, <4 x i64> %a2, <4 x float> %a3) {
; GENERIC-LABEL: test_gatherqps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherqps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [1:?]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherqps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [24:5.00]
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherqps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [25:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherqps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [25:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherqps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -521,42 +521,42 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*, <4 x i64>,
define <8 x i32> @test_inserti128(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_inserti128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_inserti128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_inserti128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_inserti128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_inserti128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKX-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SKX-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_inserti128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.25]
; ZNVER1-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
; ZNVER1-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -572,32 +572,32 @@ define <8 x i32> @test_inserti128(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2)
define <4 x i64> @test_movntdqa(i8* %a0) {
; GENERIC-LABEL: test_movntdqa:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntdqa:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntdqa:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntdqa:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntdqa:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_movntdqa:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %a0)
@@ -607,37 +607,37 @@ declare <4 x i64> @llvm.x86.avx2.movntdqa(i8*) nounwind readonly
define <16 x i16> @test_mpsadbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_mpsadbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mpsadbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [7:2.00]
; HASWELL-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mpsadbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [7:2.00]
; BROADWELL-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mpsadbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [4:2.00]
; SKYLAKE-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mpsadbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [4:2.00]
; SKX-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_mpsadbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -651,42 +651,42 @@ declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind rea
define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) {
; GENERIC-LABEL: test_pabsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpabsb %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpabsb (%rdi), %ymm1 # sched: [7:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsb (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsb (%rdi), %ymm1 # sched: [7:0.50]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pabsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -701,42 +701,42 @@ declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
define <8 x i32> @test_pabsd(<8 x i32> %a0, <8 x i32> *%a1) {
; GENERIC-LABEL: test_pabsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpabsd %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpabsd (%rdi), %ymm1 # sched: [7:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsd (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsd (%rdi), %ymm1 # sched: [7:0.50]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pabsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -751,42 +751,42 @@ declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pabsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpabsw %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpabsw (%rdi), %ymm1 # sched: [7:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsw (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsw (%rdi), %ymm1 # sched: [7:0.50]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pabsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -801,37 +801,37 @@ declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
define <16 x i16> @test_packssdw(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_packssdw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packssdw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packssdw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packssdw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packssdw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_packssdw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -845,37 +845,37 @@ declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readno
define <32 x i8> @test_packsswb(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_packsswb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packsswb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packsswb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packsswb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packsswb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_packsswb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -889,37 +889,37 @@ declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_packusdw(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_packusdw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packusdw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packusdw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packusdw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packusdw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_packusdw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -933,37 +933,37 @@ declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readno
define <32 x i8> @test_packuswb(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_packuswb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packuswb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packuswb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packuswb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packuswb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_packuswb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -977,37 +977,37 @@ declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1019,37 +1019,37 @@ define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_paddd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1061,37 +1061,37 @@ define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_paddq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1103,37 +1103,37 @@ define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
define <32 x i8> @test_paddsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1146,37 +1146,37 @@ declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @test_paddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1189,37 +1189,37 @@ declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_paddusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddusb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddusb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddusb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddusb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddusb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1232,37 +1232,37 @@ declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnon
define <16 x i16> @test_paddusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddusw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddusw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddusw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddusw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddusw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1275,37 +1275,37 @@ declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1317,37 +1317,37 @@ define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
define <32 x i8> @test_palignr(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_palignr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; GENERIC-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_palignr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; HASWELL-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_palignr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; BROADWELL-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_palignr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; SKYLAKE-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_palignr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; SKX-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_palignr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:0.25]
; ZNVER1-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1359,42 +1359,42 @@ define <32 x i8> @test_palignr(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pand:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pand:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pand:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pand:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pand:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pand:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1408,42 +1408,42 @@ define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pandn:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pandn:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pandn:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [7:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pandn:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pandn:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pandn:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1459,37 +1459,37 @@ define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
define <32 x i8> @test_pavgb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pavgb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pavgb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pavgb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pavgb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pavgb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1511,37 +1511,37 @@ define <32 x i8> @test_pavgb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
define <16 x i16> @test_pavgw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pavgw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pavgw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pavgw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pavgw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pavgw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1563,42 +1563,42 @@ define <16 x i16> @test_pavgw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
define <4 x i32> @test_pblendd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pblendd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.50]
; GENERIC-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [5:0.50]
; GENERIC-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; HASWELL-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [1:0.50]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; BROADWELL-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [6:0.50]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; SKYLAKE-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [7:0.50]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [7:0.50]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pblendd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.50]
; ZNVER1-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [8:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1612,42 +1612,42 @@ define <4 x i32> @test_pblendd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
define <8 x i32> @test_pblendd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pblendd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.50]
; GENERIC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [5:0.50]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; HASWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [1:0.50]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; BROADWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [7:0.50]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; SKYLAKE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [8:0.50]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; SKX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [8:0.50]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pblendd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.50]
; ZNVER1-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [9:1.50]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1661,37 +1661,37 @@ define <8 x i32> @test_pblendd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2)
define <32 x i8> @test_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2, <32 x i8> *%a3, <32 x i8> %a4) {
; GENERIC-LABEL: test_pblendvb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
; GENERIC-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendvb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendvb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BROADWELL-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendvb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
; SKYLAKE-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendvb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
; SKX-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pblendvb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1704,37 +1704,37 @@ declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounw
define <16 x i16> @test_pblendw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pblendw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:0.50]
; GENERIC-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; HASWELL-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; BROADWELL-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; SKYLAKE-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; SKX-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pblendw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [2:0.33]
; ZNVER1-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [9:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1746,42 +1746,42 @@ define <16 x i16> @test_pblendw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2)
define <16 x i8> @test_pbroadcastb(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pbroadcastb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [4:1.00]
; HASWELL-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [7:1.00]
; SKYLAKE-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [7:1.00]
; SKX-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [8:1.00]
; ZNVER1-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1795,42 +1795,42 @@ define <16 x i8> @test_pbroadcastb(<16 x i8> %a0, <16 x i8> *%a1) {
define <32 x i8> @test_pbroadcastb_ymm(<32 x i8> %a0, <32 x i8> *%a1) {
; GENERIC-LABEL: test_pbroadcastb_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastb_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [4:1.00]
; HASWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastb_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastb_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastb_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastb_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [8:2.00]
; ZNVER1-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1844,41 +1844,41 @@ define <32 x i8> @test_pbroadcastb_ymm(<32 x i8> %a0, <32 x i8> *%a1) {
define <4 x i32> @test_pbroadcastd(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pbroadcastd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [5:0.50]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [6:0.50]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpaddd (%rdi){1to4}, %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1892,41 +1892,41 @@ define <4 x i32> @test_pbroadcastd(<4 x i32> %a0, <4 x i32> *%a1) {
define <8 x i32> @test_pbroadcastd_ymm(<8 x i32> %a0, <8 x i32> *%a1) {
; GENERIC-LABEL: test_pbroadcastd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpaddd (%rdi){1to8}, %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1940,41 +1940,41 @@ define <8 x i32> @test_pbroadcastd_ymm(<8 x i32> %a0, <8 x i32> *%a1) {
define <2 x i64> @test_pbroadcastq(<2 x i64> %a0, <2 x i64> *%a1) {
; GENERIC-LABEL: test_pbroadcastq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [5:0.50]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [6:0.50]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpaddq (%rdi){1to2}, %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1988,41 +1988,41 @@ define <2 x i64> @test_pbroadcastq(<2 x i64> %a0, <2 x i64> *%a1) {
define <4 x i64> @test_pbroadcastq_ymm(<4 x i64> %a0, <4 x i64> *%a1) {
; GENERIC-LABEL: test_pbroadcastq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpaddq (%rdi){1to4}, %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -2036,42 +2036,42 @@ define <4 x i64> @test_pbroadcastq_ymm(<4 x i64> %a0, <4 x i64> *%a1) {
define <8 x i16> @test_pbroadcastw(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pbroadcastw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [4:1.00]
; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [7:1.00]
; SKYLAKE-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [7:1.00]
; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [8:1.00]
; ZNVER1-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2085,42 +2085,42 @@ define <8 x i16> @test_pbroadcastw(<8 x i16> %a0, <8 x i16> *%a1) {
define <16 x i16> @test_pbroadcastw_ymm(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pbroadcastw_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastw_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [4:1.00]
; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastw_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastw_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastw_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastw_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [8:2.00]
; ZNVER1-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -2134,31 +2134,31 @@ define <16 x i16> @test_pbroadcastw_ymm(<16 x i16> %a0, <16 x i16> *%a1) {
define <32 x i8> @test_pcmpeqb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpeqb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2b %k0, %ymm0
; SKX-NEXT: vpcmpeqb (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2166,7 +2166,7 @@ define <32 x i8> @test_pcmpeqb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpeqb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2180,31 +2180,31 @@ define <32 x i8> @test_pcmpeqb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
define <8 x i32> @test_pcmpeqd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pcmpeqd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2212,7 +2212,7 @@ define <8 x i32> @test_pcmpeqd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpeqd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2226,31 +2226,31 @@ define <8 x i32> @test_pcmpeqd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
define <4 x i64> @test_pcmpeqq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pcmpeqq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %ymm0
; SKX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2258,7 +2258,7 @@ define <4 x i64> @test_pcmpeqq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpeqq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2272,31 +2272,31 @@ define <4 x i64> @test_pcmpeqq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
define <16 x i16> @test_pcmpeqw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pcmpeqw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2w %k0, %ymm0
; SKX-NEXT: vpcmpeqw (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2304,7 +2304,7 @@ define <16 x i16> @test_pcmpeqw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2)
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpeqw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2318,31 +2318,31 @@ define <16 x i16> @test_pcmpeqw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2)
define <32 x i8> @test_pcmpgtb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpgtb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2b %k0, %ymm0
; SKX-NEXT: vpcmpgtb (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2350,7 +2350,7 @@ define <32 x i8> @test_pcmpgtb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpgtb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2364,31 +2364,31 @@ define <32 x i8> @test_pcmpgtb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
define <8 x i32> @test_pcmpgtd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pcmpgtd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2396,7 +2396,7 @@ define <8 x i32> @test_pcmpgtd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpgtd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2410,31 +2410,31 @@ define <8 x i32> @test_pcmpgtd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
define <4 x i64> @test_pcmpgtq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pcmpgtq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %ymm0
; SKX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2442,7 +2442,7 @@ define <4 x i64> @test_pcmpgtq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpgtq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2456,31 +2456,31 @@ define <4 x i64> @test_pcmpgtq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
define <16 x i16> @test_pcmpgtw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pcmpgtw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2w %k0, %ymm0
; SKX-NEXT: vpcmpgtw (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2488,7 +2488,7 @@ define <16 x i16> @test_pcmpgtw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2)
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpgtw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2502,42 +2502,42 @@ define <16 x i16> @test_pcmpgtw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2)
define <4 x i64> @test_perm2i128(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_perm2i128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_perm2i128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; HASWELL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_perm2i128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; BROADWELL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_perm2i128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKYLAKE-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_perm2i128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_perm2i128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [2:0.25]
; ZNVER1-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [9:0.50]
; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -2551,42 +2551,42 @@ define <4 x i64> @test_perm2i128(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
define <8 x i32> @test_permd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_permd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKYLAKE-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKX-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_permd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [2:0.25]
; ZNVER1-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
; ZNVER1-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -2601,42 +2601,42 @@ declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
define <4 x double> @test_permpd(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_permpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [1:1.00]
; GENERIC-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [5:1.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; HASWELL-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [3:1.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; BROADWELL-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [9:1.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKYLAKE-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [10:1.00]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [10:1.00]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_permpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [107:0.50]
; ZNVER1-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [100:0.25]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -2650,42 +2650,42 @@ define <4 x double> @test_permpd(<4 x double> %a0, <4 x double> *%a1) {
define <8 x float> @test_permps(<8 x i32> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_permps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKX-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_permps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [100:0.25]
; ZNVER1-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [107:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -2700,42 +2700,42 @@ declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind reado
define <4 x i64> @test_permq(<4 x i64> %a0, <4 x i64> *%a1) {
; GENERIC-LABEL: test_permq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [1:1.00]
; GENERIC-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; HASWELL-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; BROADWELL-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKYLAKE-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_permq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [9:0.50]
; ZNVER1-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [2:0.25]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -2749,32 +2749,32 @@ define <4 x i64> @test_permq(<4 x i64> %a0, <4 x i64> *%a1) {
define <4 x i32> @test_pgatherdd(<4 x i32> %a0, i8* %a1, <4 x i32> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherdd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherdd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherdd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherdd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherdd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0, i8* %a1, <4 x i32> %a2, <4 x i32> %a3, i8 2)
@@ -2784,32 +2784,32 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>
define <8 x i32> @test_pgatherdd_ymm(<8 x i32> %a0, i8* %a1, <8 x i32> %a2, <8 x i32> %a3) {
; GENERIC-LABEL: test_pgatherdd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherdd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherdd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherdd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherdd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0, i8* %a1, <8 x i32> %a2, <8 x i32> %a3, i8 2)
@@ -2819,32 +2819,32 @@ declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*, <8 x i32>, <8 x
define <2 x i64> @test_pgatherdq(<2 x i64> %a0, i8* %a1, <4 x i32> %a2, <2 x i64> %a3) {
; GENERIC-LABEL: test_pgatherdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0, i8* %a1, <4 x i32> %a2, <2 x i64> %a3, i8 2)
@@ -2854,32 +2854,32 @@ declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*, <4 x i32>, <2 x i64>
define <4 x i64> @test_pgatherdq_ymm(<4 x i64> %a0, i8* %a1, <4 x i32> %a2, <4 x i64> %a3) {
; GENERIC-LABEL: test_pgatherdq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherdq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherdq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherdq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherdq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0, i8* %a1, <4 x i32> %a2, <4 x i64> %a3, i8 2)
@@ -2889,32 +2889,32 @@ declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*, <4 x i32>, <4 x
define <4 x i32> @test_pgatherqd(<4 x i32> %a0, i8* %a1, <2 x i64> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherqd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherqd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherqd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherqd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherqd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0, i8* %a1, <2 x i64> %a2, <4 x i32> %a3, i8 2)
@@ -2924,37 +2924,37 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*, <2 x i64>, <4 x i32>
define <4 x i32> @test_pgatherqd_ymm(<4 x i32> %a0, i8* %a1, <4 x i64> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherqd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherqd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherqd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [25:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherqd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [25:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherqd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2965,32 +2965,32 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*, <4 x i64>, <4 x
define <2 x i64> @test_pgatherqq(<2 x i64> %a0, i8 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; GENERIC-LABEL: test_pgatherqq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherqq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherqq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherqq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherqq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0, i8* %a1, <2 x i64> %a2, <2 x i64> %a3, i8 2)
@@ -3000,32 +3000,32 @@ declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*, <2 x i64>, <2 x i64>
define <4 x i64> @test_pgatherqq_ymm(<4 x i64> %a0, i8 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
; GENERIC-LABEL: test_pgatherqq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherqq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherqq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherqq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherqq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0, i8* %a1, <4 x i64> %a2, <4 x i64> %a3, i8 2)
@@ -3035,37 +3035,37 @@ declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*, <4 x i64>, <4 x
define <8 x i32> @test_phaddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_phaddd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phaddd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3078,37 +3078,37 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_phaddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phaddsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phaddsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3121,37 +3121,37 @@ declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_phaddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phaddw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phaddw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3164,37 +3164,37 @@ declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @test_phsubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_phsubd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phsubd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3207,37 +3207,37 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_phsubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phsubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phsubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3250,37 +3250,37 @@ declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_phsubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phsubw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phsubw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3293,37 +3293,37 @@ declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_pmaddubsw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaddubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaddubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaddubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaddubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaddubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3337,37 +3337,37 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind rea
define <8 x i32> @test_pmaddwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaddwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaddwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaddwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaddwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaddwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3381,42 +3381,42 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readn
define <4 x i32> @test_pmaskmovd(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) {
; GENERIC-LABEL: test_pmaskmovd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2
; GENERIC-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; GENERIC-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaskmovd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [7:2.00]
; BROADWELL-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKYLAKE-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaskmovd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKX-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaskmovd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [100:?]
; ZNVER1-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
@@ -3430,42 +3430,42 @@ declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind
define <8 x i32> @test_pmaskmovd_ymm(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) {
; GENERIC-LABEL: test_pmaskmovd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2
; GENERIC-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi)
; GENERIC-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaskmovd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [8:2.00]
; BROADWELL-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKYLAKE-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaskmovd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKX-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaskmovd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [100:?]
; ZNVER1-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %ymm2, %ymm0 # sched: [2:0.25]
@@ -3479,42 +3479,42 @@ declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>) nounwind
define <2 x i64> @test_pmaskmovq(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) {
; GENERIC-LABEL: test_pmaskmovq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2
; GENERIC-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi)
; GENERIC-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaskmovq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [7:2.00]
; BROADWELL-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKYLAKE-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaskmovq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKX-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaskmovq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
; ZNVER1-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
@@ -3528,42 +3528,42 @@ declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind
define <4 x i64> @test_pmaskmovq_ymm(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) {
; GENERIC-LABEL: test_pmaskmovq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2
; GENERIC-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi)
; GENERIC-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaskmovq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [8:2.00]
; BROADWELL-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKYLAKE-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaskmovq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKX-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaskmovq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [9:1.50]
; ZNVER1-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %ymm2, %ymm0 # sched: [2:0.25]
@@ -3577,37 +3577,37 @@ declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>) nounwind
define <32 x i8> @test_pmaxsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaxsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3620,37 +3620,37 @@ declare <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_pmaxsd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmaxsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3663,37 +3663,37 @@ declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_pmaxsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaxsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3706,37 +3706,37 @@ declare <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_pmaxub(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaxub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3749,37 +3749,37 @@ declare <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_pmaxud(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmaxud:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxud:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxud:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxud:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxud:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxud:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3792,37 +3792,37 @@ declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_pmaxuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaxuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3835,37 +3835,37 @@ declare <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_pminsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pminsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3878,37 +3878,37 @@ declare <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_pminsd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pminsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3921,37 +3921,37 @@ declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_pminsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pminsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3964,37 +3964,37 @@ declare <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_pminub(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pminub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4007,37 +4007,37 @@ declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_pminud(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pminud:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminud:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminud:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminud:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminud:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminud:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4050,37 +4050,37 @@ declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_pminuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pminuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4093,37 +4093,37 @@ declare <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16>, <16 x i16>) nounwind readn
define i32 @test_pmovmskb(<32 x i8> %a0) {
; GENERIC-LABEL: test_pmovmskb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovmskb %ymm0, %eax # sched: [1:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovmskb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovmskb %ymm0, %eax # sched: [3:1.00]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovmskb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovmskb %ymm0, %eax # sched: [3:1.00]
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovmskb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovmskb %ymm0, %eax # sched: [2:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovmskb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovmskb %ymm0, %eax # sched: [2:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovmskb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovmskb %ymm0, %eax # sched: [2:1.00]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4134,42 +4134,42 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone
define <8 x i32> @test_pmovsxbd(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxbd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4185,42 +4185,42 @@ define <8 x i32> @test_pmovsxbd(<16 x i8> %a0, <16 x i8> *%a1) {
define <4 x i64> @test_pmovsxbq(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxbq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4236,42 +4236,42 @@ define <4 x i64> @test_pmovsxbq(<16 x i8> %a0, <16 x i8> *%a1) {
define <16 x i16> @test_pmovsxbw(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [9:1.00]
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4285,42 +4285,42 @@ define <16 x i16> @test_pmovsxbw(<16 x i8> %a0, <16 x i8> *%a1) {
define <4 x i64> @test_pmovsxdq(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pmovsxdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [9:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4334,42 +4334,42 @@ define <4 x i64> @test_pmovsxdq(<4 x i32> %a0, <4 x i32> *%a1) {
define <8 x i32> @test_pmovsxwd(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pmovsxwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [9:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4383,42 +4383,42 @@ define <8 x i32> @test_pmovsxwd(<8 x i16> %a0, <8 x i16> *%a1) {
define <4 x i64> @test_pmovsxwq(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pmovsxwq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxwq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxwq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxwq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxwq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxwq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4434,42 +4434,42 @@ define <4 x i64> @test_pmovsxwq(<8 x i16> %a0, <8 x i16> *%a1) {
define <8 x i32> @test_pmovzxbd(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [10:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxbd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4485,42 +4485,42 @@ define <8 x i32> @test_pmovzxbd(<16 x i8> %a0, <16 x i8> *%a1) {
define <4 x i64> @test_pmovzxbq(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; SKX-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxbq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4536,42 +4536,42 @@ define <4 x i64> @test_pmovzxbq(<16 x i8> %a0, <16 x i8> *%a1) {
define <16 x i16> @test_pmovzxbw(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [5:1.00]
; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [3:1.00]
; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [10:1.00]
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:0.50]
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4585,42 +4585,42 @@ define <16 x i16> @test_pmovzxbw(<16 x i8> %a0, <16 x i8> *%a1) {
define <4 x i64> @test_pmovzxdq(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pmovzxdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4634,42 +4634,42 @@ define <4 x i64> @test_pmovzxdq(<4 x i32> %a0, <4 x i32> *%a1) {
define <8 x i32> @test_pmovzxwd(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pmovzxwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [9:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [9:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4683,42 +4683,42 @@ define <8 x i32> @test_pmovzxwd(<8 x i16> %a0, <8 x i16> *%a1) {
define <4 x i64> @test_pmovzxwq(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pmovzxwq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxwq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxwq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxwq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxwq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxwq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4734,37 +4734,37 @@ define <4 x i64> @test_pmovzxwq(<8 x i16> %a0, <8 x i16> *%a1) {
define <4 x i64> @test_pmuldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmuldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmuldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmuldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmuldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmuldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4778,37 +4778,37 @@ declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_pmulhrsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhrsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhrsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhrsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhrsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhrsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmulhrsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4821,37 +4821,37 @@ declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind re
define <16 x i16> @test_pmulhuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmulhuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4864,37 +4864,37 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_pmulhw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmulhw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4907,37 +4907,37 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmulld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00]
; HASWELL-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00]
; BROADWELL-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [16:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [8:0.67]
; SKYLAKE-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [15:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [8:0.67]
; SKX-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [15:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmulld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4949,37 +4949,37 @@ define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmullw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmullw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmullw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmullw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmullw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmullw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4991,37 +4991,37 @@ define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2)
define <4 x i64> @test_pmuludq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmuludq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuludq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmuludq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmuludq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmuludq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmuludq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5035,42 +5035,42 @@ declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnon
define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_por:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_por:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_por:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_por:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_por:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_por:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5084,37 +5084,37 @@ define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
define <4 x i64> @test_psadbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psadbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psadbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psadbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psadbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psadbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psadbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5128,37 +5128,37 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
define <32 x i8> @test_pshufb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pshufb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pshufb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5171,42 +5171,42 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_pshufd(<8 x i32> %a0, <8 x i32> *%a1) {
; GENERIC-LABEL: test_pshufd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; GENERIC-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; HASWELL-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [1:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; BROADWELL-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [7:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; SKYLAKE-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [8:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [8:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pshufd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [8:0.50]
; ZNVER1-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5220,42 +5220,42 @@ define <8 x i32> @test_pshufd(<8 x i32> %a0, <8 x i32> *%a1) {
define <16 x i16> @test_pshufhw(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pshufhw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [5:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufhw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; HASWELL-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [1:1.00]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufhw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; BROADWELL-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [7:1.00]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufhw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; SKYLAKE-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [8:1.00]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufhw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [8:1.00]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pshufhw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [8:0.50]
; ZNVER1-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5269,42 +5269,42 @@ define <16 x i16> @test_pshufhw(<16 x i16> %a0, <16 x i16> *%a1) {
define <16 x i16> @test_pshuflw(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pshuflw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshuflw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; HASWELL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [1:1.00]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshuflw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; BROADWELL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [7:1.00]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshuflw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; SKYLAKE-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [8:1.00]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshuflw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [8:1.00]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pshuflw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [8:0.50]
; ZNVER1-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5318,37 +5318,37 @@ define <16 x i16> @test_pshuflw(<16 x i16> %a0, <16 x i16> *%a1) {
define <32 x i8> @test_psignb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psignb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psignb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5361,37 +5361,37 @@ declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_psignd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psignd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psignd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5404,37 +5404,37 @@ declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_psignw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psignw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psignw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5447,42 +5447,42 @@ declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @test_pslld(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pslld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pslld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pslld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pslld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pslld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5497,32 +5497,32 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
define <32 x i8> @test_pslldq(<32 x i8> %a0) {
; GENERIC-LABEL: test_pslldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pslldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pslldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pslldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pslldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a0, <32 x i32> <i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60>
@@ -5531,42 +5531,42 @@ define <32 x i8> @test_pslldq(<32 x i8> %a0) {
define <4 x i64> @test_psllq(<4 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psllq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5581,37 +5581,37 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
define <4 x i32> @test_psllvd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psllvd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllvd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllvd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllvd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllvd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5624,37 +5624,37 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_psllvd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psllvd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllvd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllvd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllvd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllvd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5667,37 +5667,37 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_psllvq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psllvq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllvq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllvq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllvq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllvq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5710,37 +5710,37 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_psllvq_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psllvq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllvq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllvq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllvq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllvq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5753,42 +5753,42 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <16 x i16> @test_psllw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psllw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5803,42 +5803,42 @@ declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnon
define <8 x i32> @test_psrad(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrad:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrad:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrad:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrad:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrad:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrad:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5853,37 +5853,37 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @test_psravd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psravd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psravd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psravd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psravd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psravd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psravd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5896,37 +5896,37 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_psravd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psravd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psravd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psravd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psravd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psravd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psravd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5939,42 +5939,42 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind read
define <16 x i16> @test_psraw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psraw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psraw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psraw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psraw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psraw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psraw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5989,42 +5989,42 @@ declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnon
define <8 x i32> @test_psrld(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -6039,32 +6039,32 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
define <32 x i8> @test_psrldq(<32 x i8> %a0) {
; GENERIC-LABEL: test_psrldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50>
@@ -6073,42 +6073,42 @@ define <32 x i8> @test_psrldq(<32 x i8> %a0) {
define <4 x i64> @test_psrlq(<4 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psrlq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -6123,37 +6123,37 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
define <4 x i32> @test_psrlvd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrlvd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlvd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlvd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlvd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlvd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6166,37 +6166,37 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_psrlvd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psrlvd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlvd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlvd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlvd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlvd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6209,37 +6209,37 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_psrlvq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psrlvq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlvq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlvq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlvq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlvq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6252,37 +6252,37 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_psrlvq_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psrlvq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlvq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlvq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlvq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlvq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6295,42 +6295,42 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <16 x i16> @test_psrlw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psrlw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -6345,37 +6345,37 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnon
define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psubb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6387,37 +6387,37 @@ define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psubd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6429,37 +6429,37 @@ define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psubq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6471,37 +6471,37 @@ define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
define <32 x i8> @test_psubsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psubsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6514,37 +6514,37 @@ declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @test_psubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6557,37 +6557,37 @@ declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_psubusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psubusb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubusb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubusb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubusb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubusb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6600,37 +6600,37 @@ declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnon
define <16 x i16> @test_psubusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psubusw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubusw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubusw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubusw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubusw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6643,37 +6643,37 @@ declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psubw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6685,37 +6685,37 @@ define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
define <32 x i8> @test_punpckhbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_punpckhbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; GENERIC-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; SKX-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckhbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6727,7 +6727,7 @@ define <32 x i8> @test_punpckhbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_punpckhdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; GENERIC-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [5:1.00]
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [3:1.00]
@@ -6735,7 +6735,7 @@ define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [1:1.00]
; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6743,7 +6743,7 @@ define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
; BROADWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6751,7 +6751,7 @@ define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6759,7 +6759,7 @@ define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SKX-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SKX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6767,7 +6767,7 @@ define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckhdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.25]
@@ -6782,42 +6782,42 @@ define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
define <4 x i64> @test_punpckhqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_punpckhqdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; GENERIC-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhqdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [1:1.00]
; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhqdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
; BROADWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhqdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhqdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SKX-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
; SKX-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckhqdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -6831,37 +6831,37 @@ define <4 x i64> @test_punpckhqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2)
define <16 x i16> @test_punpckhwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_punpckhwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; GENERIC-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; SKX-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckhwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6873,37 +6873,37 @@ define <16 x i16> @test_punpckhwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a
define <32 x i8> @test_punpcklbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_punpcklbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; GENERIC-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; SKX-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpcklbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6915,7 +6915,7 @@ define <32 x i8> @test_punpcklbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_punpckldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; GENERIC-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [5:1.00]
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [3:1.00]
@@ -6923,7 +6923,7 @@ define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; HASWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [1:1.00]
; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6931,7 +6931,7 @@ define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
; BROADWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6939,7 +6939,7 @@ define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6947,7 +6947,7 @@ define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SKX-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SKX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6955,7 +6955,7 @@ define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.25]
@@ -6970,42 +6970,42 @@ define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
define <4 x i64> @test_punpcklqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_punpcklqdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; GENERIC-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklqdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [1:1.00]
; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklqdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
; BROADWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklqdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklqdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SKX-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
; SKX-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpcklqdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -7019,37 +7019,37 @@ define <4 x i64> @test_punpcklqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2)
define <16 x i16> @test_punpcklwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_punpcklwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; GENERIC-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; SKX-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpcklwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7061,42 +7061,42 @@ define <16 x i16> @test_punpcklwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a
define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pxor:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pxor:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pxor:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pxor:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pxor:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pxor:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
diff --git a/test/CodeGen/X86/avx2-shift.ll b/test/CodeGen/X86/avx2-shift.ll
index 8db4cae4970..7b1a3978bcb 100644
--- a/test/CodeGen/X86/avx2-shift.ll
+++ b/test/CodeGen/X86/avx2-shift.ll
@@ -4,12 +4,12 @@
define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_shl0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = shl <4 x i32> %x, %y
@@ -18,12 +18,12 @@ define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_shl1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = shl <8 x i32> %x, %y
@@ -32,12 +32,12 @@ define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: variable_shl2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = shl <2 x i64> %x, %y
@@ -46,12 +46,12 @@ define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
; X32-LABEL: variable_shl3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = shl <4 x i64> %x, %y
@@ -60,12 +60,12 @@ define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_srl0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = lshr <4 x i32> %x, %y
@@ -74,12 +74,12 @@ define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_srl1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = lshr <8 x i32> %x, %y
@@ -88,12 +88,12 @@ define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: variable_srl2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = lshr <2 x i64> %x, %y
@@ -102,12 +102,12 @@ define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
; X32-LABEL: variable_srl3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = lshr <4 x i64> %x, %y
@@ -116,12 +116,12 @@ define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_sra0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = ashr <4 x i32> %x, %y
@@ -130,12 +130,12 @@ define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_sra1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = ashr <8 x i32> %x, %y
@@ -146,12 +146,12 @@ define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift00:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpslld $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift00:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpslld $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -160,12 +160,12 @@ define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift01:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift01:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -174,12 +174,12 @@ define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
; X32-LABEL: vshift02:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift02:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
@@ -190,12 +190,12 @@ define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift03:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrld $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift03:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrld $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -204,12 +204,12 @@ define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift04:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift04:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -218,12 +218,12 @@ define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
; X32-LABEL: vshift05:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift05:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
@@ -234,12 +234,12 @@ define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift06:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrad $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift06:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrad $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = ashr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -248,12 +248,12 @@ define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift07:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsraw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift07:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsraw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = ashr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -262,13 +262,13 @@ define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_sra0_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsravd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra0_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsravd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -278,13 +278,13 @@ define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_sra1_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsravd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra1_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsravd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -294,13 +294,13 @@ define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_shl0_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl0_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -310,13 +310,13 @@ define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_shl1_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl1_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -326,13 +326,13 @@ define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
; X32-LABEL: variable_shl2_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl2_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <2 x i64>, <2 x i64>* %y
@@ -342,13 +342,13 @@ define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
; X32-LABEL: variable_shl3_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl3_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <4 x i64>, <4 x i64>* %y
@@ -358,13 +358,13 @@ define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_srl0_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl0_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -374,13 +374,13 @@ define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_srl1_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl1_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -390,13 +390,13 @@ define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
; X32-LABEL: variable_srl2_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl2_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <2 x i64>, <2 x i64>* %y
@@ -406,13 +406,13 @@ define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
; X32-LABEL: variable_srl3_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl3_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <4 x i64>, <4 x i64>* %y
@@ -422,13 +422,13 @@ define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
define <32 x i8> @shl9(<32 x i8> %A) nounwind {
; X32-LABEL: shl9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shl9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -438,13 +438,13 @@ define <32 x i8> @shl9(<32 x i8> %A) nounwind {
define <32 x i8> @shr9(<32 x i8> %A) nounwind {
; X32-LABEL: shr9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shr9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -454,13 +454,13 @@ define <32 x i8> @shr9(<32 x i8> %A) nounwind {
define <32 x i8> @sra_v32i8_7(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8_7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sra_v32i8_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -470,7 +470,7 @@ define <32 x i8> @sra_v32i8_7(<32 x i8> %A) nounwind {
define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -479,7 +479,7 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: sra_v32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -492,13 +492,13 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
define <16 x i16> @sext_v16i16(<16 x i16> %a) nounwind {
; X32-LABEL: sext_v16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $8, %ymm0, %ymm0
; X32-NEXT: vpsraw $8, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_v16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $8, %ymm0, %ymm0
; X64-NEXT: vpsraw $8, %ymm0, %ymm0
; X64-NEXT: retq
@@ -509,13 +509,13 @@ define <16 x i16> @sext_v16i16(<16 x i16> %a) nounwind {
define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
; X32-LABEL: sext_v8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpslld $16, %ymm0, %ymm0
; X32-NEXT: vpsrad $16, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_v8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpslld $16, %ymm0, %ymm0
; X64-NEXT: vpsrad $16, %ymm0, %ymm0
; X64-NEXT: retq
@@ -526,7 +526,7 @@ define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_shl16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -537,7 +537,7 @@ define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-NEXT: retl
;
; X64-LABEL: variable_shl16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -552,7 +552,7 @@ define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_ashr16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -562,7 +562,7 @@ define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-NEXT: retl
;
; X64-LABEL: variable_ashr16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -576,7 +576,7 @@ define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_lshr16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -587,7 +587,7 @@ define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-NEXT: retl
;
; X64-LABEL: variable_lshr16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/avx2-vbroadcast.ll b/test/CodeGen/X86/avx2-vbroadcast.ll
index 97b20b1e56d..e5506257e4c 100644
--- a/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -6,13 +6,13 @@
define <16 x i8> @BB16(i8* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: BB16:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: BB16:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -38,13 +38,13 @@ entry:
define <32 x i8> @BB32(i8* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: BB32:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: BB32:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -87,13 +87,13 @@ entry:
define <8 x i16> @W16(i16* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: W16:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: W16:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -111,13 +111,13 @@ entry:
define <16 x i16> @WW16(i16* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: WW16:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: WW16:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -143,13 +143,13 @@ entry:
define <4 x i32> @D32(i32* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: D32:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: D32:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -163,13 +163,13 @@ entry:
define <8 x i32> @DD32(i32* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: DD32:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: DD32:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -187,7 +187,7 @@ entry:
define <2 x i64> @Q64(i64* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: Q64:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %ecx
; X32-NEXT: movl 4(%eax), %eax
@@ -198,7 +198,7 @@ define <2 x i64> @Q64(i64* %ptr) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: Q64:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastq (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -210,7 +210,7 @@ entry:
define <4 x i64> @QQ64(i64* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: QQ64:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %ecx
; X32-NEXT: movl 4(%eax), %eax
@@ -222,7 +222,7 @@ define <4 x i64> @QQ64(i64* %ptr) nounwind uwtable readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: QQ64:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -236,13 +236,13 @@ entry:
define <8 x i16> @broadcast_mem_v4i16_v8i16(<4 x i16>* %ptr) {
; X32-LABEL: broadcast_mem_v4i16_v8i16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: broadcast_mem_v4i16_v8i16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastq (%rdi), %xmm0
; X64-NEXT: retq
%load = load <4 x i16>, <4 x i16>* %ptr
@@ -252,14 +252,14 @@ define <8 x i16> @broadcast_mem_v4i16_v8i16(<4 x i16>* %ptr) {
define <16 x i16> @broadcast_mem_v4i16_v16i16(<4 x i16>* %ptr) {
; X32-LABEL: broadcast_mem_v4i16_v16i16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: broadcast_mem_v4i16_v16i16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
%load = load <4 x i16>, <4 x i16>* %ptr
@@ -271,13 +271,13 @@ define <16 x i16> @broadcast_mem_v4i16_v16i16(<4 x i16>* %ptr) {
define <16 x i8> @load_splat_16i8_16i8_1111111111111111(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_16i8_16i8_1111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb 1(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_16i8_16i8_1111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb 1(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -288,13 +288,13 @@ entry:
define <32 x i8> @load_splat_32i8_16i8_11111111111111111111111111111111(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_32i8_16i8_11111111111111111111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb 1(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_32i8_16i8_11111111111111111111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb 1(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -305,13 +305,13 @@ entry:
define <32 x i8> @load_splat_32i8_32i8_11111111111111111111111111111111(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_32i8_32i8_11111111111111111111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb 1(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_32i8_32i8_11111111111111111111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb 1(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -322,13 +322,13 @@ entry:
define <8 x i16> @load_splat_8i16_8i16_11111111(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8i16_8i16_11111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw 2(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8i16_8i16_11111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw 2(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -339,13 +339,13 @@ entry:
define <16 x i16> @load_splat_16i16_8i16_1111111111111111(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_16i16_8i16_1111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw 2(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_16i16_8i16_1111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw 2(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -356,13 +356,13 @@ entry:
define <16 x i16> @load_splat_16i16_16i16_1111111111111111(<16 x i16>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_16i16_16i16_1111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw 2(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_16i16_16i16_1111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw 2(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -373,13 +373,13 @@ entry:
define <4 x i32> @load_splat_4i32_4i32_1111(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i32_4i32_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 4(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i32_4i32_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 4(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -390,13 +390,13 @@ entry:
define <8 x i32> @load_splat_8i32_4i32_33333333(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8i32_4i32_33333333:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 12(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8i32_4i32_33333333:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -407,13 +407,13 @@ entry:
define <8 x i32> @load_splat_8i32_8i32_55555555(<8 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8i32_8i32_55555555:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 20(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8i32_8i32_55555555:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -424,13 +424,13 @@ entry:
define <4 x float> @load_splat_4f32_4f32_1111(<4 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f32_4f32_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 4(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f32_4f32_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 4(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -441,13 +441,13 @@ entry:
define <8 x float> @load_splat_8f32_4f32_33333333(<4 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8f32_4f32_33333333:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 12(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8f32_4f32_33333333:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -458,13 +458,13 @@ entry:
define <8 x float> @load_splat_8f32_8f32_55555555(<8 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8f32_8f32_55555555:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 20(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8f32_8f32_55555555:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -475,13 +475,13 @@ entry:
define <2 x i64> @load_splat_2i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_2i64_2i64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: load_splat_2i64_2i64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastq 8(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -492,13 +492,13 @@ entry:
define <4 x i64> @load_splat_4i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i64_2i64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i64_2i64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -509,13 +509,13 @@ entry:
define <4 x i64> @load_splat_4i64_4i64_2222(<4 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i64_4i64_2222:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i64_4i64_2222:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -526,13 +526,13 @@ entry:
define <2 x double> @load_splat_2f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_2f64_2f64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: load_splat_2f64_2f64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
@@ -543,13 +543,13 @@ entry:
define <4 x double> @load_splat_4f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f64_2f64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f64_2f64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -560,13 +560,13 @@ entry:
define <4 x double> @load_splat_4f64_4f64_2222(<4 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f64_4f64_2222:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f64_4f64_2222:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -579,13 +579,13 @@ entry:
; this used to crash
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: I:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: I:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
@@ -597,24 +597,24 @@ entry:
define <8 x i32> @V111(<8 x i32> %in) nounwind uwtable readnone ssp {
; X32-AVX2-LABEL: V111:
-; X32-AVX2: ## BB#0: ## %entry
+; X32-AVX2: ## %bb.0: ## %entry
; X32-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
; X32-AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
;
; X64-AVX2-LABEL: V111:
-; X64-AVX2: ## BB#0: ## %entry
+; X64-AVX2: ## %bb.0: ## %entry
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
; X64-AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: retq
;
; X32-AVX512VL-LABEL: V111:
-; X32-AVX512VL: ## BB#0: ## %entry
+; X32-AVX512VL: ## %bb.0: ## %entry
; X32-AVX512VL-NEXT: vpaddd LCPI29_0{1to8}, %ymm0, %ymm0
; X32-AVX512VL-NEXT: retl
;
; X64-AVX512VL-LABEL: V111:
-; X64-AVX512VL: ## BB#0: ## %entry
+; X64-AVX512VL: ## %bb.0: ## %entry
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
entry:
@@ -624,24 +624,24 @@ entry:
define <8 x float> @V113(<8 x float> %in) nounwind uwtable readnone ssp {
; X32-AVX2-LABEL: V113:
-; X32-AVX2: ## BB#0: ## %entry
+; X32-AVX2: ## %bb.0: ## %entry
; X32-AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X32-AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
;
; X64-AVX2-LABEL: V113:
-; X64-AVX2: ## BB#0: ## %entry
+; X64-AVX2: ## %bb.0: ## %entry
; X64-AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X64-AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: retq
;
; X32-AVX512VL-LABEL: V113:
-; X32-AVX512VL: ## BB#0: ## %entry
+; X32-AVX512VL: ## %bb.0: ## %entry
; X32-AVX512VL-NEXT: vaddps LCPI30_0{1to8}, %ymm0, %ymm0
; X32-AVX512VL-NEXT: retl
;
; X64-AVX512VL-LABEL: V113:
-; X64-AVX512VL: ## BB#0: ## %entry
+; X64-AVX512VL: ## %bb.0: ## %entry
; X64-AVX512VL-NEXT: vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
entry:
@@ -651,12 +651,12 @@ entry:
define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: _e2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{.*#+}} xmm0 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X32-NEXT: retl
;
; X64-LABEL: _e2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss {{.*#+}} xmm0 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X64-NEXT: retq
%vecinit.i = insertelement <4 x float> undef, float 0xbf80000000000000, i32 0
@@ -668,12 +668,12 @@ define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
define <8 x i8> @_e4(i8* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: _e4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [52,52,52,52,52,52,52,52]
; X32-NEXT: retl
;
; X64-LABEL: _e4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [52,52,52,52,52,52,52,52]
; X64-NEXT: retq
%vecinit0.i = insertelement <8 x i8> undef, i8 52, i32 0
@@ -689,11 +689,11 @@ define <8 x i8> @_e4(i8* %ptr) nounwind uwtable readnone ssp {
define void @crash() nounwind alwaysinline {
; X32-LABEL: crash:
-; X32: ## BB#0: ## %WGLoopsEntry
+; X32: ## %bb.0: ## %WGLoopsEntry
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: testb %al, %al
; X32-NEXT: je LBB33_1
-; X32-NEXT: ## BB#2: ## %ret
+; X32-NEXT: ## %bb.2: ## %ret
; X32-NEXT: retl
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: LBB33_1: ## %footer349VF
@@ -701,11 +701,11 @@ define void @crash() nounwind alwaysinline {
; X32-NEXT: jmp LBB33_1
;
; X64-LABEL: crash:
-; X64: ## BB#0: ## %WGLoopsEntry
+; X64: ## %bb.0: ## %WGLoopsEntry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testb %al, %al
; X64-NEXT: je LBB33_1
-; X64-NEXT: ## BB#2: ## %ret
+; X64-NEXT: ## %bb.2: ## %ret
; X64-NEXT: retq
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: LBB33_1: ## %footer349VF
@@ -739,18 +739,18 @@ ret:
define <8 x i32> @_inreg0(i32 %scalar) nounwind uwtable readnone ssp {
; X32-LABEL: _inreg0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-AVX2-LABEL: _inreg0:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vmovd %edi, %xmm0
; X64-AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: _inreg0:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vpbroadcastd %edi, %ymm0
; X64-AVX512VL-NEXT: retq
%in = insertelement <8 x i32> undef, i32 %scalar, i32 0
@@ -760,12 +760,12 @@ define <8 x i32> @_inreg0(i32 %scalar) nounwind uwtable readnone ssp {
define <8 x float> @_inreg1(float %scalar) nounwind uwtable readnone ssp {
; X32-LABEL: _inreg1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%in = insertelement <8 x float> undef, float %scalar, i32 0
@@ -775,12 +775,12 @@ define <8 x float> @_inreg1(float %scalar) nounwind uwtable readnone ssp {
define <4 x float> @_inreg2(float %scalar) nounwind uwtable readnone ssp {
; X32-LABEL: _inreg2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%in = insertelement <4 x float> undef, float %scalar, i32 0
@@ -790,12 +790,12 @@ define <4 x float> @_inreg2(float %scalar) nounwind uwtable readnone ssp {
define <4 x double> @_inreg3(double %scalar) nounwind uwtable readnone ssp {
; X32-LABEL: _inreg3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%in = insertelement <4 x double> undef, double %scalar, i32 0
@@ -805,12 +805,12 @@ define <4 x double> @_inreg3(double %scalar) nounwind uwtable readnone ssp {
define <8 x float> @_inreg8xfloat(<8 x float> %a) {
; X32-LABEL: _inreg8xfloat:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg8xfloat:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
@@ -819,12 +819,12 @@ define <8 x float> @_inreg8xfloat(<8 x float> %a) {
define <4 x float> @_inreg4xfloat(<4 x float> %a) {
; X32-LABEL: _inreg4xfloat:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg4xfloat:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
@@ -833,12 +833,12 @@ define <4 x float> @_inreg4xfloat(<4 x float> %a) {
define <16 x i16> @_inreg16xi16(<16 x i16> %a) {
; X32-LABEL: _inreg16xi16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg16xi16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
@@ -847,12 +847,12 @@ define <16 x i16> @_inreg16xi16(<16 x i16> %a) {
define <8 x i16> @_inreg8xi16(<8 x i16> %a) {
; X32-LABEL: _inreg8xi16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg8xi16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer
@@ -861,12 +861,12 @@ define <8 x i16> @_inreg8xi16(<8 x i16> %a) {
define <4 x i64> @_inreg4xi64(<4 x i64> %a) {
; X32-LABEL: _inreg4xi64:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg4xi64:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -875,12 +875,12 @@ define <4 x i64> @_inreg4xi64(<4 x i64> %a) {
define <2 x i64> @_inreg2xi64(<2 x i64> %a) {
; X32-LABEL: _inreg2xi64:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg2xi64:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastq %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -889,12 +889,12 @@ define <2 x i64> @_inreg2xi64(<2 x i64> %a) {
define <4 x double> @_inreg4xdouble(<4 x double> %a) {
; X32-LABEL: _inreg4xdouble:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg4xdouble:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer
@@ -903,12 +903,12 @@ define <4 x double> @_inreg4xdouble(<4 x double> %a) {
define <2 x double> @_inreg2xdouble(<2 x double> %a) {
; X32-LABEL: _inreg2xdouble:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: _inreg2xdouble:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
@@ -917,12 +917,12 @@ define <2 x double> @_inreg2xdouble(<2 x double> %a) {
define <8 x i32> @_inreg8xi32(<8 x i32> %a) {
; X32-LABEL: _inreg8xi32:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg8xi32:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -931,12 +931,12 @@ define <8 x i32> @_inreg8xi32(<8 x i32> %a) {
define <4 x i32> @_inreg4xi32(<4 x i32> %a) {
; X32-LABEL: _inreg4xi32:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg4xi32:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -945,12 +945,12 @@ define <4 x i32> @_inreg4xi32(<4 x i32> %a) {
define <32 x i8> @_inreg32xi8(<32 x i8> %a) {
; X32-LABEL: _inreg32xi8:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg32xi8:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
@@ -959,12 +959,12 @@ define <32 x i8> @_inreg32xi8(<32 x i8> %a) {
define <16 x i8> @_inreg16xi8(<16 x i8> %a) {
; X32-LABEL: _inreg16xi8:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg16xi8:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -977,12 +977,12 @@ define <16 x i8> @_inreg16xi8(<16 x i8> %a) {
define <8 x float> @splat_concat1(float %f) {
; X32-LABEL: splat_concat1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <4 x float> undef, float %f, i32 0
@@ -995,12 +995,12 @@ define <8 x float> @splat_concat1(float %f) {
define <8 x float> @splat_concat2(float %f) {
; X32-LABEL: splat_concat2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <4 x float> undef, float %f, i32 0
@@ -1017,12 +1017,12 @@ define <8 x float> @splat_concat2(float %f) {
define <4 x double> @splat_concat3(double %d) {
; X32-LABEL: splat_concat3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <2 x double> undef, double %d, i32 0
@@ -1033,12 +1033,12 @@ define <4 x double> @splat_concat3(double %d) {
define <4 x double> @splat_concat4(double %d) {
; X32-LABEL: splat_concat4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <2 x double> undef, double %d, i32 0
@@ -1059,7 +1059,7 @@ define <4 x double> @splat_concat4(double %d) {
define void @isel_crash_16b(i8* %cV_R.addr) {
; X32-LABEL: isel_crash_16b:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1072,7 +1072,7 @@ define void @isel_crash_16b(i8* %cV_R.addr) {
; X32-NEXT: retl
;
; X64-LABEL: isel_crash_16b:
-; X64: ## BB#0: ## %eintry
+; X64: ## %bb.0: ## %eintry
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movb (%rdi), %al
@@ -1098,7 +1098,7 @@ eintry:
define void @isel_crash_32b(i8* %cV_R.addr) {
; X32-LABEL: isel_crash_32b:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1118,7 +1118,7 @@ define void @isel_crash_32b(i8* %cV_R.addr) {
; X32-NEXT: retl
;
; X64-LABEL: isel_crash_32b:
-; X64: ## BB#0: ## %eintry
+; X64: ## %bb.0: ## %eintry
; X64-NEXT: pushq %rbp
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: .cfi_offset %rbp, -16
@@ -1154,7 +1154,7 @@ eintry:
define void @isel_crash_8w(i16* %cV_R.addr) {
; X32-LABEL: isel_crash_8w:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1167,7 +1167,7 @@ define void @isel_crash_8w(i16* %cV_R.addr) {
; X32-NEXT: retl
;
; X64-LABEL: isel_crash_8w:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzwl (%rdi), %eax
@@ -1193,7 +1193,7 @@ entry:
define void @isel_crash_16w(i16* %cV_R.addr) {
; X32-LABEL: isel_crash_16w:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1213,7 +1213,7 @@ define void @isel_crash_16w(i16* %cV_R.addr) {
; X32-NEXT: retl
;
; X64-LABEL: isel_crash_16w:
-; X64: ## BB#0: ## %eintry
+; X64: ## %bb.0: ## %eintry
; X64-NEXT: pushq %rbp
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: .cfi_offset %rbp, -16
@@ -1249,7 +1249,7 @@ eintry:
define void @isel_crash_4d(i32* %cV_R.addr) {
; X32-LABEL: isel_crash_4d:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1262,7 +1262,7 @@ define void @isel_crash_4d(i32* %cV_R.addr) {
; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_4d:
-; X64-AVX2: ## BB#0: ## %entry
+; X64-AVX2: ## %bb.0: ## %entry
; X64-AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX2-NEXT: movl (%rdi), %eax
@@ -1273,7 +1273,7 @@ define void @isel_crash_4d(i32* %cV_R.addr) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: isel_crash_4d:
-; X64-AVX512VL: ## BB#0: ## %entry
+; X64-AVX512VL: ## %bb.0: ## %entry
; X64-AVX512VL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX512VL-NEXT: movl (%rdi), %eax
@@ -1298,7 +1298,7 @@ entry:
define void @isel_crash_8d(i32* %cV_R.addr) {
; X32-LABEL: isel_crash_8d:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1318,7 +1318,7 @@ define void @isel_crash_8d(i32* %cV_R.addr) {
; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_8d:
-; X64-AVX2: ## BB#0: ## %eintry
+; X64-AVX2: ## %bb.0: ## %eintry
; X64-AVX2-NEXT: pushq %rbp
; X64-AVX2-NEXT: .cfi_def_cfa_offset 16
; X64-AVX2-NEXT: .cfi_offset %rbp, -16
@@ -1339,7 +1339,7 @@ define void @isel_crash_8d(i32* %cV_R.addr) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: isel_crash_8d:
-; X64-AVX512VL: ## BB#0: ## %eintry
+; X64-AVX512VL: ## %bb.0: ## %eintry
; X64-AVX512VL-NEXT: pushq %rbp
; X64-AVX512VL-NEXT: .cfi_def_cfa_offset 16
; X64-AVX512VL-NEXT: .cfi_offset %rbp, -16
@@ -1374,7 +1374,7 @@ eintry:
define void @isel_crash_2q(i64* %cV_R.addr) {
; X32-LABEL: isel_crash_2q:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1392,7 +1392,7 @@ define void @isel_crash_2q(i64* %cV_R.addr) {
; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_2q:
-; X64-AVX2: ## BB#0: ## %entry
+; X64-AVX2: ## %bb.0: ## %entry
; X64-AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX2-NEXT: movq (%rdi), %rax
@@ -1403,7 +1403,7 @@ define void @isel_crash_2q(i64* %cV_R.addr) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: isel_crash_2q:
-; X64-AVX512VL: ## BB#0: ## %entry
+; X64-AVX512VL: ## %bb.0: ## %entry
; X64-AVX512VL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX512VL-NEXT: movq (%rdi), %rax
@@ -1427,7 +1427,7 @@ entry:
define void @isel_crash_4q(i64* %cV_R.addr) {
; X32-LABEL: isel_crash_4q:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1453,7 +1453,7 @@ define void @isel_crash_4q(i64* %cV_R.addr) {
; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_4q:
-; X64-AVX2: ## BB#0: ## %eintry
+; X64-AVX2: ## %bb.0: ## %eintry
; X64-AVX2-NEXT: pushq %rbp
; X64-AVX2-NEXT: .cfi_def_cfa_offset 16
; X64-AVX2-NEXT: .cfi_offset %rbp, -16
@@ -1474,7 +1474,7 @@ define void @isel_crash_4q(i64* %cV_R.addr) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: isel_crash_4q:
-; X64-AVX512VL: ## BB#0: ## %eintry
+; X64-AVX512VL: ## %bb.0: ## %eintry
; X64-AVX512VL-NEXT: pushq %rbp
; X64-AVX512VL-NEXT: .cfi_def_cfa_offset 16
; X64-AVX512VL-NEXT: .cfi_offset %rbp, -16
diff --git a/test/CodeGen/X86/avx2-vbroadcasti128.ll b/test/CodeGen/X86/avx2-vbroadcasti128.ll
index dedd6be4c8e..254cdfdd8cb 100644
--- a/test/CodeGen/X86/avx2-vbroadcasti128.ll
+++ b/test/CodeGen/X86/avx2-vbroadcasti128.ll
@@ -4,14 +4,14 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-LABEL: test_broadcast_2f64_4f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -23,14 +23,14 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-LABEL: test_broadcast_2i64_4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -42,14 +42,14 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32-LABEL: test_broadcast_4f32_8f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -61,14 +61,14 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-LABEL: test_broadcast_4i32_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -80,14 +80,14 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -99,14 +99,14 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -118,7 +118,7 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x double>* %p1) {
; X32-LABEL: test_broadcast_2f64_4f64_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovapd (%ecx), %xmm1
@@ -128,7 +128,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovapd (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
@@ -143,7 +143,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub
define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1) {
; X32-LABEL: test_broadcast_2i64_4i64_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovdqa (%ecx), %xmm1
@@ -153,7 +153,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa (%rdi), %xmm1
; X64-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
@@ -168,7 +168,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1)
define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>* %p1) {
; X32-LABEL: test_broadcast_4f32_8f32_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm1
@@ -178,7 +178,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
@@ -193,7 +193,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>
define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1) {
; X32-LABEL: test_broadcast_4i32_8i32_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovdqa (%ecx), %xmm1
@@ -203,7 +203,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa (%rdi), %xmm1
; X64-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
@@ -218,7 +218,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1)
define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p1) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovdqa (%ecx), %xmm1
@@ -228,7 +228,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa (%rdi), %xmm1
; X64-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
@@ -243,7 +243,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p
define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovdqa (%ecx), %xmm1
@@ -253,7 +253,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa (%rdi), %xmm1
; X64-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
@@ -268,7 +268,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1)
define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
; X32-LABEL: PR29088:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -278,7 +278,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
; X32-NEXT: retl
;
; X64-LABEL: PR29088:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vmovaps %ymm1, (%rsi)
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index f5909f22210..8f0f3777fc8 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -6,11 +6,11 @@
define <16 x i16> @test_sllw_1(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -19,12 +19,12 @@ entry:
define <16 x i16> @test_sllw_2(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpaddw %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpaddw %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -34,12 +34,12 @@ entry:
define <16 x i16> @test_sllw_3(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsllw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsllw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -49,11 +49,11 @@ entry:
define <8 x i32> @test_slld_1(<8 x i32> %InVec) {
; X32-LABEL: test_slld_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_slld_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -62,12 +62,12 @@ entry:
define <8 x i32> @test_slld_2(<8 x i32> %InVec) {
; X32-LABEL: test_slld_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_slld_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -77,14 +77,14 @@ entry:
define <8 x i32> @test_vpslld_var(i32 %shift) {
; X32-LABEL: test_vpslld_var:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [192,193,194,195,196,197,198,199]
; X32-NEXT: vpslld %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpslld_var:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [192,193,194,195,196,197,198,199]
; X64-NEXT: vpslld %xmm0, %ymm1, %ymm0
@@ -96,12 +96,12 @@ define <8 x i32> @test_vpslld_var(i32 %shift) {
define <8 x i32> @test_slld_3(<8 x i32> %InVec) {
; X32-LABEL: test_slld_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpslld $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_slld_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -111,11 +111,11 @@ entry:
define <4 x i64> @test_sllq_1(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
@@ -124,12 +124,12 @@ entry:
define <4 x i64> @test_sllq_2(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -139,12 +139,12 @@ entry:
define <4 x i64> @test_sllq_3(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsllq $63, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsllq $63, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -156,11 +156,11 @@ entry:
define <16 x i16> @test_sraw_1(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = ashr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -169,12 +169,12 @@ entry:
define <16 x i16> @test_sraw_2(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsraw $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsraw $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -184,12 +184,12 @@ entry:
define <16 x i16> @test_sraw_3(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsraw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsraw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -199,11 +199,11 @@ entry:
define <8 x i32> @test_srad_1(<8 x i32> %InVec) {
; X32-LABEL: test_srad_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srad_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = ashr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -212,12 +212,12 @@ entry:
define <8 x i32> @test_srad_2(<8 x i32> %InVec) {
; X32-LABEL: test_srad_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrad $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srad_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrad $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -227,12 +227,12 @@ entry:
define <8 x i32> @test_srad_3(<8 x i32> %InVec) {
; X32-LABEL: test_srad_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrad $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srad_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -244,11 +244,11 @@ entry:
define <16 x i16> @test_srlw_1(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -257,12 +257,12 @@ entry:
define <16 x i16> @test_srlw_2(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrlw $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrlw $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -272,12 +272,12 @@ entry:
define <16 x i16> @test_srlw_3(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrlw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrlw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -287,11 +287,11 @@ entry:
define <8 x i32> @test_srld_1(<8 x i32> %InVec) {
; X32-LABEL: test_srld_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srld_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -300,12 +300,12 @@ entry:
define <8 x i32> @test_srld_2(<8 x i32> %InVec) {
; X32-LABEL: test_srld_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrld $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srld_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrld $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -315,12 +315,12 @@ entry:
define <8 x i32> @test_srld_3(<8 x i32> %InVec) {
; X32-LABEL: test_srld_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrld $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srld_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrld $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -330,11 +330,11 @@ entry:
define <4 x i64> @test_srlq_1(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
@@ -343,12 +343,12 @@ entry:
define <4 x i64> @test_srlq_2(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrlq $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrlq $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -358,12 +358,12 @@ entry:
define <4 x i64> @test_srlq_3(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrlq $63, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrlq $63, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -373,7 +373,7 @@ entry:
define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
; X32-LABEL: srl_trunc_and_v4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; X32-NEXT: vpbroadcastd {{.*#+}} xmm2 = [8,8,8,8]
@@ -383,7 +383,7 @@ define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: srl_trunc_and_v4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; X64-NEXT: vpbroadcastd {{.*#+}} xmm2 = [8,8,8,8]
@@ -403,7 +403,7 @@ define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: shl_8i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -414,7 +414,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl_8i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -429,7 +429,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: shl_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -443,7 +443,7 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -461,7 +461,7 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: shl_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsllw $4, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
@@ -476,7 +476,7 @@ define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpsllw $4, %ymm0, %ymm2
; X64-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -495,7 +495,7 @@ define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: ashr_8i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -505,7 +505,7 @@ define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ashr_8i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -519,7 +519,7 @@ define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: ashr_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -533,7 +533,7 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ashr_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -551,7 +551,7 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: ashr_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X32-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -579,7 +579,7 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ashr_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X64-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -611,7 +611,7 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: lshr_8i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -622,7 +622,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: lshr_8i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -637,7 +637,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: lshr_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -651,7 +651,7 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: lshr_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -669,7 +669,7 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: lshr_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsrlw $4, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
@@ -685,7 +685,7 @@ define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: lshr_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpsrlw $4, %ymm0, %ymm2
; X64-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
diff --git a/test/CodeGen/X86/avx2-vperm.ll b/test/CodeGen/X86/avx2-vperm.ll
index c88d67119bb..32ab55dc12a 100644
--- a/test/CodeGen/X86/avx2-vperm.ll
+++ b/test/CodeGen/X86/avx2-vperm.ll
@@ -4,13 +4,13 @@
define <8 x i32> @perm_cl_int_8x32(<8 x i32> %A) nounwind readnone {
; X32-LABEL: perm_cl_int_8x32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vmovaps {{.*#+}} ymm1 = [0,7,2,1,2,7,6,0]
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_int_8x32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovaps {{.*#+}} ymm1 = [0,7,2,1,2,7,6,0]
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -22,13 +22,13 @@ entry:
define <8 x float> @perm_cl_fp_8x32(<8 x float> %A) nounwind readnone {
; X32-LABEL: perm_cl_fp_8x32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vmovaps {{.*#+}} ymm1 = <u,7,2,u,4,u,1,6>
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_fp_8x32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovaps {{.*#+}} ymm1 = <u,7,2,u,4,u,1,6>
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -39,12 +39,12 @@ entry:
define <4 x i64> @perm_cl_int_4x64(<4 x i64> %A) nounwind readnone {
; X32-LABEL: perm_cl_int_4x64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_int_4x64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X64-NEXT: retq
entry:
@@ -54,12 +54,12 @@ entry:
define <4 x double> @perm_cl_fp_4x64(<4 x double> %A) nounwind readnone {
; X32-LABEL: perm_cl_fp_4x64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_fp_4x64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512-adc-sbb.ll b/test/CodeGen/X86/avx512-adc-sbb.ll
index c994fdef691..bb21dea68df 100644
--- a/test/CodeGen/X86/avx512-adc-sbb.ll
+++ b/test/CodeGen/X86/avx512-adc-sbb.ll
@@ -6,7 +6,7 @@
define i8 @PR32316(i8 %t1, i32 %t5, i8 %t8) {
; CHECK-LABEL: PR32316:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %dil, %dil
; CHECK-NEXT: sete %al
diff --git a/test/CodeGen/X86/avx512-any_extend_load.ll b/test/CodeGen/X86/avx512-any_extend_load.ll
index 57c033df8fd..de2ca2212d9 100644
--- a/test/CodeGen/X86/avx512-any_extend_load.ll
+++ b/test/CodeGen/X86/avx512-any_extend_load.ll
@@ -5,14 +5,14 @@
define void @any_extend_load_v8i64(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; KNL-NEXT: vpmovqb %zmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: any_extend_load_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: vpmovqb %zmm0, (%rdi)
@@ -29,7 +29,7 @@ define void @any_extend_load_v8i64(<8 x i8> * %ptr) {
define void @any_extend_load_v8i32(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -37,7 +37,7 @@ define void @any_extend_load_v8i32(<8 x i8> * %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: any_extend_load_v8i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; SKX-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT: vpmovdb %ymm0, (%rdi)
@@ -54,7 +54,7 @@ define void @any_extend_load_v8i32(<8 x i8> * %ptr) {
define void @any_extend_load_v8i16(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
@@ -62,7 +62,7 @@ define void @any_extend_load_v8i16(<8 x i8> * %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: any_extend_load_v8i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SKX-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; SKX-NEXT: vpmovwb %xmm0, (%rdi)
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index 51c3fb815c6..6862280fa9d 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -7,7 +7,7 @@
define <8 x double> @addpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: addpd512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -17,7 +17,7 @@ entry:
define <8 x double> @addpd512fold(<8 x double> %y) {
; CHECK-LABEL: addpd512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -27,7 +27,7 @@ entry:
define <16 x float> @addps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: addps512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -37,7 +37,7 @@ entry:
define <16 x float> @addps512fold(<16 x float> %y) {
; CHECK-LABEL: addps512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -47,7 +47,7 @@ entry:
define <8 x double> @subpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: subpd512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsubpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -57,7 +57,7 @@ entry:
define <8 x double> @subpd512fold(<8 x double> %y, <8 x double>* %x) {
; CHECK-LABEL: subpd512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsubpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -68,7 +68,7 @@ entry:
define <16 x float> @subps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: subps512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsubps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -78,7 +78,7 @@ entry:
define <16 x float> @subps512fold(<16 x float> %y, <16 x float>* %x) {
; CHECK-LABEL: subps512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsubps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -89,7 +89,7 @@ entry:
define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512F-LABEL: imulq512:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512F-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512F-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -101,7 +101,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq512:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512VL-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512VL-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -113,7 +113,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq512:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -125,12 +125,12 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq512:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq512:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%z = mul <8 x i64>%x, %y
@@ -139,7 +139,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512F-LABEL: imulq256:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512F-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -151,7 +151,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512VL-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -163,7 +163,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq256:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512BW-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512BW-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -175,7 +175,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq256:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
@@ -183,7 +183,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq256:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %ymm0, %ymm1, %ymm0
; SKX-NEXT: retq
%z = mul <4 x i64>%x, %y
@@ -192,7 +192,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512F-LABEL: imulq128:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -204,7 +204,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq128:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -216,7 +216,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq128:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -228,7 +228,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq128:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
@@ -237,7 +237,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %xmm0, %xmm1, %xmm0
; SKX-NEXT: retq
%z = mul <2 x i64>%x, %y
@@ -246,7 +246,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
define <8 x double> @mulpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: mulpd512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -256,7 +256,7 @@ entry:
define <8 x double> @mulpd512fold(<8 x double> %y) {
; CHECK-LABEL: mulpd512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -266,7 +266,7 @@ entry:
define <16 x float> @mulps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: mulps512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -276,7 +276,7 @@ entry:
define <16 x float> @mulps512fold(<16 x float> %y) {
; CHECK-LABEL: mulps512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -286,7 +286,7 @@ entry:
define <8 x double> @divpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: divpd512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vdivpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -296,7 +296,7 @@ entry:
define <8 x double> @divpd512fold(<8 x double> %y) {
; CHECK-LABEL: divpd512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vdivpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -306,7 +306,7 @@ entry:
define <16 x float> @divps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: divps512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vdivps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -316,7 +316,7 @@ entry:
define <16 x float> @divps512fold(<16 x float> %y) {
; CHECK-LABEL: divps512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vdivps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -326,7 +326,7 @@ entry:
define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <8 x i64> %i, %j
@@ -335,7 +335,7 @@ define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
; CHECK-LABEL: vpaddq_fold_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load <8 x i64>, <8 x i64>* %j, align 4
@@ -345,7 +345,7 @@ define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
; CHECK-LABEL: vpaddq_broadcast_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -354,7 +354,7 @@ define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
; CHECK-LABEL: vpaddq_broadcast2_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load i64, i64* %j
@@ -372,7 +372,7 @@ define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <16 x i32> %i, %j
@@ -381,7 +381,7 @@ define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
; CHECK-LABEL: vpaddd_fold_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load <16 x i32>, <16 x i32>* %j, align 4
@@ -391,7 +391,7 @@ define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
; CHECK-LABEL: vpaddd_broadcast_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -400,7 +400,7 @@ define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1}
@@ -413,7 +413,7 @@ define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %ma
define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -426,7 +426,7 @@ define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %m
define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_fold_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1}
@@ -440,7 +440,7 @@ define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_broadcast_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
@@ -453,7 +453,7 @@ define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_fold_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
@@ -467,7 +467,7 @@ define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_broadcast_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
@@ -480,7 +480,7 @@ define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = sub <8 x i64> %i, %j
@@ -489,7 +489,7 @@ define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = sub <16 x i32> %i, %j
@@ -498,7 +498,7 @@ define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
; CHECK-LABEL: vpmulld_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = mul <16 x i32> %i, %j
@@ -508,7 +508,7 @@ define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
declare float @sqrtf(float) readnone
define float @sqrtA(float %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtA:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -519,7 +519,7 @@ entry:
declare double @sqrt(double) readnone
define double @sqrtB(double %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtB:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -530,7 +530,7 @@ entry:
declare float @llvm.sqrt.f32(float)
define float @sqrtC(float %a) nounwind {
; CHECK-LABEL: sqrtC:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%b = call float @llvm.sqrt.f32(float %a)
@@ -540,7 +540,7 @@ define float @sqrtC(float %a) nounwind {
declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
define <16 x float> @sqrtD(<16 x float> %a) nounwind {
; CHECK-LABEL: sqrtD:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtps %zmm0, %zmm0
; CHECK-NEXT: retq
%b = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
@@ -550,7 +550,7 @@ define <16 x float> @sqrtD(<16 x float> %a) nounwind {
declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
define <8 x double> @sqrtE(<8 x double> %a) nounwind {
; CHECK-LABEL: sqrtE:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtpd %zmm0, %zmm0
; CHECK-NEXT: retq
%b = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
@@ -559,7 +559,7 @@ define <8 x double> @sqrtE(<8 x double> %a) nounwind {
define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
; CHECK-LABEL: fadd_broadcast:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%b = fadd <16 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
@@ -568,7 +568,7 @@ define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
; CHECK-LABEL: addq_broadcast:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%b = add <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -577,27 +577,27 @@ define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
; AVX512F-LABEL: orq_broadcast:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: orq_broadcast:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: orq_broadcast:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: orq_broadcast:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: orq_broadcast:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
%b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -606,27 +606,27 @@ define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
; AVX512F-LABEL: andd512fold:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpandq (%rdi), %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: andd512fold:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpandq (%rdi), %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: andd512fold:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpandq (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: andd512fold:
-; AVX512DQ: # BB#0: # %entry
+; AVX512DQ: # %bb.0: # %entry
; AVX512DQ-NEXT: vandps (%rdi), %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: andd512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vandps (%rdi), %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -637,27 +637,27 @@ entry:
define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
; AVX512F-LABEL: andqbrst:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: andqbrst:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: andqbrst:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: andqbrst:
-; AVX512DQ: # BB#0: # %entry
+; AVX512DQ: # %bb.0: # %entry
; AVX512DQ-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: andqbrst:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -670,7 +670,7 @@ entry:
define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vaddps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm0 {%k1}
@@ -685,7 +685,7 @@ define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vmulps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vmulps %zmm2, %zmm1, %zmm0 {%k1}
@@ -700,7 +700,7 @@ define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vminps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vminps %zmm2, %zmm1, %zmm0 {%k1}
@@ -716,7 +716,7 @@ define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vminpd:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -724,14 +724,14 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_mask_vminpd:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; AVX512VL-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_mask_vminpd:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -739,7 +739,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vminpd:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -747,7 +747,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_mask_vminpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; SKX-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -763,7 +763,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vmaxps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vmaxps %zmm2, %zmm1, %zmm0 {%k1}
@@ -779,7 +779,7 @@ define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vmaxpd:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -787,14 +787,14 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_mask_vmaxpd:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; AVX512VL-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_mask_vmaxpd:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -802,7 +802,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vmaxpd:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -810,7 +810,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_mask_vmaxpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; SKX-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -826,7 +826,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vsubps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1}
@@ -841,7 +841,7 @@ define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vdivps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vdivps %zmm2, %zmm1, %zmm0 {%k1}
@@ -856,7 +856,7 @@ define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqq %zmm4, %zmm3, %k1
; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -871,7 +871,7 @@ define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
; CHECK-LABEL: test_maskz_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -885,7 +885,7 @@ define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_fold_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vaddpd (%rdi), %zmm1, %zmm0 {%k1}
@@ -901,7 +901,7 @@ define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
; CHECK-LABEL: test_maskz_fold_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z}
@@ -916,7 +916,7 @@ define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
; CHECK-LABEL: test_broadcast_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load double, double* %j
@@ -929,7 +929,7 @@ define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind
define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_broadcast_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpcmpneqq %zmm0, %zmm2, %k1
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1}
@@ -948,7 +948,7 @@ define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double>
define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
; CHECK-LABEL: test_maskz_broadcast_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
@@ -966,27 +966,27 @@ define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
define <16 x float> @test_fxor(<16 x float> %a) {
; AVX512F-LABEL: test_fxor:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_fxor:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_fxor:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_fxor:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_fxor:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
@@ -996,30 +996,30 @@ define <16 x float> @test_fxor(<16 x float> %a) {
define <8 x float> @test_fxor_8f32(<8 x float> %a) {
; AVX512F-LABEL: test_fxor_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0,-0,-0,-0,-0,-0,-0,-0]
; AVX512F-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_fxor_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxord {{.*}}(%rip){1to8}, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_fxor_8f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0,-0,-0,-0,-0,-0,-0,-0]
; AVX512BW-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_fxor_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0,-0,-0,-0,-0,-0,-0,-0]
; AVX512DQ-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_fxor_8f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT: retq
%res = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -1028,27 +1028,27 @@ define <8 x float> @test_fxor_8f32(<8 x float> %a) {
define <8 x double> @fabs_v8f64(<8 x double> %p)
; AVX512F-LABEL: fabs_v8f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fabs_v8f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: fabs_v8f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: fabs_v8f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: fabs_v8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
{
@@ -1059,27 +1059,27 @@ declare <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
define <16 x float> @fabs_v16f32(<16 x float> %p)
; AVX512F-LABEL: fabs_v16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fabs_v16f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: fabs_v16f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: fabs_v16f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: fabs_v16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
{
diff --git a/test/CodeGen/X86/avx512-bugfix-23634.ll b/test/CodeGen/X86/avx512-bugfix-23634.ll
index acb3f121c8d..97356854da6 100644
--- a/test/CodeGen/X86/avx512-bugfix-23634.ll
+++ b/test/CodeGen/X86/avx512-bugfix-23634.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @f_fu(float* %ret, float* %aa, float %b) {
; CHECK-LABEL: f_fu:
-; CHECK: ## BB#0: ## %allocas
+; CHECK: ## %bb.0: ## %allocas
; CHECK-NEXT: vcvttss2si %xmm0, %eax
; CHECK-NEXT: vpbroadcastd %eax, %zmm0
; CHECK-NEXT: vcvttps2dq (%rsi), %zmm1
diff --git a/test/CodeGen/X86/avx512-bugfix-25270.ll b/test/CodeGen/X86/avx512-bugfix-25270.ll
index 47384fa9884..49c98bb5457 100644
--- a/test/CodeGen/X86/avx512-bugfix-25270.ll
+++ b/test/CodeGen/X86/avx512-bugfix-25270.ll
@@ -5,7 +5,7 @@ declare void @Print__512(<16 x i32>) #0
define void @bar__512(<16 x i32>* %var) #0 {
; CHECK-LABEL: bar__512:
-; CHECK: ## BB#0: ## %allocas
+; CHECK: ## %bb.0: ## %allocas
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $112, %rsp
; CHECK-NEXT: movq %rdi, %rbx
diff --git a/test/CodeGen/X86/avx512-bugfix-26264.ll b/test/CodeGen/X86/avx512-bugfix-26264.ll
index b29b6ee0658..4d54fb71523 100644
--- a/test/CodeGen/X86/avx512-bugfix-26264.ll
+++ b/test/CodeGen/X86/avx512-bugfix-26264.ll
@@ -3,7 +3,7 @@
define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
; AVX512BW-LABEL: test_load_32f64:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
; AVX512BW-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
@@ -21,7 +21,7 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64> %src0) {
; AVX512BW-LABEL: test_load_32i64:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
; AVX512BW-NEXT: vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
diff --git a/test/CodeGen/X86/avx512-build-vector.ll b/test/CodeGen/X86/avx512-build-vector.ll
index a79a053941d..9751a84b927 100644
--- a/test/CodeGen/X86/avx512-build-vector.ll
+++ b/test/CodeGen/X86/avx512-build-vector.ll
@@ -3,7 +3,7 @@
define <16 x i32> @test2(<16 x i32> %x) {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -13,7 +13,7 @@ define <16 x i32> @test2(<16 x i32> %x) {
define <16 x float> @test3(<4 x float> %a) {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,2,3,4,18,16,7,8,9,10,11,12,13,14,15]
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/avx512-calling-conv.ll b/test/CodeGen/X86/avx512-calling-conv.ll
index 60c454ae16c..e62367d2560 100644
--- a/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/test/CodeGen/X86/avx512-calling-conv.ll
@@ -5,12 +5,12 @@
define <16 x i1> @test1() {
; ALL_X64-LABEL: test1:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test1:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; KNL_X32-NEXT: retl
ret <16 x i1> zeroinitializer
@@ -18,7 +18,7 @@ define <16 x i1> @test1() {
define <16 x i1> @test2(<16 x i1>%a, <16 x i1>%b) {
; KNL-LABEL: test2:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
@@ -30,7 +30,7 @@ define <16 x i1> @test2(<16 x i1>%a, <16 x i1>%b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test2:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k0
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
@@ -40,7 +40,7 @@ define <16 x i1> @test2(<16 x i1>%a, <16 x i1>%b) {
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test2:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL_X32-NEXT: vpslld $31, %zmm1, %zmm1
; KNL_X32-NEXT: vpmovsxbd %xmm0, %zmm0
@@ -56,7 +56,7 @@ define <16 x i1> @test2(<16 x i1>%a, <16 x i1>%b) {
define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
; KNL-LABEL: test3:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
@@ -68,7 +68,7 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test3:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k0
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
@@ -78,7 +78,7 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test3:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL_X32-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL_X32-NEXT: vpmovsxwq %xmm0, %zmm0
@@ -94,12 +94,12 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
define <4 x i1> @test4(<4 x i1>%a, <4 x i1>%b) {
; KNL-LABEL: test4:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vandps %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test4:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
@@ -108,7 +108,7 @@ define <4 x i1> @test4(<4 x i1>%a, <4 x i1>%b) {
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test4:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vandps %xmm1, %xmm0, %xmm0
; KNL_X32-NEXT: retl
%c = and <4 x i1>%a, %b
@@ -119,7 +119,7 @@ declare <8 x i1> @func8xi1(<8 x i1> %a)
define <8 x i32> @test5(<8 x i32>%a, <8 x i32>%b) {
; KNL-LABEL: test5:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rax
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
@@ -133,7 +133,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8 x i32>%b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test5:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rax
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
@@ -147,7 +147,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8 x i32>%b) {
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test5:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: subl $12, %esp
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
@@ -169,7 +169,7 @@ declare <16 x i1> @func16xi1(<16 x i1> %a)
define <16 x i32> @test6(<16 x i32>%a, <16 x i32>%b) {
; KNL-LABEL: test6:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rax
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
@@ -183,7 +183,7 @@ define <16 x i32> @test6(<16 x i32>%a, <16 x i32>%b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test6:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rax
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
@@ -197,7 +197,7 @@ define <16 x i32> @test6(<16 x i32>%a, <16 x i32>%b) {
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test6:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: subl $12, %esp
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
@@ -219,7 +219,7 @@ declare <4 x i1> @func4xi1(<4 x i1> %a)
define <4 x i32> @test7(<4 x i32>%a, <4 x i32>%b) {
; KNL-LABEL: test7:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rax
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -230,7 +230,7 @@ define <4 x i32> @test7(<4 x i32>%a, <4 x i32>%b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test7:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rax
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
@@ -242,7 +242,7 @@ define <4 x i32> @test7(<4 x i32>%a, <4 x i32>%b) {
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test7:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: subl $12, %esp
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -259,7 +259,7 @@ define <4 x i32> @test7(<4 x i32>%a, <4 x i32>%b) {
define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL-LABEL: test7a:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rax
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
@@ -277,7 +277,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test7a:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rax
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
@@ -294,7 +294,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test7a:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: subl $12, %esp
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
@@ -318,19 +318,19 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
define <16 x i8> @test8(<16 x i8> %a1, <16 x i8> %a2, i1 %cond) {
; ALL_X64-LABEL: test8:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: testb $1, %dil
; ALL_X64-NEXT: jne LBB8_2
-; ALL_X64-NEXT: ## BB#1:
+; ALL_X64-NEXT: ## %bb.1:
; ALL_X64-NEXT: vmovaps %xmm1, %xmm0
; ALL_X64-NEXT: LBB8_2:
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test8:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: testb $1, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: jne LBB8_2
-; KNL_X32-NEXT: ## BB#1:
+; KNL_X32-NEXT: ## %bb.1:
; KNL_X32-NEXT: vmovaps %xmm1, %xmm0
; KNL_X32-NEXT: LBB8_2:
; KNL_X32-NEXT: retl
@@ -340,13 +340,13 @@ define <16 x i8> @test8(<16 x i8> %a1, <16 x i8> %a2, i1 %cond) {
define i1 @test9(double %a, double %b) {
; ALL_X64-LABEL: test9:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: vucomisd %xmm0, %xmm1
; ALL_X64-NEXT: setb %al
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test9:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; KNL_X32-NEXT: vucomisd {{[0-9]+}}(%esp), %xmm0
; KNL_X32-NEXT: setb %al
@@ -357,14 +357,14 @@ define i1 @test9(double %a, double %b) {
define i32 @test10(i32 %a, i32 %b, i1 %cond) {
; ALL_X64-LABEL: test10:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: testb $1, %dl
; ALL_X64-NEXT: cmovel %esi, %edi
; ALL_X64-NEXT: movl %edi, %eax
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test10:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: testb $1, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; KNL_X32-NEXT: leal {{[0-9]+}}(%esp), %ecx
@@ -377,13 +377,13 @@ define i32 @test10(i32 %a, i32 %b, i1 %cond) {
define i1 @test11(i32 %a, i32 %b) {
; ALL_X64-LABEL: test11:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: cmpl %esi, %edi
; ALL_X64-NEXT: setg %al
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test11:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_X32-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; KNL_X32-NEXT: setg %al
@@ -394,7 +394,7 @@ define i1 @test11(i32 %a, i32 %b) {
define i32 @test12(i32 %a1, i32 %a2, i32 %b1) {
; ALL_X64-LABEL: test12:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: pushq %rbp
; ALL_X64-NEXT: .cfi_def_cfa_offset 16
; ALL_X64-NEXT: pushq %r14
@@ -422,7 +422,7 @@ define i32 @test12(i32 %a1, i32 %a2, i32 %b1) {
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test12:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: pushl %ebx
; KNL_X32-NEXT: .cfi_def_cfa_offset 8
; KNL_X32-NEXT: pushl %edi
diff --git a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
index e8881294671..fd4c5d0cbd9 100644
--- a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
+++ b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind readnone uwtable
define zeroext i16 @cmp_kor_seq_16(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x float> %d, <16 x float> %x) local_unnamed_addr #0 {
; CHECK-LABEL: cmp_kor_seq_16:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vcmpgeps %zmm4, %zmm0, %k0
; CHECK-NEXT: vcmpgeps %zmm4, %zmm1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
diff --git a/test/CodeGen/X86/avx512-cmp.ll b/test/CodeGen/X86/avx512-cmp.ll
index e75907a864a..f5b787de064 100644
--- a/test/CodeGen/X86/avx512-cmp.ll
+++ b/test/CodeGen/X86/avx512-cmp.ll
@@ -4,7 +4,7 @@
define double @test1(double %a, double %b) nounwind {
; ALL-LABEL: test1:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vucomisd %xmm1, %xmm0
; ALL-NEXT: jne LBB0_1
; ALL-NEXT: jnp LBB0_2
@@ -28,10 +28,10 @@ l2:
define float @test2(float %a, float %b) nounwind {
; ALL-LABEL: test2:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vucomiss %xmm0, %xmm1
; ALL-NEXT: jbe LBB1_2
-; ALL-NEXT: ## BB#1: ## %l1
+; ALL-NEXT: ## %bb.1: ## %l1
; ALL-NEXT: vsubss %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
; ALL-NEXT: LBB1_2: ## %l2
@@ -51,14 +51,14 @@ l2:
define i32 @test3(float %a, float %b) {
; KNL-LABEL: test3:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vcmpeqss %xmm1, %xmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: movzbl %al, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test3:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpeqss %xmm1, %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: movzbl %al, %eax
@@ -71,12 +71,12 @@ define i32 @test3(float %a, float %b) {
define float @test5(float %p) #0 {
; ALL-LABEL: test5:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vucomiss %xmm1, %xmm0
; ALL-NEXT: jne LBB3_1
; ALL-NEXT: jp LBB3_1
-; ALL-NEXT: ## BB#2: ## %return
+; ALL-NEXT: ## %bb.2: ## %return
; ALL-NEXT: retq
; ALL-NEXT: LBB3_1: ## %if.end
; ALL-NEXT: seta %al
@@ -100,7 +100,7 @@ return: ; preds = %if.end, %entry
define i32 @test6(i32 %a, i32 %b) {
; ALL-LABEL: test6:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: sete %al
@@ -112,7 +112,7 @@ define i32 @test6(i32 %a, i32 %b) {
define i32 @test7(double %x, double %y) #2 {
; ALL-LABEL: test7:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: vucomisd %xmm1, %xmm0
; ALL-NEXT: setne %al
@@ -125,7 +125,7 @@ entry:
define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
; ALL-LABEL: test8:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: notl %edi
; ALL-NEXT: xorl $-2147483648, %esi ## imm = 0x80000000
; ALL-NEXT: testl %edx, %edx
@@ -145,10 +145,10 @@ define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
define i32 @test9(i64 %a) {
; ALL-LABEL: test9:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: testb $1, %dil
; ALL-NEXT: jne LBB7_2
-; ALL-NEXT: ## BB#1: ## %A
+; ALL-NEXT: ## %bb.1: ## %A
; ALL-NEXT: movl $6, %eax
; ALL-NEXT: retq
; ALL-NEXT: LBB7_2: ## %B
@@ -165,7 +165,7 @@ B:
define i32 @test10(i64 %b, i64 %c, i1 %d) {
; ALL-LABEL: test10:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: movl %edx, %eax
; ALL-NEXT: andb $1, %al
; ALL-NEXT: cmpq %rsi, %rdi
@@ -174,7 +174,7 @@ define i32 @test10(i64 %b, i64 %c, i1 %d) {
; ALL-NEXT: andb $1, %cl
; ALL-NEXT: cmpb %cl, %al
; ALL-NEXT: je LBB8_1
-; ALL-NEXT: ## BB#2: ## %if.end.i
+; ALL-NEXT: ## %bb.2: ## %if.end.i
; ALL-NEXT: movl $6, %eax
; ALL-NEXT: retq
; ALL-NEXT: LBB8_1: ## %if.then.i
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index 9c17ea603ac..0487b560729 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -10,7 +10,7 @@
define <16 x float> @sitof32(<16 x i32> %a) nounwind {
; ALL-LABEL: sitof32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
%b = sitofp <16 x i32> %a to <16 x float>
@@ -19,7 +19,7 @@ define <16 x float> @sitof32(<16 x i32> %a) nounwind {
define <8 x double> @sltof864(<8 x i64> %a) {
; NODQ-LABEL: sltof864:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -49,7 +49,7 @@ define <8 x double> @sltof864(<8 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: sltof864:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; DQ-NEXT: retq
%b = sitofp <8 x i64> %a to <8 x double>
@@ -58,7 +58,7 @@ define <8 x double> @sltof864(<8 x i64> %a) {
define <4 x double> @slto4f64(<4 x i64> %a) {
; NODQ-LABEL: slto4f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -74,12 +74,12 @@ define <4 x double> @slto4f64(<4 x i64> %a) {
; NODQ-NEXT: retq
;
; VLDQ-LABEL: slto4f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2pd %ymm0, %ymm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: slto4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -90,7 +90,7 @@ define <4 x double> @slto4f64(<4 x i64> %a) {
define <2 x double> @slto2f64(<2 x i64> %a) {
; NODQ-LABEL: slto2f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
; NODQ-NEXT: vmovq %xmm0, %rax
@@ -99,12 +99,12 @@ define <2 x double> @slto2f64(<2 x i64> %a) {
; NODQ-NEXT: retq
;
; VLDQ-LABEL: slto2f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: slto2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -116,7 +116,7 @@ define <2 x double> @slto2f64(<2 x i64> %a) {
define <2 x float> @sltof2f32(<2 x i64> %a) {
; NODQ-LABEL: sltof2f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; NODQ-NEXT: vmovq %xmm0, %rax
@@ -127,12 +127,12 @@ define <2 x float> @sltof2f32(<2 x i64> %a) {
; NODQ-NEXT: retq
;
; VLDQ-LABEL: sltof2f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2ps %xmm0, %xmm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: sltof2f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -144,7 +144,7 @@ define <2 x float> @sltof2f32(<2 x i64> %a) {
define <4 x float> @slto4f32_mem(<4 x i64>* %a) {
; NODQ-LABEL: slto4f32_mem:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vmovdqu (%rdi), %ymm0
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -162,12 +162,12 @@ define <4 x float> @slto4f32_mem(<4 x i64>* %a) {
; NODQ-NEXT: retq
;
; VLDQ-LABEL: slto4f32_mem:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2psy (%rdi), %xmm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: slto4f32_mem:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovups (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -180,7 +180,7 @@ define <4 x float> @slto4f32_mem(<4 x i64>* %a) {
define <4 x i64> @f64to4sl(<4 x double> %a) {
; NODQ-LABEL: f64to4sl:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextractf128 $1, %ymm0, %xmm1
; NODQ-NEXT: vcvttsd2si %xmm1, %rax
; NODQ-NEXT: vmovq %rax, %xmm2
@@ -198,12 +198,12 @@ define <4 x i64> @f64to4sl(<4 x double> %a) {
; NODQ-NEXT: retq
;
; VLDQ-LABEL: f64to4sl:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvttpd2qq %ymm0, %ymm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: f64to4sl:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -214,7 +214,7 @@ define <4 x i64> @f64to4sl(<4 x double> %a) {
define <4 x i64> @f32to4sl(<4 x float> %a) {
; NODQ-LABEL: f32to4sl:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; NODQ-NEXT: vcvttss2si %xmm1, %rax
; NODQ-NEXT: vmovq %rax, %xmm1
@@ -232,12 +232,12 @@ define <4 x i64> @f32to4sl(<4 x float> %a) {
; NODQ-NEXT: retq
;
; VLDQ-LABEL: f32to4sl:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: f32to4sl:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -248,7 +248,7 @@ define <4 x i64> @f32to4sl(<4 x float> %a) {
define <4 x float> @slto4f32(<4 x i64> %a) {
; NODQ-LABEL: slto4f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; NODQ-NEXT: vmovq %xmm0, %rax
@@ -265,13 +265,13 @@ define <4 x float> @slto4f32(<4 x i64> %a) {
; NODQ-NEXT: retq
;
; VLDQ-LABEL: slto4f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; VLDQ-NEXT: vzeroupper
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: slto4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -283,7 +283,7 @@ define <4 x float> @slto4f32(<4 x i64> %a) {
define <4 x float> @ulto4f32(<4 x i64> %a) {
; NODQ-LABEL: ulto4f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; NODQ-NEXT: vmovq %xmm0, %rax
@@ -300,13 +300,13 @@ define <4 x float> @ulto4f32(<4 x i64> %a) {
; NODQ-NEXT: retq
;
; VLDQ-LABEL: ulto4f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; VLDQ-NEXT: vzeroupper
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: ulto4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -318,7 +318,7 @@ define <4 x float> @ulto4f32(<4 x i64> %a) {
define <8 x double> @ulto8f64(<8 x i64> %a) {
; NODQ-LABEL: ulto8f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtusi2sdq %rax, %xmm2, %xmm2
@@ -348,7 +348,7 @@ define <8 x double> @ulto8f64(<8 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: ulto8f64:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; DQ-NEXT: retq
%b = uitofp <8 x i64> %a to <8 x double>
@@ -357,7 +357,7 @@ define <8 x double> @ulto8f64(<8 x i64> %a) {
define <16 x double> @ulto16f64(<16 x i64> %a) {
; NODQ-LABEL: ulto16f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm2
; NODQ-NEXT: vpextrq $1, %xmm2, %rax
; NODQ-NEXT: vcvtusi2sdq %rax, %xmm3, %xmm3
@@ -413,7 +413,7 @@ define <16 x double> @ulto16f64(<16 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: ulto16f64:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; DQ-NEXT: vcvtuqq2pd %zmm1, %zmm1
; DQ-NEXT: retq
@@ -423,7 +423,7 @@ define <16 x double> @ulto16f64(<16 x i64> %a) {
define <16 x i32> @f64to16si(<16 x float> %a) nounwind {
; ALL-LABEL: f64to16si:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: retq
%b = fptosi <16 x float> %a to <16 x i32>
@@ -432,7 +432,7 @@ define <16 x i32> @f64to16si(<16 x float> %a) nounwind {
define <16 x i8> @f32to16sc(<16 x float> %f) {
; ALL-LABEL: f32to16sc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: vpmovdb %zmm0, %xmm0
; ALL-NEXT: vzeroupper
@@ -443,7 +443,7 @@ define <16 x i8> @f32to16sc(<16 x float> %f) {
define <16 x i16> @f32to16ss(<16 x float> %f) {
; ALL-LABEL: f32to16ss:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: vpmovdw %zmm0, %ymm0
; ALL-NEXT: retq
@@ -453,7 +453,7 @@ define <16 x i16> @f32to16ss(<16 x float> %f) {
define <16 x i32> @f32to16ui(<16 x float> %a) nounwind {
; ALL-LABEL: f32to16ui:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2udq %zmm0, %zmm0
; ALL-NEXT: retq
%b = fptoui <16 x float> %a to <16 x i32>
@@ -462,7 +462,7 @@ define <16 x i32> @f32to16ui(<16 x float> %a) nounwind {
define <16 x i8> @f32to16uc(<16 x float> %f) {
; ALL-LABEL: f32to16uc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: vpmovdb %zmm0, %xmm0
; ALL-NEXT: vzeroupper
@@ -473,7 +473,7 @@ define <16 x i8> @f32to16uc(<16 x float> %f) {
define <16 x i16> @f32to16us(<16 x float> %f) {
; ALL-LABEL: f32to16us:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: vpmovdw %zmm0, %ymm0
; ALL-NEXT: retq
@@ -483,14 +483,14 @@ define <16 x i16> @f32to16us(<16 x float> %f) {
define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
; NOVL-LABEL: f32to8ui:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: f32to8ui:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttps2udq %ymm0, %ymm0
; VL-NEXT: retq
%b = fptoui <8 x float> %a to <8 x i32>
@@ -499,7 +499,7 @@ define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
; NOVL-LABEL: f32to4ui:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -507,7 +507,7 @@ define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
; NOVL-NEXT: retq
;
; VL-LABEL: f32to4ui:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttps2udq %xmm0, %xmm0
; VL-NEXT: retq
%b = fptoui <4 x float> %a to <4 x i32>
@@ -516,7 +516,7 @@ define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
define <8 x i32> @f64to8ui(<8 x double> %a) nounwind {
; ALL-LABEL: f64to8ui:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttpd2udq %zmm0, %ymm0
; ALL-NEXT: retq
%b = fptoui <8 x double> %a to <8 x i32>
@@ -525,7 +525,7 @@ define <8 x i32> @f64to8ui(<8 x double> %a) nounwind {
define <8 x i16> @f64to8us(<8 x double> %f) {
; NOVL-LABEL: f64to8us:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -533,7 +533,7 @@ define <8 x i16> @f64to8us(<8 x double> %f) {
; NOVL-NEXT: retq
;
; VL-LABEL: f64to8us:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttpd2dq %zmm0, %ymm0
; VL-NEXT: vpmovdw %ymm0, %xmm0
; VL-NEXT: vzeroupper
@@ -544,7 +544,7 @@ define <8 x i16> @f64to8us(<8 x double> %f) {
define <8 x i8> @f64to8uc(<8 x double> %f) {
; NOVL-LABEL: f64to8uc:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -552,7 +552,7 @@ define <8 x i8> @f64to8uc(<8 x double> %f) {
; NOVL-NEXT: retq
;
; VL-LABEL: f64to8uc:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttpd2dq %zmm0, %ymm0
; VL-NEXT: vpmovdw %ymm0, %xmm0
; VL-NEXT: vzeroupper
@@ -563,7 +563,7 @@ define <8 x i8> @f64to8uc(<8 x double> %f) {
define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
; NOVL-LABEL: f64to4ui:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttpd2udq %zmm0, %ymm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -571,7 +571,7 @@ define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
; NOVL-NEXT: retq
;
; VL-LABEL: f64to4ui:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; VL-NEXT: vzeroupper
; VL-NEXT: retq
@@ -581,7 +581,7 @@ define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
define <8 x double> @sito8f64(<8 x i32> %a) {
; ALL-LABEL: sito8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
; ALL-NEXT: retq
%b = sitofp <8 x i32> %a to <8 x double>
@@ -589,31 +589,31 @@ define <8 x double> @sito8f64(<8 x i32> %a) {
}
define <8 x double> @i32to8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
; KNL-LABEL: i32to8f64_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; VLBW-LABEL: i32to8f64_mask:
-; VLBW: # BB#0:
+; VLBW: # %bb.0:
; VLBW-NEXT: kmovd %edi, %k1
; VLBW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; VLBW-NEXT: retq
;
; VLNOBW-LABEL: i32to8f64_mask:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; VLNOBW-NEXT: retq
;
; AVX512DQ-LABEL: i32to8f64_mask:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: i32to8f64_mask:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
@@ -624,31 +624,31 @@ define <8 x double> @i32to8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwi
}
define <8 x double> @sito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
; KNL-LABEL: sito8f64_maskz:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; VLBW-LABEL: sito8f64_maskz:
-; VLBW: # BB#0:
+; VLBW: # %bb.0:
; VLBW-NEXT: kmovd %edi, %k1
; VLBW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; VLBW-NEXT: retq
;
; VLNOBW-LABEL: sito8f64_maskz:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; VLNOBW-NEXT: retq
;
; AVX512DQ-LABEL: sito8f64_maskz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: sito8f64_maskz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
@@ -660,7 +660,7 @@ define <8 x double> @sito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
define <8 x i32> @f64to8si(<8 x double> %a) {
; ALL-LABEL: f64to8si:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttpd2dq %zmm0, %ymm0
; ALL-NEXT: retq
%b = fptosi <8 x double> %a to <8 x i32>
@@ -669,7 +669,7 @@ define <8 x i32> @f64to8si(<8 x double> %a) {
define <4 x i32> @f64to4si(<4 x double> %a) {
; ALL-LABEL: f64to4si:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttpd2dq %ymm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -679,7 +679,7 @@ define <4 x i32> @f64to4si(<4 x double> %a) {
define <16 x float> @f64to16f32(<16 x double> %b) nounwind {
; ALL-LABEL: f64to16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtpd2ps %zmm0, %ymm0
; ALL-NEXT: vcvtpd2ps %zmm1, %ymm1
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -690,7 +690,7 @@ define <16 x float> @f64to16f32(<16 x double> %b) nounwind {
define <4 x float> @f64to4f32(<4 x double> %b) {
; ALL-LABEL: f64to4f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtpd2ps %ymm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -700,7 +700,7 @@ define <4 x float> @f64to4f32(<4 x double> %b) {
define <4 x float> @f64to4f32_mask(<4 x double> %b, <4 x i1> %mask) {
; NOVL-LABEL: f64to4f32_mask:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpslld $31, %xmm1, %xmm1
; NOVL-NEXT: vpsrad $31, %xmm1, %xmm1
; NOVL-NEXT: vcvtpd2ps %ymm0, %xmm0
@@ -709,7 +709,7 @@ define <4 x float> @f64to4f32_mask(<4 x double> %b, <4 x i1> %mask) {
; NOVL-NEXT: retq
;
; VL-LABEL: f64to4f32_mask:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpslld $31, %xmm1, %xmm1
; VL-NEXT: vptestmd %xmm1, %xmm1, %k1
; VL-NEXT: vcvtpd2ps %ymm0, %xmm0 {%k1} {z}
@@ -722,7 +722,7 @@ define <4 x float> @f64to4f32_mask(<4 x double> %b, <4 x i1> %mask) {
define <4 x float> @f64tof32_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
; ALL-LABEL: f64tof32_inreg:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtsd2ss %xmm0, %xmm1, %xmm0
; ALL-NEXT: retq
%ext = extractelement <2 x double> %a0, i32 0
@@ -733,7 +733,7 @@ define <4 x float> @f64tof32_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
define <8 x double> @f32to8f64(<8 x float> %b) nounwind {
; ALL-LABEL: f32to8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtps2pd %ymm0, %zmm0
; ALL-NEXT: retq
%a = fpext <8 x float> %b to <8 x double>
@@ -742,14 +742,14 @@ define <8 x double> @f32to8f64(<8 x float> %b) nounwind {
define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x double> %a1) {
; NOVL-LABEL: f32to4f64_mask:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vcvtps2pd %xmm0, %ymm0
; NOVL-NEXT: vcmpltpd %ymm2, %ymm1, %ymm1
; NOVL-NEXT: vandpd %ymm0, %ymm1, %ymm0
; NOVL-NEXT: retq
;
; VL-LABEL: f32to4f64_mask:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcmpltpd %ymm2, %ymm1, %k1
; VL-NEXT: vcvtps2pd %xmm0, %ymm0 {%k1} {z}
; VL-NEXT: retq
@@ -761,7 +761,7 @@ define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x doubl
define <2 x double> @f32tof64_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
; ALL-LABEL: f32tof64_inreg:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
%ext = extractelement <4 x float> %a1, i32 0
@@ -772,7 +772,7 @@ define <2 x double> @f32tof64_inreg(<2 x double> %a0, <4 x float> %a1) nounwind
define double @sltof64_load(i64* nocapture %e) {
; ALL-LABEL: sltof64_load:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vcvtsi2sdq (%rdi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
@@ -783,7 +783,7 @@ entry:
define double @sitof64_load(i32* %e) {
; ALL-LABEL: sitof64_load:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vcvtsi2sdl (%rdi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
@@ -794,7 +794,7 @@ entry:
define float @sitof32_load(i32* %e) {
; ALL-LABEL: sitof32_load:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vcvtsi2ssl (%rdi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
@@ -805,7 +805,7 @@ entry:
define float @sltof32_load(i64* %e) {
; ALL-LABEL: sltof32_load:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vcvtsi2ssq (%rdi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
@@ -816,7 +816,7 @@ entry:
define void @f32tof64_loadstore() {
; ALL-LABEL: f32tof64_loadstore:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
@@ -832,7 +832,7 @@ entry:
define void @f64tof32_loadstore() nounwind uwtable {
; ALL-LABEL: f64tof32_loadstore:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; ALL-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
@@ -848,7 +848,7 @@ entry:
define double @long_to_double(i64 %x) {
; ALL-LABEL: long_to_double:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovq %rdi, %xmm0
; ALL-NEXT: retq
%res = bitcast i64 %x to double
@@ -857,7 +857,7 @@ define double @long_to_double(i64 %x) {
define i64 @double_to_long(double %x) {
; ALL-LABEL: double_to_long:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovq %xmm0, %rax
; ALL-NEXT: retq
%res = bitcast double %x to i64
@@ -866,7 +866,7 @@ define i64 @double_to_long(double %x) {
define float @int_to_float(i32 %x) {
; ALL-LABEL: int_to_float:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovd %edi, %xmm0
; ALL-NEXT: retq
%res = bitcast i32 %x to float
@@ -875,7 +875,7 @@ define float @int_to_float(i32 %x) {
define i32 @float_to_int(float %x) {
; ALL-LABEL: float_to_int:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: retq
%res = bitcast float %x to i32
@@ -884,7 +884,7 @@ define i32 @float_to_int(float %x) {
define <16 x double> @uito16f64(<16 x i32> %a) nounwind {
; ALL-LABEL: uito16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtudq2pd %ymm0, %zmm2
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; ALL-NEXT: vcvtudq2pd %ymm0, %zmm1
@@ -896,7 +896,7 @@ define <16 x double> @uito16f64(<16 x i32> %a) nounwind {
define <8 x float> @slto8f32(<8 x i64> %a) {
; NODQ-LABEL: slto8f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
@@ -926,7 +926,7 @@ define <8 x float> @slto8f32(<8 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: slto8f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; DQ-NEXT: retq
%b = sitofp <8 x i64> %a to <8 x float>
@@ -935,7 +935,7 @@ define <8 x float> @slto8f32(<8 x i64> %a) {
define <16 x float> @slto16f32(<16 x i64> %a) {
; NODQ-LABEL: slto16f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $2, %zmm1, %xmm2
; NODQ-NEXT: vpextrq $1, %xmm2, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
@@ -992,7 +992,7 @@ define <16 x float> @slto16f32(<16 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: slto16f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; DQ-NEXT: vcvtqq2ps %zmm1, %ymm1
; DQ-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1003,7 +1003,7 @@ define <16 x float> @slto16f32(<16 x i64> %a) {
define <8 x double> @slto8f64(<8 x i64> %a) {
; NODQ-LABEL: slto8f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -1033,7 +1033,7 @@ define <8 x double> @slto8f64(<8 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: slto8f64:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; DQ-NEXT: retq
%b = sitofp <8 x i64> %a to <8 x double>
@@ -1042,7 +1042,7 @@ define <8 x double> @slto8f64(<8 x i64> %a) {
define <16 x double> @slto16f64(<16 x i64> %a) {
; NODQ-LABEL: slto16f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm2
; NODQ-NEXT: vpextrq $1, %xmm2, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm3, %xmm3
@@ -1098,7 +1098,7 @@ define <16 x double> @slto16f64(<16 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: slto16f64:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; DQ-NEXT: vcvtqq2pd %zmm1, %zmm1
; DQ-NEXT: retq
@@ -1108,7 +1108,7 @@ define <16 x double> @slto16f64(<16 x i64> %a) {
define <8 x float> @ulto8f32(<8 x i64> %a) {
; NODQ-LABEL: ulto8f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtusi2ssq %rax, %xmm2, %xmm2
@@ -1138,7 +1138,7 @@ define <8 x float> @ulto8f32(<8 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: ulto8f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; DQ-NEXT: retq
%b = uitofp <8 x i64> %a to <8 x float>
@@ -1147,7 +1147,7 @@ define <8 x float> @ulto8f32(<8 x i64> %a) {
define <16 x float> @ulto16f32(<16 x i64> %a) {
; NODQ-LABEL: ulto16f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $2, %zmm1, %xmm2
; NODQ-NEXT: vpextrq $1, %xmm2, %rax
; NODQ-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm3
@@ -1204,7 +1204,7 @@ define <16 x float> @ulto16f32(<16 x i64> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: ulto16f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; DQ-NEXT: vcvtuqq2ps %zmm1, %ymm1
; DQ-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1215,31 +1215,31 @@ define <16 x float> @ulto16f32(<16 x i64> %a) {
define <8 x double> @uito8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
; KNL-LABEL: uito8f64_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; VLBW-LABEL: uito8f64_mask:
-; VLBW: # BB#0:
+; VLBW: # %bb.0:
; VLBW-NEXT: kmovd %edi, %k1
; VLBW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; VLBW-NEXT: retq
;
; VLNOBW-LABEL: uito8f64_mask:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; VLNOBW-NEXT: retq
;
; AVX512DQ-LABEL: uito8f64_mask:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: uito8f64_mask:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
@@ -1250,31 +1250,31 @@ define <8 x double> @uito8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwin
}
define <8 x double> @uito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
; KNL-LABEL: uito8f64_maskz:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; VLBW-LABEL: uito8f64_maskz:
-; VLBW: # BB#0:
+; VLBW: # %bb.0:
; VLBW-NEXT: kmovd %edi, %k1
; VLBW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; VLBW-NEXT: retq
;
; VLNOBW-LABEL: uito8f64_maskz:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; VLNOBW-NEXT: retq
;
; AVX512DQ-LABEL: uito8f64_maskz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: uito8f64_maskz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
@@ -1286,14 +1286,14 @@ define <8 x double> @uito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: uito4f64:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvtudq2pd %xmm0, %ymm0
; VL-NEXT: retq
%b = uitofp <4 x i32> %a to <4 x double>
@@ -1302,7 +1302,7 @@ define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
define <16 x float> @uito16f32(<16 x i32> %a) nounwind {
; ALL-LABEL: uito16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtudq2ps %zmm0, %zmm0
; ALL-NEXT: retq
%b = uitofp <16 x i32> %a to <16 x float>
@@ -1311,7 +1311,7 @@ define <16 x float> @uito16f32(<16 x i32> %a) nounwind {
define <8 x double> @uito8f64(<8 x i32> %a) {
; ALL-LABEL: uito8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtudq2pd %ymm0, %zmm0
; ALL-NEXT: retq
%b = uitofp <8 x i32> %a to <8 x double>
@@ -1320,14 +1320,14 @@ define <8 x double> @uito8f64(<8 x i32> %a) {
define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
; NOVL-LABEL: uito8f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: uito8f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvtudq2ps %ymm0, %ymm0
; VL-NEXT: retq
%b = uitofp <8 x i32> %a to <8 x float>
@@ -1336,7 +1336,7 @@ define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1344,7 +1344,7 @@ define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
; NOVL-NEXT: retq
;
; VL-LABEL: uito4f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; VL-NEXT: retq
%b = uitofp <4 x i32> %a to <4 x float>
@@ -1353,7 +1353,7 @@ define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
define i32 @fptosi(float %a) nounwind {
; ALL-LABEL: fptosi:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttss2si %xmm0, %eax
; ALL-NEXT: retq
%b = fptosi float %a to i32
@@ -1362,7 +1362,7 @@ define i32 @fptosi(float %a) nounwind {
define i32 @fptoui(float %a) nounwind {
; ALL-LABEL: fptoui:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttss2usi %xmm0, %eax
; ALL-NEXT: retq
%b = fptoui float %a to i32
@@ -1371,7 +1371,7 @@ define i32 @fptoui(float %a) nounwind {
define float @uitof32(i32 %a) nounwind {
; ALL-LABEL: uitof32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtusi2ssl %edi, %xmm0, %xmm0
; ALL-NEXT: retq
%b = uitofp i32 %a to float
@@ -1380,7 +1380,7 @@ define float @uitof32(i32 %a) nounwind {
define double @uitof64(i32 %a) nounwind {
; ALL-LABEL: uitof64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtusi2sdl %edi, %xmm0, %xmm0
; ALL-NEXT: retq
%b = uitofp i32 %a to double
@@ -1389,7 +1389,7 @@ define double @uitof64(i32 %a) nounwind {
define <16 x float> @sbto16f32(<16 x i32> %a) {
; NODQ-LABEL: sbto16f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NODQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NODQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1397,7 +1397,7 @@ define <16 x float> @sbto16f32(<16 x i32> %a) {
; NODQ-NEXT: retq
;
; DQ-LABEL: sbto16f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; DQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
; DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1410,7 +1410,7 @@ define <16 x float> @sbto16f32(<16 x i32> %a) {
define <16 x float> @scto16f32(<16 x i8> %a) {
; ALL-LABEL: scto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbd %xmm0, %zmm0
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1420,7 +1420,7 @@ define <16 x float> @scto16f32(<16 x i8> %a) {
define <16 x float> @ssto16f32(<16 x i16> %a) {
; ALL-LABEL: ssto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %ymm0, %zmm0
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1430,7 +1430,7 @@ define <16 x float> @ssto16f32(<16 x i16> %a) {
define <8 x double> @ssto16f64(<8 x i16> %a) {
; ALL-LABEL: ssto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %xmm0, %ymm0
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
; ALL-NEXT: retq
@@ -1440,7 +1440,7 @@ define <8 x double> @ssto16f64(<8 x i16> %a) {
define <8 x double> @scto8f64(<8 x i8> %a) {
; ALL-LABEL: scto8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: vpslld $24, %ymm0, %ymm0
; ALL-NEXT: vpsrad $24, %ymm0, %ymm0
@@ -1452,7 +1452,7 @@ define <8 x double> @scto8f64(<8 x i8> %a) {
define <16 x double> @scto16f64(<16 x i8> %a) {
; ALL-LABEL: scto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbd %xmm0, %zmm1
; ALL-NEXT: vcvtdq2pd %ymm1, %zmm0
; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1464,7 +1464,7 @@ define <16 x double> @scto16f64(<16 x i8> %a) {
define <16 x double> @sbto16f64(<16 x double> %a) {
; NOVLDQ-LABEL: sbto16f64:
-; NOVLDQ: # BB#0:
+; NOVLDQ: # %bb.0:
; NOVLDQ-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; NOVLDQ-NEXT: vcmpltpd %zmm1, %zmm2, %k1
; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm2, %k2
@@ -1477,7 +1477,7 @@ define <16 x double> @sbto16f64(<16 x double> %a) {
; NOVLDQ-NEXT: retq
;
; VLDQ-LABEL: sbto16f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; VLDQ-NEXT: vcmpltpd %zmm1, %zmm2, %k0
; VLDQ-NEXT: vcmpltpd %zmm0, %zmm2, %k1
@@ -1488,7 +1488,7 @@ define <16 x double> @sbto16f64(<16 x double> %a) {
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto16f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
; VLNODQ-NEXT: vcmpltpd %zmm1, %zmm2, %k1
; VLNODQ-NEXT: vcmpltpd %zmm0, %zmm2, %k2
@@ -1500,7 +1500,7 @@ define <16 x double> @sbto16f64(<16 x double> %a) {
; VLNODQ-NEXT: retq
;
; AVX512DQ-LABEL: sbto16f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm2, %k0
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm2, %k1
@@ -1516,7 +1516,7 @@ define <16 x double> @sbto16f64(<16 x double> %a) {
define <8 x double> @sbto8f64(<8 x double> %a) {
; NOVLDQ-LABEL: sbto8f64:
-; NOVLDQ: # BB#0:
+; NOVLDQ: # %bb.0:
; NOVLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1525,7 +1525,7 @@ define <8 x double> @sbto8f64(<8 x double> %a) {
; NOVLDQ-NEXT: retq
;
; VLDQ-LABEL: sbto8f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %ymm0
@@ -1533,7 +1533,7 @@ define <8 x double> @sbto8f64(<8 x double> %a) {
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto8f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; VLNODQ-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -1542,7 +1542,7 @@ define <8 x double> @sbto8f64(<8 x double> %a) {
; VLNODQ-NEXT: retq
;
; AVX512DQ-LABEL: sbto8f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1555,7 +1555,7 @@ define <8 x double> @sbto8f64(<8 x double> %a) {
define <8 x float> @sbto8f32(<8 x float> %a) {
; NOVLDQ-LABEL: sbto8f32:
-; NOVLDQ: # BB#0:
+; NOVLDQ: # %bb.0:
; NOVLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
@@ -1565,7 +1565,7 @@ define <8 x float> @sbto8f32(<8 x float> %a) {
; NOVLDQ-NEXT: retq
;
; VLDQ-LABEL: sbto8f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltps %ymm0, %ymm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %ymm0
@@ -1573,7 +1573,7 @@ define <8 x float> @sbto8f32(<8 x float> %a) {
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto8f32:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltps %ymm0, %ymm1, %k1
; VLNODQ-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -1582,7 +1582,7 @@ define <8 x float> @sbto8f32(<8 x float> %a) {
; VLNODQ-NEXT: retq
;
; AVX512DQ-LABEL: sbto8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
@@ -1596,14 +1596,14 @@ define <8 x float> @sbto8f32(<8 x float> %a) {
define <4 x float> @sbto4f32(<4 x float> %a) {
; NOVL-LABEL: sbto4f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vcvtdq2ps %xmm0, %xmm0
; NOVL-NEXT: retq
;
; VLDQ-LABEL: sbto4f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltps %xmm0, %xmm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %xmm0
@@ -1611,7 +1611,7 @@ define <4 x float> @sbto4f32(<4 x float> %a) {
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto4f32:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltps %xmm0, %xmm1, %k1
; VLNODQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1625,7 +1625,7 @@ define <4 x float> @sbto4f32(<4 x float> %a) {
define <4 x double> @sbto4f64(<4 x double> %a) {
; NOVL-LABEL: sbto4f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; NOVL-NEXT: vpmovqd %zmm0, %ymm0
@@ -1633,7 +1633,7 @@ define <4 x double> @sbto4f64(<4 x double> %a) {
; NOVL-NEXT: retq
;
; VLDQ-LABEL: sbto4f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltpd %ymm0, %ymm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %xmm0
@@ -1641,7 +1641,7 @@ define <4 x double> @sbto4f64(<4 x double> %a) {
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto4f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; VLNODQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1655,14 +1655,14 @@ define <4 x double> @sbto4f64(<4 x double> %a) {
define <2 x float> @sbto2f32(<2 x float> %a) {
; NOVL-LABEL: sbto2f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vcvtdq2ps %xmm0, %xmm0
; NOVL-NEXT: retq
;
; VLDQ-LABEL: sbto2f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltps %xmm0, %xmm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %xmm0
@@ -1670,7 +1670,7 @@ define <2 x float> @sbto2f32(<2 x float> %a) {
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto2f32:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltps %xmm0, %xmm1, %k1
; VLNODQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1684,7 +1684,7 @@ define <2 x float> @sbto2f32(<2 x float> %a) {
define <2 x double> @sbto2f64(<2 x double> %a) {
; NOVL-LABEL: sbto2f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1692,7 +1692,7 @@ define <2 x double> @sbto2f64(<2 x double> %a) {
; NOVL-NEXT: retq
;
; VLDQ-LABEL: sbto2f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltpd %xmm0, %xmm1, %k0
; VLDQ-NEXT: vpmovm2q %k0, %xmm0
@@ -1700,7 +1700,7 @@ define <2 x double> @sbto2f64(<2 x double> %a) {
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto2f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; VLNODQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1718,7 +1718,7 @@ define <2 x double> @sbto2f64(<2 x double> %a) {
define <16 x float> @ucto16f32(<16 x i8> %a) {
; ALL-LABEL: ucto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1728,7 +1728,7 @@ define <16 x float> @ucto16f32(<16 x i8> %a) {
define <8 x double> @ucto8f64(<8 x i8> %a) {
; ALL-LABEL: ucto8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
@@ -1739,7 +1739,7 @@ define <8 x double> @ucto8f64(<8 x i8> %a) {
define <16 x float> @swto16f32(<16 x i16> %a) {
; ALL-LABEL: swto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %ymm0, %zmm0
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1749,7 +1749,7 @@ define <16 x float> @swto16f32(<16 x i16> %a) {
define <8 x double> @swto8f64(<8 x i16> %a) {
; ALL-LABEL: swto8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %xmm0, %ymm0
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
; ALL-NEXT: retq
@@ -1759,7 +1759,7 @@ define <8 x double> @swto8f64(<8 x i16> %a) {
define <16 x double> @swto16f64(<16 x i16> %a) {
; ALL-LABEL: swto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %ymm0, %zmm1
; ALL-NEXT: vcvtdq2pd %ymm1, %zmm0
; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1771,7 +1771,7 @@ define <16 x double> @swto16f64(<16 x i16> %a) {
define <16 x double> @ucto16f64(<16 x i8> %a) {
; ALL-LABEL: ucto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; ALL-NEXT: vcvtdq2pd %ymm1, %zmm0
; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1783,7 +1783,7 @@ define <16 x double> @ucto16f64(<16 x i8> %a) {
define <16 x float> @uwto16f32(<16 x i16> %a) {
; ALL-LABEL: uwto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1793,7 +1793,7 @@ define <16 x float> @uwto16f32(<16 x i16> %a) {
define <8 x double> @uwto8f64(<8 x i16> %a) {
; ALL-LABEL: uwto8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
; ALL-NEXT: retq
@@ -1803,7 +1803,7 @@ define <8 x double> @uwto8f64(<8 x i16> %a) {
define <16 x double> @uwto16f64(<16 x i16> %a) {
; ALL-LABEL: uwto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; ALL-NEXT: vcvtdq2pd %ymm1, %zmm0
; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1815,7 +1815,7 @@ define <16 x double> @uwto16f64(<16 x i16> %a) {
define <16 x float> @sito16f32(<16 x i32> %a) {
; ALL-LABEL: sito16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
%b = sitofp <16 x i32> %a to <16 x float>
@@ -1824,7 +1824,7 @@ define <16 x float> @sito16f32(<16 x i32> %a) {
define <16 x double> @sito16f64(<16 x i32> %a) {
; ALL-LABEL: sito16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm2
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm1
@@ -1836,7 +1836,7 @@ define <16 x double> @sito16f64(<16 x i32> %a) {
define <16 x float> @usto16f32(<16 x i16> %a) {
; ALL-LABEL: usto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1846,7 +1846,7 @@ define <16 x float> @usto16f32(<16 x i16> %a) {
define <16 x float> @ubto16f32(<16 x i32> %a) {
; ALL-LABEL: ubto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; ALL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; ALL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -1859,7 +1859,7 @@ define <16 x float> @ubto16f32(<16 x i32> %a) {
define <16 x double> @ubto16f64(<16 x i32> %a) {
; NOVL-LABEL: ubto16f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVL-NEXT: movq {{.*}}(%rip), %rax
@@ -1873,7 +1873,7 @@ define <16 x double> @ubto16f64(<16 x i32> %a) {
; NOVL-NEXT: retq
;
; VL-LABEL: ubto16f64:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; VL-NEXT: movl {{.*}}(%rip), %eax
@@ -1890,7 +1890,7 @@ define <16 x double> @ubto16f64(<16 x i32> %a) {
define <8 x float> @ubto8f32(<8 x i32> %a) {
; NOVL-LABEL: ubto8f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
@@ -1901,7 +1901,7 @@ define <8 x float> @ubto8f32(<8 x i32> %a) {
; NOVL-NEXT: retq
;
; VL-LABEL: ubto8f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %ymm0, %ymm1, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
@@ -1914,7 +1914,7 @@ define <8 x float> @ubto8f32(<8 x i32> %a) {
define <8 x double> @ubto8f64(<8 x i32> %a) {
; NOVL-LABEL: ubto8f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
@@ -1924,7 +1924,7 @@ define <8 x double> @ubto8f64(<8 x i32> %a) {
; NOVL-NEXT: retq
;
; VL-LABEL: ubto8f64:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %ymm0, %ymm1, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
@@ -1937,7 +1937,7 @@ define <8 x double> @ubto8f64(<8 x i32> %a) {
define <4 x float> @ubto4f32(<4 x i32> %a) {
; NOVL-LABEL: ubto4f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
@@ -1945,7 +1945,7 @@ define <4 x float> @ubto4f32(<4 x i32> %a) {
; NOVL-NEXT: retq
;
; VL-LABEL: ubto4f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
@@ -1958,7 +1958,7 @@ define <4 x float> @ubto4f32(<4 x i32> %a) {
define <4 x double> @ubto4f64(<4 x i32> %a) {
; NOVL-LABEL: ubto4f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -1966,7 +1966,7 @@ define <4 x double> @ubto4f64(<4 x i32> %a) {
; NOVL-NEXT: retq
;
; VL-LABEL: ubto4f64:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
@@ -1979,7 +1979,7 @@ define <4 x double> @ubto4f64(<4 x i32> %a) {
define <2 x float> @ubto2f32(<2 x i32> %a) {
; NOVL-LABEL: ubto2f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; NOVL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -1993,7 +1993,7 @@ define <2 x float> @ubto2f32(<2 x i32> %a) {
; NOVL-NEXT: retq
;
; VL-LABEL: ubto2f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; VL-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
@@ -2007,7 +2007,7 @@ define <2 x float> @ubto2f32(<2 x i32> %a) {
define <2 x double> @ubto2f64(<2 x i32> %a) {
; NOVL-LABEL: ubto2f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; NOVL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -2015,7 +2015,7 @@ define <2 x double> @ubto2f64(<2 x i32> %a) {
; NOVL-NEXT: retq
;
; VLDQ-LABEL: ubto2f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; VLDQ-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
@@ -2024,7 +2024,7 @@ define <2 x double> @ubto2f64(<2 x i32> %a) {
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: ubto2f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; VLNODQ-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
diff --git a/test/CodeGen/X86/avx512-ext.ll b/test/CodeGen/X86/avx512-ext.ll
index 84698103d4a..ab7eff399f0 100644
--- a/test/CodeGen/X86/avx512-ext.ll
+++ b/test/CodeGen/X86/avx512-ext.ll
@@ -4,7 +4,7 @@
define <8 x i16> @zext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x8mem_to_8x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpsllw $15, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
@@ -12,7 +12,7 @@ define <8 x i16> @zext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -25,7 +25,7 @@ define <8 x i16> @zext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <8 x i16> @sext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x8mem_to_8x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbw (%rdi), %xmm1
; KNL-NEXT: vpsllw $15, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
@@ -33,7 +33,7 @@ define <8 x i16> @sext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxbw (%rdi), %xmm0 {%k1} {z}
@@ -47,7 +47,7 @@ define <8 x i16> @sext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <16 x i16> @zext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x8mem_to_16x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vpsllw $15, %ymm0, %ymm0
@@ -56,7 +56,7 @@ define <16 x i16> @zext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x8mem_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -69,7 +69,7 @@ define <16 x i16> @zext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwi
define <16 x i16> @sext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x8mem_to_16x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbw (%rdi), %ymm1
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vpsllw $15, %ymm0, %ymm0
@@ -78,7 +78,7 @@ define <16 x i16> @sext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x8mem_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovsxbw (%rdi), %ymm0 {%k1} {z}
@@ -91,7 +91,7 @@ define <16 x i16> @sext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwi
define <16 x i16> @zext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
; ALL-LABEL: zext_16x8_to_16x16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; ALL-NEXT: retq
%x = zext <16 x i8> %a to <16 x i16>
@@ -100,7 +100,7 @@ define <16 x i16> @zext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
define <16 x i16> @zext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x8_to_16x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -109,7 +109,7 @@ define <16 x i16> @zext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -121,7 +121,7 @@ define <16 x i16> @zext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwi
define <16 x i16> @sext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
; ALL-LABEL: sext_16x8_to_16x16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbw %xmm0, %ymm0
; ALL-NEXT: retq
%x = sext <16 x i8> %a to <16 x i16>
@@ -130,7 +130,7 @@ define <16 x i16> @sext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
define <16 x i16> @sext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x8_to_16x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovsxbw %xmm0, %ymm0
; KNL-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -139,7 +139,7 @@ define <16 x i16> @sext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovsxbw %xmm0, %ymm0 {%k1} {z}
@@ -151,7 +151,7 @@ define <16 x i16> @sext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwi
define <32 x i16> @zext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_32x8mem_to_32x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -167,7 +167,7 @@ define <32 x i16> @zext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32x8mem_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
; SKX-NEXT: vpmovb2m %ymm0, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero
@@ -180,7 +180,7 @@ define <32 x i16> @zext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwi
define <32 x i16> @sext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_32x8mem_to_32x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbw 16(%rdi), %ymm1
; KNL-NEXT: vpmovsxbw (%rdi), %ymm2
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -196,7 +196,7 @@ define <32 x i16> @sext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: sext_32x8mem_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
; SKX-NEXT: vpmovb2m %ymm0, %k1
; SKX-NEXT: vpmovsxbw (%rdi), %zmm0 {%k1} {z}
@@ -209,7 +209,7 @@ define <32 x i16> @sext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwi
define <32 x i16> @zext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
; KNL-LABEL: zext_32x8_to_32x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -217,7 +217,7 @@ define <32 x i16> @zext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32x8_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; SKX-NEXT: retq
%x = zext <32 x i8> %a to <32 x i16>
@@ -226,7 +226,7 @@ define <32 x i16> @zext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
define <32 x i16> @zext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_32x8_to_32x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm2
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -242,7 +242,7 @@ define <32 x i16> @zext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32x8_to_32x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm1, %ymm1
; SKX-NEXT: vpmovb2m %ymm1, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
@@ -254,7 +254,7 @@ define <32 x i16> @zext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwi
define <32 x i16> @sext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
; KNL-LABEL: sext_32x8_to_32x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbw %xmm0, %ymm2
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpmovsxbw %xmm0, %ymm1
@@ -262,7 +262,7 @@ define <32 x i16> @sext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
; KNL-NEXT: retq
;
; SKX-LABEL: sext_32x8_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbw %ymm0, %zmm0
; SKX-NEXT: retq
%x = sext <32 x i8> %a to <32 x i16>
@@ -271,7 +271,7 @@ define <32 x i16> @sext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
define <32 x i16> @sext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_32x8_to_32x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm2
; KNL-NEXT: vpmovsxbw %xmm2, %ymm2
; KNL-NEXT: vpmovsxbw %xmm0, %ymm0
@@ -287,7 +287,7 @@ define <32 x i16> @sext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: sext_32x8_to_32x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm1, %ymm1
; SKX-NEXT: vpmovb2m %ymm1, %k1
; SKX-NEXT: vpmovsxbw %ymm0, %zmm0 {%k1} {z}
@@ -299,7 +299,7 @@ define <32 x i16> @sext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwi
define <4 x i32> @zext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x8mem_to_4x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -307,7 +307,7 @@ define <4 x i32> @zext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x8mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxbd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -320,7 +320,7 @@ define <4 x i32> @zext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind re
define <4 x i32> @sext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x8mem_to_4x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd (%rdi), %xmm1
@@ -328,7 +328,7 @@ define <4 x i32> @sext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x8mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxbd (%rdi), %xmm0 {%k1} {z}
@@ -341,7 +341,7 @@ define <4 x i32> @sext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind re
define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x8mem_to_8x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -352,7 +352,7 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -365,7 +365,7 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x8mem_to_8x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -376,7 +376,7 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxbd (%rdi), %ymm0 {%k1} {z}
@@ -389,7 +389,7 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <16 x i32> @zext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x8mem_to_16x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -397,7 +397,7 @@ define <16 x i32> @zext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x8mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
@@ -410,7 +410,7 @@ define <16 x i32> @zext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwi
define <16 x i32> @sext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x8mem_to_16x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -418,7 +418,7 @@ define <16 x i32> @sext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwi
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x8mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovsxbd (%rdi), %zmm0 {%k1} {z}
@@ -431,7 +431,7 @@ define <16 x i32> @sext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwi
define <16 x i32> @zext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x8_to_16x32_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -439,7 +439,7 @@ define <16 x i32> @zext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x8_to_16x32_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -451,7 +451,7 @@ define <16 x i32> @zext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounw
define <16 x i32> @sext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x8_to_16x32_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -459,7 +459,7 @@ define <16 x i32> @sext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x8_to_16x32_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovsxbd %xmm0, %zmm0 {%k1} {z}
@@ -471,7 +471,7 @@ define <16 x i32> @sext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounw
define <16 x i32> @zext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
; ALL-LABEL: zext_16x8_to_16x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; ALL-NEXT: retq
%x = zext <16 x i8> %i to <16 x i32>
@@ -480,7 +480,7 @@ define <16 x i32> @zext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
define <16 x i32> @sext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
; ALL-LABEL: sext_16x8_to_16x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbd %xmm0, %zmm0
; ALL-NEXT: retq
%x = sext <16 x i8> %i to <16 x i32>
@@ -489,7 +489,7 @@ define <16 x i32> @sext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
define <2 x i64> @zext_2x8mem_to_2x64(<2 x i8> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_2x8mem_to_2x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
@@ -497,7 +497,7 @@ define <2 x i64> @zext_2x8mem_to_2x64(<2 x i8> *%i , <2 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x8mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxbq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
@@ -509,7 +509,7 @@ define <2 x i64> @zext_2x8mem_to_2x64(<2 x i8> *%i , <2 x i1> %mask) nounwind re
}
define <2 x i64> @sext_2x8mem_to_2x64mask(<2 x i8> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_2x8mem_to_2x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxbq (%rdi), %xmm1
@@ -517,7 +517,7 @@ define <2 x i64> @sext_2x8mem_to_2x64mask(<2 x i8> *%i , <2 x i1> %mask) nounwin
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x8mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxbq (%rdi), %xmm0 {%k1} {z}
@@ -529,7 +529,7 @@ define <2 x i64> @sext_2x8mem_to_2x64mask(<2 x i8> *%i , <2 x i1> %mask) nounwin
}
define <2 x i64> @sext_2x8mem_to_2x64(<2 x i8> *%i) nounwind readnone {
; ALL-LABEL: sext_2x8mem_to_2x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbq (%rdi), %xmm0
; ALL-NEXT: retq
%a = load <2 x i8>,<2 x i8> *%i,align 1
@@ -539,7 +539,7 @@ define <2 x i64> @sext_2x8mem_to_2x64(<2 x i8> *%i) nounwind readnone {
define <4 x i64> @zext_4x8mem_to_4x64(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x8mem_to_4x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -548,7 +548,7 @@ define <4 x i64> @zext_4x8mem_to_4x64(<4 x i8> *%i , <4 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x8mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxbq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
@@ -561,7 +561,7 @@ define <4 x i64> @zext_4x8mem_to_4x64(<4 x i8> *%i , <4 x i1> %mask) nounwind re
define <4 x i64> @sext_4x8mem_to_4x64mask(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x8mem_to_4x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -570,7 +570,7 @@ define <4 x i64> @sext_4x8mem_to_4x64mask(<4 x i8> *%i , <4 x i1> %mask) nounwin
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x8mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxbq (%rdi), %ymm0 {%k1} {z}
@@ -583,7 +583,7 @@ define <4 x i64> @sext_4x8mem_to_4x64mask(<4 x i8> *%i , <4 x i1> %mask) nounwin
define <4 x i64> @sext_4x8mem_to_4x64(<4 x i8> *%i) nounwind readnone {
; ALL-LABEL: sext_4x8mem_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbq (%rdi), %ymm0
; ALL-NEXT: retq
%a = load <4 x i8>,<4 x i8> *%i,align 1
@@ -593,7 +593,7 @@ define <4 x i64> @sext_4x8mem_to_4x64(<4 x i8> *%i) nounwind readnone {
define <8 x i64> @zext_8x8mem_to_8x64(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x8mem_to_8x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -601,7 +601,7 @@ define <8 x i64> @zext_8x8mem_to_8x64(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxbq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
@@ -614,7 +614,7 @@ define <8 x i64> @zext_8x8mem_to_8x64(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <8 x i64> @sext_8x8mem_to_8x64mask(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x8mem_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -622,7 +622,7 @@ define <8 x i64> @sext_8x8mem_to_8x64mask(<8 x i8> *%i , <8 x i1> %mask) nounwin
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxbq (%rdi), %zmm0 {%k1} {z}
@@ -635,7 +635,7 @@ define <8 x i64> @sext_8x8mem_to_8x64mask(<8 x i8> *%i , <8 x i1> %mask) nounwin
define <8 x i64> @sext_8x8mem_to_8x64(<8 x i8> *%i) nounwind readnone {
; ALL-LABEL: sext_8x8mem_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbq (%rdi), %zmm0
; ALL-NEXT: retq
%a = load <8 x i8>,<8 x i8> *%i,align 1
@@ -645,7 +645,7 @@ define <8 x i64> @sext_8x8mem_to_8x64(<8 x i8> *%i) nounwind readnone {
define <4 x i32> @zext_4x16mem_to_4x32(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x16mem_to_4x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -653,7 +653,7 @@ define <4 x i32> @zext_4x16mem_to_4x32(<4 x i16> *%i , <4 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x16mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -666,7 +666,7 @@ define <4 x i32> @zext_4x16mem_to_4x32(<4 x i16> *%i , <4 x i1> %mask) nounwind
define <4 x i32> @sext_4x16mem_to_4x32mask(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x16mem_to_4x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxwd (%rdi), %xmm1
@@ -674,7 +674,7 @@ define <4 x i32> @sext_4x16mem_to_4x32mask(<4 x i16> *%i , <4 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x16mem_to_4x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxwd (%rdi), %xmm0 {%k1} {z}
@@ -687,7 +687,7 @@ define <4 x i32> @sext_4x16mem_to_4x32mask(<4 x i16> *%i , <4 x i1> %mask) nounw
define <4 x i32> @sext_4x16mem_to_4x32(<4 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_4x16mem_to_4x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd (%rdi), %xmm0
; ALL-NEXT: retq
%a = load <4 x i16>,<4 x i16> *%i,align 1
@@ -698,7 +698,7 @@ define <4 x i32> @sext_4x16mem_to_4x32(<4 x i16> *%i) nounwind readnone {
define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x16mem_to_8x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -709,7 +709,7 @@ define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -722,7 +722,7 @@ define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind
define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x16mem_to_8x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -733,7 +733,7 @@ define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x16mem_to_8x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxwd (%rdi), %ymm0 {%k1} {z}
@@ -746,7 +746,7 @@ define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounw
define <8 x i32> @sext_8x16mem_to_8x32(<8 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_8x16mem_to_8x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd (%rdi), %ymm0
; ALL-NEXT: retq
%a = load <8 x i16>,<8 x i16> *%i,align 1
@@ -756,7 +756,7 @@ define <8 x i32> @sext_8x16mem_to_8x32(<8 x i16> *%i) nounwind readnone {
define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x16_to_8x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -767,7 +767,7 @@ define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16_to_8x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -779,7 +779,7 @@ define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind
define <8 x i32> @zext_8x16_to_8x32(<8 x i16> %a ) nounwind readnone {
; ALL-LABEL: zext_8x16_to_8x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: retq
%x = zext <8 x i16> %a to <8 x i32>
@@ -788,7 +788,7 @@ define <8 x i32> @zext_8x16_to_8x32(<8 x i16> %a ) nounwind readnone {
define <16 x i32> @zext_16x16mem_to_16x32(<16 x i16> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x16mem_to_16x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -796,7 +796,7 @@ define <16 x i32> @zext_16x16mem_to_16x32(<16 x i16> *%i , <16 x i1> %mask) noun
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x16mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -809,7 +809,7 @@ define <16 x i32> @zext_16x16mem_to_16x32(<16 x i16> *%i , <16 x i1> %mask) noun
define <16 x i32> @sext_16x16mem_to_16x32mask(<16 x i16> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x16mem_to_16x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -817,7 +817,7 @@ define <16 x i32> @sext_16x16mem_to_16x32mask(<16 x i16> *%i , <16 x i1> %mask)
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x16mem_to_16x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovsxwd (%rdi), %zmm0 {%k1} {z}
@@ -830,7 +830,7 @@ define <16 x i32> @sext_16x16mem_to_16x32mask(<16 x i16> *%i , <16 x i1> %mask)
define <16 x i32> @sext_16x16mem_to_16x32(<16 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_16x16mem_to_16x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd (%rdi), %zmm0
; ALL-NEXT: retq
%a = load <16 x i16>,<16 x i16> *%i,align 1
@@ -839,7 +839,7 @@ define <16 x i32> @sext_16x16mem_to_16x32(<16 x i16> *%i) nounwind readnone {
}
define <16 x i32> @zext_16x16_to_16x32mask(<16 x i16> %a , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x16_to_16x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -847,7 +847,7 @@ define <16 x i32> @zext_16x16_to_16x32mask(<16 x i16> %a , <16 x i1> %mask) noun
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x16_to_16x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
@@ -859,7 +859,7 @@ define <16 x i32> @zext_16x16_to_16x32mask(<16 x i16> %a , <16 x i1> %mask) noun
define <16 x i32> @zext_16x16_to_16x32(<16 x i16> %a ) nounwind readnone {
; ALL-LABEL: zext_16x16_to_16x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; ALL-NEXT: retq
%x = zext <16 x i16> %a to <16 x i32>
@@ -868,7 +868,7 @@ define <16 x i32> @zext_16x16_to_16x32(<16 x i16> %a ) nounwind readnone {
define <2 x i64> @zext_2x16mem_to_2x64(<2 x i16> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_2x16mem_to_2x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
@@ -876,7 +876,7 @@ define <2 x i64> @zext_2x16mem_to_2x64(<2 x i16> *%i , <2 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x16mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxwq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero
@@ -889,7 +889,7 @@ define <2 x i64> @zext_2x16mem_to_2x64(<2 x i16> *%i , <2 x i1> %mask) nounwind
define <2 x i64> @sext_2x16mem_to_2x64mask(<2 x i16> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_2x16mem_to_2x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxwq (%rdi), %xmm1
@@ -897,7 +897,7 @@ define <2 x i64> @sext_2x16mem_to_2x64mask(<2 x i16> *%i , <2 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x16mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxwq (%rdi), %xmm0 {%k1} {z}
@@ -910,7 +910,7 @@ define <2 x i64> @sext_2x16mem_to_2x64mask(<2 x i16> *%i , <2 x i1> %mask) nounw
define <2 x i64> @sext_2x16mem_to_2x64(<2 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_2x16mem_to_2x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwq (%rdi), %xmm0
; ALL-NEXT: retq
%a = load <2 x i16>,<2 x i16> *%i,align 1
@@ -920,7 +920,7 @@ define <2 x i64> @sext_2x16mem_to_2x64(<2 x i16> *%i) nounwind readnone {
define <4 x i64> @zext_4x16mem_to_4x64(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x16mem_to_4x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -929,7 +929,7 @@ define <4 x i64> @zext_4x16mem_to_4x64(<4 x i16> *%i , <4 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x16mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxwq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -942,7 +942,7 @@ define <4 x i64> @zext_4x16mem_to_4x64(<4 x i16> *%i , <4 x i1> %mask) nounwind
define <4 x i64> @sext_4x16mem_to_4x64mask(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x16mem_to_4x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -951,7 +951,7 @@ define <4 x i64> @sext_4x16mem_to_4x64mask(<4 x i16> *%i , <4 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x16mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxwq (%rdi), %ymm0 {%k1} {z}
@@ -964,7 +964,7 @@ define <4 x i64> @sext_4x16mem_to_4x64mask(<4 x i16> *%i , <4 x i1> %mask) nounw
define <4 x i64> @sext_4x16mem_to_4x64(<4 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_4x16mem_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwq (%rdi), %ymm0
; ALL-NEXT: retq
%a = load <4 x i16>,<4 x i16> *%i,align 1
@@ -974,7 +974,7 @@ define <4 x i64> @sext_4x16mem_to_4x64(<4 x i16> *%i) nounwind readnone {
define <8 x i64> @zext_8x16mem_to_8x64(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x16mem_to_8x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -982,7 +982,7 @@ define <8 x i64> @zext_8x16mem_to_8x64(<8 x i16> *%i , <8 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -995,7 +995,7 @@ define <8 x i64> @zext_8x16mem_to_8x64(<8 x i16> *%i , <8 x i1> %mask) nounwind
define <8 x i64> @sext_8x16mem_to_8x64mask(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x16mem_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -1003,7 +1003,7 @@ define <8 x i64> @sext_8x16mem_to_8x64mask(<8 x i16> *%i , <8 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x16mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxwq (%rdi), %zmm0 {%k1} {z}
@@ -1016,7 +1016,7 @@ define <8 x i64> @sext_8x16mem_to_8x64mask(<8 x i16> *%i , <8 x i1> %mask) nounw
define <8 x i64> @sext_8x16mem_to_8x64(<8 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_8x16mem_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwq (%rdi), %zmm0
; ALL-NEXT: retq
%a = load <8 x i16>,<8 x i16> *%i,align 1
@@ -1026,7 +1026,7 @@ define <8 x i64> @sext_8x16mem_to_8x64(<8 x i16> *%i) nounwind readnone {
define <8 x i64> @zext_8x16_to_8x64mask(<8 x i16> %a , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x16_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -1034,7 +1034,7 @@ define <8 x i64> @zext_8x16_to_8x64mask(<8 x i16> %a , <8 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -1046,7 +1046,7 @@ define <8 x i64> @zext_8x16_to_8x64mask(<8 x i16> %a , <8 x i1> %mask) nounwind
define <8 x i64> @zext_8x16_to_8x64(<8 x i16> %a) nounwind readnone {
; ALL-LABEL: zext_8x16_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; ALL-NEXT: retq
%ret = zext <8 x i16> %a to <8 x i64>
@@ -1055,7 +1055,7 @@ define <8 x i64> @zext_8x16_to_8x64(<8 x i16> %a) nounwind readnone {
define <2 x i64> @zext_2x32mem_to_2x64(<2 x i32> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_2x32mem_to_2x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
@@ -1063,7 +1063,7 @@ define <2 x i64> @zext_2x32mem_to_2x64(<2 x i32> *%i , <2 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x32mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero
@@ -1076,7 +1076,7 @@ define <2 x i64> @zext_2x32mem_to_2x64(<2 x i32> *%i , <2 x i1> %mask) nounwind
define <2 x i64> @sext_2x32mem_to_2x64mask(<2 x i32> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_2x32mem_to_2x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxdq (%rdi), %xmm1
@@ -1084,7 +1084,7 @@ define <2 x i64> @sext_2x32mem_to_2x64mask(<2 x i32> *%i , <2 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x32mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxdq (%rdi), %xmm0 {%k1} {z}
@@ -1097,7 +1097,7 @@ define <2 x i64> @sext_2x32mem_to_2x64mask(<2 x i32> *%i , <2 x i1> %mask) nounw
define <2 x i64> @sext_2x32mem_to_2x64(<2 x i32> *%i) nounwind readnone {
; ALL-LABEL: sext_2x32mem_to_2x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq (%rdi), %xmm0
; ALL-NEXT: retq
%a = load <2 x i32>,<2 x i32> *%i,align 1
@@ -1107,7 +1107,7 @@ define <2 x i64> @sext_2x32mem_to_2x64(<2 x i32> *%i) nounwind readnone {
define <4 x i64> @zext_4x32mem_to_4x64(<4 x i32> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x32mem_to_4x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1116,7 +1116,7 @@ define <4 x i64> @zext_4x32mem_to_4x64(<4 x i32> *%i , <4 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x32mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1129,7 +1129,7 @@ define <4 x i64> @zext_4x32mem_to_4x64(<4 x i32> *%i , <4 x i1> %mask) nounwind
define <4 x i64> @sext_4x32mem_to_4x64mask(<4 x i32> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x32mem_to_4x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -1138,7 +1138,7 @@ define <4 x i64> @sext_4x32mem_to_4x64mask(<4 x i32> *%i , <4 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x32mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxdq (%rdi), %ymm0 {%k1} {z}
@@ -1151,7 +1151,7 @@ define <4 x i64> @sext_4x32mem_to_4x64mask(<4 x i32> *%i , <4 x i1> %mask) nounw
define <4 x i64> @sext_4x32mem_to_4x64(<4 x i32> *%i) nounwind readnone {
; ALL-LABEL: sext_4x32mem_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq (%rdi), %ymm0
; ALL-NEXT: retq
%a = load <4 x i32>,<4 x i32> *%i,align 1
@@ -1161,7 +1161,7 @@ define <4 x i64> @sext_4x32mem_to_4x64(<4 x i32> *%i) nounwind readnone {
define <4 x i64> @sext_4x32_to_4x64(<4 x i32> %a) nounwind readnone {
; ALL-LABEL: sext_4x32_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq %xmm0, %ymm0
; ALL-NEXT: retq
%x = sext <4 x i32> %a to <4 x i64>
@@ -1170,7 +1170,7 @@ define <4 x i64> @sext_4x32_to_4x64(<4 x i32> %a) nounwind readnone {
define <4 x i64> @zext_4x32_to_4x64mask(<4 x i32> %a , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x32_to_4x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
@@ -1179,7 +1179,7 @@ define <4 x i64> @zext_4x32_to_4x64mask(<4 x i32> %a , <4 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x32_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1191,7 +1191,7 @@ define <4 x i64> @zext_4x32_to_4x64mask(<4 x i32> %a , <4 x i1> %mask) nounwind
define <8 x i64> @zext_8x32mem_to_8x64(<8 x i32> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x32mem_to_8x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -1199,7 +1199,7 @@ define <8 x i64> @zext_8x32mem_to_8x64(<8 x i32> *%i , <8 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x32mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -1212,7 +1212,7 @@ define <8 x i64> @zext_8x32mem_to_8x64(<8 x i32> *%i , <8 x i1> %mask) nounwind
define <8 x i64> @sext_8x32mem_to_8x64mask(<8 x i32> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x32mem_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -1220,7 +1220,7 @@ define <8 x i64> @sext_8x32mem_to_8x64mask(<8 x i32> *%i , <8 x i1> %mask) nounw
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x32mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxdq (%rdi), %zmm0 {%k1} {z}
@@ -1233,7 +1233,7 @@ define <8 x i64> @sext_8x32mem_to_8x64mask(<8 x i32> *%i , <8 x i1> %mask) nounw
define <8 x i64> @sext_8x32mem_to_8x64(<8 x i32> *%i) nounwind readnone {
; ALL-LABEL: sext_8x32mem_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq (%rdi), %zmm0
; ALL-NEXT: retq
%a = load <8 x i32>,<8 x i32> *%i,align 1
@@ -1243,7 +1243,7 @@ define <8 x i64> @sext_8x32mem_to_8x64(<8 x i32> *%i) nounwind readnone {
define <8 x i64> @sext_8x32_to_8x64(<8 x i32> %a) nounwind readnone {
; ALL-LABEL: sext_8x32_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq %ymm0, %zmm0
; ALL-NEXT: retq
%x = sext <8 x i32> %a to <8 x i64>
@@ -1252,7 +1252,7 @@ define <8 x i64> @sext_8x32_to_8x64(<8 x i32> %a) nounwind readnone {
define <8 x i64> @zext_8x32_to_8x64mask(<8 x i32> %a , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x32_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -1260,7 +1260,7 @@ define <8 x i64> @zext_8x32_to_8x64mask(<8 x i32> %a , <8 x i1> %mask) nounwind
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x32_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
@@ -1271,7 +1271,7 @@ define <8 x i64> @zext_8x32_to_8x64mask(<8 x i32> %a , <8 x i1> %mask) nounwind
}
define <8 x float> @fptrunc_test(<8 x double> %a) nounwind readnone {
; ALL-LABEL: fptrunc_test:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtpd2ps %zmm0, %ymm0
; ALL-NEXT: retq
%b = fptrunc <8 x double> %a to <8 x float>
@@ -1280,7 +1280,7 @@ define <8 x float> @fptrunc_test(<8 x double> %a) nounwind readnone {
define <8 x double> @fpext_test(<8 x float> %a) nounwind readnone {
; ALL-LABEL: fpext_test:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtps2pd %ymm0, %zmm0
; ALL-NEXT: retq
%b = fpext <8 x float> %a to <8 x double>
@@ -1289,13 +1289,13 @@ define <8 x double> @fpext_test(<8 x float> %a) nounwind readnone {
define <16 x i32> @zext_16i1_to_16xi32(i16 %b) {
; KNL-LABEL: zext_16i1_to_16xi32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16i1_to_16xi32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1306,13 +1306,13 @@ define <16 x i32> @zext_16i1_to_16xi32(i16 %b) {
define <8 x i64> @zext_8i1_to_8xi64(i8 %b) {
; KNL-LABEL: zext_8i1_to_8xi64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8i1_to_8xi64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1323,7 +1323,7 @@ define <8 x i64> @zext_8i1_to_8xi64(i8 %b) {
define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; KNL-LABEL: trunc_16i8_to_16i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -1332,7 +1332,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i8_to_16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -1345,7 +1345,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; KNL-LABEL: trunc_16i32_to_16i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -1353,7 +1353,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i32_to_16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %zmm0, %zmm0
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -1367,14 +1367,14 @@ define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
define <4 x i32> @trunc_4i32_to_4i1(<4 x i32> %a, <4 x i32> %b) {
; KNL-LABEL: trunc_4i32_to_4i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_4i32_to_4i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpslld $31, %xmm1, %xmm0
@@ -1391,7 +1391,7 @@ define <4 x i32> @trunc_4i32_to_4i1(<4 x i32> %a, <4 x i32> %b) {
define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; KNL-LABEL: trunc_8i16_to_8i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -1400,7 +1400,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_8i16_to_8i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -1413,14 +1413,14 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; KNL-LABEL: sext_8i1_8i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; KNL-NEXT: vpxor %ymm1, %ymm0, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpled %ymm0, %ymm1, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: retq
@@ -1433,7 +1433,7 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-LABEL: trunc_i32_to_i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: movw $-4, %ax
; KNL-NEXT: kmovw %eax, %k0
; KNL-NEXT: kshiftrw $1, %k0, %k0
@@ -1446,7 +1446,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movw $-4, %ax
; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kshiftrw $1, %k0, %k0
@@ -1465,14 +1465,14 @@ define i16 @trunc_i32_to_i1(i32 %a) {
define <8 x i16> @sext_8i1_8i16(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; KNL-LABEL: sext_8i1_8i16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper
@@ -1484,13 +1484,13 @@ define <8 x i16> @sext_8i1_8i16(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define <16 x i32> @sext_16i1_16i32(<16 x i32> %a1, <16 x i32> %a2) nounwind {
; KNL-LABEL: sext_16i1_16i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16i1_16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: retq
@@ -1501,13 +1501,13 @@ define <16 x i32> @sext_16i1_16i32(<16 x i32> %a1, <16 x i32> %a2) nounwind {
define <8 x i64> @sext_8i1_8i64(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; KNL-LABEL: sext_8i1_8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpmovsxdq %ymm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: retq
@@ -1518,13 +1518,13 @@ define <8 x i64> @sext_8i1_8i64(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define void @extload_v8i64(<8 x i8>* %a, <8 x i64>* %res) {
; KNL-LABEL: extload_v8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbq (%rdi), %zmm0
; KNL-NEXT: vmovdqa64 %zmm0, (%rsi)
; KNL-NEXT: retq
;
; SKX-LABEL: extload_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq (%rdi), %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsi)
; SKX-NEXT: vzeroupper
@@ -1537,7 +1537,7 @@ define void @extload_v8i64(<8 x i8>* %a, <8 x i64>* %res) {
define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
; KNL-LABEL: test21:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero,xmm7[8],zero,xmm7[9],zero,xmm7[10],zero,xmm7[11],zero,xmm7[12],zero,xmm7[13],zero,xmm7[14],zero,xmm7[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
@@ -1557,7 +1557,7 @@ define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
; KNL-NEXT: retq
;
; SKX-LABEL: test21:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %zmm2, %zmm2
; SKX-NEXT: vpmovb2m %zmm2, %k1
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -1570,7 +1570,7 @@ define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
define <16 x i16> @shuffle_zext_16x8_to_16x16(<16 x i8> %a) nounwind readnone {
; ALL-LABEL: shuffle_zext_16x8_to_16x16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; ALL-NEXT: retq
%1 = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <32 x i32> <i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7, i32 16, i32 8, i32 16, i32 9, i32 16, i32 10, i32 16, i32 11, i32 16, i32 12, i32 16, i32 13, i32 16, i32 14, i32 16, i32 15, i32 16>
@@ -1580,7 +1580,7 @@ define <16 x i16> @shuffle_zext_16x8_to_16x16(<16 x i8> %a) nounwind readnone {
define <16 x i16> @shuffle_zext_16x8_to_16x16_mask(<16 x i8> %a, <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: shuffle_zext_16x8_to_16x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -1589,7 +1589,7 @@ define <16 x i16> @shuffle_zext_16x8_to_16x16_mask(<16 x i8> %a, <16 x i1> %mask
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_zext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -1602,7 +1602,7 @@ define <16 x i16> @shuffle_zext_16x8_to_16x16_mask(<16 x i8> %a, <16 x i1> %mask
define <16 x i16> @zext_32x8_to_16x16(<32 x i8> %a) {
; ALL-LABEL: zext_32x8_to_16x16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; ALL-NEXT: retq
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 1, i32 32, i32 2, i32 32, i32 3, i32 32, i32 4, i32 32, i32 5, i32 32, i32 6, i32 32, i32 7, i32 32, i32 8, i32 32, i32 9, i32 32, i32 10, i32 32, i32 11, i32 32, i32 12, i32 32, i32 13, i32 32, i32 14, i32 32, i32 15, i32 32>
@@ -1612,7 +1612,7 @@ define <16 x i16> @zext_32x8_to_16x16(<32 x i8> %a) {
define <8 x i32> @zext_32x8_to_8x32(<32 x i8> %a) {
; ALL-LABEL: zext_32x8_to_8x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; ALL-NEXT: retq
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 4, i32 32, i32 32, i32 32, i32 5, i32 32, i32 32, i32 32, i32 6, i32 32, i32 32, i32 32, i32 7, i32 32, i32 32, i32 32>
@@ -1622,7 +1622,7 @@ define <8 x i32> @zext_32x8_to_8x32(<32 x i8> %a) {
define <4 x i64> @zext_32x8_to_4x64(<32 x i8> %a) {
; ALL-LABEL: zext_32x8_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; ALL-NEXT: retq
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
@@ -1632,7 +1632,7 @@ define <4 x i64> @zext_32x8_to_4x64(<32 x i8> %a) {
define <8 x i32> @zext_16x16_to_8x32(<16 x i16> %a) {
; ALL-LABEL: zext_16x16_to_8x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: retq
%1 = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7, i32 16>
@@ -1642,7 +1642,7 @@ define <8 x i32> @zext_16x16_to_8x32(<16 x i16> %a) {
define <4 x i64> @zext_16x16_to_4x64(<16 x i16> %a) {
; ALL-LABEL: zext_16x16_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; ALL-NEXT: retq
%1 = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 2, i32 16, i32 16, i32 16, i32 3, i32 16, i32 16, i32 16>
@@ -1652,7 +1652,7 @@ define <4 x i64> @zext_16x16_to_4x64(<16 x i16> %a) {
define <4 x i64> @zext_8x32_to_4x64(<8 x i32> %a) {
; ALL-LABEL: zext_8x32_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; ALL-NEXT: retq
%1 = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8>
@@ -1662,7 +1662,7 @@ define <4 x i64> @zext_8x32_to_4x64(<8 x i32> %a) {
define <64 x i8> @zext_64xi1_to_64xi8(<64 x i8> %x, <64 x i8> %y) #0 {
; KNL-LABEL: zext_64xi1_to_64xi8:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; KNL-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -1671,7 +1671,7 @@ define <64 x i8> @zext_64xi1_to_64xi8(<64 x i8> %x, <64 x i8> %y) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_64xi1_to_64xi8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; SKX-NEXT: vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1682,7 +1682,7 @@ define <64 x i8> @zext_64xi1_to_64xi8(<64 x i8> %x, <64 x i8> %y) #0 {
define <32 x i16> @zext_32xi1_to_32xi16(<32 x i16> %x, <32 x i16> %y) #0 {
; KNL-LABEL: zext_32xi1_to_32xi16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
; KNL-NEXT: vpsrlw $15, %ymm0, %ymm0
; KNL-NEXT: vpcmpeqw %ymm3, %ymm1, %ymm1
@@ -1690,7 +1690,7 @@ define <32 x i16> @zext_32xi1_to_32xi16(<32 x i16> %x, <32 x i16> %y) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32xi1_to_32xi16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; SKX-NEXT: vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1701,13 +1701,13 @@ define <32 x i16> @zext_32xi1_to_32xi16(<32 x i16> %x, <32 x i16> %y) #0 {
define <16 x i16> @zext_16xi1_to_16xi16(<16 x i16> %x, <16 x i16> %y) #0 {
; KNL-LABEL: zext_16xi1_to_16xi16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpsrlw $15, %ymm0, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16xi1_to_16xi16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; SKX-NEXT: vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1719,7 +1719,7 @@ define <16 x i16> @zext_16xi1_to_16xi16(<16 x i16> %x, <16 x i16> %y) #0 {
define <32 x i8> @zext_32xi1_to_32xi8(<32 x i16> %x, <32 x i16> %y) #0 {
; KNL-LABEL: zext_32xi1_to_32xi8:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
@@ -1731,7 +1731,7 @@ define <32 x i8> @zext_32xi1_to_32xi8(<32 x i16> %x, <32 x i16> %y) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32xi1_to_32xi8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; SKX-NEXT: vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1742,7 +1742,7 @@ define <32 x i8> @zext_32xi1_to_32xi8(<32 x i16> %x, <32 x i16> %y) #0 {
define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
; KNL-LABEL: zext_4xi1_to_4x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; KNL-NEXT: vpand %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -1751,7 +1751,7 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4xi1_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SKX-NEXT: vpand %xmm2, %xmm1, %xmm1
; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -1765,7 +1765,7 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
; KNL-LABEL: zext_2xi1_to_2xi64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; KNL-NEXT: vpand %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -1774,7 +1774,7 @@ define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2xi1_to_2xi64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SKX-NEXT: vpand %xmm2, %xmm1, %xmm1
; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/avx512-extract-subvector-load-store.ll b/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
index f556cb977ae..34ea468aebe 100644
--- a/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
+++ b/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
@@ -4,7 +4,7 @@
define void @load_v8i1_broadcast_4_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v8i1_broadcast_4_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $4, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -15,7 +15,7 @@ define void @load_v8i1_broadcast_4_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x doub
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k1
@@ -35,7 +35,7 @@ define void @load_v8i1_broadcast_4_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x doub
}
define void @load_v8i1_broadcast_7_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v8i1_broadcast_7_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $6, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -46,7 +46,7 @@ define void @load_v8i1_broadcast_7_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x doub
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $6, %k0, %k1
@@ -66,7 +66,7 @@ define void @load_v8i1_broadcast_7_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x doub
}
define void @load_v16i1_broadcast_8_v2i1(<16 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v16i1_broadcast_8_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -77,7 +77,7 @@ define void @load_v16i1_broadcast_8_v2i1(<16 x i1>* %a0,<2 x double> %a1,<2 x do
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -96,7 +96,7 @@ define void @load_v16i1_broadcast_8_v2i1(<16 x i1>* %a0,<2 x double> %a1,<2 x do
}
define void @load_v16i1_broadcast_8_v4i1(<16 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v16i1_broadcast_8_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -107,7 +107,7 @@ define void @load_v16i1_broadcast_8_v4i1(<16 x i1>* %a0,<4 x float> %a1,<4 x flo
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -126,7 +126,7 @@ define void @load_v16i1_broadcast_8_v4i1(<16 x i1>* %a0,<4 x float> %a1,<4 x flo
}
define void @load_v16i1_broadcast_15_v2i1(<16 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v16i1_broadcast_15_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $14, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -137,7 +137,7 @@ define void @load_v16i1_broadcast_15_v2i1(<16 x i1>* %a0,<2 x double> %a1,<2 x d
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $14, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -156,7 +156,7 @@ define void @load_v16i1_broadcast_15_v2i1(<16 x i1>* %a0,<2 x double> %a1,<2 x d
}
define void @load_v16i1_broadcast_15_v4i1(<16 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v16i1_broadcast_15_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $12, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -167,7 +167,7 @@ define void @load_v16i1_broadcast_15_v4i1(<16 x i1>* %a0,<4 x float> %a1,<4 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $12, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -186,7 +186,7 @@ define void @load_v16i1_broadcast_15_v4i1(<16 x i1>* %a0,<4 x float> %a1,<4 x fl
}
define void @load_v32i1_broadcast_16_v2i1(<32 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_16_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -197,7 +197,7 @@ define void @load_v32i1_broadcast_16_v2i1(<32 x i1>* %a0,<2 x double> %a1,<2 x d
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -216,7 +216,7 @@ define void @load_v32i1_broadcast_16_v2i1(<32 x i1>* %a0,<2 x double> %a1,<2 x d
}
define void @load_v32i1_broadcast_16_v4i1(<32 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_16_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -227,7 +227,7 @@ define void @load_v32i1_broadcast_16_v4i1(<32 x i1>* %a0,<4 x float> %a1,<4 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -246,7 +246,7 @@ define void @load_v32i1_broadcast_16_v4i1(<32 x i1>* %a0,<4 x float> %a1,<4 x fl
}
define void @load_v32i1_broadcast_16_v8i1(<32 x i1>* %a0,<8 x float> %a1,<8 x float> %a2,<8 x float>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_16_v8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm2
@@ -258,7 +258,7 @@ define void @load_v32i1_broadcast_16_v8i1(<32 x i1>* %a0,<8 x float> %a1,<8 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v8i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -277,7 +277,7 @@ define void @load_v32i1_broadcast_16_v8i1(<32 x i1>* %a0,<8 x float> %a1,<8 x fl
}
define void @load_v32i1_broadcast_31_v2i1(<32 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_31_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $30, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -288,7 +288,7 @@ define void @load_v32i1_broadcast_31_v2i1(<32 x i1>* %a0,<2 x double> %a1,<2 x d
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $30, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -307,7 +307,7 @@ define void @load_v32i1_broadcast_31_v2i1(<32 x i1>* %a0,<2 x double> %a1,<2 x d
}
define void @load_v32i1_broadcast_31_v4i1(<32 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_31_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $28, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -318,7 +318,7 @@ define void @load_v32i1_broadcast_31_v4i1(<32 x i1>* %a0,<4 x float> %a1,<4 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $28, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -337,7 +337,7 @@ define void @load_v32i1_broadcast_31_v4i1(<32 x i1>* %a0,<4 x float> %a1,<4 x fl
}
define void @load_v32i1_broadcast_31_v8i1(<32 x i1>* %a0,<8 x float> %a1,<8 x float> %a2,<8 x float>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_31_v8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $24, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm2
@@ -350,7 +350,7 @@ define void @load_v32i1_broadcast_31_v8i1(<32 x i1>* %a0,<8 x float> %a1,<8 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v8i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $24, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -370,7 +370,7 @@ define void @load_v32i1_broadcast_31_v8i1(<32 x i1>* %a0,<8 x float> %a1,<8 x fl
}
define void @load_v64i1_broadcast_32_v2i1(<64 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_32_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -381,7 +381,7 @@ define void @load_v64i1_broadcast_32_v2i1(<64 x i1>* %a0,<2 x double> %a1,<2 x d
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -400,7 +400,7 @@ define void @load_v64i1_broadcast_32_v2i1(<64 x i1>* %a0,<2 x double> %a1,<2 x d
}
define void @load_v64i1_broadcast_32_v4i1(<64 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_32_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -411,7 +411,7 @@ define void @load_v64i1_broadcast_32_v4i1(<64 x i1>* %a0,<4 x float> %a1,<4 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -430,7 +430,7 @@ define void @load_v64i1_broadcast_32_v4i1(<64 x i1>* %a0,<4 x float> %a1,<4 x fl
}
define void @load_v64i1_broadcast_32_v8i1(<64 x i1>* %a0,<8 x float> %a1,<8 x float> %a2,<8 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_32_v8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm2
@@ -442,7 +442,7 @@ define void @load_v64i1_broadcast_32_v8i1(<64 x i1>* %a0,<8 x float> %a1,<8 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v8i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -461,7 +461,7 @@ define void @load_v64i1_broadcast_32_v8i1(<64 x i1>* %a0,<8 x float> %a1,<8 x fl
}
define void @load_v64i1_broadcast_32_v16i1(<64 x i1>* %a0,<16 x float> %a1,<16 x float> %a2,<16 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_32_v16i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %zmm2
@@ -473,7 +473,7 @@ define void @load_v64i1_broadcast_32_v16i1(<64 x i1>* %a0,<16 x float> %a1,<16 x
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v16i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -492,7 +492,7 @@ define void @load_v64i1_broadcast_32_v16i1(<64 x i1>* %a0,<16 x float> %a1,<16 x
}
define void @load_v64i1_broadcast_63_v2i1(<64 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_63_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $62, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -503,7 +503,7 @@ define void @load_v64i1_broadcast_63_v2i1(<64 x i1>* %a0,<2 x double> %a1,<2 x d
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $62, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -522,7 +522,7 @@ define void @load_v64i1_broadcast_63_v2i1(<64 x i1>* %a0,<2 x double> %a1,<2 x d
}
define void @load_v64i1_broadcast_63_v4i1(<64 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_63_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $60, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -533,7 +533,7 @@ define void @load_v64i1_broadcast_63_v4i1(<64 x i1>* %a0,<4 x float> %a1,<4 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $60, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -552,7 +552,7 @@ define void @load_v64i1_broadcast_63_v4i1(<64 x i1>* %a0,<4 x float> %a1,<4 x fl
}
define void @load_v64i1_broadcast_63_v8i1(<64 x i1>* %a0,<8 x float> %a1,<8 x float> %a2,<8 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_63_v8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $56, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm2
@@ -565,7 +565,7 @@ define void @load_v64i1_broadcast_63_v8i1(<64 x i1>* %a0,<8 x float> %a1,<8 x fl
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v8i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $56, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -585,7 +585,7 @@ define void @load_v64i1_broadcast_63_v8i1(<64 x i1>* %a0,<8 x float> %a1,<8 x fl
}
define void @load_v64i1_broadcast_63_v16i1(<64 x i1>* %a0,<16 x float> %a1,<16 x float> %a2,<16 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_63_v16i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $48, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %zmm2
@@ -598,7 +598,7 @@ define void @load_v64i1_broadcast_63_v16i1(<64 x i1>* %a0,<16 x float> %a1,<16 x
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v16i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $48, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -618,14 +618,14 @@ define void @load_v64i1_broadcast_63_v16i1(<64 x i1>* %a0,<16 x float> %a1,<16 x
}
define void @load_v2i1_broadcast_1_v1i1_store(<2 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v2i1_broadcast_1_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $1, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v2i1_broadcast_1_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $1, %k0, %k0
@@ -639,14 +639,14 @@ define void @load_v2i1_broadcast_1_v1i1_store(<2 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v3i1_broadcast_1_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v3i1_broadcast_1_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $1, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v3i1_broadcast_1_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $1, %k0, %k0
@@ -660,14 +660,14 @@ define void @load_v3i1_broadcast_1_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v3i1_broadcast_2_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v3i1_broadcast_2_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $2, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v3i1_broadcast_2_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $2, %k0, %k0
@@ -681,14 +681,14 @@ define void @load_v3i1_broadcast_2_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v4i1_broadcast_2_v1i1_store(<4 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v4i1_broadcast_2_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $2, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v4i1_broadcast_2_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $2, %k0, %k0
@@ -702,14 +702,14 @@ define void @load_v4i1_broadcast_2_v1i1_store(<4 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v4i1_broadcast_3_v1i1_store(<4 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v4i1_broadcast_3_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $3, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v4i1_broadcast_3_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $3, %k0, %k0
@@ -723,14 +723,14 @@ define void @load_v4i1_broadcast_3_v1i1_store(<4 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v8i1_broadcast_4_v1i1_store(<8 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v8i1_broadcast_4_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $4, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k0
@@ -744,7 +744,7 @@ define void @load_v8i1_broadcast_4_v1i1_store(<8 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v8i1_broadcast_4_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v8i1_broadcast_4_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $4, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -754,7 +754,7 @@ define void @load_v8i1_broadcast_4_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k1
@@ -773,14 +773,14 @@ define void @load_v8i1_broadcast_4_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
}
define void @load_v8i1_broadcast_7_v1i1_store(<8 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v8i1_broadcast_7_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $7, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $7, %k0, %k0
@@ -794,7 +794,7 @@ define void @load_v8i1_broadcast_7_v1i1_store(<8 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v8i1_broadcast_7_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v8i1_broadcast_7_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $6, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -804,7 +804,7 @@ define void @load_v8i1_broadcast_7_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $6, %k0, %k1
@@ -823,14 +823,14 @@ define void @load_v8i1_broadcast_7_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
}
define void @load_v16i1_broadcast_8_v1i1_store(<16 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_8_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -843,7 +843,7 @@ define void @load_v16i1_broadcast_8_v1i1_store(<16 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v16i1_broadcast_8_v2i1_store(<16 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_8_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -853,7 +853,7 @@ define void @load_v16i1_broadcast_8_v2i1_store(<16 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -871,7 +871,7 @@ define void @load_v16i1_broadcast_8_v2i1_store(<16 x i1>* %a0,<2 x i1>* %a1) {
}
define void @load_v16i1_broadcast_8_v4i1_store(<16 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_8_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -881,7 +881,7 @@ define void @load_v16i1_broadcast_8_v4i1_store(<16 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -899,14 +899,14 @@ define void @load_v16i1_broadcast_8_v4i1_store(<16 x i1>* %a0,<4 x i1>* %a1) {
}
define void @load_v16i1_broadcast_15_v1i1_store(<16 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_15_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $15, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -919,7 +919,7 @@ define void @load_v16i1_broadcast_15_v1i1_store(<16 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v16i1_broadcast_15_v2i1_store(<16 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_15_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $14, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -929,7 +929,7 @@ define void @load_v16i1_broadcast_15_v2i1_store(<16 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $14, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -947,7 +947,7 @@ define void @load_v16i1_broadcast_15_v2i1_store(<16 x i1>* %a0,<2 x i1>* %a1) {
}
define void @load_v16i1_broadcast_15_v4i1_store(<16 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_15_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $12, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -957,7 +957,7 @@ define void @load_v16i1_broadcast_15_v4i1_store(<16 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $12, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -975,14 +975,14 @@ define void @load_v16i1_broadcast_15_v4i1_store(<16 x i1>* %a0,<4 x i1>* %a1) {
}
define void @load_v32i1_broadcast_16_v1i1_store(<32 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_16_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -995,7 +995,7 @@ define void @load_v32i1_broadcast_16_v1i1_store(<32 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v32i1_broadcast_16_v2i1_store(<32 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_16_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -1005,7 +1005,7 @@ define void @load_v32i1_broadcast_16_v2i1_store(<32 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1023,7 +1023,7 @@ define void @load_v32i1_broadcast_16_v2i1_store(<32 x i1>* %a0,<2 x i1>* %a1) {
}
define void @load_v32i1_broadcast_16_v4i1_store(<32 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_16_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -1033,7 +1033,7 @@ define void @load_v32i1_broadcast_16_v4i1_store(<32 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1051,7 +1051,7 @@ define void @load_v32i1_broadcast_16_v4i1_store(<32 x i1>* %a0,<4 x i1>* %a1) {
}
define void @load_v32i1_broadcast_16_v8i1_store(<32 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_16_v8i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm0
@@ -1062,7 +1062,7 @@ define void @load_v32i1_broadcast_16_v8i1_store(<32 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v8i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1080,14 +1080,14 @@ define void @load_v32i1_broadcast_16_v8i1_store(<32 x i1>* %a0,<8 x i1>* %a1) {
}
define void @load_v32i1_broadcast_31_v1i1_store(<32 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_31_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $31, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $31, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -1100,7 +1100,7 @@ define void @load_v32i1_broadcast_31_v1i1_store(<32 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v32i1_broadcast_31_v2i1_store(<32 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_31_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $30, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -1110,7 +1110,7 @@ define void @load_v32i1_broadcast_31_v2i1_store(<32 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $30, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1128,7 +1128,7 @@ define void @load_v32i1_broadcast_31_v2i1_store(<32 x i1>* %a0,<2 x i1>* %a1) {
}
define void @load_v32i1_broadcast_31_v4i1_store(<32 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_31_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $28, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -1138,7 +1138,7 @@ define void @load_v32i1_broadcast_31_v4i1_store(<32 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $28, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1156,7 +1156,7 @@ define void @load_v32i1_broadcast_31_v4i1_store(<32 x i1>* %a0,<4 x i1>* %a1) {
}
define void @load_v32i1_broadcast_31_v8i1_store(<32 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_31_v8i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $24, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm0
@@ -1168,7 +1168,7 @@ define void @load_v32i1_broadcast_31_v8i1_store(<32 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v8i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $24, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1187,14 +1187,14 @@ define void @load_v32i1_broadcast_31_v8i1_store(<32 x i1>* %a0,<8 x i1>* %a1) {
}
define void @load_v64i1_broadcast_32_v1i1_store(<64 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -1207,7 +1207,7 @@ define void @load_v64i1_broadcast_32_v1i1_store(<64 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v64i1_broadcast_32_v2i1_store(<64 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -1217,7 +1217,7 @@ define void @load_v64i1_broadcast_32_v2i1_store(<64 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1235,7 +1235,7 @@ define void @load_v64i1_broadcast_32_v2i1_store(<64 x i1>* %a0,<2 x i1>* %a1) {
}
define void @load_v64i1_broadcast_32_v4i1_store(<64 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -1245,7 +1245,7 @@ define void @load_v64i1_broadcast_32_v4i1_store(<64 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1263,7 +1263,7 @@ define void @load_v64i1_broadcast_32_v4i1_store(<64 x i1>* %a0,<4 x i1>* %a1) {
}
define void @load_v64i1_broadcast_32_v8i1_store(<64 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v8i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm0
@@ -1274,7 +1274,7 @@ define void @load_v64i1_broadcast_32_v8i1_store(<64 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v8i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1292,7 +1292,7 @@ define void @load_v64i1_broadcast_32_v8i1_store(<64 x i1>* %a0,<8 x i1>* %a1) {
}
define void @load_v64i1_broadcast_32_v16i1_store(<64 x i1>* %a0,<16 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v16i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %zmm0
@@ -1303,7 +1303,7 @@ define void @load_v64i1_broadcast_32_v16i1_store(<64 x i1>* %a0,<16 x i1>* %a1)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v16i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1320,14 +1320,14 @@ define void @load_v64i1_broadcast_32_v16i1_store(<64 x i1>* %a0,<16 x i1>* %a1)
}
define void @load_v64i1_broadcast_63_v1i1_store(<64 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $63, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $63, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -1340,7 +1340,7 @@ define void @load_v64i1_broadcast_63_v1i1_store(<64 x i1>* %a0,<1 x i1>* %a1) {
}
define void @load_v64i1_broadcast_63_v2i1_store(<64 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $62, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -1350,7 +1350,7 @@ define void @load_v64i1_broadcast_63_v2i1_store(<64 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $62, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1368,7 +1368,7 @@ define void @load_v64i1_broadcast_63_v2i1_store(<64 x i1>* %a0,<2 x i1>* %a1) {
}
define void @load_v64i1_broadcast_63_v4i1_store(<64 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $60, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -1378,7 +1378,7 @@ define void @load_v64i1_broadcast_63_v4i1_store(<64 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $60, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1396,7 +1396,7 @@ define void @load_v64i1_broadcast_63_v4i1_store(<64 x i1>* %a0,<4 x i1>* %a1) {
}
define void @load_v64i1_broadcast_63_v8i1_store(<64 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v8i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $56, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm0
@@ -1408,7 +1408,7 @@ define void @load_v64i1_broadcast_63_v8i1_store(<64 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v8i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $56, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1427,7 +1427,7 @@ define void @load_v64i1_broadcast_63_v8i1_store(<64 x i1>* %a0,<8 x i1>* %a1) {
}
define void @load_v64i1_broadcast_63_v16i1_store(<64 x i1>* %a0,<16 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v16i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $48, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %zmm0
@@ -1439,7 +1439,7 @@ define void @load_v64i1_broadcast_63_v16i1_store(<64 x i1>* %a0,<16 x i1>* %a1)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v16i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $48, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
diff --git a/test/CodeGen/X86/avx512-extract-subvector.ll b/test/CodeGen/X86/avx512-extract-subvector.ll
index 8a63f5b8c09..6eedb5a5e9d 100644
--- a/test/CodeGen/X86/avx512-extract-subvector.ll
+++ b/test/CodeGen/X86/avx512-extract-subvector.ll
@@ -4,7 +4,7 @@
define <8 x i16> @extract_subvector128_v32i16(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -14,7 +14,7 @@ define <8 x i16> @extract_subvector128_v32i16(<32 x i16> %x) nounwind {
define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16_first_element:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -24,7 +24,7 @@ define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounw
define <16 x i8> @extract_subvector128_v64i8(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -34,7 +34,7 @@ define <16 x i8> @extract_subvector128_v64i8(<64 x i8> %x) nounwind {
define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8_first_element:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -45,7 +45,7 @@ define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwin
define <16 x i16> @extract_subvector256_v32i16(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector256_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; SKX-NEXT: retq
%r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -54,7 +54,7 @@ define <16 x i16> @extract_subvector256_v32i16(<32 x i16> %x) nounwind {
define <32 x i8> @extract_subvector256_v64i8(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector256_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; SKX-NEXT: retq
%r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
@@ -63,7 +63,7 @@ define <32 x i8> @extract_subvector256_v64i8(<64 x i8> %x) nounwind {
define void @extract_subvector256_v8f64_store(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8f64_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -76,7 +76,7 @@ entry:
define void @extract_subvector256_v8f32_store(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8f32_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -89,7 +89,7 @@ entry:
define void @extract_subvector256_v4i64_store(i64* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4i64_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -102,7 +102,7 @@ entry:
define void @extract_subvector256_v8i32_store(i32* nocapture %addr, <8 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8i32_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -115,7 +115,7 @@ entry:
define void @extract_subvector256_v16i16_store(i16* nocapture %addr, <16 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v16i16_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -128,7 +128,7 @@ entry:
define void @extract_subvector256_v32i8_store(i8* nocapture %addr, <32 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v32i8_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -141,7 +141,7 @@ entry:
define void @extract_subvector256_v4f64_store_lo(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4f64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -154,7 +154,7 @@ entry:
define void @extract_subvector256_v4f64_store_lo_align_16(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4f64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -167,7 +167,7 @@ entry:
define void @extract_subvector256_v4f32_store_lo(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4f32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -180,7 +180,7 @@ entry:
define void @extract_subvector256_v4f32_store_lo_align_16(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4f32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -193,7 +193,7 @@ entry:
define void @extract_subvector256_v2i64_store_lo(i64* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v2i64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -206,7 +206,7 @@ entry:
define void @extract_subvector256_v2i64_store_lo_align_16(i64* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v2i64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -219,7 +219,7 @@ entry:
define void @extract_subvector256_v4i32_store_lo(i32* nocapture %addr, <8 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4i32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -232,7 +232,7 @@ entry:
define void @extract_subvector256_v4i32_store_lo_align_16(i32* nocapture %addr, <8 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4i32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -245,7 +245,7 @@ entry:
define void @extract_subvector256_v8i16_store_lo(i16* nocapture %addr, <16 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8i16_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -258,7 +258,7 @@ entry:
define void @extract_subvector256_v8i16_store_lo_align_16(i16* nocapture %addr, <16 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8i16_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -271,7 +271,7 @@ entry:
define void @extract_subvector256_v16i8_store_lo(i8* nocapture %addr, <32 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v16i8_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -284,7 +284,7 @@ entry:
define void @extract_subvector256_v16i8_store_lo_align_16(i8* nocapture %addr, <32 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v16i8_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -297,7 +297,7 @@ entry:
define void @extract_subvector512_v2f64_store_lo(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v2f64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -310,7 +310,7 @@ entry:
define void @extract_subvector512_v2f64_store_lo_align_16(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v2f64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -323,7 +323,7 @@ entry:
define void @extract_subvector512_v4f32_store_lo(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -336,7 +336,7 @@ entry:
define void @extract_subvector512_v4f32_store_lo_align_16(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -349,7 +349,7 @@ entry:
define void @extract_subvector512_v2i64_store_lo(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v2i64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -362,7 +362,7 @@ entry:
define void @extract_subvector512_v2i64_store_lo_align_16(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v2i64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -375,7 +375,7 @@ entry:
define void @extract_subvector512_v4i32_store_lo(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -388,7 +388,7 @@ entry:
define void @extract_subvector512_v4i32_store_lo_align_16(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -401,7 +401,7 @@ entry:
define void @extract_subvector512_v8i16_store_lo(i16* nocapture %addr, <32 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8i16_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -414,7 +414,7 @@ entry:
define void @extract_subvector512_v16i8_store_lo(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i8_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -427,7 +427,7 @@ entry:
define void @extract_subvector512_v16i8_store_lo_align_16(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i8_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -440,7 +440,7 @@ entry:
define void @extract_subvector512_v4f64_store_lo(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -453,7 +453,7 @@ entry:
define void @extract_subvector512_v4f64_store_lo_align_16(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -466,7 +466,7 @@ entry:
define void @extract_subvector512_v4f64_store_lo_align_32(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f64_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -479,7 +479,7 @@ entry:
define void @extract_subvector512_v8f32_store_lo(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8f32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -492,7 +492,7 @@ entry:
define void @extract_subvector512_v8f32_store_lo_align_16(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8f32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -505,7 +505,7 @@ entry:
define void @extract_subvector512_v8f32_store_lo_align_32(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8f32_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -518,7 +518,7 @@ entry:
define void @extract_subvector512_v4i64_store_lo(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -531,7 +531,7 @@ entry:
define void @extract_subvector512_v4i64_store_lo_align_16(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -544,7 +544,7 @@ entry:
define void @extract_subvector512_v4i64_store_lo_align_32(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i64_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -557,7 +557,7 @@ entry:
define void @extract_subvector512_v8i32_store_lo(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8i32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -570,7 +570,7 @@ entry:
define void @extract_subvector512_v8i32_store_lo_align_16(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8i32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -583,7 +583,7 @@ entry:
define void @extract_subvector512_v8i32_store_lo_align_32(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8i32_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -596,7 +596,7 @@ entry:
define void @extract_subvector512_v16i16_store_lo(i16* nocapture %addr, <32 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i16_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -609,7 +609,7 @@ entry:
define void @extract_subvector512_v16i16_store_lo_align_16(i16* nocapture %addr, <32 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i16_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -622,7 +622,7 @@ entry:
define void @extract_subvector512_v16i16_store_lo_align_32(i16* nocapture %addr, <32 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i16_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -635,7 +635,7 @@ entry:
define void @extract_subvector512_v32i8_store_lo(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v32i8_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -648,7 +648,7 @@ entry:
define void @extract_subvector512_v32i8_store_lo_align_16(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v32i8_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -661,7 +661,7 @@ entry:
define void @extract_subvector512_v32i8_store_lo_align_32(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v32i8_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -674,7 +674,7 @@ entry:
define <4 x double> @test_mm512_mask_extractf64x4_pd(<4 x double> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf64x4_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x4 $1, %zmm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -688,7 +688,7 @@ entry:
define <4 x double> @test_mm512_maskz_extractf64x4_pd(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf64x4_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -702,7 +702,7 @@ entry:
define <4 x float> @test_mm512_mask_extractf32x4_ps(<4 x float> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf32x4_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %zmm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -718,7 +718,7 @@ entry:
define <4 x float> @test_mm512_maskz_extractf32x4_ps(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf32x4_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %zmm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -734,7 +734,7 @@ entry:
define <2 x double> @test_mm256_mask_extractf64x2_pd(<2 x double> %__W, i8 %__U, <4 x double> %__A) {
; SKX-LABEL: test_mm256_mask_extractf64x2_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $1, %ymm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -749,7 +749,7 @@ entry:
define <2 x double> @test_mm256_maskz_extractf64x2_pd(i8 %__U, <4 x double> %__A) {
; SKX-LABEL: test_mm256_maskz_extractf64x2_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -764,7 +764,7 @@ entry:
define <2 x i64> @test_mm256_mask_extracti64x2_epi64(<2 x i64> %__W, i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_mask_extracti64x2_epi64:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti64x2 $1, %ymm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -779,7 +779,7 @@ entry:
define <2 x i64> @test_mm256_maskz_extracti64x2_epi64(i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_maskz_extracti64x2_epi64:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti64x2 $1, %ymm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -794,7 +794,7 @@ entry:
define <4 x float> @test_mm256_mask_extractf32x4_ps(<4 x float> %__W, i8 %__U, <8 x float> %__A) {
; SKX-LABEL: test_mm256_mask_extractf32x4_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %ymm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -809,7 +809,7 @@ entry:
define <4 x float> @test_mm256_maskz_extractf32x4_ps(i8 %__U, <8 x float> %__A) {
; SKX-LABEL: test_mm256_maskz_extractf32x4_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %ymm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -824,7 +824,7 @@ entry:
define <2 x i64> @test_mm256_mask_extracti32x4_epi32(<2 x i64> %__W, i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_mask_extracti32x4_epi32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti32x4 $1, %ymm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -842,7 +842,7 @@ entry:
define <2 x i64> @test_mm256_maskz_extracti32x4_epi32(i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_maskz_extracti32x4_epi32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti32x4 $1, %ymm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -859,7 +859,7 @@ entry:
define <8 x float> @test_mm512_mask_extractf32x8_ps(<8 x float> %__W, i8 %__U, <16 x float> %__A) {
; SKX-LABEL: test_mm512_mask_extractf32x8_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x8 $1, %zmm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -872,7 +872,7 @@ entry:
define <8 x float> @test_mm512_maskz_extractf32x8_ps(i8 %__U, <16 x float> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf32x8_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -885,7 +885,7 @@ entry:
define <2 x double> @test_mm512_mask_extractf64x2_pd(<2 x double> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf64x2_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $3, %zmm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -900,7 +900,7 @@ entry:
define <2 x double> @test_mm512_maskz_extractf64x2_pd(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf64x2_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $3, %zmm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx512-fma-commute.ll b/test/CodeGen/X86/avx512-fma-commute.ll
index 8dd484787a9..19425517927 100644
--- a/test/CodeGen/X86/avx512-fma-commute.ll
+++ b/test/CodeGen/X86/avx512-fma-commute.ll
@@ -8,7 +8,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>
define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss_load0(<4 x float>* %x0ptr, <4 x float> %x1, <4 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd231ss (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -19,7 +19,7 @@ define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss_load0(<4 x float>* %x0pt
define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss_load1(<4 x float> %x0, <4 x float>* %x1ptr, <4 x float> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd231ss (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -30,7 +30,7 @@ define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss_load1(<4 x float> %x0, <
define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd_load0(<2 x double>* %x0ptr, <2 x double> %x1, <2 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd231sd (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -41,7 +41,7 @@ define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd_load0(<2 x double>* %x0
define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd_load1(<2 x double> %x0, <2 x double>* %x1ptr, <2 x double> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd231sd (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -52,7 +52,7 @@ define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd_load1(<2 x double> %x0,
define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss_load0(<4 x float>* %x0ptr, <4 x float> %x1, <4 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmsub231ss (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -63,7 +63,7 @@ define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss_load0(<4 x float>* %x0pt
define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss_load1(<4 x float> %x0, <4 x float>* %x1ptr, <4 x float> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmsub231ss (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -74,7 +74,7 @@ define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss_load1(<4 x float> %x0, <
define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd_load0(<2 x double>* %x0ptr, <2 x double> %x1, <2 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmsub231sd (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -85,7 +85,7 @@ define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd_load0(<2 x double>* %x0
define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd_load1(<2 x double> %x0, <2 x double>* %x1ptr, <2 x double> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmsub231sd (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-fma-intrinsics.ll b/test/CodeGen/X86/avx512-fma-intrinsics.ll
index 27350f5d4c3..f24856e54da 100644
--- a/test/CodeGen/X86/avx512-fma-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-fma-intrinsics.ll
@@ -6,7 +6,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double>, <8 x doub
define <16 x float> @test_x86_vfnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfnmadd_ps_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
@@ -16,7 +16,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float>, <16 x fl
define <16 x float> @test_mask_vfnmadd_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_vfnmadd_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -26,7 +26,7 @@ define <16 x float> @test_mask_vfnmadd_ps(<16 x float> %a0, <16 x float> %a1, <1
define <8 x double> @test_x86_vfnmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfnmadd_pd_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmadd213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -36,7 +36,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double>, <8 x dou
define <8 x double> @test_mask_vfnmadd_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmadd_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -46,7 +46,7 @@ define <8 x double> @test_mask_vfnmadd_pd(<8 x double> %a0, <8 x double> %a1, <8
define <16 x float> @test_x86_vfnmsubps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfnmsubps_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
@@ -56,7 +56,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float>, <16 x fl
define <16 x float> @test_mask_vfnmsub_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_vfnmsub_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -66,7 +66,7 @@ define <16 x float> @test_mask_vfnmsub_ps(<16 x float> %a0, <16 x float> %a1, <1
define <8 x double> @test_x86_vfnmsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfnmsubpd_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -76,7 +76,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double>, <8 x dou
define <8 x double> @test_mask_vfnmsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmsub_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -86,7 +86,7 @@ define <8 x double> @test_mask_vfnmsub_pd(<8 x double> %a0, <8 x double> %a1, <8
define <16 x float> @test_x86_vfmaddsubps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfmaddsubps_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmaddsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
@@ -95,7 +95,7 @@ define <16 x float> @test_x86_vfmaddsubps_z(<16 x float> %a0, <16 x float> %a1,
define <16 x float> @test_mask_fmaddsub_ps(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: test_mask_fmaddsub_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -107,7 +107,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float>, <16 x
define <8 x double> @test_x86_vfmaddsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfmaddsubpd_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -117,7 +117,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double>, <8 x d
define <8 x double> @test_mask_vfmaddsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmaddsub_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -127,7 +127,7 @@ define <8 x double> @test_mask_vfmaddsub_pd(<8 x double> %a0, <8 x double> %a1,
define <8 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfmaddsub132pd %zmm1, %zmm2, %zmm3 {%k1}
@@ -144,7 +144,7 @@ declare <8 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.512(<8 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfmaddsub231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -161,7 +161,7 @@ declare <8 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.512(<8 x double>, <8 x
define <8 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm1, %zmm3
; CHECK-NEXT: vfmaddsub213pd %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -176,7 +176,7 @@ define <8 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_512(<8 x double> %x0,
define <16 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfmaddsub132ps %zmm1, %zmm2, %zmm3 {%k1}
@@ -193,7 +193,7 @@ declare <16 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfmaddsub231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -210,7 +210,7 @@ declare <16 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vfmaddsub213ps %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -227,7 +227,7 @@ declare <8 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.512(<8 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfmsubadd231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -244,7 +244,7 @@ declare <16 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfmsubadd231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -259,7 +259,7 @@ define <16 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_512(<16 x float> %x0,
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -269,7 +269,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rne(<16 x float> %a0, <16
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -279,7 +279,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtn(<16 x float> %a0, <16
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -289,7 +289,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtp(<16 x float> %a0, <16
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -299,7 +299,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtz(<16 x float> %a0, <16
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -309,7 +309,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_current(<16 x float> %a0,
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 0) nounwind
@@ -318,7 +318,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rne(<16 x float> %a0, <16
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rd-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 1) nounwind
@@ -327,7 +327,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtn(<16 x float> %a0, <16
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 2) nounwind
@@ -336,7 +336,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtp(<16 x float> %a0, <16
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rz-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 3) nounwind
@@ -345,7 +345,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtz(<16 x float> %a0, <16
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
@@ -356,7 +356,7 @@ declare <8 x double> @llvm.x86.avx512.mask3.vfmsub.pd.512(<8 x double>, <8 x dou
define <8 x double>@test_int_x86_avx512_mask3_vfmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfmsub231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -373,7 +373,7 @@ declare <16 x float> @llvm.x86.avx512.mask3.vfmsub.ps.512(<16 x float>, <16 x fl
define <16 x float>@test_int_x86_avx512_mask3_vfmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfmsub231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -388,7 +388,7 @@ define <16 x float>@test_int_x86_avx512_mask3_vfmsub_ps_512(<16 x float> %x0, <1
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -398,7 +398,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rne(<8 x double> %a0, <8 x
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -408,7 +408,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtn(<8 x double> %a0, <8 x
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -418,7 +418,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtp(<8 x double> %a0, <8 x
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -428,7 +428,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtz(<8 x double> %a0, <8 x
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -438,7 +438,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_current(<8 x double> %a0,
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
@@ -447,7 +447,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rne(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rd-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
@@ -456,7 +456,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtn(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
@@ -465,7 +465,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtp(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rz-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
@@ -474,7 +474,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtz(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -483,7 +483,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_current(<8 x double> %a0,
define <8 x double>@test_int_x86_avx512_mask_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfmadd132pd %zmm1, %zmm2, %zmm3 {%k1}
@@ -500,7 +500,7 @@ declare <8 x double> @llvm.x86.avx512.mask3.vfmadd.pd.512(<8 x double>, <8 x dou
define <8 x double>@test_int_x86_avx512_mask3_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfmadd231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -517,7 +517,7 @@ declare <8 x double> @llvm.x86.avx512.maskz.vfmadd.pd.512(<8 x double>, <8 x dou
define <8 x double>@test_int_x86_avx512_maskz_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm1, %zmm3
; CHECK-NEXT: vfmadd213pd %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -532,7 +532,7 @@ define <8 x double>@test_int_x86_avx512_maskz_vfmadd_pd_512(<8 x double> %x0, <8
define <16 x float>@test_int_x86_avx512_mask_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfmadd132ps %zmm1, %zmm2, %zmm3 {%k1}
@@ -549,7 +549,7 @@ declare <16 x float> @llvm.x86.avx512.mask3.vfmadd.ps.512(<16 x float>, <16 x fl
define <16 x float>@test_int_x86_avx512_mask3_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfmadd231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -566,7 +566,7 @@ declare <16 x float> @llvm.x86.avx512.maskz.vfmadd.ps.512(<16 x float>, <16 x fl
define <16 x float>@test_int_x86_avx512_maskz_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vfmadd213ps %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -582,7 +582,7 @@ define <16 x float>@test_int_x86_avx512_maskz_vfmadd_ps_512(<16 x float> %x0, <1
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -592,7 +592,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rne(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -602,7 +602,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtn(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -612,7 +612,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtp(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -622,7 +622,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtz(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -632,7 +632,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_current(<8 x double> %a0,
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
@@ -641,7 +641,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rne(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd {rd-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
@@ -650,7 +650,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtn(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
@@ -659,7 +659,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtp(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd {rz-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
@@ -668,7 +668,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtz(<8 x double> %a0, <8
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -677,7 +677,7 @@ define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_current(<8 x double> %a0
define <8 x double>@test_int_x86_avx512_mask_vfnmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm3 {%k1}
@@ -694,7 +694,7 @@ declare <8 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.512(<8 x double>, <8 x do
define <8 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfnmsub231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -709,7 +709,7 @@ define <8 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_512(<8 x double> %x0, <
define <16 x float>@test_int_x86_avx512_mask_vfnmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm3 {%k1}
@@ -726,7 +726,7 @@ declare <16 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.512(<16 x float>, <16 x f
define <16 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfnmsub231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -741,7 +741,7 @@ define <16 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_512(<16 x float> %x0, <
define <8 x double>@test_int_x86_avx512_mask_vfnmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm3 {%k1}
@@ -756,7 +756,7 @@ define <8 x double>@test_int_x86_avx512_mask_vfnmadd_pd_512(<8 x double> %x0, <8
define <16 x float>@test_int_x86_avx512_mask_vfnmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm3 {%k1}
diff --git a/test/CodeGen/X86/avx512-fma.ll b/test/CodeGen/X86/avx512-fma.ll
index 9622b81fd76..29ab76d4d37 100644
--- a/test/CodeGen/X86/avx512-fma.ll
+++ b/test/CodeGen/X86/avx512-fma.ll
@@ -4,7 +4,7 @@
define <16 x float> @test_x86_fmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; ALL-LABEL: test_x86_fmadd_ps_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -14,7 +14,7 @@ define <16 x float> @test_x86_fmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16
define <16 x float> @test_x86_fmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; ALL-LABEL: test_x86_fmsub_ps_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -24,7 +24,7 @@ define <16 x float> @test_x86_fmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16
define <16 x float> @test_x86_fnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; ALL-LABEL: test_x86_fnmadd_ps_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -34,7 +34,7 @@ define <16 x float> @test_x86_fnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <1
define <16 x float> @test_x86_fnmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; ALL-LABEL: test_x86_fnmsub_ps_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -48,7 +48,7 @@ define <16 x float> @test_x86_fnmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <1
define <8 x double> @test_x86_fmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; ALL-LABEL: test_x86_fmadd_pd_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <8 x double> %a0, %a1
@@ -58,7 +58,7 @@ define <8 x double> @test_x86_fmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8
define <8 x double> @test_x86_fmsub_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; ALL-LABEL: test_x86_fmsub_pd_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub213pd %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <8 x double> %a0, %a1
@@ -68,7 +68,7 @@ define <8 x double> @test_x86_fmsub_pd_z(<8 x double> %a0, <8 x double> %a1, <8
define double @test_x86_fmsub_213(double %a0, double %a1, double %a2) {
; ALL-LABEL: test_x86_fmsub_213:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0
; ALL-NEXT: retq
%x = fmul double %a0, %a1
@@ -78,7 +78,7 @@ define double @test_x86_fmsub_213(double %a0, double %a1, double %a2) {
define double @test_x86_fmsub_213_m(double %a0, double %a1, double * %a2_ptr) {
; ALL-LABEL: test_x86_fmsub_213_m:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0
; ALL-NEXT: retq
%a2 = load double , double *%a2_ptr
@@ -89,7 +89,7 @@ define double @test_x86_fmsub_213_m(double %a0, double %a1, double * %a2_ptr) {
define double @test_x86_fmsub_231_m(double %a0, double %a1, double * %a2_ptr) {
; ALL-LABEL: test_x86_fmsub_231_m:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub132sd (%rdi), %xmm1, %xmm0
; ALL-NEXT: retq
%a2 = load double , double *%a2_ptr
@@ -100,7 +100,7 @@ define double @test_x86_fmsub_231_m(double %a0, double %a1, double * %a2_ptr) {
define <16 x float> @test231_br(<16 x float> %a1, <16 x float> %a2) nounwind {
; ALL-LABEL: test231_br:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmadd132ps {{.*}}(%rip){1to16}, %zmm1, %zmm0
; ALL-NEXT: retq
%b1 = fmul <16 x float> %a1, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
@@ -110,7 +110,7 @@ define <16 x float> @test231_br(<16 x float> %a1, <16 x float> %a2) nounwind {
define <16 x float> @test213_br(<16 x float> %a1, <16 x float> %a2) nounwind {
; ALL-LABEL: test213_br:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmadd213ps {{.*}}(%rip){1to16}, %zmm1, %zmm0
; ALL-NEXT: retq
%b1 = fmul <16 x float> %a1, %a2
@@ -121,7 +121,7 @@ define <16 x float> @test213_br(<16 x float> %a1, <16 x float> %a2) nounwind {
;mask (a*c+b , a)
define <16 x float> @test_x86_fmadd132_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; KNL-LABEL: test_x86_fmadd132_ps:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -129,7 +129,7 @@ define <16 x float> @test_x86_fmadd132_ps(<16 x float> %a0, <16 x float> %a1, <1
; KNL-NEXT: retq
;
; SKX-LABEL: test_x86_fmadd132_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
; SKX-NEXT: vfmadd132ps (%rdi), %zmm1, %zmm0 {%k1}
@@ -144,7 +144,7 @@ define <16 x float> @test_x86_fmadd132_ps(<16 x float> %a0, <16 x float> %a1, <1
;mask (a*c+b , b)
define <16 x float> @test_x86_fmadd231_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; KNL-LABEL: test_x86_fmadd231_ps:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -153,7 +153,7 @@ define <16 x float> @test_x86_fmadd231_ps(<16 x float> %a0, <16 x float> %a1, <1
; KNL-NEXT: retq
;
; SKX-LABEL: test_x86_fmadd231_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
; SKX-NEXT: vfmadd231ps (%rdi), %zmm0, %zmm1 {%k1}
@@ -169,7 +169,7 @@ define <16 x float> @test_x86_fmadd231_ps(<16 x float> %a0, <16 x float> %a1, <1
;mask (b*a+c , b)
define <16 x float> @test_x86_fmadd213_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; KNL-LABEL: test_x86_fmadd213_ps:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -178,7 +178,7 @@ define <16 x float> @test_x86_fmadd213_ps(<16 x float> %a0, <16 x float> %a1, <1
; KNL-NEXT: retq
;
; SKX-LABEL: test_x86_fmadd213_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
; SKX-NEXT: vfmadd213ps (%rdi), %zmm0, %zmm1 {%k1}
diff --git a/test/CodeGen/X86/avx512-fsel.ll b/test/CodeGen/X86/avx512-fsel.ll
index 9936ec75a0c..0da690669c3 100644
--- a/test/CodeGen/X86/avx512-fsel.ll
+++ b/test/CodeGen/X86/avx512-fsel.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-apple-macosx10.11.0"
define i32 @test(float %a, float %b) {
; CHECK-LABEL: test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vucomiss %xmm1, %xmm0
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 7406dc514d3..9502ec95d09 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -13,7 +13,7 @@ declare void @llvm.x86.avx512.scatter.qpd.512 (i8*, i8, <8 x i64>, <8 x double>,
define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -29,7 +29,7 @@ define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8*
define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k2}
@@ -45,7 +45,7 @@ define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %b
define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherqps (%rsi,%zmm0,4), %ymm1 {%k2}
@@ -61,7 +61,7 @@ define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %ba
define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -89,7 +89,7 @@ declare void @llvm.x86.avx512.scatter.qpq.512 (i8*, i8, <8 x i64>, <8 x i64>, i3
define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdd (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -105,7 +105,7 @@ define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %ba
define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqd (%rsi,%zmm0,4), %ymm1 {%k2}
@@ -121,7 +121,7 @@ define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base,
define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqq (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -137,7 +137,7 @@ define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base,
define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdq (%rsi,%ymm0,4), %zmm1 {%k2}
@@ -153,7 +153,7 @@ define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base,
define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
; CHECK-LABEL: gather_mask_dpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, (%rdx)
@@ -166,7 +166,7 @@ define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %m
define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
; CHECK-LABEL: gather_mask_qpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, (%rdx)
@@ -179,7 +179,7 @@ define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %m
define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base) {
; CHECK-LABEL: gather_mask_dps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
@@ -190,7 +190,7 @@ define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %s
define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base) {
; CHECK-LABEL: gather_mask_qps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherqps (%rsi,%zmm0,4), %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
@@ -201,7 +201,7 @@ define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src,
define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_dpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vscatterdpd %zmm1, (%rcx,%ymm0,4) {%k1}
@@ -214,7 +214,7 @@ define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8
define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_qpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vscatterqpd %zmm1, (%rcx,%zmm0,4) {%k1}
@@ -227,7 +227,7 @@ define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8
define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_dps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vscatterdps %zmm1, (%rcx,%zmm0,4) {%k1}
@@ -240,7 +240,7 @@ define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i1
define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_qps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vscatterqps %ymm1, (%rcx,%zmm0,4) {%k1}
@@ -253,7 +253,7 @@ define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %
define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_qps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
@@ -272,7 +272,7 @@ declare void @llvm.x86.avx512.gatherpf.qps.512(i8, <8 x i64>, i8* , i32, i32);
declare void @llvm.x86.avx512.scatterpf.qps.512(i8, <8 x i64>, i8* , i32, i32);
define void @prefetch(<8 x i64> %ind, i8* %base) {
; CHECK-LABEL: prefetch:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vgatherpf0qps (%rdi,%zmm0,4) {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k1
@@ -296,7 +296,7 @@ declare <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double>, i8*, <2 x i64
define <2 x double>@test_int_x86_avx512_gather3div2_df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqpd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -314,7 +314,7 @@ declare <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64>, i8*, <2 x i64>, i8,
define <2 x i64>@test_int_x86_avx512_gather3div2_di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherqq (%rdi,%xmm1,8), %xmm0 {%k1}
; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
@@ -329,7 +329,7 @@ declare <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double>, i8*, <4 x i64
define <4 x double>@test_int_x86_avx512_gather3div4_df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqpd (%rdi,%ymm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -347,7 +347,7 @@ declare <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64>, i8*, <4 x i64>, i8,
define <4 x i64>@test_int_x86_avx512_gather3div4_di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherqq (%rdi,%ymm1,8), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -365,7 +365,7 @@ declare <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float>, i8*, <2 x i64>,
define <4 x float>@test_int_x86_avx512_gather3div4_sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqps (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -383,7 +383,7 @@ declare <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32>, i8*, <2 x i64>, i8,
define <4 x i32>@test_int_x86_avx512_gather3div4_si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -401,7 +401,7 @@ declare <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float>, i8*, <4 x i64>,
define <4 x float>@test_int_x86_avx512_gather3div8_sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqps (%rdi,%ymm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -420,7 +420,7 @@ declare <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32>, i8*, <4 x i64>, i8,
define <4 x i32>@test_int_x86_avx512_gather3div8_si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm2
; CHECK-NEXT: kmovq %k1, %k2
@@ -439,7 +439,7 @@ declare <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double>, i8*, <4 x i32
define <2 x double>@test_int_x86_avx512_gather3siv2_df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -457,7 +457,7 @@ declare <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64>, i8*, <4 x i32>, i8,
define <2 x i64>@test_int_x86_avx512_gather3siv2_di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
@@ -472,7 +472,7 @@ declare <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double>, i8*, <4 x i32
define <4 x double>@test_int_x86_avx512_gather3siv4_df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -490,7 +490,7 @@ declare <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64>, i8*, <4 x i32>, i8,
define <4 x i64>@test_int_x86_avx512_gather3siv4_di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherdq (%rdi,%xmm1,8), %ymm0 {%k1}
; CHECK-NEXT: vpaddq %ymm0, %ymm0, %ymm0
@@ -505,7 +505,7 @@ declare <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float>, i8*, <4 x i32>,
define <4 x float>@test_int_x86_avx512_gather3siv4_sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdps (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -523,7 +523,7 @@ declare <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32>, i8*, <4 x i32>, i8,
define <4 x i32>@test_int_x86_avx512_gather3siv4_si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -541,7 +541,7 @@ declare <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float>, i8*, <8 x i32>,
define <8 x float>@test_int_x86_avx512_gather3siv8_sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdps (%rdi,%ymm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -559,7 +559,7 @@ declare <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32>, i8*, <8 x i32>, i8,
define <8 x i32>@test_int_x86_avx512_gather3siv8_si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm2
; CHECK-NEXT: kmovq %k1, %k2
@@ -577,7 +577,7 @@ declare void @llvm.x86.avx512.scatterdiv2.df(i8*, i8, <2 x i64>, <2 x double>, i
define void@test_int_x86_avx512_scatterdiv2_df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vscatterqpd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -592,7 +592,7 @@ declare void @llvm.x86.avx512.scatterdiv2.di(i8*, i8, <2 x i64>, <2 x i64>, i32)
define void@test_int_x86_avx512_scatterdiv2_di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqq %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -607,7 +607,7 @@ declare void @llvm.x86.avx512.scatterdiv4.df(i8*, i8, <4 x i64>, <4 x double>, i
define void@test_int_x86_avx512_scatterdiv4_df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqpd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -623,7 +623,7 @@ declare void @llvm.x86.avx512.scatterdiv4.di(i8*, i8, <4 x i64>, <4 x i64>, i32)
define void@test_int_x86_avx512_scatterdiv4_di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqq %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -639,7 +639,7 @@ declare void @llvm.x86.avx512.scatterdiv4.sf(i8*, i8, <2 x i64>, <4 x float>, i3
define void@test_int_x86_avx512_scatterdiv4_sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -654,7 +654,7 @@ declare void @llvm.x86.avx512.scatterdiv4.si(i8*, i8, <2 x i64>, <4 x i32>, i32)
define void@test_int_x86_avx512_scatterdiv4_si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -669,7 +669,7 @@ declare void @llvm.x86.avx512.scatterdiv8.sf(i8*, i8, <4 x i64>, <4 x float>, i3
define void@test_int_x86_avx512_scatterdiv8_sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -685,7 +685,7 @@ declare void @llvm.x86.avx512.scatterdiv8.si(i8*, i8, <4 x i64>, <4 x i32>, i32)
define void@test_int_x86_avx512_scatterdiv8_si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -701,7 +701,7 @@ declare void @llvm.x86.avx512.scattersiv2.df(i8*, i8, <4 x i32>, <2 x double>, i
define void@test_int_x86_avx512_scattersiv2_df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vscatterdpd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -716,7 +716,7 @@ declare void @llvm.x86.avx512.scattersiv2.di(i8*, i8, <4 x i32>, <2 x i64>, i32)
define void@test_int_x86_avx512_scattersiv2_di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterdq %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -731,7 +731,7 @@ declare void @llvm.x86.avx512.scattersiv4.df(i8*, i8, <4 x i32>, <4 x double>, i
define void@test_int_x86_avx512_scattersiv4_df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdpd %ymm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -747,7 +747,7 @@ declare void @llvm.x86.avx512.scattersiv4.di(i8*, i8, <4 x i32>, <4 x i64>, i32)
define void@test_int_x86_avx512_scattersiv4_di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterdq %ymm1, (%rdi,%xmm0,2) {%k2}
@@ -763,7 +763,7 @@ declare void @llvm.x86.avx512.scattersiv4.sf(i8*, i8, <4 x i32>, <4 x float>, i3
define void@test_int_x86_avx512_scattersiv4_sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdps %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -778,7 +778,7 @@ declare void @llvm.x86.avx512.scattersiv4.si(i8*, i8, <4 x i32>, <4 x i32>, i32)
define void@test_int_x86_avx512_scattersiv4_si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -793,7 +793,7 @@ declare void @llvm.x86.avx512.scattersiv8.sf(i8*, i8, <8 x i32>, <8 x float>, i3
define void@test_int_x86_avx512_scattersiv8_sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdps %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -809,7 +809,7 @@ declare void @llvm.x86.avx512.scattersiv8.si(i8*, i8, <8 x i32>, <8 x i32>, i32)
define void@test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -823,7 +823,7 @@ define void@test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <
define void @scatter_mask_test(i8* %x0, <8 x i32> %x2, <8 x i32> %x3) {
; CHECK-LABEL: scatter_mask_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k1
@@ -845,7 +845,7 @@ define void @scatter_mask_test(i8* %x0, <8 x i32> %x2, <8 x i32> %x3) {
define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %base) {
; CHECK-LABEL: gather_mask_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm2 {%k1}
diff --git a/test/CodeGen/X86/avx512-gfni-intrinsics.ll b/test/CodeGen/X86/avx512-gfni-intrinsics.ll
index b975b64e0b4..a1a6aaf53b4 100644
--- a/test/CodeGen/X86/avx512-gfni-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-gfni-intrinsics.ll
@@ -4,7 +4,7 @@
declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: test_vgf2p8affineinvqb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineinvqb $3, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xf9,0xcf,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineinvqb $3, %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0x89,0xcf,0xe1,0x03]
@@ -24,7 +24,7 @@ define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2, <
declare <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i8)
define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: test_vgf2p8affineinvqb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineinvqb $3, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xfd,0xcf,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineinvqb $3, %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0xcf,0xe1,0x03]
@@ -44,7 +44,7 @@ define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2, <
declare <64 x i8> @llvm.x86.vgf2p8affineinvqb.512(<64 x i8>, <64 x i8>, i8)
define <64 x i8> @test_vgf2p8affineinvqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: test_vgf2p8affineinvqb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineinvqb $3, %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf3,0xfd,0x48,0xcf,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineinvqb $3, %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xc9,0xcf,0xe1,0x03]
@@ -64,7 +64,7 @@ define <64 x i8> @test_vgf2p8affineinvqb_512(<64 x i8> %src1, <64 x i8> %src2, <
declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
define <16 x i8> @test_vgf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: test_vgf2p8affineqb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineqb $3, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xf9,0xce,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineqb $3, %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0x89,0xce,0xe1,0x03]
@@ -84,7 +84,7 @@ define <16 x i8> @test_vgf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2, <16
declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: test_vgf2p8affineqb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineqb $3, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xfd,0xce,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineqb $3, %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0xce,0xe1,0x03]
@@ -104,7 +104,7 @@ define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2, <32
declare <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8>, <64 x i8>, i8)
define <64 x i8> @test_vgf2p8affineqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: test_vgf2p8affineqb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineqb $3, %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf3,0xfd,0x48,0xce,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineqb $3, %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xc9,0xce,0xe1,0x03]
@@ -124,7 +124,7 @@ define <64 x i8> @test_vgf2p8affineqb_512(<64 x i8> %src1, <64 x i8> %src2, <64
declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: test_vgf2p8mulb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8mulb %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xcf,0xd9]
; CHECK-NEXT: vgf2p8mulb %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xcf,0xe1]
@@ -144,7 +144,7 @@ define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8
declare <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8>, <32 x i8>)
define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: test_vgf2p8mulb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8mulb %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xcf,0xd9]
; CHECK-NEXT: vgf2p8mulb %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xcf,0xe1]
@@ -164,7 +164,7 @@ define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8
declare <64 x i8> @llvm.x86.vgf2p8mulb.512(<64 x i8>, <64 x i8>)
define <64 x i8> @test_vgf2p8mulb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: test_vgf2p8mulb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8mulb %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0xcf,0xd9]
; CHECK-NEXT: vgf2p8mulb %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0xcf,0xe1]
diff --git a/test/CodeGen/X86/avx512-hadd-hsub.ll b/test/CodeGen/X86/avx512-hadd-hsub.ll
index 3b76050fa21..255ac8a81f3 100644
--- a/test/CodeGen/X86/avx512-hadd-hsub.ll
+++ b/test/CodeGen/X86/avx512-hadd-hsub.ll
@@ -4,7 +4,7 @@
define i32 @hadd_16(<16 x i32> %x225) {
; KNL-LABEL: hadd_16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; KNL-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -13,7 +13,7 @@ define i32 @hadd_16(<16 x i32> %x225) {
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -31,7 +31,7 @@ define i32 @hadd_16(<16 x i32> %x225) {
define i32 @hsub_16(<16 x i32> %x225) {
; KNL-LABEL: hsub_16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; KNL-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -40,7 +40,7 @@ define i32 @hsub_16(<16 x i32> %x225) {
; KNL-NEXT: retq
;
; SKX-LABEL: hsub_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -58,7 +58,7 @@ define i32 @hsub_16(<16 x i32> %x225) {
define float @fhadd_16(<16 x float> %x225) {
; KNL-LABEL: fhadd_16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -67,7 +67,7 @@ define float @fhadd_16(<16 x float> %x225) {
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -85,7 +85,7 @@ define float @fhadd_16(<16 x float> %x225) {
define float @fhsub_16(<16 x float> %x225) {
; KNL-LABEL: fhsub_16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -94,7 +94,7 @@ define float @fhsub_16(<16 x float> %x225) {
; KNL-NEXT: retq
;
; SKX-LABEL: fhsub_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -112,14 +112,14 @@ define float @fhsub_16(<16 x float> %x225) {
define <16 x i32> @hadd_16_3(<16 x i32> %x225, <16 x i32> %x227) {
; KNL-LABEL: hadd_16_3:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; KNL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16_3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; SKX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0
@@ -135,14 +135,14 @@ define <16 x i32> @hadd_16_3(<16 x i32> %x225, <16 x i32> %x227) {
define <16 x float> @fhadd_16_3(<16 x float> %x225, <16 x float> %x227) {
; KNL-LABEL: fhadd_16_3:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; KNL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; KNL-NEXT: vaddps %zmm0, %zmm2, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16_3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; SKX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; SKX-NEXT: vaddps %zmm0, %zmm2, %zmm0
@@ -157,14 +157,14 @@ define <16 x float> @fhadd_16_3(<16 x float> %x225, <16 x float> %x227) {
define <8 x double> @fhadd_16_4(<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fhadd_16_4:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; KNL-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16_4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -177,7 +177,7 @@ define <8 x double> @fhadd_16_4(<8 x double> %x225, <8 x double> %x227) {
define <4 x double> @fadd_noundef_low(<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fadd_noundef_low:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -185,7 +185,7 @@ define <4 x double> @fadd_noundef_low(<8 x double> %x225, <8 x double> %x227) {
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_low:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -200,7 +200,7 @@ define <4 x double> @fadd_noundef_low(<8 x double> %x225, <8 x double> %x227) {
define <4 x double> @fadd_noundef_high(<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fadd_noundef_high:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -208,7 +208,7 @@ define <4 x double> @fadd_noundef_high(<8 x double> %x225, <8 x double> %x227) {
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_high:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -224,7 +224,7 @@ define <4 x double> @fadd_noundef_high(<8 x double> %x225, <8 x double> %x227) {
define <8 x i32> @hadd_16_3_sv(<16 x i32> %x225, <16 x i32> %x227) {
; KNL-LABEL: hadd_16_3_sv:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; KNL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0
@@ -232,7 +232,7 @@ define <8 x i32> @hadd_16_3_sv(<16 x i32> %x225, <16 x i32> %x227) {
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16_3_sv:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; SKX-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0
@@ -251,7 +251,7 @@ define <8 x i32> @hadd_16_3_sv(<16 x i32> %x225, <16 x i32> %x227) {
define double @fadd_noundef_eel(<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fadd_noundef_eel:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -259,7 +259,7 @@ define double @fadd_noundef_eel(<8 x double> %x225, <8 x double> %x227) {
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_eel:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -277,7 +277,7 @@ define double @fadd_noundef_eel(<8 x double> %x225, <8 x double> %x227) {
define double @fsub_noundef_ee (<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fsub_noundef_ee:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vsubpd %zmm0, %zmm2, %zmm0
@@ -286,7 +286,7 @@ define double @fsub_noundef_ee (<8 x double> %x225, <8 x double> %x227) {
; KNL-NEXT: retq
;
; SKX-LABEL: fsub_noundef_ee:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vsubpd %zmm0, %zmm2, %zmm0
diff --git a/test/CodeGen/X86/avx512-i1test.ll b/test/CodeGen/X86/avx512-i1test.ll
index 321f26674e1..df81b83d7c2 100644
--- a/test/CodeGen/X86/avx512-i1test.ll
+++ b/test/CodeGen/X86/avx512-i1test.ll
@@ -7,11 +7,11 @@ target triple = "x86_64-unknown-linux-gnu"
define void @func() {
; CHECK-LABEL: func:
-; CHECK: # BB#0: # %L_10
+; CHECK: # %bb.0: # %L_10
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je .LBB0_1
-; CHECK-NEXT: # BB#4: # %L_30
+; CHECK-NEXT: # %bb.4: # %L_30
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB0_1: # %bb56
; CHECK-NEXT: xorl %eax, %eax
@@ -65,10 +65,10 @@ L_30: ; preds = %bb51, %L_10
; PR 28175
define i64 @func2(i1 zeroext %i, i32 %j) {
; CHECK-LABEL: func2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: # %bb.2: # %if.then
; CHECK-NEXT: jmp bar # TAILCALL
; CHECK-NEXT: .LBB1_1: # %return
; CHECK-NEXT: movzbl %dil, %eax
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 091bb39d178..9d12697acf1 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -5,7 +5,7 @@
define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm0[0],mem[0],xmm0[2,3]
; CHECK-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm2
; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm0
@@ -20,7 +20,7 @@ define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovhpd {{.*#+}} xmm2 = xmm0[0],mem[0]
; CHECK-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm2
; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm0
@@ -35,7 +35,7 @@ define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
define <16 x float> @test3(<16 x float> %x) nounwind {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[2,3]
; CHECK-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
@@ -47,7 +47,7 @@ define <16 x float> @test3(<16 x float> %x) nounwind {
define <8 x i64> @test4(<8 x i64> %x) nounwind {
; CHECK-LABEL: test4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; CHECK-NEXT: vmovq %xmm1, %rax
; CHECK-NEXT: vpinsrq $1, %rax, %xmm0, %xmm1
@@ -60,7 +60,7 @@ define <8 x i64> @test4(<8 x i64> %x) nounwind {
define i32 @test5(<4 x float> %x) nounwind {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractps $3, %xmm0, %eax
; CHECK-NEXT: retq
%ef = extractelement <4 x float> %x, i32 3
@@ -70,7 +70,7 @@ define i32 @test5(<4 x float> %x) nounwind {
define void @test6(<4 x float> %x, float* %out) nounwind {
; CHECK-LABEL: test6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractps $3, %xmm0, (%rdi)
; CHECK-NEXT: retq
%ef = extractelement <4 x float> %x, i32 3
@@ -80,7 +80,7 @@ define void @test6(<4 x float> %x, float* %out) nounwind {
define float @test7(<16 x float> %x, i32 %ind) nounwind {
; CHECK-LABEL: test7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
@@ -99,7 +99,7 @@ define float @test7(<16 x float> %x, i32 %ind) nounwind {
define double @test8(<8 x double> %x, i32 %ind) nounwind {
; CHECK-LABEL: test8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
@@ -118,7 +118,7 @@ define double @test8(<8 x double> %x, i32 %ind) nounwind {
define float @test9(<8 x float> %x, i32 %ind) nounwind {
; CHECK-LABEL: test9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-32, %rsp
@@ -137,7 +137,7 @@ define float @test9(<8 x float> %x, i32 %ind) nounwind {
define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
; CHECK-LABEL: test10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
@@ -156,14 +156,14 @@ define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; KNL-LABEL: test11:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb $1, %al
; KNL-NEXT: je LBB10_2
-; KNL-NEXT: ## BB#1: ## %A
+; KNL-NEXT: ## %bb.1: ## %A
; KNL-NEXT: vmovdqa64 %zmm1, %zmm0
; KNL-NEXT: retq
; KNL-NEXT: LBB10_2: ## %B
@@ -171,14 +171,14 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test11:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftlw $11, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: testb $1, %al
; SKX-NEXT: je LBB10_2
-; SKX-NEXT: ## BB#1: ## %A
+; SKX-NEXT: ## %bb.1: ## %A
; SKX-NEXT: vmovdqa64 %zmm1, %zmm0
; SKX-NEXT: retq
; SKX-NEXT: LBB10_2: ## %B
@@ -196,7 +196,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; KNL-LABEL: test12:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpgtq %zmm0, %zmm2, %k0
; KNL-NEXT: kshiftlw $15, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
@@ -208,7 +208,7 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; KNL-NEXT: retq
;
; SKX-LABEL: test12:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpgtq %zmm0, %zmm2, %k0
; SKX-NEXT: kshiftlb $7, %k0, %k0
; SKX-NEXT: kshiftrb $7, %k0, %k0
@@ -226,7 +226,7 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
define i16 @test13(i32 %a, i32 %b) {
; KNL-LABEL: test13:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: movw $-4, %cx
@@ -241,7 +241,7 @@ define i16 @test13(i32 %a, i32 %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
; SKX-NEXT: movw $-4, %cx
@@ -262,7 +262,7 @@ define i16 @test13(i32 %a, i32 %b) {
define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; KNL-LABEL: test14:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpgtq %zmm0, %zmm1, %k0
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
@@ -274,7 +274,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; KNL-NEXT: retq
;
; SKX-LABEL: test14:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpgtq %zmm0, %zmm1, %k0
; SKX-NEXT: kshiftlb $3, %k0, %k0
; SKX-NEXT: kshiftrb $7, %k0, %k0
@@ -292,7 +292,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
define i16 @test15(i1 *%addr) {
; CHECK-LABEL: test15:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movb (%rdi), %al
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: testb %al, %al
@@ -307,7 +307,7 @@ define i16 @test15(i1 *%addr) {
define i16 @test16(i1 *%addr, i16 %a) {
; KNL-LABEL: test16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movb (%rdi), %al
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kmovw %eax, %k2
@@ -323,7 +323,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: test16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: vpmovm2d %k0, %zmm0
@@ -344,7 +344,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
define i8 @test17(i1 *%addr, i8 %a) {
; KNL-LABEL: test17:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movb (%rdi), %al
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kmovw %eax, %k2
@@ -360,7 +360,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: test17:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: vpmovm2q %k0, %zmm0
@@ -381,7 +381,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
define i64 @extract_v8i64(<8 x i64> %x, i64* %dst) {
; CHECK-LABEL: extract_v8i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrq $1, %xmm0, %rax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrq $1, %xmm0, (%rdi)
@@ -395,7 +395,7 @@ define i64 @extract_v8i64(<8 x i64> %x, i64* %dst) {
define i64 @extract_v4i64(<4 x i64> %x, i64* %dst) {
; CHECK-LABEL: extract_v4i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrq $1, %xmm0, %rax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrq $1, %xmm0, (%rdi)
@@ -409,7 +409,7 @@ define i64 @extract_v4i64(<4 x i64> %x, i64* %dst) {
define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) {
; CHECK-LABEL: extract_v2i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: vpextrq $1, %xmm0, (%rdi)
; CHECK-NEXT: retq
@@ -421,7 +421,7 @@ define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) {
define i32 @extract_v16i32(<16 x i32> %x, i32* %dst) {
; CHECK-LABEL: extract_v16i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractps $1, %xmm0, %eax
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vextractps $1, %xmm0, (%rdi)
@@ -435,7 +435,7 @@ define i32 @extract_v16i32(<16 x i32> %x, i32* %dst) {
define i32 @extract_v8i32(<8 x i32> %x, i32* %dst) {
; CHECK-LABEL: extract_v8i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractps $1, %xmm0, %eax
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vextractps $1, %xmm0, (%rdi)
@@ -449,7 +449,7 @@ define i32 @extract_v8i32(<8 x i32> %x, i32* %dst) {
define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) {
; CHECK-LABEL: extract_v4i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractps $1, %xmm0, %eax
; CHECK-NEXT: vextractps $3, %xmm0, (%rdi)
; CHECK-NEXT: retq
@@ -461,7 +461,7 @@ define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) {
define i16 @extract_v32i16(<32 x i16> %x, i16* %dst) {
; CHECK-LABEL: extract_v32i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
@@ -476,7 +476,7 @@ define i16 @extract_v32i16(<32 x i16> %x, i16* %dst) {
define i16 @extract_v16i16(<16 x i16> %x, i16* %dst) {
; CHECK-LABEL: extract_v16i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
@@ -491,7 +491,7 @@ define i16 @extract_v16i16(<16 x i16> %x, i16* %dst) {
define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) {
; CHECK-LABEL: extract_v8i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vpextrw $3, %xmm0, (%rdi)
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -504,7 +504,7 @@ define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) {
define i8 @extract_v64i8(<64 x i8> %x, i8* %dst) {
; CHECK-LABEL: extract_v64i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
@@ -519,7 +519,7 @@ define i8 @extract_v64i8(<64 x i8> %x, i8* %dst) {
define i8 @extract_v32i8(<32 x i8> %x, i8* %dst) {
; CHECK-LABEL: extract_v32i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
@@ -534,7 +534,7 @@ define i8 @extract_v32i8(<32 x i8> %x, i8* %dst) {
define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) {
; CHECK-LABEL: extract_v16i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vpextrb $3, %xmm0, (%rdi)
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -547,7 +547,7 @@ define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) {
define <8 x i64> @insert_v8i64(<8 x i64> %x, i64 %y , i64* %ptr) {
; CHECK-LABEL: insert_v8i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1
; CHECK-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -562,7 +562,7 @@ define <8 x i64> @insert_v8i64(<8 x i64> %x, i64 %y , i64* %ptr) {
define <4 x i64> @insert_v4i64(<4 x i64> %x, i64 %y , i64* %ptr) {
; CHECK-LABEL: insert_v4i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -577,7 +577,7 @@ define <4 x i64> @insert_v4i64(<4 x i64> %x, i64 %y , i64* %ptr) {
define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y , i64* %ptr) {
; CHECK-LABEL: insert_v2i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm0
; CHECK-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -589,7 +589,7 @@ define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y , i64* %ptr) {
define <16 x i32> @insert_v16i32(<16 x i32> %x, i32 %y, i32* %ptr) {
; CHECK-LABEL: insert_v16i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
; CHECK-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -604,7 +604,7 @@ define <16 x i32> @insert_v16i32(<16 x i32> %x, i32 %y, i32* %ptr) {
define <8 x i32> @insert_v8i32(<8 x i32> %x, i32 %y, i32* %ptr) {
; CHECK-LABEL: insert_v8i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -619,7 +619,7 @@ define <8 x i32> @insert_v8i32(<8 x i32> %x, i32 %y, i32* %ptr) {
define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) {
; CHECK-LABEL: insert_v4i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -631,7 +631,7 @@ define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) {
define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
; KNL-LABEL: insert_v32i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -640,7 +640,7 @@ define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -655,7 +655,7 @@ define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, i16* %ptr) {
; CHECK-LABEL: insert_v16i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -670,7 +670,7 @@ define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, i16* %ptr) {
define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) {
; CHECK-LABEL: insert_v8i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -682,7 +682,7 @@ define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) {
define <64 x i8> @insert_v64i8(<64 x i8> %x, i8 %y, i8* %ptr) {
; KNL-LABEL: insert_v64i8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; KNL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -691,7 +691,7 @@ define <64 x i8> @insert_v64i8(<64 x i8> %x, i8 %y, i8* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1
; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
; SKX-NEXT: vextracti32x4 $3, %zmm0, %xmm0
@@ -706,7 +706,7 @@ define <64 x i8> @insert_v64i8(<64 x i8> %x, i8 %y, i8* %ptr) {
define <32 x i8> @insert_v32i8(<32 x i8> %x, i8 %y, i8* %ptr) {
; CHECK-LABEL: insert_v32i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -721,7 +721,7 @@ define <32 x i8> @insert_v32i8(<32 x i8> %x, i8 %y, i8* %ptr) {
define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) {
; CHECK-LABEL: insert_v16i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -733,7 +733,7 @@ define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) {
define <8 x i64> @test_insert_128_v8i64(<8 x i64> %x, i64 %y) {
; CHECK-LABEL: test_insert_128_v8i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm1
; CHECK-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -743,7 +743,7 @@ define <8 x i64> @test_insert_128_v8i64(<8 x i64> %x, i64 %y) {
define <16 x i32> @test_insert_128_v16i32(<16 x i32> %x, i32 %y) {
; CHECK-LABEL: test_insert_128_v16i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpinsrd $1, %edi, %xmm0, %xmm1
; CHECK-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -753,7 +753,7 @@ define <16 x i32> @test_insert_128_v16i32(<16 x i32> %x, i32 %y) {
define <8 x double> @test_insert_128_v8f64(<8 x double> %x, double %y) {
; CHECK-LABEL: test_insert_128_v8f64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; CHECK-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -763,7 +763,7 @@ define <8 x double> @test_insert_128_v8f64(<8 x double> %x, double %y) {
define <16 x float> @test_insert_128_v16f32(<16 x float> %x, float %y) {
; CHECK-LABEL: test_insert_128_v16f32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[2,3]
; CHECK-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -773,7 +773,7 @@ define <16 x float> @test_insert_128_v16f32(<16 x float> %x, float %y) {
define <16 x i16> @test_insert_128_v16i16(<16 x i16> %x, i16 %y) {
; CHECK-LABEL: test_insert_128_v16i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpinsrw $2, %edi, %xmm1, %xmm1
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -784,7 +784,7 @@ define <16 x i16> @test_insert_128_v16i16(<16 x i16> %x, i16 %y) {
define <32 x i8> @test_insert_128_v32i8(<32 x i8> %x, i8 %y) {
; CHECK-LABEL: test_insert_128_v32i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -795,7 +795,7 @@ define <32 x i8> @test_insert_128_v32i8(<32 x i8> %x, i8 %y) {
define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32> %y) {
; KNL-LABEL: test_insertelement_v32i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -956,7 +956,7 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
; KNL-NEXT: retq
;
; SKX-LABEL: test_insertelement_v32i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k0
@@ -980,7 +980,7 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) {
; KNL-LABEL: test_iinsertelement_v4i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
@@ -1018,7 +1018,7 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; KNL-NEXT: retq
;
; SKX-LABEL: test_iinsertelement_v4i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
@@ -1041,7 +1041,7 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y) {
; KNL-LABEL: test_iinsertelement_v2i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
@@ -1063,7 +1063,7 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; KNL-NEXT: retq
;
; SKX-LABEL: test_iinsertelement_v2i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
@@ -1085,7 +1085,7 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-LABEL: test_extractelement_v2i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -1098,7 +1098,7 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v2i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
@@ -1116,7 +1116,7 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; KNL-LABEL: extractelement_v2i1_alt:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -1129,7 +1129,7 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: extractelement_v2i1_alt:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
@@ -1148,7 +1148,7 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
; KNL-LABEL: test_extractelement_v4i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -1158,7 +1158,7 @@ define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v4i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
; SKX-NEXT: kshiftlw $12, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
@@ -1173,7 +1173,7 @@ define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
define zeroext i8 @test_extractelement_v32i1(<32 x i8> %a, <32 x i8> %b) {
; KNL-LABEL: test_extractelement_v32i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm1, %ymm1
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -1184,7 +1184,7 @@ define zeroext i8 @test_extractelement_v32i1(<32 x i8> %a, <32 x i8> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v32i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: kshiftld $29, %k0, %k0
; SKX-NEXT: kshiftrd $31, %k0, %k0
@@ -1200,7 +1200,7 @@ define zeroext i8 @test_extractelement_v32i1(<32 x i8> %a, <32 x i8> %b) {
define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; KNL-LABEL: test_extractelement_v64i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm0, %ymm3, %ymm2
; KNL-NEXT: vpxor %ymm0, %ymm1, %ymm0
@@ -1215,7 +1215,7 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v64i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -1233,7 +1233,7 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
; KNL-LABEL: extractelement_v64i1_alt:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm0, %ymm3, %ymm2
; KNL-NEXT: vpxor %ymm0, %ymm1, %ymm0
@@ -1248,7 +1248,7 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: extractelement_v64i1_alt:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -1267,7 +1267,7 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
@@ -1279,7 +1279,7 @@ define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) {
define i64 @test_extractelement_variable_v4i64(<4 x i64> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1301,7 +1301,7 @@ define i64 @test_extractelement_variable_v4i64(<4 x i64> %t1, i32 %index) {
define i64 @test_extractelement_variable_v8i64(<8 x i64> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1323,7 +1323,7 @@ define i64 @test_extractelement_variable_v8i64(<8 x i64> %t1, i32 %index) {
define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2f64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
@@ -1335,7 +1335,7 @@ define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index)
define double @test_extractelement_variable_v4f64(<4 x double> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4f64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1357,7 +1357,7 @@ define double @test_extractelement_variable_v4f64(<4 x double> %t1, i32 %index)
define double @test_extractelement_variable_v8f64(<8 x double> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8f64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1379,7 +1379,7 @@ define double @test_extractelement_variable_v8f64(<8 x double> %t1, i32 %index)
define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
@@ -1391,7 +1391,7 @@ define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) {
define i32 @test_extractelement_variable_v8i32(<8 x i32> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1413,7 +1413,7 @@ define i32 @test_extractelement_variable_v8i32(<8 x i32> %t1, i32 %index) {
define i32 @test_extractelement_variable_v16i32(<16 x i32> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1435,7 +1435,7 @@ define i32 @test_extractelement_variable_v16i32(<16 x i32> %t1, i32 %index) {
define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4f32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
@@ -1447,7 +1447,7 @@ define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) {
define float @test_extractelement_variable_v8f32(<8 x float> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8f32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1469,7 +1469,7 @@ define float @test_extractelement_variable_v8f32(<8 x float> %t1, i32 %index) {
define float @test_extractelement_variable_v16f32(<16 x float> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16f32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1491,7 +1491,7 @@ define float @test_extractelement_variable_v16f32(<16 x float> %t1, i32 %index)
define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $7, %edi
@@ -1503,7 +1503,7 @@ define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) {
define i16 @test_extractelement_variable_v16i16(<16 x i16> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1525,7 +1525,7 @@ define i16 @test_extractelement_variable_v16i16(<16 x i16> %t1, i32 %index) {
define i16 @test_extractelement_variable_v32i16(<32 x i16> %t1, i32 %index) {
; KNL-LABEL: test_extractelement_variable_v32i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -1544,7 +1544,7 @@ define i16 @test_extractelement_variable_v32i16(<32 x i16> %t1, i32 %index) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_variable_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rbp
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: .cfi_offset %rbp, -16
@@ -1566,7 +1566,7 @@ define i16 @test_extractelement_variable_v32i16(<32 x i16> %t1, i32 %index) {
define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $15, %edi
@@ -1579,7 +1579,7 @@ define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
define i8 @test_extractelement_variable_v32i8(<32 x i8> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v32i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
@@ -1603,7 +1603,7 @@ define i8 @test_extractelement_variable_v32i8(<32 x i8> %t1, i32 %index) {
define i8 @test_extractelement_variable_v64i8(<64 x i8> %t1, i32 %index) {
; KNL-LABEL: test_extractelement_variable_v64i8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -1623,7 +1623,7 @@ define i8 @test_extractelement_variable_v64i8(<64 x i8> %t1, i32 %index) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_variable_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rbp
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: .cfi_offset %rbp, -16
@@ -1647,7 +1647,7 @@ define i8 @test_extractelement_variable_v64i8(<64 x i8> %t1, i32 %index) {
define i8 @test_extractelement_variable_v64i8_indexi8(<64 x i8> %t1, i8 %index) {
; KNL-LABEL: test_extractelement_variable_v64i8_indexi8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -1668,7 +1668,7 @@ define i8 @test_extractelement_variable_v64i8_indexi8(<64 x i8> %t1, i8 %index)
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_variable_v64i8_indexi8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rbp
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: .cfi_offset %rbp, -16
@@ -1694,7 +1694,7 @@ define i8 @test_extractelement_variable_v64i8_indexi8(<64 x i8> %t1, i8 %index)
define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v2i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -1707,7 +1707,7 @@ define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b,
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_varible_v2i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
@@ -1724,7 +1724,7 @@ define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b,
define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v4i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; KNL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -1737,7 +1737,7 @@ define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b,
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_varible_v4i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
@@ -1754,7 +1754,7 @@ define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b,
define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v8i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -1777,7 +1777,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_varible_v8i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rbp
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: .cfi_offset %rbp, -16
@@ -1804,7 +1804,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v16i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -1825,7 +1825,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_varible_v16i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rbp
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: .cfi_offset %rbp, -16
@@ -1852,7 +1852,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v32i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -1876,7 +1876,7 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_varible_v32i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rbp
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: .cfi_offset %rbp, -16
@@ -1903,7 +1903,7 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
define <8 x i64> @insert_double_zero(<2 x i64> %a) nounwind {
; CHECK-LABEL: insert_double_zero:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vinsertf32x4 $2, %xmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-insert-extract_i1.ll b/test/CodeGen/X86/avx512-insert-extract_i1.ll
index ebe99c1ec87..f088626d2f1 100644
--- a/test/CodeGen/X86/avx512-insert-extract_i1.ll
+++ b/test/CodeGen/X86/avx512-insert-extract_i1.ll
@@ -5,7 +5,7 @@
define zeroext i8 @test_extractelement_varible_v64i1(<64 x i8> %a, <64 x i8> %b, i32 %index) {
; SKX-LABEL: test_extractelement_varible_v64i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rbp
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: .cfi_offset %rbp, -16
diff --git a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 46699a60471..80127f66bdf 100644
--- a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -7,12 +7,12 @@
define <16 x float> @test_mm512_shuffle_f32x4(<16 x float> %__A, <16 x float> %__B) {
; X32-LABEL: test_mm512_shuffle_f32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_shuffle_f32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X64-NEXT: retq
entry:
@@ -23,13 +23,13 @@ entry:
define <16 x float> @test_mm512_mask_shuffle_f32x4(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; X32-LABEL: test_mm512_mask_shuffle_f32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_shuffle_f32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
; X64-NEXT: retq
@@ -42,13 +42,13 @@ entry:
define <16 x float> @test_mm512_maskz_shuffle_f32x4(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; X32-LABEL: test_mm512_maskz_shuffle_f32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_shuffle_f32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
; X64-NEXT: retq
@@ -61,12 +61,12 @@ entry:
define <8 x double> @test_mm512_shuffle_f64x2(<8 x double> %__A, <8 x double> %__B) {
; X32-LABEL: test_mm512_shuffle_f64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_shuffle_f64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X64-NEXT: retq
entry:
@@ -76,14 +76,14 @@ entry:
define <8 x double> @test_mm512_mask_shuffle_f64x2(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; X32-LABEL: test_mm512_mask_shuffle_f64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3],zmm2[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_shuffle_f64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3],zmm2[0,1,0,1]
; X64-NEXT: retq
@@ -96,14 +96,14 @@ entry:
define <8 x double> @test_mm512_maskz_shuffle_f64x2(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; X32-LABEL: test_mm512_maskz_shuffle_f64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_shuffle_f64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X64-NEXT: retq
@@ -116,12 +116,12 @@ entry:
define <8 x i64> @test_mm512_shuffle_i32x4(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; X32-LABEL: test_mm512_shuffle_i32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_shuffle_i32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X64-NEXT: retq
entry:
@@ -131,13 +131,13 @@ entry:
define <8 x i64> @test_mm512_mask_shuffle_i32x4(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; X32-LABEL: test_mm512_mask_shuffle_i32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_shuffle_i32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
; X64-NEXT: retq
@@ -153,13 +153,13 @@ entry:
define <8 x i64> @test_mm512_maskz_shuffle_i32x4(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; X32-LABEL: test_mm512_maskz_shuffle_i32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_shuffle_i32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
; X64-NEXT: retq
@@ -174,12 +174,12 @@ entry:
define <8 x i64> @test_mm512_shuffle_i64x2(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; X32-LABEL: test_mm512_shuffle_i64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_shuffle_i64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X64-NEXT: retq
entry:
@@ -189,14 +189,14 @@ entry:
define <8 x i64> @test_mm512_mask_shuffle_i64x2(<8 x i64> %__W, i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; X32-LABEL: test_mm512_mask_shuffle_i64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3],zmm2[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_shuffle_i64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3],zmm2[0,1,0,1]
; X64-NEXT: retq
@@ -209,14 +209,14 @@ entry:
define <8 x i64> @test_mm512_maskz_shuffle_i64x2(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; X32-LABEL: test_mm512_maskz_shuffle_i64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_shuffle_i64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],zmm1[0,1,0,1]
; X64-NEXT: retq
@@ -230,7 +230,7 @@ entry:
define zeroext i16 @test_mm512_testn_epi32_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_testn_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmd %zmm0, %zmm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzwl %ax, %eax
@@ -238,7 +238,7 @@ define zeroext i16 @test_mm512_testn_epi32_mask(<8 x i64> %__A, <8 x i64> %__B)
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_testn_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmd %zmm0, %zmm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzwl %ax, %eax
@@ -254,7 +254,7 @@ entry:
define zeroext i16 @test_mm512_mask_testn_epi32_mask(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_mask_testn_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestnmd %zmm0, %zmm1, %k0 {%k1}
; X32-NEXT: kmovw %k0, %eax
@@ -263,7 +263,7 @@ define zeroext i16 @test_mm512_mask_testn_epi32_mask(i16 zeroext %__U, <8 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_testn_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestnmd %zmm0, %zmm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -282,7 +282,7 @@ entry:
define zeroext i8 @test_mm512_testn_epi64_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_testn_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmq %zmm0, %zmm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
@@ -290,7 +290,7 @@ define zeroext i8 @test_mm512_testn_epi64_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_testn_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmq %zmm0, %zmm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -305,7 +305,7 @@ entry:
define zeroext i8 @test_mm512_mask_testn_epi64_mask(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_mask_testn_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestnmq %zmm0, %zmm1, %k0 {%k1}
@@ -315,7 +315,7 @@ define zeroext i8 @test_mm512_mask_testn_epi64_mask(i8 zeroext %__U, <8 x i64> %
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_testn_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestnmq %zmm0, %zmm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -333,7 +333,7 @@ entry:
define zeroext i16 @test_mm512_mask_test_epi32_mask(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_mask_test_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestmd %zmm0, %zmm1, %k0 {%k1}
; X32-NEXT: kmovw %k0, %eax
@@ -342,7 +342,7 @@ define zeroext i16 @test_mm512_mask_test_epi32_mask(i16 zeroext %__U, <8 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_test_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestmd %zmm0, %zmm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -361,7 +361,7 @@ entry:
define zeroext i8 @test_mm512_mask_test_epi64_mask(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_mask_test_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
@@ -371,7 +371,7 @@ define zeroext i8 @test_mm512_mask_test_epi64_mask(i8 zeroext %__U, <8 x i64> %_
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_test_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -389,14 +389,14 @@ entry:
define <8 x i64> @test_mm512_mask_set1_epi32(<8 x i64> %__O, i16 zeroext %__M, i32 %__A) {
; X32-LABEL: test_mm512_mask_set1_epi32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastd %eax, %zmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_set1_epi32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd %esi, %zmm0 {%k1}
; X64-NEXT: retq
@@ -412,14 +412,14 @@ entry:
define <8 x i64> @test_mm512_maskz_set1_epi32(i16 zeroext %__M, i32 %__A) {
; X32-LABEL: test_mm512_maskz_set1_epi32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastd %eax, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_set1_epi32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd %esi, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -434,7 +434,7 @@ entry:
define <8 x i64> @test_mm512_mask_set1_epi64(<8 x i64> %__O, i8 zeroext %__M, i64 %__A) {
; X32-LABEL: test_mm512_mask_set1_epi64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -448,7 +448,7 @@ define <8 x i64> @test_mm512_mask_set1_epi64(<8 x i64> %__O, i8 zeroext %__M, i6
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_set1_epi64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastq %rsi, %zmm0 {%k1}
; X64-NEXT: retq
@@ -462,7 +462,7 @@ entry:
define <8 x i64> @test_mm512_maskz_set1_epi64(i8 zeroext %__M, i64 %__A) {
; X32-LABEL: test_mm512_maskz_set1_epi64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -476,7 +476,7 @@ define <8 x i64> @test_mm512_maskz_set1_epi64(i8 zeroext %__M, i64 %__A) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_set1_epi64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastq %rsi, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -491,12 +491,12 @@ entry:
define <8 x i64> @test_mm512_broadcastd_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm512_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %zmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -507,13 +507,13 @@ define <8 x i64> @test_mm512_broadcastd_epi32(<2 x i64> %a0) {
define <8 x i64> @test_mm512_mask_broadcastd_epi32(<8 x i64> %a0, i16 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm512_mask_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastd %xmm1, %zmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd %xmm1, %zmm0 {%k1}
; X64-NEXT: retq
@@ -528,13 +528,13 @@ define <8 x i64> @test_mm512_mask_broadcastd_epi32(<8 x i64> %a0, i16 %a1, <2 x
define <8 x i64> @test_mm512_maskz_broadcastd_epi32(i16 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm512_maskz_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastd %xmm0, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd %xmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -548,12 +548,12 @@ define <8 x i64> @test_mm512_maskz_broadcastd_epi32(i16 %a0, <2 x i64> %a1) {
define <8 x i64> @test_mm512_broadcastq_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm512_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %zmm0
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <8 x i32> zeroinitializer
@@ -562,14 +562,14 @@ define <8 x i64> @test_mm512_broadcastq_epi64(<2 x i64> %a0) {
define <8 x i64> @test_mm512_mask_broadcastq_epi64(<8 x i64> %a0, i8 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm512_mask_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastq %xmm1, %zmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastq %xmm1, %zmm0 {%k1}
; X64-NEXT: retq
@@ -581,14 +581,14 @@ define <8 x i64> @test_mm512_mask_broadcastq_epi64(<8 x i64> %a0, i8 %a1, <2 x i
define <8 x i64> @test_mm512_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm512_maskz_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastq %xmm0, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastq %xmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -600,12 +600,12 @@ define <8 x i64> @test_mm512_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a0) {
; X32-LABEL: test_mm512_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %zmm0
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> undef, <8 x i32> zeroinitializer
@@ -614,14 +614,14 @@ define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a0) {
define <8 x double> @test_mm512_mask_broadcastsd_pd(<8 x double> %a0, i8 %a1, <2 x double> %a2) {
; X32-LABEL: test_mm512_mask_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vbroadcastsd %xmm1, %zmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vbroadcastsd %xmm1, %zmm0 {%k1}
; X64-NEXT: retq
@@ -633,14 +633,14 @@ define <8 x double> @test_mm512_mask_broadcastsd_pd(<8 x double> %a0, i8 %a1, <2
define <8 x double> @test_mm512_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-LABEL: test_mm512_maskz_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -652,12 +652,12 @@ define <8 x double> @test_mm512_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
define <16 x float> @test_mm512_broadcastss_ps(<4 x float> %a0) {
; X32-LABEL: test_mm512_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %zmm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> undef, <16 x i32> zeroinitializer
@@ -666,13 +666,13 @@ define <16 x float> @test_mm512_broadcastss_ps(<4 x float> %a0) {
define <16 x float> @test_mm512_mask_broadcastss_ps(<16 x float> %a0, i16 %a1, <4 x float> %a2) {
; X32-LABEL: test_mm512_mask_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vbroadcastss %xmm1, %zmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vbroadcastss %xmm1, %zmm0 {%k1}
; X64-NEXT: retq
@@ -684,13 +684,13 @@ define <16 x float> @test_mm512_mask_broadcastss_ps(<16 x float> %a0, i16 %a1, <
define <16 x float> @test_mm512_maskz_broadcastss_ps(i16 %a0, <4 x float> %a1) {
; X32-LABEL: test_mm512_maskz_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -702,12 +702,12 @@ define <16 x float> @test_mm512_maskz_broadcastss_ps(i16 %a0, <4 x float> %a1) {
define <8 x double> @test_mm512_movddup_pd(<8 x double> %a0) {
; X32-LABEL: test_mm512_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
%res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -716,14 +716,14 @@ define <8 x double> @test_mm512_movddup_pd(<8 x double> %a0) {
define <8 x double> @test_mm512_mask_movddup_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
; X32-LABEL: test_mm512_mask_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
@@ -735,14 +735,14 @@ define <8 x double> @test_mm512_mask_movddup_pd(<8 x double> %a0, i8 %a1, <8 x d
define <8 x double> @test_mm512_maskz_movddup_pd(i8 %a0, <8 x double> %a1) {
; X32-LABEL: test_mm512_maskz_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
@@ -754,12 +754,12 @@ define <8 x double> @test_mm512_maskz_movddup_pd(i8 %a0, <8 x double> %a1) {
define <16 x float> @test_mm512_movehdup_ps(<16 x float> %a0) {
; X32-LABEL: test_mm512_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
%res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
@@ -768,13 +768,13 @@ define <16 x float> @test_mm512_movehdup_ps(<16 x float> %a0) {
define <16 x float> @test_mm512_mask_movehdup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
; X32-LABEL: test_mm512_mask_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
@@ -786,13 +786,13 @@ define <16 x float> @test_mm512_mask_movehdup_ps(<16 x float> %a0, i16 %a1, <16
define <16 x float> @test_mm512_maskz_movehdup_ps(i16 %a0, <16 x float> %a1) {
; X32-LABEL: test_mm512_maskz_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
@@ -804,12 +804,12 @@ define <16 x float> @test_mm512_maskz_movehdup_ps(i16 %a0, <16 x float> %a1) {
define <16 x float> @test_mm512_moveldup_ps(<16 x float> %a0) {
; X32-LABEL: test_mm512_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
%res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
@@ -818,13 +818,13 @@ define <16 x float> @test_mm512_moveldup_ps(<16 x float> %a0) {
define <16 x float> @test_mm512_mask_moveldup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
; X32-LABEL: test_mm512_mask_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
@@ -836,13 +836,13 @@ define <16 x float> @test_mm512_mask_moveldup_ps(<16 x float> %a0, i16 %a1, <16
define <16 x float> @test_mm512_maskz_moveldup_ps(i16 %a0, <16 x float> %a1) {
; X32-LABEL: test_mm512_maskz_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
@@ -854,12 +854,12 @@ define <16 x float> @test_mm512_maskz_moveldup_ps(i16 %a0, <16 x float> %a1) {
define <8 x double> @test_mm512_permute_pd(<8 x double> %a0) {
; X32-LABEL: test_mm512_permute_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_permute_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,2,4,4,6,6]
; X64-NEXT: retq
%res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -868,14 +868,14 @@ define <8 x double> @test_mm512_permute_pd(<8 x double> %a0) {
define <8 x double> @test_mm512_mask_permute_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
; X32-LABEL: test_mm512_mask_permute_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_permute_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,2,4,4,6,6]
; X64-NEXT: retq
@@ -887,14 +887,14 @@ define <8 x double> @test_mm512_mask_permute_pd(<8 x double> %a0, i8 %a1, <8 x d
define <8 x double> @test_mm512_maskz_permute_pd(i8 %a0, <8 x double> %a1) {
; X32-LABEL: test_mm512_maskz_permute_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_permute_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,2,4,4,6,6]
; X64-NEXT: retq
@@ -906,12 +906,12 @@ define <8 x double> @test_mm512_maskz_permute_pd(i8 %a0, <8 x double> %a1) {
define <16 x float> @test_mm512_permute_ps(<16 x float> %a0) {
; X32-LABEL: test_mm512_permute_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_permute_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
; X64-NEXT: retq
%res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4, i32 10, i32 8, i32 8, i32 8, i32 14, i32 12, i32 12, i32 12>
@@ -920,13 +920,13 @@ define <16 x float> @test_mm512_permute_ps(<16 x float> %a0) {
define <16 x float> @test_mm512_mask_permute_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
; X32-LABEL: test_mm512_mask_permute_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = zmm1[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_permute_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = zmm1[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
; X64-NEXT: retq
@@ -938,13 +938,13 @@ define <16 x float> @test_mm512_mask_permute_ps(<16 x float> %a0, i16 %a1, <16 x
define <16 x float> @test_mm512_maskz_permute_ps(i16 %a0, <16 x float> %a1) {
; X32-LABEL: test_mm512_maskz_permute_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_permute_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
; X64-NEXT: retq
@@ -956,12 +956,12 @@ define <16 x float> @test_mm512_maskz_permute_ps(i16 %a0, <16 x float> %a1) {
define <8 x i64> @test_mm512_permutex_epi64(<8 x i64> %a0) {
; X32-LABEL: test_mm512_permutex_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_permutex_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
; X64-NEXT: retq
%res = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -970,14 +970,14 @@ define <8 x i64> @test_mm512_permutex_epi64(<8 x i64> %a0) {
define <8 x i64> @test_mm512_mask_permutex_epi64(<8 x i64> %a0, i8 %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_mask_permutex_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermq {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_permutex_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermq {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
; X64-NEXT: retq
@@ -989,14 +989,14 @@ define <8 x i64> @test_mm512_mask_permutex_epi64(<8 x i64> %a0, i8 %a1, <8 x i64
define <8 x i64> @test_mm512_maskz_permutex_epi64(i8 %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_maskz_permutex_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_permutex_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
; X64-NEXT: retq
@@ -1008,12 +1008,12 @@ define <8 x i64> @test_mm512_maskz_permutex_epi64(i8 %a0, <8 x i64> %a1) {
define <8 x double> @test_mm512_permutex_pd(<8 x double> %a0) {
; X32-LABEL: test_mm512_permutex_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_permutex_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
; X64-NEXT: retq
%res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -1022,14 +1022,14 @@ define <8 x double> @test_mm512_permutex_pd(<8 x double> %a0) {
define <8 x double> @test_mm512_mask_permutex_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
; X32-LABEL: test_mm512_mask_permutex_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_permutex_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
; X64-NEXT: retq
@@ -1041,14 +1041,14 @@ define <8 x double> @test_mm512_mask_permutex_pd(<8 x double> %a0, i8 %a1, <8 x
define <8 x double> @test_mm512_maskz_permutex_pd(i8 %a0, <8 x double> %a1) {
; X32-LABEL: test_mm512_maskz_permutex_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_permutex_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
; X64-NEXT: retq
@@ -1060,12 +1060,12 @@ define <8 x double> @test_mm512_maskz_permutex_pd(i8 %a0, <8 x double> %a1) {
define <8 x i64> @test_mm512_shuffle_epi32(<8 x i64> %a0) {
; X32-LABEL: test_mm512_shuffle_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_shuffle_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <16 x i32>
@@ -1076,13 +1076,13 @@ define <8 x i64> @test_mm512_shuffle_epi32(<8 x i64> %a0) {
define <8 x i64> @test_mm512_mask_shuffle_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_mask_shuffle_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = zmm1[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_shuffle_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = zmm1[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
; X64-NEXT: retq
@@ -1097,13 +1097,13 @@ define <8 x i64> @test_mm512_mask_shuffle_epi32(<8 x i64> %a0, i16 %a1, <8 x i64
define <8 x i64> @test_mm512_maskz_shuffle_epi32(i16 %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_maskz_shuffle_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_shuffle_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
; X64-NEXT: retq
@@ -1117,12 +1117,12 @@ define <8 x i64> @test_mm512_maskz_shuffle_epi32(i16 %a0, <8 x i64> %a1) {
define <8 x double> @test_mm512_shuffle_pd(<8 x double> %a0, <8 x double> %a1) {
; X32-LABEL: test_mm512_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[3],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[3],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X64-NEXT: retq
%res = shufflevector <8 x double> %a0, <8 x double> %a1, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1131,14 +1131,14 @@ define <8 x double> @test_mm512_shuffle_pd(<8 x double> %a0, <8 x double> %a1) {
define <8 x double> @test_mm512_mask_shuffle_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2, <8 x double> %a3) {
; X32-LABEL: test_mm512_mask_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufpd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[3],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufpd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[3],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
; X64-NEXT: retq
@@ -1150,14 +1150,14 @@ define <8 x double> @test_mm512_mask_shuffle_pd(<8 x double> %a0, i8 %a1, <8 x d
define <8 x double> @test_mm512_maskz_shuffle_pd(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
; X32-LABEL: test_mm512_maskz_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[3],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[3],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X64-NEXT: retq
@@ -1169,12 +1169,12 @@ define <8 x double> @test_mm512_maskz_shuffle_pd(i8 %a0, <8 x double> %a1, <8 x
define <8 x i64> @test_mm512_unpackhi_epi32(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpackhi_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpackhi_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <16 x i32>
@@ -1186,13 +1186,13 @@ define <8 x i64> @test_mm512_unpackhi_epi32(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_mm512_mask_unpackhi_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2, <8 x i64> %a3) {
; X32-LABEL: test_mm512_mask_unpackhi_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpunpckhdq {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpackhi_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpunpckhdq {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
; X64-NEXT: retq
@@ -1208,13 +1208,13 @@ define <8 x i64> @test_mm512_mask_unpackhi_epi32(<8 x i64> %a0, i16 %a1, <8 x i6
define <8 x i64> @test_mm512_maskz_unpackhi_epi32(i16 %a0, <8 x i64> %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_maskz_unpackhi_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpunpckhdq {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpackhi_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpunpckhdq {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; X64-NEXT: retq
@@ -1229,12 +1229,12 @@ define <8 x i64> @test_mm512_maskz_unpackhi_epi32(i16 %a0, <8 x i64> %a1, <8 x i
define <8 x i64> @test_mm512_unpackhi_epi64(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpackhi_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpackhi_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; X64-NEXT: retq
%res = shufflevector <8 x i64> %a0, <8 x i64> %a1, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -1243,14 +1243,14 @@ define <8 x i64> @test_mm512_unpackhi_epi64(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_mm512_mask_unpackhi_epi64(<8 x i64> %a0, i8 %a1, <8 x i64> %a2, <8 x i64> %a3) {
; X32-LABEL: test_mm512_mask_unpackhi_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpunpckhqdq {{.*#+}} zmm0 = zmm1[1],zmm2[1],zmm1[3],zmm2[3],zmm1[5],zmm2[5],zmm1[7],zmm2[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpackhi_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpunpckhqdq {{.*#+}} zmm0 = zmm1[1],zmm2[1],zmm1[3],zmm2[3],zmm1[5],zmm2[5],zmm1[7],zmm2[7]
; X64-NEXT: retq
@@ -1262,14 +1262,14 @@ define <8 x i64> @test_mm512_mask_unpackhi_epi64(<8 x i64> %a0, i8 %a1, <8 x i64
define <8 x i64> @test_mm512_maskz_unpackhi_epi64(i8 %a0, <8 x i64> %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_maskz_unpackhi_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpunpckhqdq {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpackhi_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpunpckhqdq {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; X64-NEXT: retq
@@ -1281,12 +1281,12 @@ define <8 x i64> @test_mm512_maskz_unpackhi_epi64(i8 %a0, <8 x i64> %a1, <8 x i6
define <8 x double> @test_mm512_unpackhi_pd(<8 x double> %a0, <8 x double> %a1) {
; X32-LABEL: test_mm512_unpackhi_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpackhi_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; X64-NEXT: retq
%res = shufflevector <8 x double> %a0, <8 x double> %a1, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -1295,14 +1295,14 @@ define <8 x double> @test_mm512_unpackhi_pd(<8 x double> %a0, <8 x double> %a1)
define <8 x double> @test_mm512_mask_unpackhi_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2, <8 x double> %a3) {
; X32-LABEL: test_mm512_mask_unpackhi_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} = zmm1[1],zmm2[1],zmm1[3],zmm2[3],zmm1[5],zmm2[5],zmm1[7],zmm2[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpackhi_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} = zmm1[1],zmm2[1],zmm1[3],zmm2[3],zmm1[5],zmm2[5],zmm1[7],zmm2[7]
; X64-NEXT: retq
@@ -1314,14 +1314,14 @@ define <8 x double> @test_mm512_mask_unpackhi_pd(<8 x double> %a0, i8 %a1, <8 x
define <8 x double> @test_mm512_maskz_unpackhi_pd(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
; X32-LABEL: test_mm512_maskz_unpackhi_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpackhi_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; X64-NEXT: retq
@@ -1333,12 +1333,12 @@ define <8 x double> @test_mm512_maskz_unpackhi_pd(i8 %a0, <8 x double> %a1, <8 x
define <16 x float> @test_mm512_unpackhi_ps(<16 x float> %a0, <16 x float> %a1) {
; X32-LABEL: test_mm512_unpackhi_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpackhi_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; X64-NEXT: retq
%res = shufflevector <16 x float> %a0, <16 x float> %a1, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -1347,13 +1347,13 @@ define <16 x float> @test_mm512_unpackhi_ps(<16 x float> %a0, <16 x float> %a1)
define <16 x float> @test_mm512_mask_unpackhi_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2, <16 x float> %a3) {
; X32-LABEL: test_mm512_mask_unpackhi_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpackhi_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
; X64-NEXT: retq
@@ -1365,13 +1365,13 @@ define <16 x float> @test_mm512_mask_unpackhi_ps(<16 x float> %a0, i16 %a1, <16
define <16 x float> @test_mm512_maskz_unpackhi_ps(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
; X32-LABEL: test_mm512_maskz_unpackhi_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpackhi_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; X64-NEXT: retq
@@ -1383,12 +1383,12 @@ define <16 x float> @test_mm512_maskz_unpackhi_ps(i16 %a0, <16 x float> %a1, <16
define <8 x i64> @test_mm512_unpacklo_epi32(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpacklo_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpacklo_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <16 x i32>
@@ -1400,13 +1400,13 @@ define <8 x i64> @test_mm512_unpacklo_epi32(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_mm512_mask_unpacklo_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2, <8 x i64> %a3) {
; X32-LABEL: test_mm512_mask_unpacklo_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpunpckldq {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpacklo_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpunpckldq {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
; X64-NEXT: retq
@@ -1422,13 +1422,13 @@ define <8 x i64> @test_mm512_mask_unpacklo_epi32(<8 x i64> %a0, i16 %a1, <8 x i6
define <8 x i64> @test_mm512_maskz_unpacklo_epi32(i16 %a0, <8 x i64> %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_maskz_unpacklo_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpunpckldq {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpacklo_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpunpckldq {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X64-NEXT: retq
@@ -1443,12 +1443,12 @@ define <8 x i64> @test_mm512_maskz_unpacklo_epi32(i16 %a0, <8 x i64> %a1, <8 x i
define <8 x i64> @test_mm512_unpacklo_epi64(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpacklo_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpacklo_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X64-NEXT: retq
%res = shufflevector <8 x i64> %a0, <8 x i64> %a1, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1457,14 +1457,14 @@ define <8 x i64> @test_mm512_unpacklo_epi64(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_mm512_mask_unpacklo_epi64(<8 x i64> %a0, i8 %a1, <8 x i64> %a2, <8 x i64> %a3) {
; X32-LABEL: test_mm512_mask_unpacklo_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm2[0],zmm1[2],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpacklo_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm2[0],zmm1[2],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
; X64-NEXT: retq
@@ -1476,14 +1476,14 @@ define <8 x i64> @test_mm512_mask_unpacklo_epi64(<8 x i64> %a0, i8 %a1, <8 x i64
define <8 x i64> @test_mm512_maskz_unpacklo_epi64(i8 %a0, <8 x i64> %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_maskz_unpacklo_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpacklo_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X64-NEXT: retq
@@ -1495,12 +1495,12 @@ define <8 x i64> @test_mm512_maskz_unpacklo_epi64(i8 %a0, <8 x i64> %a1, <8 x i6
define <8 x double> @test_mm512_unpacklo_pd(<8 x double> %a0, <8 x double> %a1) {
; X32-LABEL: test_mm512_unpacklo_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpacklo_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X64-NEXT: retq
%res = shufflevector <8 x double> %a0, <8 x double> %a1, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1509,14 +1509,14 @@ define <8 x double> @test_mm512_unpacklo_pd(<8 x double> %a0, <8 x double> %a1)
define <8 x double> @test_mm512_mask_unpacklo_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2, <8 x double> %a3) {
; X32-LABEL: test_mm512_mask_unpacklo_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[2],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpacklo_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[2],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
; X64-NEXT: retq
@@ -1528,14 +1528,14 @@ define <8 x double> @test_mm512_mask_unpacklo_pd(<8 x double> %a0, i8 %a1, <8 x
define <8 x double> @test_mm512_maskz_unpacklo_pd(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
; X32-LABEL: test_mm512_maskz_unpacklo_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpacklo_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; X64-NEXT: retq
@@ -1547,12 +1547,12 @@ define <8 x double> @test_mm512_maskz_unpacklo_pd(i8 %a0, <8 x double> %a1, <8 x
define <16 x float> @test_mm512_unpacklo_ps(<16 x float> %a0, <16 x float> %a1) {
; X32-LABEL: test_mm512_unpacklo_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpacklo_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X64-NEXT: retq
%res = shufflevector <16 x float> %a0, <16 x float> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -1561,13 +1561,13 @@ define <16 x float> @test_mm512_unpacklo_ps(<16 x float> %a0, <16 x float> %a1)
define <16 x float> @test_mm512_mask_unpacklo_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2, <16 x float> %a3) {
; X32-LABEL: test_mm512_mask_unpacklo_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpacklo_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
; X64-NEXT: retq
@@ -1579,13 +1579,13 @@ define <16 x float> @test_mm512_mask_unpacklo_ps(<16 x float> %a0, i16 %a1, <16
define <16 x float> @test_mm512_maskz_unpacklo_ps(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
; X32-LABEL: test_mm512_maskz_unpacklo_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpacklo_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X64-NEXT: retq
@@ -1597,12 +1597,12 @@ define <16 x float> @test_mm512_maskz_unpacklo_ps(i16 %a0, <16 x float> %a1, <16
define <8 x double> @test_mm512_zextpd128_pd512(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm512_zextpd128_pd512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_zextpd128_pd512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, %xmm0
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
@@ -1611,12 +1611,12 @@ define <8 x double> @test_mm512_zextpd128_pd512(<2 x double> %a0) nounwind {
define <8 x double> @test_mm512_zextpd256_pd512(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm512_zextpd256_pd512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_zextpd256_pd512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1625,14 +1625,14 @@ define <8 x double> @test_mm512_zextpd256_pd512(<4 x double> %a0) nounwind {
define <16 x float> @test_mm512_zextps128_ps512(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm512_zextps128_ps512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm0, %xmm0
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_zextps128_ps512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, %xmm0
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1643,12 +1643,12 @@ define <16 x float> @test_mm512_zextps128_ps512(<4 x float> %a0) nounwind {
define <16 x float> @test_mm512_zextps256_ps512(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm512_zextps256_ps512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_zextps256_ps512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -1657,12 +1657,12 @@ define <16 x float> @test_mm512_zextps256_ps512(<8 x float> %a0) nounwind {
define <8 x i64> @test_mm512_zextsi128_si512(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm512_zextsi128_si512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_zextsi128_si512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, %xmm0
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
@@ -1671,12 +1671,12 @@ define <8 x i64> @test_mm512_zextsi128_si512(<2 x i64> %a0) nounwind {
define <8 x i64> @test_mm512_zextsi256_si512(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm512_zextsi256_si512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_zextsi256_si512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x i64> %a0, <4 x i64> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index ef15a85e500..a90652735b5 100644
--- a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -3,7 +3,7 @@
define <16 x i32>@test_int_x86_avx512_mask_pbroadcastd_gpr_512(i32 %x0, <16 x i32> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcastd_gpr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastd %edi, %zmm1
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1}
@@ -23,7 +23,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pbroadcast.d.gpr.512(i32, <16 x i32>, i
define <8 x i64>@test_int_x86_avx512_mask_pbroadcastq_gpr_512(i64 %x0, <8 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcastq_gpr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastq %rdi, %zmm1
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1}
@@ -45,7 +45,7 @@ declare <16 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.512(<4 x float>, <16
define <16 x float> @test_x86_vbroadcast_ss_ps_512(<4 x float> %a0, <16 x float> %a1, i16 %mask ) {
; CHECK-LABEL: test_x86_vbroadcast_ss_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
@@ -66,7 +66,7 @@ declare <8 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.512(<2 x double>, <8
define <8 x double> @test_x86_vbroadcast_sd_pd_512(<2 x double> %a0, <8 x double> %a1, i8 %mask ) {
; CHECK-LABEL: test_x86_vbroadcast_sd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -87,7 +87,7 @@ declare <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32>, <16 x i32>, i16)
define <16 x i32>@test_int_x86_avx512_pbroadcastd_512(<4 x i32> %x0, <16 x i32> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastd %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpbroadcastd %xmm0, %zmm1 {%k1}
@@ -107,7 +107,7 @@ declare <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_pbroadcastq_512(<2 x i64> %x0, <8 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastq %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpbroadcastq %xmm0, %zmm1 {%k1}
@@ -127,7 +127,7 @@ declare <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float>, <16 x floa
define <16 x float>@test_int_x86_avx512_mask_movsldup_512(<16 x float> %x0, <16 x float> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movsldup_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -147,7 +147,7 @@ declare <16 x float> @llvm.x86.avx512.mask.movshdup.512(<16 x float>, <16 x floa
define <16 x float>@test_int_x86_avx512_mask_movshdup_512(<16 x float> %x0, <16 x float> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movshdup_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} zmm2 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -167,7 +167,7 @@ declare <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double>, <8 x double
define <8 x double>@test_int_x86_avx512_mask_movddup_512(<8 x double> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movddup_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
@@ -187,7 +187,7 @@ declare <8 x double> @llvm.x86.avx512.mask.perm.df.512(<8 x double>, i32, <8 x d
define <8 x double>@test_int_x86_avx512_mask_perm_df_512(<8 x double> %x0, i32 %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_perm_df_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4]
@@ -207,7 +207,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.perm.di.512(<8 x i64>, i32, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_perm_di_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_perm_di_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermq {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4]
@@ -225,7 +225,7 @@ define <8 x i64>@test_int_x86_avx512_mask_perm_di_512(<8 x i64> %x0, i32 %x1, <8
define void @test_store1(<16 x float> %data, i8* %ptr, i8* %ptr2, i16 %mask) {
; CHECK-LABEL: test_store1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovups %zmm0, (%rdi) {%k1}
; CHECK-NEXT: vmovups %zmm0, (%rsi)
@@ -239,7 +239,7 @@ declare void @llvm.x86.avx512.mask.storeu.ps.512(i8*, <16 x float>, i16 )
define void @test_store2(<8 x double> %data, i8* %ptr, i8* %ptr2, i8 %mask) {
; CHECK-LABEL: test_store2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovupd %zmm0, (%rdi) {%k1}
; CHECK-NEXT: vmovupd %zmm0, (%rsi)
@@ -253,7 +253,7 @@ declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8)
define void @test_mask_store_aligned_ps(<16 x float> %data, i8* %ptr, i8* %ptr2, i16 %mask) {
; CHECK-LABEL: test_mask_store_aligned_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovaps %zmm0, (%rdi) {%k1}
; CHECK-NEXT: vmovaps %zmm0, (%rsi)
@@ -267,7 +267,7 @@ declare void @llvm.x86.avx512.mask.store.ps.512(i8*, <16 x float>, i16 )
define void @test_mask_store_aligned_pd(<8 x double> %data, i8* %ptr, i8* %ptr2, i8 %mask) {
; CHECK-LABEL: test_mask_store_aligned_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovapd %zmm0, (%rdi) {%k1}
; CHECK-NEXT: vmovapd %zmm0, (%rsi)
@@ -281,7 +281,7 @@ declare void @llvm.x86.avx512.mask.store.pd.512(i8*, <8 x double>, i8)
define void@test_int_x86_avx512_mask_storeu_q_512(i8* %ptr1, i8* %ptr2, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovdqu64 %zmm0, (%rdi) {%k1}
; CHECK-NEXT: vmovdqu64 %zmm0, (%rsi)
@@ -295,7 +295,7 @@ declare void @llvm.x86.avx512.mask.storeu.q.512(i8*, <8 x i64>, i8)
define void@test_int_x86_avx512_mask_storeu_d_512(i8* %ptr1, i8* %ptr2, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1}
; CHECK-NEXT: vmovdqu32 %zmm0, (%rsi)
@@ -309,7 +309,7 @@ declare void @llvm.x86.avx512.mask.storeu.d.512(i8*, <16 x i32>, i16)
define void@test_int_x86_avx512_mask_store_q_512(i8* %ptr1, i8* %ptr2, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, (%rdi) {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, (%rsi)
@@ -323,7 +323,7 @@ declare void @llvm.x86.avx512.mask.store.q.512(i8*, <8 x i64>, i8)
define void@test_int_x86_avx512_mask_store_d_512(i8* %ptr1, i8* %ptr2, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovdqa32 %zmm0, (%rdi) {%k1}
; CHECK-NEXT: vmovdqa32 %zmm0, (%rsi)
@@ -337,7 +337,7 @@ declare void @llvm.x86.avx512.mask.store.d.512(i8*, <16 x i32>, i16)
define <16 x float> @test_mask_load_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
; CHECK-LABEL: test_mask_load_aligned_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %zmm0 {%k1}
@@ -355,7 +355,7 @@ declare <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8*, <16 x float>, i16)
define <16 x float> @test_mask_load_unaligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovups (%rdi), %zmm0 {%k1}
@@ -373,7 +373,7 @@ declare <16 x float> @llvm.x86.avx512.mask.loadu.ps.512(i8*, <16 x float>, i16)
define <8 x double> @test_mask_load_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm0
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm0 {%k1}
@@ -391,7 +391,7 @@ declare <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8*, <8 x double>, i8)
define <8 x double> @test_mask_load_unaligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovupd (%rdi), %zmm0
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovupd (%rdi), %zmm0 {%k1}
@@ -411,7 +411,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.loadu.d.512(i8*, <16 x i32>, i16)
define <16 x i32> @test_mask_load_unaligned_d(i8* %ptr, i8* %ptr2, <16 x i32> %data, i16 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu32 (%rdi), %zmm0
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovdqu32 (%rsi), %zmm0 {%k1}
@@ -429,7 +429,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.loadu.q.512(i8*, <8 x i64>, i8)
define <8 x i64> @test_mask_load_unaligned_q(i8* %ptr, i8* %ptr2, <8 x i64> %data, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu64 (%rdi), %zmm0
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vmovdqu64 (%rsi), %zmm0 {%k1}
@@ -447,7 +447,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8*, <16 x i32>, i16)
define <16 x i32> @test_mask_load_aligned_d(<16 x i32> %data, i8* %ptr, i16 %mask) {
; CHECK-LABEL: test_mask_load_aligned_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 {%k1}
@@ -465,7 +465,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8*, <8 x i64>, i8)
define <8 x i64> @test_mask_load_aligned_q(<8 x i64> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 {%k1}
@@ -483,7 +483,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vpermil.pd.512(<8 x double>, i32, <8
define <8 x double>@test_int_x86_avx512_mask_vpermil_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} zmm2 = zmm0[0,1,3,2,5,4,6,6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,1,3,2,5,4,6,6]
@@ -503,7 +503,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermil.ps.512(<16 x float>, i32, <16
define <16 x float>@test_int_x86_avx512_mask_vpermil_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm2 = zmm0[2,1,1,0,6,5,5,4,10,9,9,8,14,13,13,12]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[2,1,1,0,6,5,5,4,10,9,9,8,14,13,13,12]
@@ -523,7 +523,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pshuf.d.512(<16 x i32>, i32, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_pshuf_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshuf_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshufd {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4,11,8,8,8,15,12,12,12]
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4,11,8,8,8,15,12,12,12]
@@ -541,7 +541,7 @@ define <16 x i32>@test_int_x86_avx512_mask_pshuf_d_512(<16 x i32> %x0, i32 %x1,
define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_pcmpeq_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -552,7 +552,7 @@ define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) {
define i16 @test_mask_pcmpeq_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -566,7 +566,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32>, <16 x i32>, i16)
define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_pcmpeq_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -577,7 +577,7 @@ define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) {
define i8 @test_mask_pcmpeq_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -591,7 +591,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64>, <8 x i64>, i8)
define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_pcmpgt_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -602,7 +602,7 @@ define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) {
define i16 @test_mask_pcmpgt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -616,7 +616,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32>, <16 x i32>, i16)
define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_pcmpgt_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -627,7 +627,7 @@ define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) {
define i8 @test_mask_pcmpgt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -643,7 +643,7 @@ declare <8 x double> @llvm.x86.avx512.mask.unpckh.pd.512(<8 x double>, <8 x doub
define <8 x double>@test_int_x86_avx512_mask_unpckh_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm3 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -659,7 +659,7 @@ declare <16 x float> @llvm.x86.avx512.mask.unpckh.ps.512(<16 x float>, <16 x flo
define <16 x float>@test_int_x86_avx512_mask_unpckh_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} zmm3 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -675,7 +675,7 @@ declare <8 x double> @llvm.x86.avx512.mask.unpckl.pd.512(<8 x double>, <8 x doub
define <8 x double>@test_int_x86_avx512_mask_unpckl_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -691,7 +691,7 @@ declare <16 x float> @llvm.x86.avx512.mask.unpckl.ps.512(<16 x float>, <16 x flo
define <16 x float>@test_int_x86_avx512_mask_unpckl_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -707,7 +707,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_mask_punpcklqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpcklqd_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -727,7 +727,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.punpckhqd.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_mask_punpckhqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhqd_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhqdq {{.*#+}} zmm3 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpunpckhqdq {{.*#+}} zmm2 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -743,7 +743,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.punpckhd.q.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_punpckhd_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhd_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpunpckhdq {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -759,7 +759,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.punpckld.q.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_punpckld_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckld_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckldq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpunpckldq {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -773,7 +773,7 @@ define <16 x i32>@test_int_x86_avx512_mask_punpckld_q_512(<16 x i32> %x0, <16 x
define <16 x i32> @test_x86_avx512_pslli_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
@@ -782,7 +782,7 @@ define <16 x i32> @test_x86_avx512_pslli_d(<16 x i32> %a0) {
define <16 x i32> @test_x86_avx512_mask_pslli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -793,7 +793,7 @@ define <16 x i32> @test_x86_avx512_mask_pslli_d(<16 x i32> %a0, <16 x i32> %a1,
define <16 x i32> @test_x86_avx512_maskz_pslli_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -805,7 +805,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32>, i32, <16 x i32>, i1
define <8 x i64> @test_x86_avx512_pslli_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllq $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
@@ -814,7 +814,7 @@ define <8 x i64> @test_x86_avx512_pslli_q(<8 x i64> %a0) {
define <8 x i64> @test_x86_avx512_mask_pslli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -825,7 +825,7 @@ define <8 x i64> @test_x86_avx512_mask_pslli_q(<8 x i64> %a0, <8 x i64> %a1, i8
define <8 x i64> @test_x86_avx512_maskz_pslli_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -837,7 +837,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64>, i32, <8 x i64>, i8) n
define <16 x i32> @test_x86_avx512_psrli_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
@@ -846,7 +846,7 @@ define <16 x i32> @test_x86_avx512_psrli_d(<16 x i32> %a0) {
define <16 x i32> @test_x86_avx512_mask_psrli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -857,7 +857,7 @@ define <16 x i32> @test_x86_avx512_mask_psrli_d(<16 x i32> %a0, <16 x i32> %a1,
define <16 x i32> @test_x86_avx512_maskz_psrli_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrli_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -869,7 +869,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32>, i32, <16 x i32>, i1
define <8 x i64> @test_x86_avx512_psrli_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
@@ -878,7 +878,7 @@ define <8 x i64> @test_x86_avx512_psrli_q(<8 x i64> %a0) {
define <8 x i64> @test_x86_avx512_mask_psrli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -889,7 +889,7 @@ define <8 x i64> @test_x86_avx512_mask_psrli_q(<8 x i64> %a0, <8 x i64> %a1, i8
define <8 x i64> @test_x86_avx512_maskz_psrli_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrli_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -901,7 +901,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64>, i32, <8 x i64>, i8) n
define <16 x i32> @test_x86_avx512_psrai_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
@@ -910,7 +910,7 @@ define <16 x i32> @test_x86_avx512_psrai_d(<16 x i32> %a0) {
define <16 x i32> @test_x86_avx512_mask_psrai_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -921,7 +921,7 @@ define <16 x i32> @test_x86_avx512_mask_psrai_d(<16 x i32> %a0, <16 x i32> %a1,
define <16 x i32> @test_x86_avx512_maskz_psrai_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -933,7 +933,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32>, i32, <16 x i32>, i1
define <8 x i64> @test_x86_avx512_psrai_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
@@ -942,7 +942,7 @@ define <8 x i64> @test_x86_avx512_psrai_q(<8 x i64> %a0) {
define <8 x i64> @test_x86_avx512_mask_psrai_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -953,7 +953,7 @@ define <8 x i64> @test_x86_avx512_mask_psrai_q(<8 x i64> %a0, <8 x i64> %a1, i8
define <8 x i64> @test_x86_avx512_maskz_psrai_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -967,7 +967,7 @@ declare void @llvm.x86.avx512.storent.q.512(i8*, <8 x i64>)
define void@test_storent_q_512(<8 x i64> %data, i8* %ptr) {
; CHECK-LABEL: test_storent_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovntps %zmm0, (%rdi)
; CHECK-NEXT: retq
call void @llvm.x86.avx512.storent.q.512(i8* %ptr, <8 x i64> %data)
@@ -978,7 +978,7 @@ declare void @llvm.x86.avx512.storent.pd.512(i8*, <8 x double>)
define void @test_storent_pd_512(<8 x double> %data, i8* %ptr) {
; CHECK-LABEL: test_storent_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovntps %zmm0, (%rdi)
; CHECK-NEXT: retq
call void @llvm.x86.avx512.storent.pd.512(i8* %ptr, <8 x double> %data)
@@ -989,7 +989,7 @@ declare void @llvm.x86.avx512.storent.ps.512(i8*, <16 x float>)
define void @test_storent_ps_512(<16 x float> %data, i8* %ptr) {
; CHECK-LABEL: test_storent_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovntps %zmm0, (%rdi)
; CHECK-NEXT: retq
call void @llvm.x86.avx512.storent.ps.512(i8* %ptr, <16 x float> %data)
@@ -998,7 +998,7 @@ define void @test_storent_ps_512(<16 x float> %data, i8* %ptr) {
define <16 x i32> @test_xor_epi32(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_xor_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
@@ -1007,7 +1007,7 @@ define <16 x i32> @test_xor_epi32(<16 x i32> %a, <16 x i32> %b) {
define <16 x i32> @test_mask_xor_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_xor_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpxord %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1020,7 +1020,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32> @test_or_epi32(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_or_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vporq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
@@ -1029,7 +1029,7 @@ define <16 x i32> @test_or_epi32(<16 x i32> %a, <16 x i32> %b) {
define <16 x i32> @test_mask_or_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_or_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpord %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1042,7 +1042,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32>, <16 x i32>, <16 x
define <16 x i32> @test_and_epi32(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_and_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
@@ -1051,7 +1051,7 @@ define <16 x i32> @test_and_epi32(<16 x i32> %a, <16 x i32> %b) {
define <16 x i32> @test_mask_and_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_and_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpandd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1064,7 +1064,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32>, <16 x i32>, <16
define <8 x i64> @test_xor_epi64(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_xor_epi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
@@ -1073,7 +1073,7 @@ define <8 x i64> @test_xor_epi64(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @test_mask_xor_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpxorq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1086,7 +1086,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64>, <8 x i64>, <8 x i6
define <8 x i64> @test_or_epi64(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_or_epi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vporq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
@@ -1095,7 +1095,7 @@ define <8 x i64> @test_or_epi64(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @test_mask_or_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vporq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1108,7 +1108,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64>, <8 x i64>, <8 x i64
define <8 x i64> @test_and_epi64(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_and_epi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
@@ -1117,7 +1117,7 @@ define <8 x i64> @test_and_epi64(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @test_mask_and_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpandq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1130,7 +1130,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64>, <8 x i64>, <8 x i6
define <16 x i32> @test_mask_add_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_add_epi32_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
@@ -1139,7 +1139,7 @@ define <16 x i32> @test_mask_add_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
define <16 x i32> @test_mask_add_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1150,7 +1150,7 @@ define <16 x i32> @test_mask_add_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i
define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1160,7 +1160,7 @@ define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m
define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi32_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1170,7 +1170,7 @@ define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1182,7 +1182,7 @@ define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <1
define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1193,7 +1193,7 @@ define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i
define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi32_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd (%rdi){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
@@ -1205,7 +1205,7 @@ define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmbk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddd (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1219,7 +1219,7 @@ define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i3
define <16 x i32> @test_mask_add_epi32_rmbkz(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmbkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddd (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1234,7 +1234,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32> @test_mask_sub_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_sub_epi32_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
@@ -1243,7 +1243,7 @@ define <16 x i32> @test_mask_sub_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
define <16 x i32> @test_mask_sub_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1254,7 +1254,7 @@ define <16 x i32> @test_mask_sub_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i
define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1264,7 +1264,7 @@ define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m
define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi32_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1274,7 +1274,7 @@ define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubd (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1286,7 +1286,7 @@ define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <1
define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubd (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1297,7 +1297,7 @@ define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i
define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi32_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd (%rdi){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
@@ -1309,7 +1309,7 @@ define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmbk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubd (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1323,7 +1323,7 @@ define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i3
define <16 x i32> @test_mask_sub_epi32_rmbkz(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmbkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubd (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1338,7 +1338,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32>, <16 x i32>, <16
define <8 x i64> @test_mask_add_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mask_add_epi64_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -1347,7 +1347,7 @@ define <8 x i64> @test_mask_add_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @test_mask_add_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1358,7 +1358,7 @@ define <8 x i64> @test_mask_add_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64>
define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1368,7 +1368,7 @@ define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask)
define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi64_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr_b
@@ -1378,7 +1378,7 @@ define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1390,7 +1390,7 @@ define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x
define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1401,7 +1401,7 @@ define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %
define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi64_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
@@ -1413,7 +1413,7 @@ define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rmbk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1427,7 +1427,7 @@ define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64>
define <8 x i64> @test_mask_add_epi64_rmbkz(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rmbkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1442,7 +1442,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64>, <8 x i64>, <8 x i6
define <8 x i64> @test_mask_sub_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mask_sub_epi64_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -1451,7 +1451,7 @@ define <8 x i64> @test_mask_sub_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @test_mask_sub_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1462,7 +1462,7 @@ define <8 x i64> @test_mask_sub_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64>
define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1472,7 +1472,7 @@ define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask)
define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi64_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr_b
@@ -1482,7 +1482,7 @@ define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1494,7 +1494,7 @@ define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x
define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1505,7 +1505,7 @@ define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %
define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi64_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
@@ -1517,7 +1517,7 @@ define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rmbk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1531,7 +1531,7 @@ define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64>
define <8 x i64> @test_mask_sub_epi64_rmbkz(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rmbkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1546,7 +1546,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64>, <8 x i64>, <8 x i6
define <16 x i32> @test_mask_mullo_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_mullo_epi32_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
@@ -1555,7 +1555,7 @@ define <16 x i32> @test_mask_mullo_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
define <16 x i32> @test_mask_mullo_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1566,7 +1566,7 @@ define <16 x i32> @test_mask_mullo_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <
define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1576,7 +1576,7 @@ define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <16 x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi32_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmulld (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1586,7 +1586,7 @@ define <16 x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_
define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmulld (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1598,7 +1598,7 @@ define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr
define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmulld (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1609,7 +1609,7 @@ define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %pt
define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi32_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmulld (%rdi){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
@@ -1621,7 +1621,7 @@ define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmulld (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1635,7 +1635,7 @@ define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <1
define <16 x i32> @test_mask_mullo_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmulld (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1653,7 +1653,7 @@ declare <16 x float> @llvm.x86.avx512.mask.shuf.f32x4(<16 x float>, <16 x float>
define <16 x float>@test_int_x86_avx512_mask_shuf_f32x4(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f32x4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm3 = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
@@ -1669,7 +1669,7 @@ declare <8 x double> @llvm.x86.avx512.mask.shuf.f64x2(<8 x double>, <8 x double>
define <8 x double>@test_int_x86_avx512_mask_shuf_f64x2(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f64x2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm3 = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,2,3],zmm1[2,3,0,1]
@@ -1690,7 +1690,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.shuf.i32x4(<16 x i32>, <16 x i32>, i32,
define <16 x i32>@test_int_x86_avx512_mask_shuf_i32x4(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_i32x4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm3 = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
@@ -1706,7 +1706,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.shuf.i64x2(<8 x i64>, <8 x i64>, i32, <8
define <8 x i64>@test_int_x86_avx512_mask_shuf_i64x2(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_i64x2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,2,3],zmm1[2,3,0,1]
@@ -1722,7 +1722,7 @@ declare <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double>, <8 x double
define <8 x double>@test_int_x86_avx512_mask_shuf_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} zmm3 = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
@@ -1743,7 +1743,7 @@ declare <16 x float> @llvm.x86.avx512.mask.shuf.ps.512(<16 x float>, <16 x float
define <16 x float>@test_int_x86_avx512_mask_shuf_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} zmm3 = zmm0[2,1],zmm1[1,0],zmm0[6,5],zmm1[5,4],zmm0[10,9],zmm1[9,8],zmm0[14,13],zmm1[13,12]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[2,1],zmm1[1,0],zmm0[6,5],zmm1[5,4],zmm0[10,9],zmm1[9,8],zmm0[14,13],zmm1[13,12]
@@ -1759,7 +1759,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_mask_pmaxs_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsd %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmaxsd %zmm1, %zmm0, %zmm2 {%k1}
@@ -1775,7 +1775,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_mask_pmaxs_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm2 {%k1}
@@ -1791,7 +1791,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_mask_pmaxu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxud %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmaxud %zmm1, %zmm0, %zmm2 {%k1}
@@ -1807,7 +1807,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_mask_pmaxu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxuq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmaxuq %zmm1, %zmm0, %zmm2 {%k1}
@@ -1823,7 +1823,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_mask_pmins_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsd %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpminsd %zmm1, %zmm0, %zmm2 {%k1}
@@ -1839,7 +1839,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_mask_pmins_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpminsq %zmm1, %zmm0, %zmm2 {%k1}
@@ -1855,7 +1855,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_mask_pminu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminud %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpminud %zmm1, %zmm0, %zmm2 {%k1}
@@ -1871,7 +1871,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_mask_pminu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminuq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpminuq %zmm1, %zmm0, %zmm2 {%k1}
@@ -1885,7 +1885,7 @@ define <8 x i64>@test_int_x86_avx512_mask_pminu_q_512(<8 x i64> %x0, <8 x i64> %
define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; CHECK-LABEL: test_mm_mask_move_ss:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -1897,7 +1897,7 @@ entry:
define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; CHECK-LABEL: test_mm_maskz_move_ss:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1908,7 +1908,7 @@ entry:
define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; CHECK-LABEL: test_mm_mask_move_sd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -1919,7 +1919,7 @@ entry:
define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; CHECK-LABEL: test_mm_maskz_move_sd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1935,7 +1935,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmovzxb.d.512(<16 x i8>, <16 x i32>, i1
define <16 x i32>@test_int_x86_avx512_mask_pmovzxb_d_512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxbd {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -1955,7 +1955,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmovzxb.q.512(<16 x i8>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_pmovzxb_q_512(<16 x i8> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbq {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxbq {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
@@ -1975,7 +1975,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmovzxd.q.512(<8 x i32>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_pmovzxd_q_512(<8 x i32> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxd_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxdq {{.*#+}} zmm1 {%k1} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
@@ -1995,7 +1995,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmovzxw.d.512(<16 x i16>, <16 x i32>, i
define <16 x i32>@test_int_x86_avx512_mask_pmovzxw_d_512(<16 x i16> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxwd {{.*#+}} zmm1 {%k1} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
@@ -2015,7 +2015,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmovzxw.q.512(<8 x i16>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_pmovzxw_q_512(<8 x i16> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwq {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxwq {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -2035,7 +2035,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmovsxb.d.512(<16 x i8>, <16 x i32>, i1
define <16 x i32>@test_int_x86_avx512_mask_pmovsxb_d_512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxbd %xmm0, %zmm1 {%k1}
@@ -2055,7 +2055,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmovsxb.q.512(<16 x i8>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_pmovsxb_q_512(<16 x i8> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxbq %xmm0, %zmm1 {%k1}
@@ -2075,7 +2075,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmovsxd.q.512(<8 x i32>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_pmovsxd_q_512(<8 x i32> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxd_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxdq %ymm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxdq %ymm0, %zmm1 {%k1}
@@ -2096,7 +2096,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmovsxw.d.512(<16 x i16>, <16 x i32>, i
define <16 x i32>@test_int_x86_avx512_mask_pmovsxw_d_512(<16 x i16> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwd %ymm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxwd %ymm0, %zmm1 {%k1}
@@ -2117,7 +2117,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmovsxw.q.512(<8 x i16>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_pmovsxw_q_512(<8 x i16> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxwq %xmm0, %zmm1 {%k1}
@@ -2137,7 +2137,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64>, i32, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_qi_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm1 {%k1}
@@ -2157,7 +2157,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32>, i32, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_psrl_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_di_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm1 {%k1}
@@ -2177,7 +2177,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32>, i32, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_psra_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_di_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrad $3, %zmm0, %zmm1 {%k1}
@@ -2197,7 +2197,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64>, i32, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_psra_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsraq $3, %zmm0, %zmm1 {%k1}
@@ -2217,7 +2217,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32>, i32, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_psll_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_di_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpslld $3, %zmm0, %zmm1 {%k1}
@@ -2237,7 +2237,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64>, i32, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_psll_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_qi_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllq $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsllq $3, %zmm0, %zmm1 {%k1}
@@ -2255,7 +2255,7 @@ define <8 x i64>@test_int_x86_avx512_mask_psll_qi_512(<8 x i64> %x0, i32 %x1, <8
define <16 x i32> @test_x86_avx512_psll_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2264,7 +2264,7 @@ define <16 x i32> @test_x86_avx512_psll_d(<16 x i32> %a0, <4 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psll_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psll_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2275,7 +2275,7 @@ define <16 x i32> @test_x86_avx512_mask_psll_d(<16 x i32> %a0, <4 x i32> %a1, <1
define <16 x i32> @test_x86_avx512_maskz_psll_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psll_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2287,7 +2287,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32>, <4 x i32>, <16 x i32
define <8 x i64> @test_x86_avx512_psll_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2296,7 +2296,7 @@ define <8 x i64> @test_x86_avx512_psll_q(<8 x i64> %a0, <2 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psll_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psll_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2307,7 +2307,7 @@ define <8 x i64> @test_x86_avx512_mask_psll_q(<8 x i64> %a0, <2 x i64> %a1, <8 x
define <8 x i64> @test_x86_avx512_maskz_psll_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psll_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2319,7 +2319,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64>, <2 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psrl_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2328,7 +2328,7 @@ define <16 x i32> @test_x86_avx512_psrl_d(<16 x i32> %a0, <4 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psrl_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrl_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2339,7 +2339,7 @@ define <16 x i32> @test_x86_avx512_mask_psrl_d(<16 x i32> %a0, <4 x i32> %a1, <1
define <16 x i32> @test_x86_avx512_maskz_psrl_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrl_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2351,7 +2351,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32>, <4 x i32>, <16 x i32
define <8 x i64> @test_x86_avx512_psrl_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2360,7 +2360,7 @@ define <8 x i64> @test_x86_avx512_psrl_q(<8 x i64> %a0, <2 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psrl_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrl_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2371,7 +2371,7 @@ define <8 x i64> @test_x86_avx512_mask_psrl_q(<8 x i64> %a0, <2 x i64> %a1, <8 x
define <8 x i64> @test_x86_avx512_maskz_psrl_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrl_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2383,7 +2383,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64>, <2 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psra_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2392,7 +2392,7 @@ define <16 x i32> @test_x86_avx512_psra_d(<16 x i32> %a0, <4 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psra_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2403,7 +2403,7 @@ define <16 x i32> @test_x86_avx512_mask_psra_d(<16 x i32> %a0, <4 x i32> %a1, <1
define <16 x i32> @test_x86_avx512_maskz_psra_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2415,7 +2415,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32>, <4 x i32>, <16 x i32
define <8 x i64> @test_x86_avx512_psra_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2424,7 +2424,7 @@ define <8 x i64> @test_x86_avx512_psra_q(<8 x i64> %a0, <2 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psra_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2435,7 +2435,7 @@ define <8 x i64> @test_x86_avx512_mask_psra_q(<8 x i64> %a0, <2 x i64> %a1, <8 x
define <8 x i64> @test_x86_avx512_maskz_psra_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2447,7 +2447,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64>, <2 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psllv_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2456,7 +2456,7 @@ define <16 x i32> @test_x86_avx512_psllv_d(<16 x i32> %a0, <16 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psllv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psllv_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2467,7 +2467,7 @@ define <16 x i32> @test_x86_avx512_mask_psllv_d(<16 x i32> %a0, <16 x i32> %a1,
define <16 x i32> @test_x86_avx512_maskz_psllv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psllv_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2479,7 +2479,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32>, <16 x i32>, <16 x i
define <8 x i64> @test_x86_avx512_psllv_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2488,7 +2488,7 @@ define <8 x i64> @test_x86_avx512_psllv_q(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psllv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psllv_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2499,7 +2499,7 @@ define <8 x i64> @test_x86_avx512_mask_psllv_q(<8 x i64> %a0, <8 x i64> %a1, <8
define <8 x i64> @test_x86_avx512_maskz_psllv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psllv_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2512,7 +2512,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64>, <8 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psrav_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2521,7 +2521,7 @@ define <16 x i32> @test_x86_avx512_psrav_d(<16 x i32> %a0, <16 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psrav_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2532,7 +2532,7 @@ define <16 x i32> @test_x86_avx512_mask_psrav_d(<16 x i32> %a0, <16 x i32> %a1,
define <16 x i32> @test_x86_avx512_maskz_psrav_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2544,7 +2544,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32>, <16 x i32>, <16 x i
define <8 x i64> @test_x86_avx512_psrav_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2553,7 +2553,7 @@ define <8 x i64> @test_x86_avx512_psrav_q(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psrav_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2564,7 +2564,7 @@ define <8 x i64> @test_x86_avx512_mask_psrav_q(<8 x i64> %a0, <8 x i64> %a1, <8
define <8 x i64> @test_x86_avx512_maskz_psrav_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2576,7 +2576,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64>, <8 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psrlv_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2585,7 +2585,7 @@ define <16 x i32> @test_x86_avx512_psrlv_d(<16 x i32> %a0, <16 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrlv_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2596,7 +2596,7 @@ define <16 x i32> @test_x86_avx512_mask_psrlv_d(<16 x i32> %a0, <16 x i32> %a1,
define <16 x i32> @test_x86_avx512_maskz_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrlv_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2608,7 +2608,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32>, <16 x i32>, <16 x i
define <8 x i64> @test_x86_avx512_psrlv_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2617,7 +2617,7 @@ define <8 x i64> @test_x86_avx512_psrlv_q(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrlv_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2628,7 +2628,7 @@ define <8 x i64> @test_x86_avx512_mask_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, <8
define <8 x i64> @test_x86_avx512_maskz_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrlv_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2640,7 +2640,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64>, <8 x i64>, <8 x i64>,
define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, <8 x i64>* %ptr) {
; CHECK-LABEL: test_x86_avx512_psrlv_q_memop:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr
@@ -2652,7 +2652,7 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtdq2pd.512(<8 x i32>, <8 x double>,
define <8 x double>@test_int_x86_avx512_mask_cvt_dq2pd_512(<8 x i32> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtdq2pd %ymm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtdq2pd %ymm0, %zmm1 {%k1}
@@ -2668,7 +2668,7 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtudq2pd.512(<8 x i32>, <8 x double>
define <8 x double>@test_int_x86_avx512_mask_cvt_udq2pd_512(<8 x i32> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtudq2pd %ymm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtudq2pd %ymm0, %zmm1 {%k1}
@@ -2682,7 +2682,7 @@ define <8 x double>@test_int_x86_avx512_mask_cvt_udq2pd_512(<8 x i32> %x0, <8 x
define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_valign_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: valignq {{.*#+}} zmm0 = zmm1[2,3,4,5,6,7],zmm0[0,1]
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> zeroinitializer, i8 -1)
@@ -2691,7 +2691,7 @@ define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src, i8 %mask) {
; CHECK-LABEL: test_mask_valign_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} zmm2 {%k1} = zmm1[2,3,4,5,6,7],zmm0[0,1]
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2704,7 +2704,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64>, <8 x i64>, i32,
define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_maskz_valign_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm1[5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1,2,3,4]
; CHECK-NEXT: retq
@@ -2718,7 +2718,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vpermilvar.pd.512(<8 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
@@ -2738,7 +2738,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm2 {%k1}
@@ -2757,7 +2757,7 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512(<16 x float> %x0,
; Test case to make sure we can print shuffle decode comments for constant pool loads.
define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm2 {%k1} = zmm0[2,3,0,1,7,6,5,4,9,8,11,10,12,13,14,15]
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
@@ -2775,7 +2775,7 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16
define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_mul_epi32_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -2784,7 +2784,7 @@ define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2795,7 +2795,7 @@ define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64
define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2805,7 +2805,7 @@ define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas
define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epi32_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -2815,7 +2815,7 @@ define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -2827,7 +2827,7 @@ define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8
define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2838,7 +2838,7 @@ define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8
define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epi32_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
@@ -2851,7 +2851,7 @@ define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmbk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -2866,7 +2866,7 @@ define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64>
define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmbkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2882,7 +2882,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32>, <16 x i32>, <8 x
define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_mul_epu32_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -2891,7 +2891,7 @@ define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -2902,7 +2902,7 @@ define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64
define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2912,7 +2912,7 @@ define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas
define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epu32_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -2922,7 +2922,7 @@ define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -2934,7 +2934,7 @@ define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8
define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2945,7 +2945,7 @@ define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8
define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epu32_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
@@ -2958,7 +2958,7 @@ define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmbk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -2973,7 +2973,7 @@ define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64>
define <8 x i64> @test_mask_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmbkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2989,7 +2989,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32>, <16 x i32>, <8
define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 %mask) {
; CHECK-LABEL: test_mask_vextractf32x4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: kshiftlw $12, %k0, %k1
; CHECK-NEXT: kshiftrw $15, %k1, %k1
@@ -3019,7 +3019,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float>, i32, <
define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask) {
; CHECK-LABEL: test_mask_vextracti64x4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm1
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: kshiftlw $12, %k0, %k1
@@ -3050,7 +3050,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64>, i32, <4 x i
define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: test_maskz_vextracti32x4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: kshiftlw $12, %k0, %k1
; CHECK-NEXT: kshiftrw $15, %k1, %k1
@@ -3081,7 +3081,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32>, i32, <4 x
define <4 x double> @test_vextractf64x4(<8 x double> %a) {
; CHECK-LABEL: test_vextractf64x4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double> %a, i32 1, <4 x double> zeroinitializer, i8 -1)
@@ -3094,7 +3094,7 @@ declare <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float>, <4 x fl
define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -3115,7 +3115,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32>, <4 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -3136,7 +3136,7 @@ declare <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double>, <4 x do
define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, <8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf64x4_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
@@ -3156,7 +3156,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64>, <4 x i64>, i3
define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti64x4_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
@@ -3174,7 +3174,7 @@ define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i6
define <8 x i64> @test_x86_avx512_movntdqa(i8* %a0) {
; CHECK-LABEL: test_x86_avx512_movntdqa:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovntdqa (%rdi), %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %a0)
@@ -3185,7 +3185,7 @@ declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*) nounwind readonly
define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; CHECK-NEXT: vpcmpled %zmm1, %zmm0, %k2
@@ -3230,7 +3230,7 @@ define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <8 x i16> @test_mask_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: vpcmpgtd %zmm0, %zmm1, %k2 {%k1}
@@ -3278,7 +3278,7 @@ declare i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32>, <16 x i32>, i32, i16) no
define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleud %zmm1, %zmm0, %k2
@@ -3323,7 +3323,7 @@ define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <8 x i16> @test_mask_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k2 {%k1}
@@ -3371,7 +3371,7 @@ declare i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32>, <16 x i32>, i32, i16) n
define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k2
@@ -3416,7 +3416,7 @@ define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: vpcmpgtq %zmm0, %zmm1, %k2 {%k1}
@@ -3464,7 +3464,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwi
define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleuq %zmm1, %zmm0, %k2
@@ -3509,7 +3509,7 @@ define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k2 {%k1}
@@ -3559,7 +3559,7 @@ declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -3580,7 +3580,7 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0,
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512_load(<4 x float>* %x0ptr, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -3593,7 +3593,7 @@ declare <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -3613,7 +3613,7 @@ define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512_load(<4 x double>* %x0ptr, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -3627,7 +3627,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
@@ -3648,7 +3648,7 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512_load(<4 x i32>* %x0ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -3662,7 +3662,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -3682,7 +3682,7 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512_load(<4 x i64>* %x0ptr, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -3696,7 +3696,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16)
define <16 x i32>@test_int_x86_avx512_mask_pabs_d_512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsd %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpabsd %zmm0, %zmm1 {%k1}
@@ -3712,7 +3712,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_pabs_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsq %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpabsq %zmm0, %zmm1 {%k1}
@@ -3726,7 +3726,7 @@ define <8 x i64>@test_int_x86_avx512_mask_pabs_q_512(<8 x i64> %x0, <8 x i64> %x
define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1, i8 %m) {
; CHECK-LABEL: test_vptestmq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: kmovw %edi, %k1
@@ -3744,7 +3744,7 @@ declare i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64>, <8 x i64>, i8)
define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1, i16 %m) {
; CHECK-LABEL: test_vptestmd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: kmovw %edi, %k1
@@ -3764,7 +3764,7 @@ declare i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32>, <16 x i32>, i16 %x2)
define i16@test_int_x86_avx512_ptestnm_d_512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmd %zmm1, %zmm0, %k1 {%k1}
@@ -3783,7 +3783,7 @@ declare i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64>, <8 x i64>, i8 %x2)
define i8@test_int_x86_avx512_ptestnm_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmq %zmm1, %zmm0, %k1 {%k1}
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index c05a69c4e9a..6829b6f378c 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.x86.avx512.kortestz.w(i16, i16) nounwind readnone
define i32 @test_kortestz(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kortestz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: xorl %eax, %eax
@@ -18,7 +18,7 @@ define i32 @test_kortestz(i16 %a0, i16 %a1) {
declare i32 @llvm.x86.avx512.kortestc.w(i16, i16) nounwind readnone
define i32 @test_kortestc(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kortestc:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: xorl %eax, %eax
@@ -32,7 +32,7 @@ define i32 @test_kortestc(i16 %a0, i16 %a1) {
declare i16 @llvm.x86.avx512.kand.w(i16, i16) nounwind readnone
define i16 @test_kand(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kand:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: movw $8, %ax
@@ -50,7 +50,7 @@ define i16 @test_kand(i16 %a0, i16 %a1) {
declare i16 @llvm.x86.avx512.kandn.w(i16, i16) nounwind readnone
define i16 @test_kandn(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kandn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: movw $8, %ax
@@ -68,7 +68,7 @@ define i16 @test_kandn(i16 %a0, i16 %a1) {
declare i16 @llvm.x86.avx512.knot.w(i16) nounwind readnone
define i16 @test_knot(i16 %a0) {
; CHECK-LABEL: test_knot:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
@@ -81,7 +81,7 @@ define i16 @test_knot(i16 %a0) {
declare i16 @llvm.x86.avx512.kor.w(i16, i16) nounwind readnone
define i16 @test_kor(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kor:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: movw $8, %ax
@@ -100,7 +100,7 @@ declare i16 @llvm.x86.avx512.kunpck.bw(i16, i16) nounwind readnone
define i16 @unpckbw_test(i16 %a0, i16 %a1) {
; CHECK-LABEL: unpckbw_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: kunpckbw %k1, %k0, %k0
@@ -116,7 +116,7 @@ declare i16 @llvm.x86.avx512.kxnor.w(i16, i16) nounwind readnone
; probably by FoldConstantArithmetic in SelectionDAG.
define i16 @test_kxnor(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kxnor:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: movw $8, %ax
@@ -134,7 +134,7 @@ define i16 @test_kxnor(i16 %a0, i16 %a1) {
declare i16 @llvm.x86.avx512.kxor.w(i16, i16) nounwind readnone
define i16 @test_kxor(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kxor:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: movw $8, %ax
@@ -151,7 +151,7 @@ define i16 @test_kxor(i16 %a0, i16 %a1) {
define <16 x float> @test_rcp_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_rcp_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14ps %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
@@ -161,7 +161,7 @@ declare <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float>, <16 x float>, i
define <8 x double> @test_rcp_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_rcp_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14pd %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1) ; <<8 x double>> [#uses=1]
@@ -173,7 +173,7 @@ declare <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double>, <2 x double
define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: test_rndscale_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalesd $11, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 11, i32 4)
@@ -182,7 +182,7 @@ define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: test_rndscale_sd_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrndscalesd $11, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -193,7 +193,7 @@ define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2
define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, <2 x double>* %bptr, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: test_rndscale_sd_mask_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vrndscalesd $11, (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -205,7 +205,7 @@ define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, <2 x double>* %
define <2 x double> @test_rndscale_sd_maskz(<2 x double> %a, <2 x double> %b, i8 %mask) {
; CHECK-LABEL: test_rndscale_sd_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrndscalesd $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -217,7 +217,7 @@ declare <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float>, <4 x float>,
define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test_rndscale_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaless $11, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 11, i32 4)
@@ -226,7 +226,7 @@ define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_rndscale_ss_load(<4 x float> %a, <4 x float>* %bptr) {
; CHECK-LABEL: test_rndscale_ss_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaless $11, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%b = load <4 x float>, <4 x float>* %bptr
@@ -236,7 +236,7 @@ define <4 x float> @test_rndscale_ss_load(<4 x float> %a, <4 x float>* %bptr) {
define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: test_rndscale_ss_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrndscaless $11, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -247,7 +247,7 @@ define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x f
define <4 x float> @test_rndscale_ss_maskz(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_rndscale_ss_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrndscaless $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -259,7 +259,7 @@ declare <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double>, i32, <8
define <8 x double> @test7(<8 x double> %a) {
; CHECK-LABEL: test7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $11, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> %a, i32 11, <8 x double> %a, i8 -1, i32 4)
@@ -270,7 +270,7 @@ declare <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float>, i32, <1
define <16 x float> @test8(<16 x float> %a) {
; CHECK-LABEL: test8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $11, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> %a, i32 11, <16 x float> %a, i16 -1, i32 4)
@@ -279,7 +279,7 @@ define <16 x float> @test8(<16 x float> %a) {
define <16 x float> @test_rsqrt_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_rsqrt_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14ps %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
@@ -289,7 +289,7 @@ declare <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float>, <16 x float>,
define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_sqrt_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsqrtpd %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.sqrt.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 4)
@@ -299,7 +299,7 @@ declare <8 x double> @llvm.x86.avx512.mask.sqrt.pd.512(<8 x double>, <8 x double
define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_sqrt_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsqrtps %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
@@ -307,7 +307,7 @@ define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) {
}
define <16 x float> @test_sqrt_round_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_sqrt_round_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsqrtps {rz-sae}, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 3)
@@ -317,7 +317,7 @@ declare <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float>, <16 x float
define <8 x double> @test_getexp_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_getexp_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetexppd %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 4)
@@ -325,7 +325,7 @@ define <8 x double> @test_getexp_pd_512(<8 x double> %a0) {
}
define <8 x double> @test_getexp_round_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_getexp_round_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetexppd {sae}, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
@@ -335,7 +335,7 @@ declare <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double>, <8 x doub
define <16 x float> @test_getexp_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_getexp_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetexpps %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
@@ -344,7 +344,7 @@ define <16 x float> @test_getexp_ps_512(<16 x float> %a0) {
define <16 x float> @test_getexp_round_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_getexp_round_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetexpps {sae}, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
@@ -356,7 +356,7 @@ declare <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>, <4 x float>, <4 x
define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_sqrt_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
@@ -382,7 +382,7 @@ declare <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>, <2 x double>, <
define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_sqrt_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
@@ -406,7 +406,7 @@ define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_sse2_cvtsd2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsd2si %xmm0, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
@@ -416,7 +416,7 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
; CHECK-LABEL: test_x86_sse2_cvtsi642sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
@@ -426,7 +426,7 @@ declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readn
define i64 @test_x86_avx512_cvttsd2si64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttsd2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttsd2si %xmm0, %rcx
; CHECK-NEXT: vcvttsd2si {sae}, %xmm0, %rax
; CHECK-NEXT: addq %rcx, %rax
@@ -440,7 +440,7 @@ declare i64 @llvm.x86.avx512.cvttsd2si64(<2 x double>, i32) nounwind readnone
define i32 @test_x86_avx512_cvttsd2usi(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttsd2usi:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttsd2usi %xmm0, %ecx
; CHECK-NEXT: vcvttsd2usi {sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
@@ -454,7 +454,7 @@ declare i32 @llvm.x86.avx512.cvttsd2usi(<2 x double>, i32) nounwind readnone
define i32 @test_x86_avx512_cvttsd2si(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttsd2si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttsd2si %xmm0, %ecx
; CHECK-NEXT: vcvttsd2si {sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
@@ -470,7 +470,7 @@ declare i32 @llvm.x86.avx512.cvttsd2si(<2 x double>, i32) nounwind readnone
define i64 @test_x86_avx512_cvttsd2usi64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttsd2usi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttsd2usi %xmm0, %rcx
; CHECK-NEXT: vcvttsd2usi {sae}, %xmm0, %rax
; CHECK-NEXT: addq %rcx, %rax
@@ -484,7 +484,7 @@ declare i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double>, i32) nounwind readnone
define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_cvtss2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtss2si %xmm0, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
@@ -495,7 +495,7 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
; CHECK-LABEL: test_x86_sse_cvtsi642ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
@@ -506,7 +506,7 @@ declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
define i32 @test_x86_avx512_cvttss2si(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttss2si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttss2si {sae}, %xmm0, %ecx
; CHECK-NEXT: vcvttss2si %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
@@ -520,7 +520,7 @@ declare i32 @llvm.x86.avx512.cvttss2si(<4 x float>, i32) nounwind readnone
define i64 @test_x86_avx512_cvttss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttss2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttss2si %xmm0, %rcx
; CHECK-NEXT: vcvttss2si {sae}, %xmm0, %rax
; CHECK-NEXT: addq %rcx, %rax
@@ -534,7 +534,7 @@ declare i64 @llvm.x86.avx512.cvttss2si64(<4 x float>, i32) nounwind readnone
define i32 @test_x86_avx512_cvttss2usi(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttss2usi:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttss2usi {sae}, %xmm0, %ecx
; CHECK-NEXT: vcvttss2usi %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
@@ -548,7 +548,7 @@ declare i32 @llvm.x86.avx512.cvttss2usi(<4 x float>, i32) nounwind readnone
define i64 @test_x86_avx512_cvttss2usi64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttss2usi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttss2usi %xmm0, %rcx
; CHECK-NEXT: vcvttss2usi {sae}, %xmm0, %rax
; CHECK-NEXT: addq %rcx, %rax
@@ -562,7 +562,7 @@ declare i64 @llvm.x86.avx512.cvttss2usi64(<4 x float>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtsd2usi64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2usi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsd2usi %xmm0, %rax
; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %rcx
; CHECK-NEXT: addq %rax, %rcx
@@ -581,7 +581,7 @@ declare i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtsd2si64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsd2si %xmm0, %rax
; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %rcx
; CHECK-NEXT: addq %rax, %rcx
@@ -600,7 +600,7 @@ declare i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtss2usi64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2usi64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtss2usi %xmm0, %rax
; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %rcx
; CHECK-NEXT: addq %rax, %rcx
@@ -619,7 +619,7 @@ declare i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtss2si %xmm0, %rax
; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %rcx
; CHECK-NEXT: addq %rax, %rcx
@@ -638,7 +638,7 @@ declare i64 @llvm.x86.avx512.vcvtss2si64(<4 x float>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtsd2usi32(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2usi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsd2usi %xmm0, %eax
; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %ecx
; CHECK-NEXT: addl %eax, %ecx
@@ -657,7 +657,7 @@ declare i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtsd2si32(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2si32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsd2si %xmm0, %eax
; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %ecx
; CHECK-NEXT: addl %eax, %ecx
@@ -676,7 +676,7 @@ declare i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtss2usi32(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2usi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtss2usi %xmm0, %eax
; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %ecx
; CHECK-NEXT: addl %eax, %ecx
@@ -695,7 +695,7 @@ declare i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtss2si32(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2si32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtss2si %xmm0, %eax
; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %ecx
; CHECK-NEXT: addl %eax, %ecx
@@ -714,7 +714,7 @@ declare i32 @llvm.x86.avx512.vcvtss2si32(<4 x float>, i32) nounwind readnone
define <16 x float> @test_x86_vcvtph2ps_512(<16 x i16> %a0) {
; CHECK-LABEL: test_x86_vcvtph2ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtph2ps %ymm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
@@ -723,7 +723,7 @@ define <16 x float> @test_x86_vcvtph2ps_512(<16 x i16> %a0) {
define <16 x float> @test_x86_vcvtph2ps_512_sae(<16 x i16> %a0) {
; CHECK-LABEL: test_x86_vcvtph2ps_512_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtph2ps {sae}, %ymm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
@@ -732,7 +732,7 @@ define <16 x float> @test_x86_vcvtph2ps_512_sae(<16 x i16> %a0) {
define <16 x float> @test_x86_vcvtph2ps_512_rrk(<16 x i16> %a0,<16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_vcvtph2ps_512_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtph2ps %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
@@ -743,7 +743,7 @@ define <16 x float> @test_x86_vcvtph2ps_512_rrk(<16 x i16> %a0,<16 x float> %a1,
define <16 x float> @test_x86_vcvtph2ps_512_sae_rrkz(<16 x i16> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_vcvtph2ps_512_sae_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtph2ps {sae}, %ymm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -753,7 +753,7 @@ define <16 x float> @test_x86_vcvtph2ps_512_sae_rrkz(<16 x i16> %a0, i16 %mask)
define <16 x float> @test_x86_vcvtph2ps_512_rrkz(<16 x i16> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_vcvtph2ps_512_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtph2ps %ymm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -765,7 +765,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16>, <16 x float
define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16 %mask, <16 x i16> * %dst) {
; CHECK-LABEL: test_x86_vcvtps2ph_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2ph $2, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
@@ -785,7 +785,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float>, i32, <16 x
define <16 x float> @test_x86_vbroadcast_ss_512(i8* %a0) {
; CHECK-LABEL: test_x86_vbroadcast_ss_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.vbroadcast.ss.512(i8* %a0) ; <<16 x float>> [#uses=1]
@@ -795,7 +795,7 @@ declare <16 x float> @llvm.x86.avx512.vbroadcast.ss.512(i8*) nounwind readonly
define <8 x double> @test_x86_vbroadcast_sd_512(i8* %a0) {
; CHECK-LABEL: test_x86_vbroadcast_sd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8* %a0) ; <<8 x double>> [#uses=1]
@@ -805,7 +805,7 @@ declare <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8*) nounwind readonly
define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: test_cmpps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -817,7 +817,7 @@ declare <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8*) nounwind readonly
define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: test_cmppd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpneqpd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -830,7 +830,7 @@ declare <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8*) nounwind readonly
; fp min - max
define <8 x double> @test_vmaxpd(<8 x double> %a0, <8 x double> %a1) {
; CHECK-LABEL: test_vmaxpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %a0, <8 x double> %a1,
@@ -842,7 +842,7 @@ declare <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double>, <8 x double>
define <8 x double> @test_vminpd(<8 x double> %a0, <8 x double> %a1) {
; CHECK-LABEL: test_vminpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vminpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %a0, <8 x double> %a1,
@@ -854,7 +854,7 @@ declare <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double>, <8 x double>
define void @test_mask_store_ss(i8* %ptr, <4 x float> %data, i8 %mask) {
; CHECK-LABEL: test_mask_store_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovss %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -870,7 +870,7 @@ declare <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double>, <8 x double>
define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test_vsubps_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -880,7 +880,7 @@ define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) {
define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test_vsubps_rd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -890,7 +890,7 @@ define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) {
define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test_vsubps_ru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -900,7 +900,7 @@ define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) {
define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test_vsubps_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -910,7 +910,7 @@ define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) {
define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test_vmulps_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmulps {rn-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -920,7 +920,7 @@ define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) {
define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test_vmulps_rd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmulps {rd-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -930,7 +930,7 @@ define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) {
define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test_vmulps_ru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmulps {ru-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -940,7 +940,7 @@ define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) {
define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test_vmulps_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmulps {rz-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -951,7 +951,7 @@ define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) {
;; mask float
define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_vmulps_mask_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -962,7 +962,7 @@ define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16
define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_vmulps_mask_rd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -973,7 +973,7 @@ define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16
define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_vmulps_mask_ru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -984,7 +984,7 @@ define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16
define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_vmulps_mask_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -996,7 +996,7 @@ define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16
;; With Passthru value
define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: test_vmulps_mask_passthru_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1008,7 +1008,7 @@ define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float>
define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: test_vmulps_mask_passthru_rd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1020,7 +1020,7 @@ define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float>
define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: test_vmulps_mask_passthru_ru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1032,7 +1032,7 @@ define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float>
define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: test_vmulps_mask_passthru_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1045,7 +1045,7 @@ define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float>
;; mask double
define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_vmulpd_mask_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulpd {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1056,7 +1056,7 @@ define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8
define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_vmulpd_mask_rd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulpd {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1067,7 +1067,7 @@ define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8
define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_vmulpd_mask_ru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulpd {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1078,7 +1078,7 @@ define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8
define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_vmulpd_mask_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmulpd {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1089,7 +1089,7 @@ define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8
define <8 x i64> @test_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mul_epi32_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32> %a, <16 x i32> %b)
@@ -1098,7 +1098,7 @@ define <8 x i64> @test_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
define <8 x i64> @test_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mul_epi32_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1111,7 +1111,7 @@ define <8 x i64> @test_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %pa
define <8 x i64> @test_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mul_epi32_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1123,7 +1123,7 @@ define <8 x i64> @test_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
define <8 x i64> @test_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mul_epi32_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1133,7 +1133,7 @@ define <8 x i64> @test_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
define <8 x i64> @test_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mul_epi32_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1147,7 +1147,7 @@ define <8 x i64> @test_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64
define <8 x i64> @test_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mul_epi32_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1160,7 +1160,7 @@ define <8 x i64> @test_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mas
define <8 x i64> @test_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mul_epi32_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
@@ -1173,7 +1173,7 @@ define <8 x i64> @test_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
define <8 x i64> @test_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mul_epi32_rmbk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1190,7 +1190,7 @@ define <8 x i64> @test_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %pas
define <8 x i64> @test_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mul_epi32_rmbkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1208,7 +1208,7 @@ declare <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32>, <16 x i32>)
define <8 x i64> @test_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mul_epu32_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b)
@@ -1217,7 +1217,7 @@ define <8 x i64> @test_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
define <8 x i64> @test_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mul_epu32_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1230,7 +1230,7 @@ define <8 x i64> @test_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %pa
define <8 x i64> @test_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mul_epu32_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1242,7 +1242,7 @@ define <8 x i64> @test_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
define <8 x i64> @test_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mul_epu32_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1252,7 +1252,7 @@ define <8 x i64> @test_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
define <8 x i64> @test_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mul_epu32_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1266,7 +1266,7 @@ define <8 x i64> @test_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64
define <8 x i64> @test_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mul_epu32_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1279,7 +1279,7 @@ define <8 x i64> @test_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mas
define <8 x i64> @test_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mul_epu32_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
@@ -1292,7 +1292,7 @@ define <8 x i64> @test_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
define <8 x i64> @test_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mul_epu32_rmbk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1309,7 +1309,7 @@ define <8 x i64> @test_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %pas
define <8 x i64> @test_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mul_epu32_rmbkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1327,7 +1327,7 @@ declare <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32>, <16 x i32>)
define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_add_round_ps_rn_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1336,7 +1336,7 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16
}
define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_add_round_ps_rd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1345,7 +1345,7 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16
}
define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_add_round_ps_ru_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1355,7 +1355,7 @@ define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_add_round_ps_rz_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1366,7 +1366,7 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_add_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1376,7 +1376,7 @@ define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_add_round_ps_rn_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1386,7 +1386,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x
}
define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_add_round_ps_rd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1396,7 +1396,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x
}
define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_add_round_ps_ru_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1407,7 +1407,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_add_round_ps_rz_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1419,7 +1419,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_add_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1431,7 +1431,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_rn_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps {rn-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
@@ -1439,7 +1439,7 @@ define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x floa
}
define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_rd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps {rd-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
@@ -1447,7 +1447,7 @@ define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x floa
}
define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_ru_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps {ru-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
@@ -1456,7 +1456,7 @@ define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_rz_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps {rz-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
@@ -1465,7 +1465,7 @@ define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1475,7 +1475,7 @@ declare <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float>, <16 x float>
define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_sub_round_ps_rn_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vsubps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1485,7 +1485,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x
}
define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_sub_round_ps_rd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vsubps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1495,7 +1495,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x
}
define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_sub_round_ps_ru_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vsubps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1506,7 +1506,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_sub_round_ps_rz_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vsubps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1518,7 +1518,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_sub_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vsubps %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1529,7 +1529,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_rn_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
@@ -1537,7 +1537,7 @@ define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x floa
}
define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_rd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
@@ -1545,7 +1545,7 @@ define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x floa
}
define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_ru_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
@@ -1554,7 +1554,7 @@ define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_rz_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
@@ -1563,7 +1563,7 @@ define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1572,7 +1572,7 @@ define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x flo
define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_div_round_ps_rn_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1581,7 +1581,7 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16
}
define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_div_round_ps_rd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1590,7 +1590,7 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16
}
define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_div_round_ps_ru_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1600,7 +1600,7 @@ define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_div_round_ps_rz_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1611,7 +1611,7 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_div_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1621,7 +1621,7 @@ define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_div_round_ps_rn_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1631,7 +1631,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x
}
define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_div_round_ps_rd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1641,7 +1641,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x
}
define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_div_round_ps_ru_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1652,7 +1652,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_div_round_ps_rz_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1664,7 +1664,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_div_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vdivps %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1676,7 +1676,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_rn_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vdivps {rn-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
@@ -1684,7 +1684,7 @@ define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x floa
}
define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_rd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vdivps {rd-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
@@ -1692,7 +1692,7 @@ define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x floa
}
define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_ru_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vdivps {ru-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
@@ -1701,7 +1701,7 @@ define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_rz_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vdivps {rz-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
@@ -1710,7 +1710,7 @@ define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vdivps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1720,7 +1720,7 @@ declare <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float>, <16 x float>
define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_min_round_ps_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vminps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1730,7 +1730,7 @@ define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x f
define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_min_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1740,7 +1740,7 @@ define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_min_round_ps_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vminps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1751,7 +1751,7 @@ define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x fl
define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_min_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vminps %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1762,7 +1762,7 @@ define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_min_round_ps_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vminps {sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 8)
@@ -1771,7 +1771,7 @@ define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float>
define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_min_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vminps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1781,7 +1781,7 @@ declare <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float>, <16 x float>
define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_max_round_ps_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1791,7 +1791,7 @@ define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x f
define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_maskz_max_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1801,7 +1801,7 @@ define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_max_round_ps_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1812,7 +1812,7 @@ define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x fl
define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; CHECK-LABEL: test_mm512_mask_max_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxps %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -1823,7 +1823,7 @@ define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_max_round_ps_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxps {sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 8)
@@ -1832,7 +1832,7 @@ define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float>
define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_max_round_ps_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1844,7 +1844,7 @@ declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -1855,7 +1855,7 @@ define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -1866,7 +1866,7 @@ define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_ru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -1877,7 +1877,7 @@ define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -1888,7 +1888,7 @@ define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -1899,7 +1899,7 @@ define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <
define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1909,7 +1909,7 @@ define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %m
define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_add_ss_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 0)
@@ -1918,7 +1918,7 @@ define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_current_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -1934,7 +1934,7 @@ define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1
define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss_current_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1951,7 +1951,7 @@ declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -1962,7 +1962,7 @@ define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -1973,7 +1973,7 @@ define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_ru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -1984,7 +1984,7 @@ define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -1995,7 +1995,7 @@ define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2006,7 +2006,7 @@ define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1
define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_sd_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2016,7 +2016,7 @@ define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8
define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_add_sd_rn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 0)
@@ -2025,7 +2025,7 @@ define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_current_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -2039,7 +2039,7 @@ define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double*
define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_sd_current_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2054,7 +2054,7 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2065,7 +2065,7 @@ define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x
define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2075,7 +2075,7 @@ define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %
define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_max_ss_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 8)
@@ -2084,7 +2084,7 @@ define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2095,7 +2095,7 @@ define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2105,7 +2105,7 @@ define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask
define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_max_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 4)
@@ -2114,7 +2114,7 @@ define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -2130,7 +2130,7 @@ define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x f
define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2146,7 +2146,7 @@ declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2157,7 +2157,7 @@ define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2167,7 +2167,7 @@ define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i
define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_max_sd_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 8)
@@ -2176,7 +2176,7 @@ define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2187,7 +2187,7 @@ define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2197,7 +2197,7 @@ define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %m
define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_max_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 4)
@@ -2206,7 +2206,7 @@ define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -2220,7 +2220,7 @@ define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2
define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2233,7 +2233,7 @@ define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8
define <2 x double> @test_x86_avx512_cvtsi2sd64(<2 x double> %a, i64 %b) {
; CHECK-LABEL: test_x86_avx512_cvtsi2sd64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsi2sdq %rdi, {rz-sae}, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double> %a, i64 %b, i32 3) ; <<<2 x double>> [#uses=1]
@@ -2243,7 +2243,7 @@ declare <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double>, i64, i32) nounwin
define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) {
; CHECK-LABEL: test_x86_avx512_cvtsi2ss32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsi2ssl %edi, {rz-sae}, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float> %a, i32 %b, i32 3) ; <<<4 x float>> [#uses=1]
@@ -2253,7 +2253,7 @@ declare <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float>, i32, i32) nounwind
define <4 x float> @test_x86_avx512_cvtsi2ss64(<4 x float> %a, i64 %b) {
; CHECK-LABEL: test_x86_avx512_cvtsi2ss64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsi2ssq %rdi, {rz-sae}, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float> %a, i64 %b, i32 3) ; <<<4 x float>> [#uses=1]
@@ -2263,7 +2263,7 @@ declare <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float>, i64, i32) nounwind
define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b)
; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtusi2ssl %edi, {rd-sae}, %xmm0, %xmm0
; CHECK-NEXT: retq
{
@@ -2273,7 +2273,7 @@ define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b)
define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, i32* %ptr)
; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl (%rdi), %eax
; CHECK-NEXT: vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -2285,7 +2285,7 @@ define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, i32*
define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b)
; CHECK-LABEL: test_x86_avx512__mm_cvtu32_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtusi2ssl %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
{
@@ -2295,7 +2295,7 @@ define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b)
define <4 x float> @test_x86_avx512__mm_cvtu32_ss_mem(<4 x float> %a, i32* %ptr)
; CHECK-LABEL: test_x86_avx512__mm_cvtu32_ss_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtusi2ssl (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
{
@@ -2307,7 +2307,7 @@ declare <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float>, i32, i32) nounwind r
define <4 x float> @_mm_cvt_roundu64_ss (<4 x float> %a, i64 %b)
; CHECK-LABEL: _mm_cvt_roundu64_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtusi2ssq %rdi, {rd-sae}, %xmm0, %xmm0
; CHECK-NEXT: retq
{
@@ -2317,7 +2317,7 @@ define <4 x float> @_mm_cvt_roundu64_ss (<4 x float> %a, i64 %b)
define <4 x float> @_mm_cvtu64_ss(<4 x float> %a, i64 %b)
; CHECK-LABEL: _mm_cvtu64_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtusi2ssq %rdi, %xmm0, %xmm0
; CHECK-NEXT: retq
{
@@ -2328,7 +2328,7 @@ declare <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float>, i64, i32) nounwind
define <2 x double> @test_x86_avx512_mm_cvtu32_sd(<2 x double> %a, i32 %b)
; CHECK-LABEL: test_x86_avx512_mm_cvtu32_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtusi2sdl %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
{
@@ -2339,7 +2339,7 @@ declare <2 x double> @llvm.x86.avx512.cvtusi2sd(<2 x double>, i32) nounwind read
define <2 x double> @test_x86_avx512_mm_cvtu64_sd(<2 x double> %a, i64 %b)
; CHECK-LABEL: test_x86_avx512_mm_cvtu64_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtusi2sdq %rdi, {rd-sae}, %xmm0, %xmm0
; CHECK-NEXT: retq
{
@@ -2349,7 +2349,7 @@ define <2 x double> @test_x86_avx512_mm_cvtu64_sd(<2 x double> %a, i64 %b)
define <2 x double> @test_x86_avx512__mm_cvt_roundu64_sd(<2 x double> %a, i64 %b)
; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu64_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtusi2sdq %rdi, %xmm0, %xmm0
; CHECK-NEXT: retq
{
@@ -2362,7 +2362,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32>, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
; CHECK-NEXT: vpermi2d (%rdi), %zmm0, %zmm3 {%k1}
@@ -2380,7 +2380,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm1, %zmm3
; CHECK-NEXT: vpermi2pd %zmm2, %zmm0, %zmm3
@@ -2397,7 +2397,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vpermi2ps %zmm2, %zmm0, %zmm3
@@ -2414,7 +2414,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64>, <8 x i64>, <
define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
; CHECK-NEXT: vpermi2q %zmm2, %zmm0, %zmm3
@@ -2431,7 +2431,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm2
; CHECK-NEXT: vpermt2d (%rdi), %zmm0, %zmm2 {%k1} {z}
@@ -2449,7 +2449,7 @@ declare <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64>, <8 x do
define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, double* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovapd %zmm1, %zmm2
; CHECK-NEXT: vpermt2pd (%rdi){1to8}, %zmm0, %zmm2 {%k1} {z}
@@ -2469,7 +2469,7 @@ declare <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32>, <16 x
define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vpermt2ps %zmm2, %zmm0, %zmm3
@@ -2487,7 +2487,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
; CHECK-NEXT: vpermt2q %zmm2, %zmm0, %zmm3
@@ -2504,7 +2504,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32>, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
; CHECK-NEXT: vpermt2d %zmm2, %zmm0, %zmm3
@@ -2520,7 +2520,7 @@ define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16
declare <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
define <8 x double>@test_int_x86_avx512_mask_scalef_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_scalef_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vscalefpd {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vscalefpd {rn-sae}, %zmm1, %zmm0, %zmm0
@@ -2535,7 +2535,7 @@ define <8 x double>@test_int_x86_avx512_mask_scalef_pd_512(<8 x double> %x0, <8
declare <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
define <16 x float>@test_int_x86_avx512_mask_scalef_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_scalef_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vscalefps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vscalefps {rn-sae}, %zmm1, %zmm0, %zmm0
@@ -2551,7 +2551,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovqb %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovqb %zmm0, %xmm1 {%k1}
@@ -2571,7 +2571,7 @@ declare void @llvm.x86.avx512.mask.pmov.qb.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovqb %zmm0, (%rdi)
; CHECK-NEXT: vpmovqb %zmm0, (%rdi) {%k1}
@@ -2585,7 +2585,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsqb %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovsqb %zmm0, %xmm1 {%k1}
@@ -2605,7 +2605,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qb.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovsqb %zmm0, (%rdi)
; CHECK-NEXT: vpmovsqb %zmm0, (%rdi) {%k1}
@@ -2619,7 +2619,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovusqb %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovusqb %zmm0, %xmm1 {%k1}
@@ -2639,7 +2639,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qb.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovusqb %zmm0, (%rdi)
; CHECK-NEXT: vpmovusqb %zmm0, (%rdi) {%k1}
@@ -2653,7 +2653,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovqw %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovqw %zmm0, %xmm1 {%k1}
@@ -2673,7 +2673,7 @@ declare void @llvm.x86.avx512.mask.pmov.qw.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovqw %zmm0, (%rdi)
; CHECK-NEXT: vpmovqw %zmm0, (%rdi) {%k1}
@@ -2687,7 +2687,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsqw %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovsqw %zmm0, %xmm1 {%k1}
@@ -2707,7 +2707,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qw.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovsqw %zmm0, (%rdi)
; CHECK-NEXT: vpmovsqw %zmm0, (%rdi) {%k1}
@@ -2721,7 +2721,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovusqw %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovusqw %zmm0, %xmm1 {%k1}
@@ -2741,7 +2741,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qw.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovusqw %zmm0, (%rdi)
; CHECK-NEXT: vpmovusqw %zmm0, (%rdi) {%k1}
@@ -2755,7 +2755,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmov.qd.512(<8 x i64>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovqd %zmm0, %ymm2 {%k1} {z}
; CHECK-NEXT: vpmovqd %zmm0, %ymm1 {%k1}
@@ -2775,7 +2775,7 @@ declare void @llvm.x86.avx512.mask.pmov.qd.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovqd %zmm0, (%rdi)
; CHECK-NEXT: vpmovqd %zmm0, (%rdi) {%k1}
@@ -2789,7 +2789,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsqd %zmm0, %ymm2 {%k1} {z}
; CHECK-NEXT: vpmovsqd %zmm0, %ymm1 {%k1}
@@ -2809,7 +2809,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qd.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovsqd %zmm0, (%rdi)
; CHECK-NEXT: vpmovsqd %zmm0, (%rdi) {%k1}
@@ -2823,7 +2823,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovusqd %zmm0, %ymm2 {%k1} {z}
; CHECK-NEXT: vpmovusqd %zmm0, %ymm1 {%k1}
@@ -2843,7 +2843,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qd.mem.512(i8* %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovusqd %zmm0, (%rdi)
; CHECK-NEXT: vpmovusqd %zmm0, (%rdi) {%k1}
@@ -2857,7 +2857,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pmov_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovdb %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovdb %zmm0, %xmm1 {%k1}
@@ -2877,7 +2877,7 @@ declare void @llvm.x86.avx512.mask.pmov.db.mem.512(i8* %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmov_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovdb %zmm0, (%rdi)
; CHECK-NEXT: vpmovdb %zmm0, (%rdi) {%k1}
@@ -2891,7 +2891,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsdb %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovsdb %zmm0, %xmm1 {%k1}
@@ -2911,7 +2911,7 @@ declare void @llvm.x86.avx512.mask.pmovs.db.mem.512(i8* %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmovs_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovsdb %zmm0, (%rdi)
; CHECK-NEXT: vpmovsdb %zmm0, (%rdi) {%k1}
@@ -2925,7 +2925,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32>, <16 x i8>, i16
define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovusdb %zmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpmovusdb %zmm0, %xmm1 {%k1}
@@ -2945,7 +2945,7 @@ declare void @llvm.x86.avx512.mask.pmovus.db.mem.512(i8* %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmovus_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovusdb %zmm0, (%rdi)
; CHECK-NEXT: vpmovusdb %zmm0, (%rdi) {%k1}
@@ -2959,7 +2959,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32>, <16 x i16>, i16
define <16 x i16>@test_int_x86_avx512_mask_pmov_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovdw %zmm0, %ymm2 {%k1} {z}
; CHECK-NEXT: vpmovdw %zmm0, %ymm1 {%k1}
@@ -2979,7 +2979,7 @@ declare void @llvm.x86.avx512.mask.pmov.dw.mem.512(i8* %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmov_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovdw %zmm0, (%rdi)
; CHECK-NEXT: vpmovdw %zmm0, (%rdi) {%k1}
@@ -2993,7 +2993,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32>, <16 x i16>, i1
define <16 x i16>@test_int_x86_avx512_mask_pmovs_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsdw %zmm0, %ymm2 {%k1} {z}
; CHECK-NEXT: vpmovsdw %zmm0, %ymm1 {%k1}
@@ -3013,7 +3013,7 @@ declare void @llvm.x86.avx512.mask.pmovs.dw.mem.512(i8* %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmovs_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovsdw %zmm0, (%rdi)
; CHECK-NEXT: vpmovsdw %zmm0, (%rdi) {%k1}
@@ -3027,7 +3027,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32>, <16 x i16>, i
define <16 x i16>@test_int_x86_avx512_mask_pmovus_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovusdw %zmm0, %ymm2 {%k1} {z}
; CHECK-NEXT: vpmovusdw %zmm0, %ymm1 {%k1}
@@ -3047,7 +3047,7 @@ declare void @llvm.x86.avx512.mask.pmovus.dw.mem.512(i8* %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmovus_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmovusdw %zmm0, (%rdi)
; CHECK-NEXT: vpmovusdw %zmm0, (%rdi) {%k1}
@@ -3061,7 +3061,7 @@ declare <16 x float> @llvm.x86.avx512.mask.cvtdq2ps.512(<16 x i32>, <16 x float>
define <16 x float>@test_int_x86_avx512_mask_cvt_dq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtdq2ps %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtdq2ps {rn-sae}, %zmm0, %zmm0
@@ -3077,7 +3077,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double>, <8 x i32>, i8
define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtpd2dq %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtpd2dq {rn-sae}, %zmm0, %ymm0
@@ -3093,7 +3093,7 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double>, <8 x float>
define <8 x float>@test_int_x86_avx512_mask_cvt_pd2ps_512(<8 x double> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtpd2ps %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtpd2ps {ru-sae}, %zmm0, %ymm0
@@ -3109,7 +3109,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double>, <8 x i32>, i
define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtpd2udq {ru-sae}, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtpd2udq {rn-sae}, %zmm0, %ymm0
@@ -3125,7 +3125,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2dq {ru-sae}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtps2dq {rn-sae}, %zmm0, %zmm0
@@ -3141,7 +3141,7 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float>, <8 x double
define <8 x double>@test_int_x86_avx512_mask_cvt_ps2pd_512(<8 x float> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2pd %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtps2pd {sae}, %ymm0, %zmm0
@@ -3157,7 +3157,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2udq {ru-sae}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtps2udq {rn-sae}, %zmm0, %zmm0
@@ -3173,7 +3173,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double>, <8 x i32>, i
define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttpd2dq %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvttpd2dq {sae}, %zmm0, %ymm0
@@ -3189,7 +3189,7 @@ declare <16 x float> @llvm.x86.avx512.mask.cvtudq2ps.512(<16 x i32>, <16 x float
define <16 x float>@test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtudq2ps %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtudq2ps {rn-sae}, %zmm0, %zmm0
@@ -3205,7 +3205,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttpd2udq %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvttpd2udq {sae}, %zmm0, %ymm0
@@ -3221,7 +3221,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttps2dq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttps2dq {sae}, %zmm0, %zmm0
@@ -3237,7 +3237,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float>, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttps2udq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttps2udq {sae}, %zmm0, %zmm0
@@ -3253,7 +3253,7 @@ declare <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>, <4 x float>, <4
define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_getexp_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
@@ -3279,7 +3279,7 @@ declare <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>, <2 x double>,
define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_getexp_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
@@ -3305,7 +3305,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32
define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -3318,7 +3318,7 @@ define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8
define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmplesd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vcmpunordsd {sae}, %xmm1, %xmm0, %k0
@@ -3349,7 +3349,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -3363,7 +3363,7 @@ define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %
define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpless %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0
@@ -3393,7 +3393,7 @@ declare <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double>, i32, <8
define <8 x double>@test_int_x86_avx512_mask_getmant_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vgetmantpd $11, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vgetmantpd $11, {sae}, %zmm0, %zmm0
@@ -3409,7 +3409,7 @@ declare <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float>, i32, <16
define <16 x float>@test_int_x86_avx512_mask_getmant_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vgetmantps $11, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vgetmantps $11, {sae}, %zmm0, %zmm0
@@ -3425,7 +3425,7 @@ declare <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
@@ -3450,7 +3450,7 @@ declare <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
@@ -3474,7 +3474,7 @@ declare <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double>, <8 x i64>)
define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1)
@@ -3483,7 +3483,7 @@ define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x
define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovapd %zmm2, %zmm0
@@ -3496,7 +3496,7 @@ define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0,
define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_maskz(<8 x double> %x0, <8 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -3510,7 +3510,7 @@ declare <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float>, <16 x i32>
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1)
@@ -3519,7 +3519,7 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -3532,7 +3532,7 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0,
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -3545,7 +3545,7 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_maskz(<16 x float> %x0
; Test case to make sure we can print shuffle decode comments for constant pool loads.
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool(<16 x float> %x0, <16 x i32> %x1) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
@@ -3554,7 +3554,7 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool(<16 x fl
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm2 {%k1} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; CHECK-NEXT: vmovaps %zmm2, %zmm0
@@ -3567,7 +3567,7 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; CHECK-NEXT: retq
@@ -3581,7 +3581,7 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double>, <4 x flo
define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
@@ -3597,7 +3597,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float>, <2 x doubl
define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -3613,7 +3613,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3
@@ -3630,7 +3630,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3
@@ -3647,7 +3647,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3
@@ -3664,7 +3664,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz.pternlog.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3
@@ -3679,7 +3679,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i6
define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_comi_sd_eq_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpeqsd {sae}, %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3689,7 +3689,7 @@ define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_sd_eq_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpeq_uqsd {sae}, %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3699,7 +3699,7 @@ define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1)
define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_comi_sd_eq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpeqsd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3709,7 +3709,7 @@ define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_sd_eq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpeq_uqsd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3719,7 +3719,7 @@ define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_comi_sd_lt_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpltsd {sae}, %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3729,7 +3729,7 @@ define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_sd_lt_sae:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpngesd {sae}, %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3739,7 +3739,7 @@ define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1)
define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_comi_sd_lt:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpltsd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3749,7 +3749,7 @@ define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
define i32 @test_x86_avx512_ucomi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_sd_lt:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpngesd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3761,7 +3761,7 @@ declare i32 @llvm.x86.avx512.vcomi.sd(<2 x double>, <2 x double>, i32, i32)
define i32 @test_x86_avx512_ucomi_ss_lt(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_ss_lt:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpngess %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: retq
@@ -3775,7 +3775,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm3
; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm2 {%k1}
@@ -3795,7 +3795,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm2 {%k1}
@@ -3815,7 +3815,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32>, i32, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_prol_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prol_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vprold $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vprold $3, %zmm0, %zmm2 {%k1} {z}
@@ -3835,7 +3835,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64>, i32, <8 x i64>, i8
define <8 x i64>@test_int_x86_avx512_mask_prol_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prol_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vprolq $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vprolq $3, %zmm0, %zmm2 {%k1} {z}
@@ -3855,7 +3855,7 @@ declare <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double>, <8 x i64
define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_df_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm3
; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm2 {%k1}
@@ -3875,7 +3875,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_di_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm3
; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm2 {%k1}
@@ -3895,7 +3895,7 @@ declare <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float>, <16 x i3
define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_sf_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm3
; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm2 {%k1}
@@ -3915,7 +3915,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_si_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm3
; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm2 {%k1}
@@ -3935,7 +3935,7 @@ declare <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double>, <8 x do
define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
@@ -3957,7 +3957,7 @@ declare <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double>, <8 x d
define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfixupimmpd $3, %zmm2, %zmm1, %zmm3 {%k1} {z}
@@ -3980,7 +3980,7 @@ declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
@@ -4003,7 +4003,7 @@ declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4026,7 +4026,7 @@ declare <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float>, <16 x f
define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1}
@@ -4049,7 +4049,7 @@ declare <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_maskz_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm3
@@ -4072,7 +4072,7 @@ declare <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4095,7 +4095,7 @@ declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x doubl
define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
@@ -4118,7 +4118,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4145,7 +4145,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4172,7 +4172,7 @@ declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1} {z}
@@ -4189,7 +4189,7 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4202,7 +4202,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4229,7 +4229,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4254,7 +4254,7 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x flo
define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
; CHECK-LABEL: fmadd_ss_mask_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1}
@@ -4281,7 +4281,7 @@ define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
; CHECK-LABEL: fmadd_ss_maskz_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1} {z}
@@ -4308,7 +4308,7 @@ define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
; CHECK-LABEL: fmadd_sd_mask_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1}
@@ -4331,7 +4331,7 @@ define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
define void @fmadd_sd_maskz_memfold(double* %a, double* %b, i8 %c) {
; CHECK-LABEL: fmadd_sd_maskz_memfold:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1} {z}
@@ -4356,7 +4356,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4383,7 +4383,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4410,7 +4410,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4437,7 +4437,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
@@ -4462,7 +4462,7 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd231ss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -4475,7 +4475,7 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd132ss (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -4488,7 +4488,7 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss_rm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 {%k1} {z}
@@ -4501,7 +4501,7 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x
define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
@@ -4509,7 +4509,7 @@ define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) {
}
define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psll_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -4521,7 +4521,7 @@ define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1
}
define <16 x i32> @test_x86_avx512_maskz_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psll_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4535,7 +4535,7 @@ declare <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32>, <4 x i32>) nounwind r
define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
@@ -4543,7 +4543,7 @@ define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) {
}
define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psll_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -4555,7 +4555,7 @@ define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1,
}
define <8 x i64> @test_x86_avx512_maskz_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psll_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4569,7 +4569,7 @@ declare <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64>, <2 x i64>) nounwind rea
define <16 x i32> @test_x86_avx512_pslli_d_512(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
@@ -4577,7 +4577,7 @@ define <16 x i32> @test_x86_avx512_pslli_d_512(<16 x i32> %a0) {
}
define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -4589,7 +4589,7 @@ define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> %
}
define <16 x i32> @test_x86_avx512_maskz_pslli_d_512(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4603,7 +4603,7 @@ declare <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32>, i32) nounwind readno
define <8 x i64> @test_x86_avx512_pslli_q_512(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllq $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
@@ -4611,7 +4611,7 @@ define <8 x i64> @test_x86_avx512_pslli_q_512(<8 x i64> %a0) {
}
define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -4623,7 +4623,7 @@ define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %pas
}
define <8 x i64> @test_x86_avx512_maskz_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4637,7 +4637,7 @@ declare <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64>, i32) nounwind readnone
define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
@@ -4645,7 +4645,7 @@ define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) {
}
define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -4657,7 +4657,7 @@ define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1,
}
define <8 x i64> @test_x86_avx512_maskz_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4671,7 +4671,7 @@ declare <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64>, <2 x i64>) nounwind rea
define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
@@ -4679,7 +4679,7 @@ define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) {
}
define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -4691,7 +4691,7 @@ define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1
}
define <16 x i32> @test_x86_avx512_maskz_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4706,7 +4706,7 @@ declare <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32>, <4 x i32>) nounwind r
define <8 x i64> @test_x86_avx512_psrai_q_512(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
@@ -4714,7 +4714,7 @@ define <8 x i64> @test_x86_avx512_psrai_q_512(<8 x i64> %a0) {
}
define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -4726,7 +4726,7 @@ define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %pas
}
define <8 x i64> @test_x86_avx512_maskz_psrai_q_512(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4740,7 +4740,7 @@ declare <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64>, i32) nounwind readnone
define <16 x i32> @test_x86_avx512_psrai_d_512(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
@@ -4748,7 +4748,7 @@ define <16 x i32> @test_x86_avx512_psrai_d_512(<16 x i32> %a0) {
}
define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -4760,7 +4760,7 @@ define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> %
}
define <16 x i32> @test_x86_avx512_maskz_psrai_d_512(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4775,7 +4775,7 @@ declare <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32>, i32) nounwind readno
define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
@@ -4783,7 +4783,7 @@ define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) {
}
define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrl_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -4795,7 +4795,7 @@ define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1
}
define <16 x i32> @test_x86_avx512_maskz_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrl_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4809,7 +4809,7 @@ declare <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32>, <4 x i32>) nounwind r
define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
@@ -4817,7 +4817,7 @@ define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) {
}
define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrl_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -4829,7 +4829,7 @@ define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1,
}
define <8 x i64> @test_x86_avx512_maskz_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrl_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4843,7 +4843,7 @@ declare <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64>, <2 x i64>) nounwind rea
define <16 x i32> @test_x86_avx512_psrli_d_512(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
@@ -4851,7 +4851,7 @@ define <16 x i32> @test_x86_avx512_psrli_d_512(<16 x i32> %a0) {
}
define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -4863,7 +4863,7 @@ define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> %
}
define <16 x i32> @test_x86_avx512_maskz_psrli_d_512(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrli_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4877,7 +4877,7 @@ declare <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32>, i32) nounwind readno
define <8 x i64> @test_x86_avx512_psrli_q_512(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq $7, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
@@ -4885,7 +4885,7 @@ define <8 x i64> @test_x86_avx512_psrli_q_512(<8 x i64> %a0) {
}
define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -4897,7 +4897,7 @@ define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %pas
}
define <8 x i64> @test_x86_avx512_maskz_psrli_q_512(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrli_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4910,7 +4910,7 @@ declare <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64>, i32) nounwind readnone
define <16 x i32> @test_x86_avx512_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
@@ -4919,7 +4919,7 @@ define <16 x i32> @test_x86_avx512_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psllv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -4932,7 +4932,7 @@ define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psllv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4946,7 +4946,7 @@ declare <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32>, <16 x i32>) nounwind
define <8 x i64> @test_x86_avx512_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -4955,7 +4955,7 @@ define <8 x i64> @test_x86_avx512_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psllv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -4968,7 +4968,7 @@ define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psllv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4982,7 +4982,7 @@ declare <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64>, <8 x i64>) nounwind re
define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
@@ -4991,7 +4991,7 @@ define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -5004,7 +5004,7 @@ define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -5018,7 +5018,7 @@ declare <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32>, <16 x i32>) nounwind
define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -5027,7 +5027,7 @@ define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -5040,7 +5040,7 @@ define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -5054,7 +5054,7 @@ declare <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64>, <8 x i64>) nounwind re
define <16 x i32> @test_x86_avx512_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
@@ -5063,7 +5063,7 @@ define <16 x i32> @test_x86_avx512_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrlv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -5076,7 +5076,7 @@ define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrlv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -5090,7 +5090,7 @@ declare <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32>, <16 x i32>) nounwind
define <8 x i64> @test_x86_avx512_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -5099,7 +5099,7 @@ define <8 x i64> @test_x86_avx512_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrlv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -5112,7 +5112,7 @@ define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrlv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-load-store.ll b/test/CodeGen/X86/avx512-load-store.ll
index e755e96792e..8589215f4a1 100644
--- a/test/CodeGen/X86/avx512-load-store.ll
+++ b/test/CodeGen/X86/avx512-load-store.ll
@@ -4,13 +4,13 @@
define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
; CHECK64-LABEL: test_mm_mask_move_ss:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %edi, %k1
; CHECK64-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_mask_move_ss:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
@@ -28,13 +28,13 @@ entry:
define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
; CHECK64-LABEL: test_mm_maskz_move_ss:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %edi, %k1
; CHECK64-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_maskz_move_ss:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vxorps %xmm2, %xmm2, %xmm2
@@ -52,13 +52,13 @@ entry:
define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) local_unnamed_addr #0 {
; CHECK64-LABEL: test_mm_mask_move_sd:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %edi, %k1
; CHECK64-NEXT: vmovsd %xmm2, %xmm1, %xmm0 {%k1}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_mask_move_sd:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
@@ -76,13 +76,13 @@ entry:
define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) local_unnamed_addr #0 {
; CHECK64-LABEL: test_mm_maskz_move_sd:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %edi, %k1
; CHECK64-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_maskz_move_sd:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
@@ -100,13 +100,13 @@ entry:
define void @test_mm_mask_store_ss(float* %__W, i8 zeroext %__U, <4 x float> %__A) local_unnamed_addr #1 {
; CHECK64-LABEL: test_mm_mask_store_ss:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %esi, %k1
; CHECK64-NEXT: vmovss %xmm0, (%rdi) {%k1}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_mask_store_ss:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; CHECK32-NEXT: kmovw %ecx, %k1
@@ -124,13 +124,13 @@ entry:
define void @test_mm_mask_store_sd(double* %__W, i8 zeroext %__U, <2 x double> %__A) local_unnamed_addr #1 {
; CHECK64-LABEL: test_mm_mask_store_sd:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %esi, %k1
; CHECK64-NEXT: vmovsd %xmm0, (%rdi) {%k1}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_mask_store_sd:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %cl
; CHECK32-NEXT: kmovw %ecx, %k1
@@ -147,13 +147,13 @@ entry:
define <4 x float> @test_mm_mask_load_ss(<4 x float> %__A, i8 zeroext %__U, float* %__W) local_unnamed_addr #2 {
; CHECK64-LABEL: test_mm_mask_load_ss:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %edi, %k1
; CHECK64-NEXT: vmovss (%rsi), %xmm0 {%k1}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_mask_load_ss:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; CHECK32-NEXT: kmovw %ecx, %k1
@@ -173,13 +173,13 @@ entry:
define <2 x double> @test_mm_mask_load_sd(<2 x double> %__A, i8 zeroext %__U, double* %__W) local_unnamed_addr #2 {
; CHECK64-LABEL: test_mm_mask_load_sd:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %edi, %k1
; CHECK64-NEXT: vmovsd (%rsi), %xmm0 {%k1}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_mask_load_sd:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %cl
; CHECK32-NEXT: kmovw %ecx, %k1
@@ -198,13 +198,13 @@ entry:
define <4 x float> @test_mm_maskz_load_ss(i8 zeroext %__U, float* %__W) local_unnamed_addr #2 {
; CHECK64-LABEL: test_mm_maskz_load_ss:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %edi, %k1
; CHECK64-NEXT: vmovss (%rsi), %xmm0 {%k1} {z}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_maskz_load_ss:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; CHECK32-NEXT: kmovw %ecx, %k1
@@ -222,13 +222,13 @@ entry:
define <2 x double> @test_mm_maskz_load_sd(i8 zeroext %__U, double* %__W) local_unnamed_addr #2 {
; CHECK64-LABEL: test_mm_maskz_load_sd:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: kmovw %edi, %k1
; CHECK64-NEXT: vmovsd (%rsi), %xmm0 {%k1} {z}
; CHECK64-NEXT: retq
;
; CHECK32-LABEL: test_mm_maskz_load_sd:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %cl
; CHECK32-NEXT: kmovw %ecx, %k1
diff --git a/test/CodeGen/X86/avx512-logic.ll b/test/CodeGen/X86/avx512-logic.ll
index c96c63dd0a4..bb1e8550ba2 100644
--- a/test/CodeGen/X86/avx512-logic.ll
+++ b/test/CodeGen/X86/avx512-logic.ll
@@ -5,7 +5,7 @@
define <16 x i32> @vpandd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vpandd:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; ALL-NEXT: vpandq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -19,7 +19,7 @@ entry:
define <16 x i32> @vpandnd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vpandnd:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; ALL-NEXT: vpandnq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
@@ -35,7 +35,7 @@ entry:
define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vpord:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; ALL-NEXT: vporq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -49,7 +49,7 @@ entry:
define <16 x i32> @vpxord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vpxord:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; ALL-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -63,7 +63,7 @@ entry:
define <8 x i64> @vpandq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vpandq:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; ALL-NEXT: vpandq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -76,7 +76,7 @@ entry:
define <8 x i64> @vpandnq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vpandnq:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; ALL-NEXT: vpandnq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
@@ -90,7 +90,7 @@ entry:
define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vporq:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; ALL-NEXT: vporq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -103,7 +103,7 @@ entry:
define <8 x i64> @vpxorq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vpxorq:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; ALL-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -117,12 +117,12 @@ entry:
define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
; KNL-LABEL: orq_broadcast:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: orq_broadcast:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
%b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -131,12 +131,12 @@ define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
; KNL-LABEL: andd512fold:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: vpandq (%rdi), %zmm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: andd512fold:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vandps (%rdi), %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -147,12 +147,12 @@ entry:
define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
; KNL-LABEL: andqbrst:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: andqbrst:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -165,13 +165,13 @@ entry:
define <64 x i8> @and_v64i8(<64 x i8> %a, <64 x i8> %b) {
; KNL-LABEL: and_v64i8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vandps %ymm2, %ymm0, %ymm0
; KNL-NEXT: vandps %ymm3, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: and_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%res = and <64 x i8> %a, %b
@@ -180,13 +180,13 @@ define <64 x i8> @and_v64i8(<64 x i8> %a, <64 x i8> %b) {
define <64 x i8> @andn_v64i8(<64 x i8> %a, <64 x i8> %b) {
; KNL-LABEL: andn_v64i8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vandnps %ymm0, %ymm2, %ymm0
; KNL-NEXT: vandnps %ymm1, %ymm3, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: andn_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vandnps %zmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%b2 = xor <64 x i8> %b, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -199,13 +199,13 @@ define <64 x i8> @andn_v64i8(<64 x i8> %a, <64 x i8> %b) {
define <64 x i8> @or_v64i8(<64 x i8> %a, <64 x i8> %b) {
; KNL-LABEL: or_v64i8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vorps %ymm2, %ymm0, %ymm0
; KNL-NEXT: vorps %ymm3, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: or_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vorps %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%res = or <64 x i8> %a, %b
@@ -214,13 +214,13 @@ define <64 x i8> @or_v64i8(<64 x i8> %a, <64 x i8> %b) {
define <64 x i8> @xor_v64i8(<64 x i8> %a, <64 x i8> %b) {
; KNL-LABEL: xor_v64i8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vxorps %ymm2, %ymm0, %ymm0
; KNL-NEXT: vxorps %ymm3, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: xor_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vxorps %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%res = xor <64 x i8> %a, %b
@@ -229,13 +229,13 @@ define <64 x i8> @xor_v64i8(<64 x i8> %a, <64 x i8> %b) {
define <32 x i16> @and_v32i16(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: and_v32i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vandps %ymm2, %ymm0, %ymm0
; KNL-NEXT: vandps %ymm3, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: and_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%res = and <32 x i16> %a, %b
@@ -244,13 +244,13 @@ define <32 x i16> @and_v32i16(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @andn_v32i16(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: andn_v32i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vandnps %ymm0, %ymm2, %ymm0
; KNL-NEXT: vandnps %ymm1, %ymm3, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: andn_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vandnps %zmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%b2 = xor <32 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
@@ -261,13 +261,13 @@ define <32 x i16> @andn_v32i16(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @or_v32i16(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: or_v32i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vorps %ymm2, %ymm0, %ymm0
; KNL-NEXT: vorps %ymm3, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: or_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vorps %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%res = or <32 x i16> %a, %b
@@ -276,13 +276,13 @@ define <32 x i16> @or_v32i16(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @xor_v32i16(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: xor_v32i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vxorps %ymm2, %ymm0, %ymm0
; KNL-NEXT: vxorps %ymm3, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: xor_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vxorps %zmm1, %zmm0, %zmm0
; SKX-NEXT: retq
%res = xor <32 x i16> %a, %b
@@ -291,14 +291,14 @@ define <32 x i16> @xor_v32i16(<32 x i16> %a, <32 x i16> %b) {
define <16 x float> @masked_and_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
; KNL-LABEL: masked_and_v16f32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %zmm1, %zmm0, %zmm2 {%k1}
; KNL-NEXT: vaddps %zmm2, %zmm3, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: masked_and_v16f32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0
@@ -316,14 +316,14 @@ define <16 x float> @masked_and_v16f32(<16 x float> %a, <16 x float> %b, <16 x f
define <16 x float> @masked_or_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
; KNL-LABEL: masked_or_v16f32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %zmm1, %zmm0, %zmm2 {%k1}
; KNL-NEXT: vaddps %zmm2, %zmm3, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: masked_or_v16f32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0
@@ -341,14 +341,14 @@ define <16 x float> @masked_or_v16f32(<16 x float> %a, <16 x float> %b, <16 x fl
define <16 x float> @masked_xor_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
; KNL-LABEL: masked_xor_v16f32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %zmm1, %zmm0, %zmm2 {%k1}
; KNL-NEXT: vaddps %zmm2, %zmm3, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: masked_xor_v16f32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0
@@ -366,14 +366,14 @@ define <16 x float> @masked_xor_v16f32(<16 x float> %a, <16 x float> %b, <16 x f
define <8 x double> @masked_and_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
; KNL-LABEL: masked_and_v8f64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %zmm1, %zmm0, %zmm2 {%k1}
; KNL-NEXT: vaddpd %zmm2, %zmm3, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: masked_and_v8f64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0
@@ -391,14 +391,14 @@ define <8 x double> @masked_and_v8f64(<8 x double> %a, <8 x double> %b, <8 x dou
define <8 x double> @masked_or_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
; KNL-LABEL: masked_or_v8f64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %zmm1, %zmm0, %zmm2 {%k1}
; KNL-NEXT: vaddpd %zmm2, %zmm3, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: masked_or_v8f64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0
@@ -416,14 +416,14 @@ define <8 x double> @masked_or_v8f64(<8 x double> %a, <8 x double> %b, <8 x doub
define <8 x double> @masked_xor_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
; KNL-LABEL: masked_xor_v8f64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %zmm1, %zmm0, %zmm2 {%k1}
; KNL-NEXT: vaddpd %zmm2, %zmm3, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: masked_xor_v8f64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0
@@ -441,13 +441,13 @@ define <8 x double> @masked_xor_v8f64(<8 x double> %a, <8 x double> %b, <8 x dou
define <8 x i64> @test_mm512_mask_and_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
; KNL-LABEL: test_mm512_mask_and_epi32:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %zmm2, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_and_epi32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -463,13 +463,13 @@ entry:
define <8 x i64> @test_mm512_mask_or_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
; KNL-LABEL: test_mm512_mask_or_epi32:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpord %zmm2, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_or_epi32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -485,13 +485,13 @@ entry:
define <8 x i64> @test_mm512_mask_xor_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
; KNL-LABEL: test_mm512_mask_xor_epi32:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord %zmm2, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_xor_epi32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -507,13 +507,13 @@ entry:
define <8 x double> @test_mm512_mask_xor_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; KNL-LABEL: test_mm512_mask_xor_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxorq %zmm2, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_xor_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -529,13 +529,13 @@ entry:
define <8 x double> @test_mm512_maskz_xor_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; KNL-LABEL: test_mm512_maskz_xor_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxorq %zmm1, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_maskz_xor_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %zmm1, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -551,13 +551,13 @@ entry:
define <16 x float> @test_mm512_mask_xor_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; KNL-LABEL: test_mm512_mask_xor_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord %zmm2, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_xor_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -573,13 +573,13 @@ entry:
define <16 x float> @test_mm512_maskz_xor_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; KNL-LABEL: test_mm512_maskz_xor_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord %zmm1, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_maskz_xor_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %zmm1, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -595,13 +595,13 @@ entry:
define <8 x double> @test_mm512_mask_or_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; KNL-LABEL: test_mm512_mask_or_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vporq %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_or_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -617,13 +617,13 @@ entry:
define <8 x double> @test_mm512_maskz_or_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; KNL-LABEL: test_mm512_maskz_or_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vporq %zmm0, %zmm1, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_maskz_or_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %zmm0, %zmm1, %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -639,13 +639,13 @@ entry:
define <16 x float> @test_mm512_mask_or_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; KNL-LABEL: test_mm512_mask_or_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpord %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_or_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -661,13 +661,13 @@ entry:
define <16 x float> @test_mm512_maskz_or_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; KNL-LABEL: test_mm512_maskz_or_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpord %zmm0, %zmm1, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_maskz_or_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %zmm0, %zmm1, %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -683,13 +683,13 @@ entry:
define <8 x double> @test_mm512_mask_and_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; KNL-LABEL: test_mm512_mask_and_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_and_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -705,13 +705,13 @@ entry:
define <8 x double> @test_mm512_maskz_and_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; KNL-LABEL: test_mm512_maskz_and_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %zmm0, %zmm1, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_maskz_and_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm0, %zmm1, %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -727,13 +727,13 @@ entry:
define <16 x float> @test_mm512_mask_and_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; KNL-LABEL: test_mm512_mask_and_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_and_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -749,13 +749,13 @@ entry:
define <16 x float> @test_mm512_maskz_and_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; KNL-LABEL: test_mm512_maskz_and_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %zmm0, %zmm1, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_maskz_and_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm0, %zmm1, %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -771,13 +771,13 @@ entry:
define <8 x double> @test_mm512_mask_andnot_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; KNL-LABEL: test_mm512_mask_andnot_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnq %zmm2, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_andnot_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -794,13 +794,13 @@ entry:
define <8 x double> @test_mm512_maskz_andnot_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; KNL-LABEL: test_mm512_maskz_andnot_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnq %zmm1, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_maskz_andnot_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %zmm1, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -817,13 +817,13 @@ entry:
define <16 x float> @test_mm512_mask_andnot_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; KNL-LABEL: test_mm512_mask_andnot_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnd %zmm2, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_mask_andnot_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
@@ -840,13 +840,13 @@ entry:
define <16 x float> @test_mm512_maskz_andnot_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; KNL-LABEL: test_mm512_maskz_andnot_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnd %zmm1, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm512_maskz_andnot_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %zmm1, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index fe59d4c35c3..ab634d7d8d5 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -7,7 +7,7 @@
define i16 @mask16(i16 %x) {
; KNL-LABEL: mask16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -15,7 +15,7 @@ define i16 @mask16(i16 %x) {
; KNL-NEXT: retq
;
; SKX-LABEL: mask16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -23,7 +23,7 @@ define i16 @mask16(i16 %x) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask16:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -31,7 +31,7 @@ define i16 @mask16(i16 %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask16:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotw %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
@@ -45,28 +45,28 @@ define i16 @mask16(i16 %x) {
define i32 @mask16_zext(i16 %x) {
; KNL-LABEL: mask16_zext:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask16_zext:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovw %k0, %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask16_zext:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovw %k0, %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask16_zext:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotw %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
@@ -80,7 +80,7 @@ define i32 @mask16_zext(i16 %x) {
define i8 @mask8(i8 %x) {
; KNL-LABEL: mask8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -88,7 +88,7 @@ define i8 @mask8(i8 %x) {
; KNL-NEXT: retq
;
; SKX-LABEL: mask8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -96,7 +96,7 @@ define i8 @mask8(i8 %x) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask8:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -104,7 +104,7 @@ define i8 @mask8(i8 %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask8:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotb %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
@@ -118,7 +118,7 @@ define i8 @mask8(i8 %x) {
define i32 @mask8_zext(i8 %x) {
; KNL-LABEL: mask8_zext:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -126,14 +126,14 @@ define i32 @mask8_zext(i8 %x) {
; KNL-NEXT: retq
;
; SKX-LABEL: mask8_zext:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovb %k0, %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask8_zext:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -141,7 +141,7 @@ define i32 @mask8_zext(i8 %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask8_zext:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotb %k0, %k0
; AVX512DQ-NEXT: kmovb %k0, %eax
@@ -155,7 +155,7 @@ define i32 @mask8_zext(i8 %x) {
define void @mask16_mem(i16* %ptr) {
; CHECK-LABEL: mask16_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw (%rdi), %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: kmovw %k0, (%rdi)
@@ -170,7 +170,7 @@ define void @mask16_mem(i16* %ptr) {
define void @mask8_mem(i8* %ptr) {
; KNL-LABEL: mask8_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k0
; KNL-NEXT: knotw %k0, %k0
@@ -179,14 +179,14 @@ define void @mask8_mem(i8* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: mask8_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask8_mem:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k0
; AVX512BW-NEXT: knotw %k0, %k0
@@ -195,7 +195,7 @@ define void @mask8_mem(i8* %ptr) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask8_mem:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: knotb %k0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rdi)
@@ -210,7 +210,7 @@ define void @mask8_mem(i8* %ptr) {
define i16 @mand16(i16 %x, i16 %y) {
; CHECK-LABEL: mand16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl %esi, %eax
; CHECK-NEXT: andl %esi, %edi
@@ -228,7 +228,7 @@ define i16 @mand16(i16 %x, i16 %y) {
define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; KNL-LABEL: mand16_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw (%rdi), %k0
; KNL-NEXT: kmovw (%rsi), %k1
; KNL-NEXT: kandw %k1, %k0, %k2
@@ -239,7 +239,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: mand16_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovw (%rdi), %k0
; SKX-NEXT: kmovw (%rsi), %k1
; SKX-NEXT: kandw %k1, %k0, %k2
@@ -250,7 +250,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mand16_mem:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: kmovw (%rsi), %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k2
@@ -261,7 +261,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mand16_mem:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw (%rdi), %k0
; AVX512DQ-NEXT: kmovw (%rsi), %k1
; AVX512DQ-NEXT: kandw %k1, %k0, %k2
@@ -281,7 +281,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
define i8 @shuf_test1(i16 %v) nounwind {
; KNL-LABEL: shuf_test1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kshiftrw $8, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -289,7 +289,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; KNL-NEXT: retq
;
; SKX-LABEL: shuf_test1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kshiftrw $8, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -297,7 +297,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: shuf_test1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kshiftrw $8, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -305,7 +305,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuf_test1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kshiftrw $8, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
@@ -319,7 +319,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
; KNL-LABEL: zext_test1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; KNL-NEXT: kshiftlw $10, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
@@ -329,7 +329,7 @@ define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_test1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftlw $10, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
@@ -339,7 +339,7 @@ define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: zext_test1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
@@ -349,7 +349,7 @@ define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: zext_test1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
@@ -365,7 +365,7 @@ define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; KNL-LABEL: zext_test2:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; KNL-NEXT: kshiftlw $10, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
@@ -376,7 +376,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_test2:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftlw $10, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
@@ -387,7 +387,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: zext_test2:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
@@ -398,7 +398,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: zext_test2:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
@@ -415,7 +415,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; KNL-LABEL: zext_test3:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; KNL-NEXT: kshiftlw $10, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
@@ -426,7 +426,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; KNL-NEXT: retq
;
; SKX-LABEL: zext_test3:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftlw $10, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
@@ -437,7 +437,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: zext_test3:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
@@ -448,7 +448,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: zext_test3:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
@@ -465,7 +465,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
define i8 @conv1(<8 x i1>* %R) {
; KNL-LABEL: conv1:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kxnorw %k0, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: movb %al, (%rdi)
@@ -474,7 +474,7 @@ define i8 @conv1(<8 x i1>* %R) {
; KNL-NEXT: retq
;
; SKX-LABEL: conv1:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kxnorw %k0, %k0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: movb $-2, -{{[0-9]+}}(%rsp)
@@ -482,7 +482,7 @@ define i8 @conv1(<8 x i1>* %R) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: conv1:
-; AVX512BW: ## BB#0: ## %entry
+; AVX512BW: ## %bb.0: ## %entry
; AVX512BW-NEXT: kxnorw %k0, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, (%rdi)
@@ -491,7 +491,7 @@ define i8 @conv1(<8 x i1>* %R) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: conv1:
-; AVX512DQ: ## BB#0: ## %entry
+; AVX512DQ: ## %bb.0: ## %entry
; AVX512DQ-NEXT: kxnorw %k0, %k0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rdi)
; AVX512DQ-NEXT: movb $-2, -{{[0-9]+}}(%rsp)
@@ -509,7 +509,7 @@ entry:
define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) {
; KNL-LABEL: test4:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm1
@@ -519,7 +519,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
; KNL-NEXT: retq
;
; SKX-LABEL: test4:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; SKX-NEXT: vpcmpgtq %ymm3, %ymm2, %k1
; SKX-NEXT: kandnw %k0, %k1, %k0
@@ -528,7 +528,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test4:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm1
@@ -538,7 +538,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test4:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
; AVX512DQ-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm1
@@ -555,14 +555,14 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) {
; KNL-LABEL: test5:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; KNL-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; KNL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test5:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpgtq %xmm0, %xmm1, %k0
; SKX-NEXT: vpcmpgtq %xmm3, %xmm2, %k1
; SKX-NEXT: kandnw %k1, %k0, %k0
@@ -570,14 +570,14 @@ define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test5:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; AVX512BW-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test5:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX512DQ-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; AVX512DQ-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -602,7 +602,7 @@ false:
}
define void @test7(<8 x i1> %mask) {
; KNL-LABEL: test7:
-; KNL: ## BB#0: ## %allocas
+; KNL: ## %bb.0: ## %allocas
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -615,7 +615,7 @@ define void @test7(<8 x i1> %mask) {
; KNL-NEXT: retq
;
; SKX-LABEL: test7:
-; SKX: ## BB#0: ## %allocas
+; SKX: ## %bb.0: ## %allocas
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: movb $85, %al
@@ -625,7 +625,7 @@ define void @test7(<8 x i1> %mask) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test7:
-; AVX512BW: ## BB#0: ## %allocas
+; AVX512BW: ## %bb.0: ## %allocas
; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: movb $85, %al
@@ -637,7 +637,7 @@ define void @test7(<8 x i1> %mask) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test7:
-; AVX512DQ: ## BB#0: ## %allocas
+; AVX512DQ: ## %bb.0: ## %allocas
; AVX512DQ-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -661,11 +661,11 @@ false:
}
define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; KNL-LABEL: test8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: jg LBB17_1
-; KNL-NEXT: ## BB#2:
+; KNL-NEXT: ## %bb.2:
; KNL-NEXT: vpcmpltud %zmm2, %zmm1, %k1
; KNL-NEXT: jmp LBB17_3
; KNL-NEXT: LBB17_1:
@@ -677,11 +677,11 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; KNL-NEXT: retq
;
; SKX-LABEL: test8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: jg LBB17_1
-; SKX-NEXT: ## BB#2:
+; SKX-NEXT: ## %bb.2:
; SKX-NEXT: vpcmpltud %zmm2, %zmm1, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
; SKX-NEXT: vzeroupper
@@ -693,11 +693,11 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test8:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: cmpl %esi, %edi
; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT: jg LBB17_1
-; AVX512BW-NEXT: ## BB#2:
+; AVX512BW-NEXT: ## %bb.2:
; AVX512BW-NEXT: vpcmpltud %zmm2, %zmm1, %k0
; AVX512BW-NEXT: jmp LBB17_3
; AVX512BW-NEXT: LBB17_1:
@@ -709,11 +709,11 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test8:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: cmpl %esi, %edi
; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512DQ-NEXT: jg LBB17_1
-; AVX512DQ-NEXT: ## BB#2:
+; AVX512DQ-NEXT: ## %bb.2:
; AVX512DQ-NEXT: vpcmpltud %zmm2, %zmm1, %k0
; AVX512DQ-NEXT: jmp LBB17_3
; AVX512DQ-NEXT: LBB17_1:
@@ -732,10 +732,10 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
}
define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; KNL-LABEL: test9:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: jg LBB18_1
-; KNL-NEXT: ## BB#2:
+; KNL-NEXT: ## %bb.2:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm0
; KNL-NEXT: jmp LBB18_3
; KNL-NEXT: LBB18_1:
@@ -749,10 +749,10 @@ define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; KNL-NEXT: retq
;
; SKX-LABEL: test9:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: jg LBB18_1
-; SKX-NEXT: ## BB#2:
+; SKX-NEXT: ## %bb.2:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm0
; SKX-NEXT: jmp LBB18_3
; SKX-NEXT: LBB18_1:
@@ -763,10 +763,10 @@ define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test9:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: cmpl %esi, %edi
; AVX512BW-NEXT: jg LBB18_1
-; AVX512BW-NEXT: ## BB#2:
+; AVX512BW-NEXT: ## %bb.2:
; AVX512BW-NEXT: vpsllw $7, %xmm1, %xmm0
; AVX512BW-NEXT: jmp LBB18_3
; AVX512BW-NEXT: LBB18_1:
@@ -779,10 +779,10 @@ define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test9:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: cmpl %esi, %edi
; AVX512DQ-NEXT: jg LBB18_1
-; AVX512DQ-NEXT: ## BB#2:
+; AVX512DQ-NEXT: ## %bb.2:
; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm0
; AVX512DQ-NEXT: jmp LBB18_3
; AVX512DQ-NEXT: LBB18_1:
@@ -805,19 +805,19 @@ define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
define <4 x i1> @test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
; KNL-LABEL: test11:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: jg LBB20_2
-; KNL-NEXT: ## BB#1:
+; KNL-NEXT: ## %bb.1:
; KNL-NEXT: vmovaps %xmm1, %xmm0
; KNL-NEXT: LBB20_2:
; KNL-NEXT: retq
;
; SKX-LABEL: test11:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: jg LBB20_1
-; SKX-NEXT: ## BB#2:
+; SKX-NEXT: ## %bb.2:
; SKX-NEXT: vpslld $31, %xmm1, %xmm0
; SKX-NEXT: jmp LBB20_3
; SKX-NEXT: LBB20_1:
@@ -828,19 +828,19 @@ define <4 x i1> @test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test11:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: cmpl %esi, %edi
; AVX512BW-NEXT: jg LBB20_2
-; AVX512BW-NEXT: ## BB#1:
+; AVX512BW-NEXT: ## %bb.1:
; AVX512BW-NEXT: vmovaps %xmm1, %xmm0
; AVX512BW-NEXT: LBB20_2:
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test11:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: cmpl %esi, %edi
; AVX512DQ-NEXT: jg LBB20_2
-; AVX512DQ-NEXT: ## BB#1:
+; AVX512DQ-NEXT: ## %bb.1:
; AVX512DQ-NEXT: vmovaps %xmm1, %xmm0
; AVX512DQ-NEXT: LBB20_2:
; AVX512DQ-NEXT: retq
@@ -851,7 +851,7 @@ define <4 x i1> @test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
define i32 @test12(i32 %x, i32 %y) {
; CHECK-LABEL: test12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%a = bitcast i16 21845 to <16 x i1>
@@ -862,7 +862,7 @@ define i32 @test12(i32 %x, i32 %y) {
define i32 @test13(i32 %x, i32 %y) {
; CHECK-LABEL: test13:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: retq
%a = bitcast i16 21845 to <16 x i1>
@@ -878,7 +878,7 @@ define i32 @test13(i32 %x, i32 %y) {
define <16 x i1> @test15(i32 %x, i32 %y) {
; KNL-LABEL: test15:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: movw $21845, %ax ## imm = 0x5555
; KNL-NEXT: movw $1, %cx
@@ -890,7 +890,7 @@ define <16 x i1> @test15(i32 %x, i32 %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: test15:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: movw $21845, %ax ## imm = 0x5555
; SKX-NEXT: movw $1, %cx
@@ -900,7 +900,7 @@ define <16 x i1> @test15(i32 %x, i32 %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test15:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: cmpl %esi, %edi
; AVX512BW-NEXT: movw $21845, %ax ## imm = 0x5555
; AVX512BW-NEXT: movw $1, %cx
@@ -912,7 +912,7 @@ define <16 x i1> @test15(i32 %x, i32 %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test15:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: cmpl %esi, %edi
; AVX512DQ-NEXT: movw $21845, %ax ## imm = 0x5555
; AVX512DQ-NEXT: movw $1, %cx
@@ -932,7 +932,7 @@ define <16 x i1> @test15(i32 %x, i32 %y) {
define <64 x i8> @test16(i64 %x) {
;
; KNL-LABEL: test16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -968,7 +968,7 @@ define <64 x i8> @test16(i64 %x) {
; KNL-NEXT: retq
;
; SKX-LABEL: test16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovq %rdi, %k0
; SKX-NEXT: movb $1, %al
; SKX-NEXT: kmovd %eax, %k1
@@ -984,7 +984,7 @@ define <64 x i8> @test16(i64 %x) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test16:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k0
; AVX512BW-NEXT: movb $1, %al
; AVX512BW-NEXT: kmovd %eax, %k1
@@ -999,7 +999,7 @@ define <64 x i8> @test16(i64 %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test16:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: pushq %rbp
; AVX512DQ-NEXT: .cfi_def_cfa_offset 16
; AVX512DQ-NEXT: .cfi_offset %rbp, -16
@@ -1042,7 +1042,7 @@ define <64 x i8> @test16(i64 %x) {
define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
;
; KNL-LABEL: test17:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -1080,7 +1080,7 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; KNL-NEXT: retq
;
; SKX-LABEL: test17:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovq %rdi, %k0
; SKX-NEXT: cmpl %edx, %esi
; SKX-NEXT: setg %al
@@ -1097,7 +1097,7 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test17:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k0
; AVX512BW-NEXT: cmpl %edx, %esi
; AVX512BW-NEXT: setg %al
@@ -1113,7 +1113,7 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test17:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: pushq %rbp
; AVX512DQ-NEXT: .cfi_def_cfa_offset 16
; AVX512DQ-NEXT: .cfi_offset %rbp, -16
@@ -1158,7 +1158,7 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-LABEL: test18:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: kmovw %esi, %k2
; KNL-NEXT: kshiftlw $7, %k2, %k0
@@ -1181,7 +1181,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: test18:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: kmovd %esi, %k2
; SKX-NEXT: kshiftlw $7, %k2, %k0
@@ -1202,7 +1202,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test18:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: kmovd %esi, %k2
; AVX512BW-NEXT: kshiftlw $7, %k2, %k0
@@ -1225,7 +1225,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test18:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: kmovw %esi, %k2
; AVX512DQ-NEXT: kshiftlw $7, %k2, %k0
@@ -1255,7 +1255,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
}
define <32 x i16> @test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnone {
; KNL-LABEL: test21:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; KNL-NEXT: vpsllw $15, %ymm3, %ymm3
; KNL-NEXT: vpsraw $15, %ymm3, %ymm3
@@ -1268,21 +1268,21 @@ define <32 x i16> @test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnone {
; KNL-NEXT: retq
;
; SKX-LABEL: test21:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %ymm1, %ymm1
; SKX-NEXT: vpmovb2m %ymm1, %k1
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test21:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %ymm1, %ymm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test21:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512DQ-NEXT: vpsllw $15, %ymm3, %ymm3
; AVX512DQ-NEXT: vpsraw $15, %ymm3, %ymm3
@@ -1299,7 +1299,7 @@ define <32 x i16> @test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnone {
define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
; KNL-LABEL: test22:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL-NEXT: vpslld $31, %ymm0, %ymm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -1309,14 +1309,14 @@ define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
; KNL-NEXT: retq
;
; SKX-LABEL: test22:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test22:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -1326,7 +1326,7 @@ define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test22:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -1339,7 +1339,7 @@ define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
; KNL-LABEL: test23:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -1349,14 +1349,14 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
; KNL-NEXT: retq
;
; SKX-LABEL: test23:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test23:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -1366,7 +1366,7 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test23:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -1379,7 +1379,7 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; KNL-LABEL: store_v1i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kxnorw %k0, %k0, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
@@ -1388,7 +1388,7 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_v1i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
@@ -1396,7 +1396,7 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_v1i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
@@ -1405,7 +1405,7 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_v1i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
@@ -1418,7 +1418,7 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
; KNL-LABEL: store_v2i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -1429,7 +1429,7 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_v2i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k0
; SKX-NEXT: knotw %k0, %k0
@@ -1437,7 +1437,7 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_v2i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -1448,7 +1448,7 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_v2i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -1463,7 +1463,7 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
; KNL-LABEL: store_v4i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpslld $31, %ymm0, %ymm0
@@ -1474,7 +1474,7 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_v4i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k0
; SKX-NEXT: knotw %k0, %k0
@@ -1482,7 +1482,7 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_v4i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
@@ -1493,7 +1493,7 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_v4i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vpslld $31, %ymm0, %ymm0
@@ -1508,7 +1508,7 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
; KNL-LABEL: store_v8i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -1519,7 +1519,7 @@ define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_v8i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: knotb %k0, %k0
@@ -1527,7 +1527,7 @@ define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_v8i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: knotw %k0, %k0
@@ -1537,7 +1537,7 @@ define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_v8i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -1552,7 +1552,7 @@ define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
; KNL-LABEL: store_v16i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -1562,7 +1562,7 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_v16i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: knotw %k0, %k0
@@ -1570,7 +1570,7 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_v16i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: knotw %k0, %k0
@@ -1579,7 +1579,7 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_v16i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -1607,7 +1607,7 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
define void @f1(i32 %c) {
; CHECK-LABEL: f1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movzbl {{.*}}(%rip), %edi
; CHECK-NEXT: xorl $1, %edi
; CHECK-NEXT: movb %dil, {{.*}}(%rip)
@@ -1625,7 +1625,7 @@ declare void @f2(i32) #1
define void @store_i16_i1(i16 %x, i1 *%y) {
; CHECK-LABEL: store_i16_i1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movb %dil, (%rsi)
; CHECK-NEXT: retq
@@ -1636,7 +1636,7 @@ define void @store_i16_i1(i16 %x, i1 *%y) {
define void @store_i8_i1(i8 %x, i1 *%y) {
; CHECK-LABEL: store_i8_i1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movb %dil, (%rsi)
; CHECK-NEXT: retq
@@ -1647,27 +1647,27 @@ define void @store_i8_i1(i8 %x, i1 *%y) {
define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
; KNL-LABEL: test_build_vec_v32i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; KNL-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: test_build_vec_v32i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl $1497715861, %eax ## imm = 0x59455495
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_build_vec_v32i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: movl $1497715861, %eax ## imm = 0x59455495
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_build_vec_v32i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQ-NEXT: retq
@@ -1677,23 +1677,23 @@ define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
define <64 x i8> @test_build_vec_v64i1(<64 x i8> %x) {
; KNL-LABEL: test_build_vec_v64i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; KNL-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: test_build_vec_v64i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpshufb {{.*#+}} zmm0 = zero,zero,zmm0[2],zero,zero,zero,zmm0[6],zero,zmm0[8],zero,zmm0[10],zero,zmm0[12],zero,zero,zmm0[15],zero,zero,zmm0[18],zero,zmm0[20],zero,zmm0[22],zero,zmm0[24],zero,zero,zmm0[27],zero,zero,zmm0[30],zero,zmm0[32],zero,zmm0[34],zero,zero,zero,zmm0[38],zero,zmm0[40],zero,zero,zmm0[43,44],zero,zmm0[46],zero,zmm0[48],zero,zmm0[50],zero,zero,zero,zmm0[54],zero,zmm0[56],zero,zero,zmm0[59,60],zero,zmm0[62],zero
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_build_vec_v64i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zero,zero,zmm0[2],zero,zero,zero,zmm0[6],zero,zmm0[8],zero,zmm0[10],zero,zmm0[12],zero,zero,zmm0[15],zero,zero,zmm0[18],zero,zmm0[20],zero,zmm0[22],zero,zmm0[24],zero,zero,zmm0[27],zero,zero,zmm0[30],zero,zmm0[32],zero,zmm0[34],zero,zero,zero,zmm0[38],zero,zmm0[40],zero,zero,zmm0[43,44],zero,zmm0[46],zero,zmm0[48],zero,zmm0[50],zero,zero,zero,zmm0[54],zero,zmm0[56],zero,zero,zmm0[59,60],zero,zmm0[62],zero
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_build_vec_v64i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQ-NEXT: retq
@@ -1703,7 +1703,7 @@ define <64 x i8> @test_build_vec_v64i1(<64 x i8> %x) {
define void @ktest_1(<8 x double> %in, double * %base) {
; KNL-LABEL: ktest_1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vmovupd (%rdi), %zmm1
; KNL-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; KNL-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z}
@@ -1711,7 +1711,7 @@ define void @ktest_1(<8 x double> %in, double * %base) {
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: je LBB41_2
-; KNL-NEXT: ## BB#1: ## %L1
+; KNL-NEXT: ## %bb.1: ## %L1
; KNL-NEXT: vmovapd %zmm0, (%rdi)
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
@@ -1721,14 +1721,14 @@ define void @ktest_1(<8 x double> %in, double * %base) {
; KNL-NEXT: retq
;
; SKX-LABEL: ktest_1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmovupd (%rdi), %zmm1
; SKX-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; SKX-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z}
; SKX-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1}
; SKX-NEXT: ktestb %k0, %k0
; SKX-NEXT: je LBB41_2
-; SKX-NEXT: ## BB#1: ## %L1
+; SKX-NEXT: ## %bb.1: ## %L1
; SKX-NEXT: vmovapd %zmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -1738,7 +1738,7 @@ define void @ktest_1(<8 x double> %in, double * %base) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: ktest_1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vmovupd (%rdi), %zmm1
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z}
@@ -1746,7 +1746,7 @@ define void @ktest_1(<8 x double> %in, double * %base) {
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: testb %al, %al
; AVX512BW-NEXT: je LBB41_2
-; AVX512BW-NEXT: ## BB#1: ## %L1
+; AVX512BW-NEXT: ## %bb.1: ## %L1
; AVX512BW-NEXT: vmovapd %zmm0, (%rdi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1756,14 +1756,14 @@ define void @ktest_1(<8 x double> %in, double * %base) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: ktest_1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vmovupd (%rdi), %zmm1
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512DQ-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z}
; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1}
; AVX512DQ-NEXT: ktestb %k0, %k0
; AVX512DQ-NEXT: je LBB41_2
-; AVX512DQ-NEXT: ## BB#1: ## %L1
+; AVX512DQ-NEXT: ## %bb.1: ## %L1
; AVX512DQ-NEXT: vmovapd %zmm0, (%rdi)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
@@ -1801,7 +1801,7 @@ End:
define void @ktest_2(<32 x float> %in, float * %base) {
;
; KNL-LABEL: ktest_2:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: .cfi_offset %rbp, -16
@@ -2083,7 +2083,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; KNL-NEXT: kmovw %k0, (%rsp)
; KNL-NEXT: cmpl $0, (%rsp)
; KNL-NEXT: je LBB42_2
-; KNL-NEXT: ## BB#1: ## %L1
+; KNL-NEXT: ## %bb.1: ## %L1
; KNL-NEXT: vmovaps %zmm0, (%rdi)
; KNL-NEXT: vmovaps %zmm1, 64(%rdi)
; KNL-NEXT: jmp LBB42_3
@@ -2097,7 +2097,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; KNL-NEXT: retq
;
; SKX-LABEL: ktest_2:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmovups (%rdi), %zmm2
; SKX-NEXT: vmovups 64(%rdi), %zmm3
; SKX-NEXT: vcmpltps %zmm0, %zmm2, %k1
@@ -2111,7 +2111,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; SKX-NEXT: kord %k1, %k0, %k0
; SKX-NEXT: ktestd %k0, %k0
; SKX-NEXT: je LBB42_2
-; SKX-NEXT: ## BB#1: ## %L1
+; SKX-NEXT: ## %bb.1: ## %L1
; SKX-NEXT: vmovaps %zmm0, (%rdi)
; SKX-NEXT: vmovaps %zmm1, 64(%rdi)
; SKX-NEXT: vzeroupper
@@ -2123,7 +2123,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: ktest_2:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vmovups (%rdi), %zmm2
; AVX512BW-NEXT: vmovups 64(%rdi), %zmm3
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm2, %k1
@@ -2137,7 +2137,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; AVX512BW-NEXT: kord %k1, %k0, %k0
; AVX512BW-NEXT: ktestd %k0, %k0
; AVX512BW-NEXT: je LBB42_2
-; AVX512BW-NEXT: ## BB#1: ## %L1
+; AVX512BW-NEXT: ## %bb.1: ## %L1
; AVX512BW-NEXT: vmovaps %zmm0, (%rdi)
; AVX512BW-NEXT: vmovaps %zmm1, 64(%rdi)
; AVX512BW-NEXT: vzeroupper
@@ -2149,7 +2149,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: ktest_2:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: pushq %rbp
; AVX512DQ-NEXT: .cfi_def_cfa_offset 16
; AVX512DQ-NEXT: .cfi_offset %rbp, -16
@@ -2431,7 +2431,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; AVX512DQ-NEXT: kmovw %k0, (%rsp)
; AVX512DQ-NEXT: cmpl $0, (%rsp)
; AVX512DQ-NEXT: je LBB42_2
-; AVX512DQ-NEXT: ## BB#1: ## %L1
+; AVX512DQ-NEXT: ## %bb.1: ## %L1
; AVX512DQ-NEXT: vmovaps %zmm0, (%rdi)
; AVX512DQ-NEXT: vmovaps %zmm1, 64(%rdi)
; AVX512DQ-NEXT: jmp LBB42_3
@@ -2472,27 +2472,27 @@ End:
define <8 x i64> @load_8i1(<8 x i1>* %a) {
; KNL-LABEL: load_8i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: load_8i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: retq
;
; AVX512BW-LABEL: load_8i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: load_8i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: retq
@@ -2503,25 +2503,25 @@ define <8 x i64> @load_8i1(<8 x i1>* %a) {
define <16 x i32> @load_16i1(<16 x i1>* %a) {
; KNL-LABEL: load_16i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw (%rdi), %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: load_16i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovw (%rdi), %k0
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: retq
;
; AVX512BW-LABEL: load_16i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovw (%rdi), %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: load_16i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: retq
@@ -2532,7 +2532,7 @@ define <16 x i32> @load_16i1(<16 x i1>* %a) {
define <2 x i16> @load_2i1(<2 x i1>* %a) {
; KNL-LABEL: load_2i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2541,13 +2541,13 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: load_2i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: retq
;
; AVX512BW-LABEL: load_2i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2556,7 +2556,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: load_2i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2569,7 +2569,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
define <4 x i16> @load_4i1(<4 x i1>* %a) {
; KNL-LABEL: load_4i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2579,13 +2579,13 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: load_4i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: retq
;
; AVX512BW-LABEL: load_4i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2595,7 +2595,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: load_4i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2608,7 +2608,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
define <32 x i16> @load_32i1(<32 x i1>* %a) {
; KNL-LABEL: load_32i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw (%rdi), %k1
; KNL-NEXT: kmovw 2(%rdi), %k2
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2618,19 +2618,19 @@ define <32 x i16> @load_32i1(<32 x i1>* %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: load_32i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd (%rdi), %k0
; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: retq
;
; AVX512BW-LABEL: load_32i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd (%rdi), %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: load_32i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw (%rdi), %k0
; AVX512DQ-NEXT: kmovw 2(%rdi), %k1
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -2645,7 +2645,7 @@ define <32 x i16> @load_32i1(<32 x i1>* %a) {
define <64 x i8> @load_64i1(<64 x i1>* %a) {
; KNL-LABEL: load_64i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw (%rdi), %k1
; KNL-NEXT: kmovw 2(%rdi), %k2
; KNL-NEXT: kmovw 4(%rdi), %k3
@@ -2663,19 +2663,19 @@ define <64 x i8> @load_64i1(<64 x i1>* %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: load_64i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovq (%rdi), %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: retq
;
; AVX512BW-LABEL: load_64i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: load_64i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw (%rdi), %k0
; AVX512DQ-NEXT: kmovw 2(%rdi), %k1
; AVX512DQ-NEXT: kmovw 4(%rdi), %k2
@@ -2698,7 +2698,7 @@ define <64 x i8> @load_64i1(<64 x i1>* %a) {
define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
; KNL-LABEL: store_8i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -2708,14 +2708,14 @@ define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_8i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_8i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -2724,7 +2724,7 @@ define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_8i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -2737,7 +2737,7 @@ define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
; KNL-LABEL: store_8i1_1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -2747,14 +2747,14 @@ define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_8i1_1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_8i1_1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -2763,7 +2763,7 @@ define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_8i1_1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -2777,7 +2777,7 @@ define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
; KNL-LABEL: store_16i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -2786,14 +2786,14 @@ define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_16i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: kmovw %k0, (%rdi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_16i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kmovw %k0, (%rdi)
@@ -2801,7 +2801,7 @@ define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_16i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -2814,7 +2814,7 @@ define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
; KNL-LABEL: store_32i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
@@ -2828,7 +2828,7 @@ define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_32i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
; SKX-NEXT: vpmovb2m %ymm0, %k0
; SKX-NEXT: kmovd %k0, (%rdi)
@@ -2836,7 +2836,7 @@ define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_32i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, (%rdi)
@@ -2844,7 +2844,7 @@ define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_32i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512DQ-NEXT: vpslld $31, %zmm1, %zmm1
@@ -2862,7 +2862,7 @@ define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
; KNL-LABEL: store_32i1_1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: vpmovsxwd %ymm1, %zmm1
@@ -2879,7 +2879,7 @@ define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_32i1_1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %zmm0, %zmm0
; SKX-NEXT: vpmovw2m %zmm0, %k0
; SKX-NEXT: kmovd %k0, (%rdi)
@@ -2887,7 +2887,7 @@ define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_32i1_1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, (%rdi)
@@ -2895,7 +2895,7 @@ define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_32i1_1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovsxwd %ymm1, %zmm1
@@ -2919,7 +2919,7 @@ define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
;
; KNL-LABEL: store_64i1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: pushq %r15
@@ -3228,7 +3228,7 @@ define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
; KNL-NEXT: retq
;
; SKX-LABEL: store_64i1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %zmm0, %zmm0
; SKX-NEXT: vpmovb2m %zmm0, %k0
; SKX-NEXT: kmovq %k0, (%rdi)
@@ -3236,7 +3236,7 @@ define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_64i1:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, (%rdi)
@@ -3244,7 +3244,7 @@ define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: store_64i1:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: pushq %rbp
; AVX512DQ-NEXT: .cfi_def_cfa_offset 16
; AVX512DQ-NEXT: pushq %r15
@@ -3557,7 +3557,7 @@ define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
; KNL-LABEL: test_bitcast_v8i1_zext:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -3567,7 +3567,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_bitcast_v8i1_zext:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; SKX-NEXT: kmovb %k0, %eax
@@ -3576,7 +3576,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_bitcast_v8i1_zext:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -3586,7 +3586,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_bitcast_v8i1_zext:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: kmovb %k0, %eax
@@ -3603,7 +3603,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
; CHECK-LABEL: test_bitcast_v16i1_zext:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
@@ -3619,7 +3619,7 @@ define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
define i16 @test_v16i1_add(i16 %x, i16 %y) {
; KNL-LABEL: test_v16i1_add:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
@@ -3628,7 +3628,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_add:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
@@ -3637,7 +3637,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_add:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
@@ -3646,7 +3646,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_add:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
@@ -3662,7 +3662,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; KNL-LABEL: test_v16i1_sub:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
@@ -3671,7 +3671,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_sub:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
@@ -3680,7 +3680,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_sub:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
@@ -3689,7 +3689,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_sub:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
@@ -3705,7 +3705,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; KNL-LABEL: test_v16i1_mul:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
@@ -3714,7 +3714,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_mul:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandw %k1, %k0, %k0
@@ -3723,7 +3723,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_mul:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
@@ -3732,7 +3732,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_mul:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandw %k1, %k0, %k0
@@ -3748,7 +3748,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
define i8 @test_v8i1_add(i8 %x, i8 %y) {
; KNL-LABEL: test_v8i1_add:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
@@ -3757,7 +3757,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_add:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
@@ -3766,7 +3766,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_add:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
@@ -3775,7 +3775,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_add:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
@@ -3791,7 +3791,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; KNL-LABEL: test_v8i1_sub:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
@@ -3800,7 +3800,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_sub:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
@@ -3809,7 +3809,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_sub:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
@@ -3818,7 +3818,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_sub:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
@@ -3834,7 +3834,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; KNL-LABEL: test_v8i1_mul:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
@@ -3843,7 +3843,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_mul:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandb %k1, %k0, %k0
@@ -3852,7 +3852,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_mul:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
@@ -3861,7 +3861,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_mul:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandb %k1, %k0, %k0
diff --git a/test/CodeGen/X86/avx512-mask-spills.ll b/test/CodeGen/X86/avx512-mask-spills.ll
index 8120836bd0b..b9f483e997c 100644
--- a/test/CodeGen/X86/avx512-mask-spills.ll
+++ b/test/CodeGen/X86/avx512-mask-spills.ll
@@ -4,7 +4,7 @@
declare void @f()
define <4 x i1> @test_4i1(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_4i1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
@@ -26,7 +26,7 @@ define <4 x i1> @test_4i1(<4 x i32> %a, <4 x i32> %b) {
define <8 x i1> @test_8i1(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_8i1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
@@ -49,7 +49,7 @@ define <8 x i1> @test_8i1(<8 x i32> %a, <8 x i32> %b) {
define <16 x i1> @test_16i1(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_16i1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
@@ -71,7 +71,7 @@ define <16 x i1> @test_16i1(<16 x i32> %a, <16 x i32> %b) {
define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
; CHECK-LABEL: test_32i1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleuw %zmm1, %zmm0, %k0
@@ -93,7 +93,7 @@ define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
define <64 x i1> @test_64i1(<64 x i8> %a, <64 x i8> %b) {
; CHECK-LABEL: test_64i1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
diff --git a/test/CodeGen/X86/avx512-mask-zext-bugfix.ll b/test/CodeGen/X86/avx512-mask-zext-bugfix.ll
index 14406da3116..f828c4dcef7 100755
--- a/test/CodeGen/X86/avx512-mask-zext-bugfix.ll
+++ b/test/CodeGen/X86/avx512-mask-zext-bugfix.ll
@@ -16,7 +16,7 @@ declare i32 @check_mask16(i16 zeroext %res_mask, i16 zeroext %exp_mask, i8* %fna
; Function Attrs: nounwind uwtable
define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a,i8* %arraydecay,i8* %fname){
; CHECK-LABEL: test_xmm:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: movl $2, %esi
diff --git a/test/CodeGen/X86/avx512-masked-memop-64-32.ll b/test/CodeGen/X86/avx512-masked-memop-64-32.ll
index b7dce39bd5c..e64ac5c5873 100644
--- a/test/CodeGen/X86/avx512-masked-memop-64-32.ll
+++ b/test/CodeGen/X86/avx512-masked-memop-64-32.ll
@@ -4,7 +4,7 @@
define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
; AVX512-LABEL: test1:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; AVX512-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
@@ -16,7 +16,7 @@ define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
; AVX512-LABEL: test2:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; AVX512-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
@@ -28,7 +28,7 @@ define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
; AVX512-LABEL: test3:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; AVX512-NEXT: vmovdqu32 %zmm1, (%rdi) {%k1}
@@ -41,7 +41,7 @@ define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %dst) {
; AVX512-LABEL: test4:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps (%rdi), %zmm1, %zmm0 {%k1}
@@ -53,7 +53,7 @@ define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float
define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val) {
; AVX512-LABEL: test13:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; AVX512-NEXT: vmovups %zmm1, (%rdi) {%k1}
@@ -66,7 +66,7 @@ define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val)
define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
; AVX512-LABEL: one_mask_bit_set5:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovlps %xmm0, 48(%rdi)
; AVX512-NEXT: vzeroupper
@@ -78,7 +78,7 @@ define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
;
; AVX512-LABEL: load_one_mask_bit_set5:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm1
; AVX512-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; AVX512-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
@@ -98,7 +98,7 @@ declare <16 x i32*> @llvm.masked.load.v16p0i32.p0v16p0i32(<16 x i32*>*, i32, <16
define <16 x i32*> @test23(<16 x i32*> %trigger, <16 x i32*>* %addr) {
; AVX512-LABEL: test23:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
; AVX512-NEXT: vpcmpeqq %zmm2, %zmm1, %k2
@@ -116,7 +116,7 @@ declare <16 x %mystruct*> @llvm.masked.load.v16p0mystruct.p0v16p0mystruct(<16 x
define <16 x %mystruct*> @test24(<16 x i1> %mask, <16 x %mystruct*>* %addr) {
; AVX512F-LABEL: test24:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -126,7 +126,7 @@ define <16 x %mystruct*> @test24(<16 x i1> %mask, <16 x %mystruct*>* %addr) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test24:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z}
@@ -139,7 +139,7 @@ define <16 x %mystruct*> @test24(<16 x i1> %mask, <16 x %mystruct*>* %addr) {
define void @test_store_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %src0) {
; AVX512F-LABEL: test_store_16i64:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -150,7 +150,7 @@ define void @test_store_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %sr
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_store_16i64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vmovdqu64 %zmm1, (%rdi) {%k1}
@@ -165,7 +165,7 @@ declare void @llvm.masked.store.v16i64.p0v16i64(<16 x i64> %src0, <16 x i64>* %p
define void @test_store_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x double> %src0) {
; AVX512F-LABEL: test_store_16f64:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -176,7 +176,7 @@ define void @test_store_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x doubl
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_store_16f64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vmovupd %zmm1, (%rdi) {%k1}
@@ -191,7 +191,7 @@ declare void @llvm.masked.store.v16f64.p0v16f64(<16 x double> %src0, <16 x doubl
define <16 x i64> @test_load_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %src0) {
; AVX512F-LABEL: test_load_16i64:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -201,7 +201,7 @@ define <16 x i64> @test_load_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_load_16i64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
@@ -215,7 +215,7 @@ declare <16 x i64> @llvm.masked.load.v16i64.p0v16i64(<16 x i64>* %ptrs, i32, <16
define <16 x double> @test_load_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x double> %src0) {
; AVX512F-LABEL: test_load_16f64:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -225,7 +225,7 @@ define <16 x double> @test_load_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_load_16f64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
@@ -239,7 +239,7 @@ declare <16 x double> @llvm.masked.load.v16f64.p0v16f64(<16 x double>* %ptrs, i3
define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
; AVX512F-LABEL: test_load_32f64:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX512F-NEXT: vpmovsxbd %xmm5, %zmm5
; AVX512F-NEXT: vpslld $31, %zmm5, %zmm5
@@ -257,7 +257,7 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_load_32f64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
; SKX-NEXT: vpmovb2m %ymm0, %k1
; SKX-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
diff --git a/test/CodeGen/X86/avx512-masked_memop-16-8.ll b/test/CodeGen/X86/avx512-masked_memop-16-8.ll
index aedfbf7dbd6..c8df2bffd9a 100644
--- a/test/CodeGen/X86/avx512-masked_memop-16-8.ll
+++ b/test/CodeGen/X86/avx512-masked_memop-16-8.ll
@@ -5,7 +5,7 @@
define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_load_16xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %xmm0 {%k1} {z}
@@ -17,7 +17,7 @@ declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <1
define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_load_32xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vpblendmb (%rdi), %ymm1, %ymm0 {%k1}
@@ -29,7 +29,7 @@ declare <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>*, i32, <32 x i1>, <3
define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_load_64xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vpblendmb (%rdi), %zmm1, %zmm0 {%k1}
@@ -41,7 +41,7 @@ declare <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>*, i32, <64 x i1>, <6
define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_load_8xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %xmm0 {%k1} {z}
@@ -53,7 +53,7 @@ declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8
define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_load_16xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} {z}
@@ -65,7 +65,7 @@ declare <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>*, i32, <16 x i1>
define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_load_32xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vpblendmw (%rdi), %zmm1, %zmm0 {%k1}
@@ -77,7 +77,7 @@ declare <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>*, i32, <32 x i1>
define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu8 %xmm1, (%rdi) {%k1}
@@ -89,7 +89,7 @@ declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x
define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu8 %ymm1, (%rdi) {%k1}
@@ -102,7 +102,7 @@ declare void @llvm.masked.store.v32i8.p0v32i8(<32 x i8>, <32 x i8>*, i32, <32 x
define void @test_mask_store_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_store_64xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vmovdqu8 %zmm1, (%rdi) {%k1}
@@ -115,7 +115,7 @@ declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x
define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %xmm1, (%rdi) {%k1}
@@ -127,7 +127,7 @@ declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i
define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %ymm1, (%rdi) {%k1}
@@ -140,7 +140,7 @@ declare void @llvm.masked.store.v16i16.p0v16i16(<16 x i16>, <16 x i16>*, i32, <1
define void @test_mask_store_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_store_32xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu16 %zmm1, (%rdi) {%k1}
diff --git a/test/CodeGen/X86/avx512-memfold.ll b/test/CodeGen/X86/avx512-memfold.ll
index 3184140102a..80941181995 100644
--- a/test/CodeGen/X86/avx512-memfold.ll
+++ b/test/CodeGen/X86/avx512-memfold.ll
@@ -3,7 +3,7 @@
define i8 @test_int_x86_avx512_mask_cmp_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -21,7 +21,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define <4 x float> @test_mask_max_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -37,7 +37,7 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_maskz_add_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -56,7 +56,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>,
define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, double* %c, i8 %mask){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -71,7 +71,7 @@ define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %a, <2 x do
; TODO: We shouldn't fold the load twice here.
define <4 x float> @test_mask_add_ss_double_use(<4 x float> %a, float* %b, i8 %mask, <4 x float> %c) {
; CHECK-LABEL: test_mask_add_ss_double_use:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss %xmm2, %xmm0, %xmm1 {%k1}
diff --git a/test/CodeGen/X86/avx512-mov.ll b/test/CodeGen/X86/avx512-mov.ll
index a1f50c57af7..f1a2ac880ed 100644
--- a/test/CodeGen/X86/avx512-mov.ll
+++ b/test/CodeGen/X86/avx512-mov.ll
@@ -3,7 +3,7 @@
define i32 @test1(float %x) {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovd %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = bitcast float %x to i32
@@ -12,7 +12,7 @@ define i32 @test1(float %x) {
define <4 x i32> @test2(i32 %x) {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovd %edi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = insertelement <4 x i32>undef, i32 %x, i32 0
@@ -21,7 +21,7 @@ define <4 x i32> @test2(i32 %x) {
define <2 x i64> @test3(i64 %x) {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovq %rdi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = insertelement <2 x i64>undef, i64 %x, i32 0
@@ -30,7 +30,7 @@ define <2 x i64> @test3(i64 %x) {
define <4 x i32> @test4(i32* %x) {
; CHECK-LABEL: test4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -41,7 +41,7 @@ define <4 x i32> @test4(i32* %x) {
define void @test5(float %x, float* %y) {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
store float %x, float* %y, align 4
@@ -50,7 +50,7 @@ define void @test5(float %x, float* %y) {
define void @test6(double %x, double* %y) {
; CHECK-LABEL: test6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovsd %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
store double %x, double* %y, align 8
@@ -59,7 +59,7 @@ define void @test6(double %x, double* %y) {
define float @test7(i32* %x) {
; CHECK-LABEL: test7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -70,7 +70,7 @@ define float @test7(i32* %x) {
define i32 @test8(<4 x i32> %x) {
; CHECK-LABEL: test8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovd %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = extractelement <4 x i32> %x, i32 0
@@ -79,7 +79,7 @@ define i32 @test8(<4 x i32> %x) {
define i64 @test9(<2 x i64> %x) {
; CHECK-LABEL: test9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovq %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = extractelement <2 x i64> %x, i32 0
@@ -88,7 +88,7 @@ define i64 @test9(<2 x i64> %x) {
define <4 x i32> @test10(i32* %x) {
; CHECK-LABEL: test10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -99,7 +99,7 @@ define <4 x i32> @test10(i32* %x) {
define <4 x float> @test11(float* %x) {
; CHECK-LABEL: test11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -110,7 +110,7 @@ define <4 x float> @test11(float* %x) {
define <2 x double> @test12(double* %x) {
; CHECK-LABEL: test12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovsd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; CHECK-NEXT: ## xmm0 = mem[0],zero
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -121,7 +121,7 @@ define <2 x double> @test12(double* %x) {
define <2 x i64> @test13(i64 %x) {
; CHECK-LABEL: test13:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovq %rdi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = insertelement <2 x i64>zeroinitializer, i64 %x, i32 0
@@ -130,7 +130,7 @@ define <2 x i64> @test13(i64 %x) {
define <4 x i32> @test14(i32 %x) {
; CHECK-LABEL: test14:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovd %edi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = insertelement <4 x i32>zeroinitializer, i32 %x, i32 0
@@ -139,7 +139,7 @@ define <4 x i32> @test14(i32 %x) {
define <4 x i32> @test15(i32* %x) {
; CHECK-LABEL: test15:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -150,7 +150,7 @@ define <4 x i32> @test15(i32* %x) {
define <16 x i32> @test16(i8 * %addr) {
; CHECK-LABEL: test16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x i32>*
@@ -160,7 +160,7 @@ define <16 x i32> @test16(i8 * %addr) {
define <16 x i32> @test17(i8 * %addr) {
; CHECK-LABEL: test17:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x i32>*
@@ -170,7 +170,7 @@ define <16 x i32> @test17(i8 * %addr) {
define void @test18(i8 * %addr, <8 x i64> %data) {
; CHECK-LABEL: test18:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i64>*
@@ -180,7 +180,7 @@ define void @test18(i8 * %addr, <8 x i64> %data) {
define void @test19(i8 * %addr, <16 x i32> %data) {
; CHECK-LABEL: test19:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x i32>*
@@ -190,7 +190,7 @@ define void @test19(i8 * %addr, <16 x i32> %data) {
define void @test20(i8 * %addr, <16 x i32> %data) {
; CHECK-LABEL: test20:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x i32>*
@@ -200,7 +200,7 @@ define void @test20(i8 * %addr, <16 x i32> %data) {
define <8 x i64> @test21(i8 * %addr) {
; CHECK-LABEL: test21:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i64>*
@@ -210,7 +210,7 @@ define <8 x i64> @test21(i8 * %addr) {
define void @test22(i8 * %addr, <8 x i64> %data) {
; CHECK-LABEL: test22:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i64>*
@@ -220,7 +220,7 @@ define void @test22(i8 * %addr, <8 x i64> %data) {
define <8 x i64> @test23(i8 * %addr) {
; CHECK-LABEL: test23:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i64>*
@@ -230,7 +230,7 @@ define <8 x i64> @test23(i8 * %addr) {
define void @test24(i8 * %addr, <8 x double> %data) {
; CHECK-LABEL: test24:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x double>*
@@ -240,7 +240,7 @@ define void @test24(i8 * %addr, <8 x double> %data) {
define <8 x double> @test25(i8 * %addr) {
; CHECK-LABEL: test25:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x double>*
@@ -250,7 +250,7 @@ define <8 x double> @test25(i8 * %addr) {
define void @test26(i8 * %addr, <16 x float> %data) {
; CHECK-LABEL: test26:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x float>*
@@ -260,7 +260,7 @@ define void @test26(i8 * %addr, <16 x float> %data) {
define <16 x float> @test27(i8 * %addr) {
; CHECK-LABEL: test27:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x float>*
@@ -270,7 +270,7 @@ define <16 x float> @test27(i8 * %addr) {
define void @test28(i8 * %addr, <8 x double> %data) {
; CHECK-LABEL: test28:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x double>*
@@ -280,7 +280,7 @@ define void @test28(i8 * %addr, <8 x double> %data) {
define <8 x double> @test29(i8 * %addr) {
; CHECK-LABEL: test29:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x double>*
@@ -290,7 +290,7 @@ define <8 x double> @test29(i8 * %addr) {
define void @test30(i8 * %addr, <16 x float> %data) {
; CHECK-LABEL: test30:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x float>*
@@ -300,7 +300,7 @@ define void @test30(i8 * %addr, <16 x float> %data) {
define <16 x float> @test31(i8 * %addr) {
; CHECK-LABEL: test31:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x float>*
@@ -310,7 +310,7 @@ define <16 x float> @test31(i8 * %addr) {
define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; CHECK-LABEL: test32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x48,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0x6f,0x07]
@@ -324,7 +324,7 @@ define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; CHECK-LABEL: test33:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x48,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x49,0x6f,0x07]
@@ -338,7 +338,7 @@ define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
define <16 x i32> @test34(i8 * %addr, <16 x i32> %mask1) {
; CHECK-LABEL: test34:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x48,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x6f,0x07]
@@ -352,7 +352,7 @@ define <16 x i32> @test34(i8 * %addr, <16 x i32> %mask1) {
define <16 x i32> @test35(i8 * %addr, <16 x i32> %mask1) {
; CHECK-LABEL: test35:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x48,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xc9,0x6f,0x07]
@@ -366,7 +366,7 @@ define <16 x i32> @test35(i8 * %addr, <16 x i32> %mask1) {
define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
; CHECK-LABEL: test36:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x48,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x6f,0x07]
@@ -380,7 +380,7 @@ define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
; CHECK-LABEL: test37:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x48,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x49,0x6f,0x07]
@@ -394,7 +394,7 @@ define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
define <8 x i64> @test38(i8 * %addr, <8 x i64> %mask1) {
; CHECK-LABEL: test38:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x48,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x6f,0x07]
@@ -408,7 +408,7 @@ define <8 x i64> @test38(i8 * %addr, <8 x i64> %mask1) {
define <8 x i64> @test39(i8 * %addr, <8 x i64> %mask1) {
; CHECK-LABEL: test39:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x48,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfe,0xc9,0x6f,0x07]
@@ -422,7 +422,7 @@ define <8 x i64> @test39(i8 * %addr, <8 x i64> %mask1) {
define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
; CHECK-LABEL: test40:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
; CHECK-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0x74,0x48,0xc2,0xca,0x0c]
; CHECK-NEXT: vmovaps (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x28,0x07]
@@ -436,7 +436,7 @@ define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1)
define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
; CHECK-LABEL: test41:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
; CHECK-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0x74,0x48,0xc2,0xca,0x0c]
; CHECK-NEXT: vmovups (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x10,0x07]
@@ -450,7 +450,7 @@ define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1)
define <16 x float> @test42(i8 * %addr, <16 x float> %mask1) {
; CHECK-LABEL: test42:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
; CHECK-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x48,0xc2,0xc9,0x0c]
; CHECK-NEXT: vmovaps (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x28,0x07]
@@ -464,7 +464,7 @@ define <16 x float> @test42(i8 * %addr, <16 x float> %mask1) {
define <16 x float> @test43(i8 * %addr, <16 x float> %mask1) {
; CHECK-LABEL: test43:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
; CHECK-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x48,0xc2,0xc9,0x0c]
; CHECK-NEXT: vmovups (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x10,0x07]
@@ -478,7 +478,7 @@ define <16 x float> @test43(i8 * %addr, <16 x float> %mask1) {
define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
; CHECK-LABEL: test44:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0x57,0xd2]
; CHECK-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0xf5,0x48,0xc2,0xca,0x0c]
; CHECK-NEXT: vmovapd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x28,0x07]
@@ -492,7 +492,7 @@ define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1)
define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
; CHECK-LABEL: test45:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0x57,0xd2]
; CHECK-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0xf5,0x48,0xc2,0xca,0x0c]
; CHECK-NEXT: vmovupd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x10,0x07]
@@ -506,7 +506,7 @@ define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1)
define <8 x double> @test46(i8 * %addr, <8 x double> %mask1) {
; CHECK-LABEL: test46:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
; CHECK-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc9,0x0c]
; CHECK-NEXT: vmovapd (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x28,0x07]
@@ -520,7 +520,7 @@ define <8 x double> @test46(i8 * %addr, <8 x double> %mask1) {
define <8 x double> @test47(i8 * %addr, <8 x double> %mask1) {
; CHECK-LABEL: test47:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
; CHECK-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc9,0x0c]
; CHECK-NEXT: vmovupd (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x10,0x07]
diff --git a/test/CodeGen/X86/avx512-pmovxrm.ll b/test/CodeGen/X86/avx512-pmovxrm.ll
index ab3f32091fc..7725f160200 100644
--- a/test/CodeGen/X86/avx512-pmovxrm.ll
+++ b/test/CodeGen/X86/avx512-pmovxrm.ll
@@ -4,13 +4,13 @@
define <32 x i16> @test_llvm_x86_avx512_pmovsxbw(<32 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovsxbw:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbw (%eax), %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovsxbw:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbw (%rdi), %zmm0
; X64-NEXT: retq
%1 = load <32 x i8>, <32 x i8>* %a, align 1
@@ -20,13 +20,13 @@ define <32 x i16> @test_llvm_x86_avx512_pmovsxbw(<32 x i8>* %a) {
define <16 x i32> @test_llvm_x86_avx512_pmovsxbd(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovsxbd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbd (%eax), %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovsxbd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbd (%rdi), %zmm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -36,13 +36,13 @@ define <16 x i32> @test_llvm_x86_avx512_pmovsxbd(<16 x i8>* %a) {
define <8 x i64> @test_llvm_x86_avx512_pmovsxbq(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovsxbq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbq (%eax), %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovsxbq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbq (%rdi), %zmm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -53,13 +53,13 @@ define <8 x i64> @test_llvm_x86_avx512_pmovsxbq(<16 x i8>* %a) {
define <16 x i32> @test_llvm_x86_avx512_pmovsxwd(<16 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovsxwd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwd (%eax), %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovsxwd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxwd (%rdi), %zmm0
; X64-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %a, align 1
@@ -69,13 +69,13 @@ define <16 x i32> @test_llvm_x86_avx512_pmovsxwd(<16 x i16>* %a) {
define <8 x i64> @test_llvm_x86_avx512_pmovsxwq(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovsxwq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwq (%eax), %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovsxwq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxwq (%rdi), %zmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -85,13 +85,13 @@ define <8 x i64> @test_llvm_x86_avx512_pmovsxwq(<8 x i16>* %a) {
define <8 x i64> @test_llvm_x86_avx512_pmovsxdq(<8 x i32>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovsxdq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxdq (%eax), %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovsxdq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxdq (%rdi), %zmm0
; X64-NEXT: retq
%1 = load <8 x i32>, <8 x i32>* %a, align 1
@@ -101,13 +101,13 @@ define <8 x i64> @test_llvm_x86_avx512_pmovsxdq(<8 x i32>* %a) {
define <32 x i16> @test_llvm_x86_avx512_pmovzxbw(<32 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovzxbw:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbw {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovzxbw:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbw {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero
; X64-NEXT: retq
%1 = load <32 x i8>, <32 x i8>* %a, align 1
@@ -117,13 +117,13 @@ define <32 x i16> @test_llvm_x86_avx512_pmovzxbw(<32 x i8>* %a) {
define <16 x i32> @test_llvm_x86_avx512_pmovzxbd(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovzxbd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovzxbd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -133,13 +133,13 @@ define <16 x i32> @test_llvm_x86_avx512_pmovzxbd(<16 x i8>* %a) {
define <8 x i64> @test_llvm_x86_avx512_pmovzxbq(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovzxbq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovzxbq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -150,13 +150,13 @@ define <8 x i64> @test_llvm_x86_avx512_pmovzxbq(<16 x i8>* %a) {
define <16 x i32> @test_llvm_x86_avx512_pmovzxwd(<16 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovzxwd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovzxwd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; X64-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %a, align 1
@@ -166,13 +166,13 @@ define <16 x i32> @test_llvm_x86_avx512_pmovzxwd(<16 x i16>* %a) {
define <8 x i64> @test_llvm_x86_avx512_pmovzxwq(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovzxwq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxwq {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovzxwq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxwq {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -182,13 +182,13 @@ define <8 x i64> @test_llvm_x86_avx512_pmovzxwq(<8 x i16>* %a) {
define <8 x i64> @test_llvm_x86_avx512_pmovzxdq(<8 x i32>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovzxdq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxdq {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovzxdq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxdq {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; X64-NEXT: retq
%1 = load <8 x i32>, <8 x i32>* %a, align 1
diff --git a/test/CodeGen/X86/avx512-regcall-Mask.ll b/test/CodeGen/X86/avx512-regcall-Mask.ll
index d31b3ec2669..d02d6a69f69 100644
--- a/test/CodeGen/X86/avx512-regcall-Mask.ll
+++ b/test/CodeGen/X86/avx512-regcall-Mask.ll
@@ -6,7 +6,7 @@
; Test regcall when receiving arguments of v64i1 type
define x86_regcallcc i64 @test_argv64i1(<64 x i1> %x0, <64 x i1> %x1, <64 x i1> %x2, <64 x i1> %x3, <64 x i1> %x4, <64 x i1> %x5, <64 x i1> %x6, <64 x i1> %x7, <64 x i1> %x8, <64 x i1> %x9, <64 x i1> %x10, <64 x i1> %x11, <64 x i1> %x12) {
; X32-LABEL: test_argv64i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
@@ -50,7 +50,7 @@ define x86_regcallcc i64 @test_argv64i1(<64 x i1> %x0, <64 x i1> %x1, <64 x i1>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argv64i1:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: addq %rcx, %rax
; WIN64-NEXT: addq %rdx, %rax
; WIN64-NEXT: addq %rdi, %rax
@@ -66,7 +66,7 @@ define x86_regcallcc i64 @test_argv64i1(<64 x i1> %x0, <64 x i1> %x1, <64 x i1>
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argv64i1:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: addq %rcx, %rax
; LINUXOSX64-NEXT: addq %rdx, %rax
; LINUXOSX64-NEXT: addq %rdi, %rax
@@ -111,7 +111,7 @@ define x86_regcallcc i64 @test_argv64i1(<64 x i1> %x0, <64 x i1> %x1, <64 x i1>
; Test regcall when passing arguments of v64i1 type
define i64 @caller_argv64i1() #0 {
; X32-LABEL: caller_argv64i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %edi
; X32-NEXT: subl $88, %esp
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [2,1,2,1]
@@ -132,7 +132,7 @@ define i64 @caller_argv64i1() #0 {
; X32-NEXT: retl
;
; WIN64-LABEL: caller_argv64i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %r15
; WIN64-NEXT: .seh_pushreg 15
; WIN64-NEXT: pushq %r14
@@ -178,7 +178,7 @@ define i64 @caller_argv64i1() #0 {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: caller_argv64i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %r15
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: pushq %r14
@@ -229,13 +229,13 @@ entry:
; Test regcall when returning v64i1 type
define x86_regcallcc <64 x i1> @test_retv64i1() {
; X32-LABEL: test_retv64i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $2, %eax
; X32-NEXT: movl $1, %ecx
; X32-NEXT: retl
;
; CHECK64-LABEL: test_retv64i1:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movabsq $4294967298, %rax # imm = 0x100000002
; CHECK64-NEXT: retq
%a = bitcast i64 4294967298 to <64 x i1>
@@ -245,7 +245,7 @@ define x86_regcallcc <64 x i1> @test_retv64i1() {
; Test regcall when processing result of v64i1 type
define <64 x i1> @caller_retv64i1() #0 {
; X32-LABEL: caller_retv64i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv64i1
; X32-NEXT: kmovd %eax, %k0
; X32-NEXT: kmovd %ecx, %k1
@@ -254,7 +254,7 @@ define <64 x i1> @caller_retv64i1() #0 {
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv64i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %rsi
; WIN64-NEXT: .seh_pushreg 6
; WIN64-NEXT: pushq %rdi
@@ -280,7 +280,7 @@ define <64 x i1> @caller_retv64i1() #0 {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: caller_retv64i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv64i1
@@ -297,7 +297,7 @@ entry:
declare i32 @test_argv32i1helper(<32 x i1> %x0, <32 x i1> %x1, <32 x i1> %x2)
define x86_regcallcc i32 @test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1> %x2) {
; X32-LABEL: test_argv32i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %esp
; X32-NEXT: subl $72, %esp
; X32-NEXT: vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
@@ -324,7 +324,7 @@ define x86_regcallcc i32 @test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argv32i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %r11
; WIN64-NEXT: .seh_pushreg 11
; WIN64-NEXT: pushq %r10
@@ -356,7 +356,7 @@ define x86_regcallcc i32 @test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1>
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_argv32i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $128, %rsp
@@ -408,7 +408,7 @@ entry:
; Test regcall when passing arguments of v32i1 type
define i32 @caller_argv32i1() #0 {
; X32-LABEL: caller_argv32i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl $1, %eax
; X32-NEXT: movl $1, %ecx
; X32-NEXT: movl $1, %edx
@@ -416,7 +416,7 @@ define i32 @caller_argv32i1() #0 {
; X32-NEXT: retl
;
; WIN64-LABEL: caller_argv32i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %rsi
; WIN64-NEXT: .seh_pushreg 6
; WIN64-NEXT: pushq %rdi
@@ -443,7 +443,7 @@ define i32 @caller_argv32i1() #0 {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: caller_argv32i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: movl $1, %eax
@@ -461,12 +461,12 @@ entry:
; Test regcall when returning v32i1 type
define x86_regcallcc <32 x i1> @test_retv32i1() {
; X32-LABEL: test_retv32i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $1, %eax
; X32-NEXT: retl
;
; CHECK64-LABEL: test_retv32i1:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl $1, %eax
; CHECK64-NEXT: retq
%a = bitcast i32 1 to <32 x i1>
@@ -476,13 +476,13 @@ define x86_regcallcc <32 x i1> @test_retv32i1() {
; Test regcall when processing result of v32i1 type
define i32 @caller_retv32i1() #0 {
; X32-LABEL: caller_retv32i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv32i1
; X32-NEXT: incl %eax
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv32i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %rsi
; WIN64-NEXT: .seh_pushreg 6
; WIN64-NEXT: pushq %rdi
@@ -507,7 +507,7 @@ define i32 @caller_retv32i1() #0 {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: caller_retv32i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv32i1
@@ -525,7 +525,7 @@ entry:
declare i16 @test_argv16i1helper(<16 x i1> %x0, <16 x i1> %x1, <16 x i1> %x2)
define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1> %x2) {
; X32-LABEL: test_argv16i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $72, %esp
; X32-NEXT: vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
@@ -552,7 +552,7 @@ define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argv16i1:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %r11
; WIN64-NEXT: .seh_pushreg 11
; WIN64-NEXT: pushq %r10
@@ -584,7 +584,7 @@ define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1>
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_argv16i1:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $128, %rsp
@@ -635,7 +635,7 @@ define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1>
; Test regcall when passing arguments of v16i1 type
define i16 @caller_argv16i1() #0 {
; X32-LABEL: caller_argv16i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl $1, %eax
; X32-NEXT: movl $1, %ecx
; X32-NEXT: movl $1, %edx
@@ -643,7 +643,7 @@ define i16 @caller_argv16i1() #0 {
; X32-NEXT: retl
;
; WIN64-LABEL: caller_argv16i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %rsi
; WIN64-NEXT: .seh_pushreg 6
; WIN64-NEXT: pushq %rdi
@@ -670,7 +670,7 @@ define i16 @caller_argv16i1() #0 {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: caller_argv16i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: movl $1, %eax
@@ -688,12 +688,12 @@ entry:
; Test regcall when returning v16i1 type
define x86_regcallcc <16 x i1> @test_retv16i1() {
; X32-LABEL: test_retv16i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movw $1, %ax
; X32-NEXT: retl
;
; CHECK64-LABEL: test_retv16i1:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movw $1, %ax
; CHECK64-NEXT: retq
%a = bitcast i16 1 to <16 x i1>
@@ -703,7 +703,7 @@ define x86_regcallcc <16 x i1> @test_retv16i1() {
; Test regcall when processing result of v16i1 type
define i16 @caller_retv16i1() #0 {
; X32-LABEL: caller_retv16i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv16i1
; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; X32-NEXT: incl %eax
@@ -711,7 +711,7 @@ define i16 @caller_retv16i1() #0 {
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv16i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %rsi
; WIN64-NEXT: .seh_pushreg 6
; WIN64-NEXT: pushq %rdi
@@ -738,7 +738,7 @@ define i16 @caller_retv16i1() #0 {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: caller_retv16i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv16i1
@@ -758,7 +758,7 @@ entry:
declare i8 @test_argv8i1helper(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)
define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2) {
; X32-LABEL: test_argv8i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $72, %esp
; X32-NEXT: vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
@@ -785,7 +785,7 @@ define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)
; X32-NEXT: retl
;
; WIN64-LABEL: test_argv8i1:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %r11
; WIN64-NEXT: .seh_pushreg 11
; WIN64-NEXT: pushq %r10
@@ -817,7 +817,7 @@ define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_argv8i1:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $128, %rsp
@@ -868,7 +868,7 @@ define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)
; Test regcall when passing arguments of v8i1 type
define i8 @caller_argv8i1() #0 {
; X32-LABEL: caller_argv8i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl $1, %eax
; X32-NEXT: movl $1, %ecx
; X32-NEXT: movl $1, %edx
@@ -876,7 +876,7 @@ define i8 @caller_argv8i1() #0 {
; X32-NEXT: retl
;
; WIN64-LABEL: caller_argv8i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %rsi
; WIN64-NEXT: .seh_pushreg 6
; WIN64-NEXT: pushq %rdi
@@ -903,7 +903,7 @@ define i8 @caller_argv8i1() #0 {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: caller_argv8i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: movl $1, %eax
@@ -921,12 +921,12 @@ entry:
; Test regcall when returning v8i1 type
define x86_regcallcc <8 x i1> @test_retv8i1() {
; X32-LABEL: test_retv8i1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb $1, %al
; X32-NEXT: retl
;
; CHECK64-LABEL: test_retv8i1:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movb $1, %al
; CHECK64-NEXT: retq
%a = bitcast i8 1 to <8 x i1>
@@ -936,7 +936,7 @@ define x86_regcallcc <8 x i1> @test_retv8i1() {
; Test regcall when processing result of v8i1 type
define <8 x i1> @caller_retv8i1() #0 {
; X32-LABEL: caller_retv8i1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv8i1
; X32-NEXT: # kill: %al<def> %al<kill> %eax<def>
; X32-NEXT: kmovd %eax, %k0
@@ -946,7 +946,7 @@ define <8 x i1> @caller_retv8i1() #0 {
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv8i1:
-; WIN64: # BB#0: # %entry
+; WIN64: # %bb.0: # %entry
; WIN64-NEXT: pushq %rsi
; WIN64-NEXT: .seh_pushreg 6
; WIN64-NEXT: pushq %rdi
@@ -975,7 +975,7 @@ define <8 x i1> @caller_retv8i1() #0 {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: caller_retv8i1:
-; LINUXOSX64: # BB#0: # %entry
+; LINUXOSX64: # %bb.0: # %entry
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv8i1
diff --git a/test/CodeGen/X86/avx512-regcall-NoMask.ll b/test/CodeGen/X86/avx512-regcall-NoMask.ll
index 82c435f2268..b8d6be37d92 100644
--- a/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -6,19 +6,19 @@
; Test regcall when receiving/returning i1
define x86_regcallcc i1 @test_argReti1(i1 %a) {
; X32-LABEL: test_argReti1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: incb %al
; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti1:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
; WIN64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti1:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
; LINUXOSX64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; LINUXOSX64-NEXT: retq
@@ -29,7 +29,7 @@ define x86_regcallcc i1 @test_argReti1(i1 %a) {
; Test regcall when passing/retrieving i1
define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
; X32-LABEL: test_CallargReti1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incb %al
; X32-NEXT: movzbl %al, %eax
@@ -39,7 +39,7 @@ define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti1:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
@@ -54,7 +54,7 @@ define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti1:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
@@ -73,19 +73,19 @@ define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
; Test regcall when receiving/returning i8
define x86_regcallcc i8 @test_argReti8(i8 %a) {
; X32-LABEL: test_argReti8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: incb %al
; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti8:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
; WIN64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti8:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
; LINUXOSX64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; LINUXOSX64-NEXT: retq
@@ -96,7 +96,7 @@ define x86_regcallcc i8 @test_argReti8(i8 %a) {
; Test regcall when passing/retrieving i8
define x86_regcallcc i8 @test_CallargReti8(i8 %a) {
; X32-LABEL: test_CallargReti8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incb %al
; X32-NEXT: movzbl %al, %eax
@@ -106,7 +106,7 @@ define x86_regcallcc i8 @test_CallargReti8(i8 %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti8:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
@@ -121,7 +121,7 @@ define x86_regcallcc i8 @test_CallargReti8(i8 %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti8:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
@@ -140,19 +140,19 @@ define x86_regcallcc i8 @test_CallargReti8(i8 %a) {
; Test regcall when receiving/returning i16
define x86_regcallcc i16 @test_argReti16(i16 %a) {
; X32-LABEL: test_argReti16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: incl %eax
; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti16:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti16:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; LINUXOSX64-NEXT: retq
@@ -163,7 +163,7 @@ define x86_regcallcc i16 @test_argReti16(i16 %a) {
; Test regcall when passing/retrieving i16
define x86_regcallcc i16 @test_CallargReti16(i16 %a) {
; X32-LABEL: test_CallargReti16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti16
@@ -174,7 +174,7 @@ define x86_regcallcc i16 @test_CallargReti16(i16 %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti16:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
@@ -190,7 +190,7 @@ define x86_regcallcc i16 @test_CallargReti16(i16 %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti16:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
@@ -210,17 +210,17 @@ define x86_regcallcc i16 @test_CallargReti16(i16 %a) {
; Test regcall when receiving/returning i32
define x86_regcallcc i32 @test_argReti32(i32 %a) {
; X32-LABEL: test_argReti32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: incl %eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti32:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti32:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: retq
%add = add i32 %a, 1
@@ -230,7 +230,7 @@ define x86_regcallcc i32 @test_argReti32(i32 %a) {
; Test regcall when passing/retrieving i32
define x86_regcallcc i32 @test_CallargReti32(i32 %a) {
; X32-LABEL: test_CallargReti32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti32
@@ -239,7 +239,7 @@ define x86_regcallcc i32 @test_CallargReti32(i32 %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti32:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
@@ -253,7 +253,7 @@ define x86_regcallcc i32 @test_CallargReti32(i32 %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti32:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
@@ -271,19 +271,19 @@ define x86_regcallcc i32 @test_CallargReti32(i32 %a) {
; Test regcall when receiving/returning i64
define x86_regcallcc i64 @test_argReti64(i64 %a) {
; X32-LABEL: test_argReti64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addl $3, %eax
; X32-NEXT: adcl $1, %ecx
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti64:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: movabsq $4294967299, %rcx # imm = 0x100000003
; WIN64-NEXT: addq %rcx, %rax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti64:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: movabsq $4294967299, %rcx # imm = 0x100000003
; LINUXOSX64-NEXT: addq %rcx, %rax
; LINUXOSX64-NEXT: retq
@@ -294,7 +294,7 @@ define x86_regcallcc i64 @test_argReti64(i64 %a) {
; Test regcall when passing/retrieving i64
define x86_regcallcc i64 @test_CallargReti64(i64 %a) {
; X32-LABEL: test_CallargReti64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: addl $1, %eax
; X32-NEXT: adcl $0, %ecx
@@ -305,7 +305,7 @@ define x86_regcallcc i64 @test_CallargReti64(i64 %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargReti64:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
@@ -319,7 +319,7 @@ define x86_regcallcc i64 @test_CallargReti64(i64 %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargReti64:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
@@ -337,17 +337,17 @@ define x86_regcallcc i64 @test_CallargReti64(i64 %a) {
; Test regcall when receiving/returning float
define x86_regcallcc float @test_argRetFloat(float %a) {
; X32-LABEL: test_argRetFloat:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vaddss __real@3f800000, %xmm0, %xmm0
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetFloat:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: vaddss __real@{{.*}}(%rip), %xmm0, %xmm0
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetFloat:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; LINUXOSX64-NEXT: retq
%add = fadd float 1.0, %a
@@ -357,7 +357,7 @@ define x86_regcallcc float @test_argRetFloat(float %a) {
; Test regcall when passing/retrieving float
define x86_regcallcc float @test_CallargRetFloat(float %a) {
; X32-LABEL: test_CallargRetFloat:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $24, %esp
; X32-NEXT: vmovups %xmm4, (%esp) # 16-byte Spill
@@ -371,7 +371,7 @@ define x86_regcallcc float @test_CallargRetFloat(float %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRetFloat:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $16, %rsp
@@ -392,7 +392,7 @@ define x86_regcallcc float @test_CallargRetFloat(float %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRetFloat:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $16, %rsp
@@ -417,17 +417,17 @@ define x86_regcallcc float @test_CallargRetFloat(float %a) {
; Test regcall when receiving/returning double
define x86_regcallcc double @test_argRetDouble(double %a) {
; X32-LABEL: test_argRetDouble:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vaddsd __real@3ff0000000000000, %xmm0, %xmm0
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetDouble:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: vaddsd __real@{{.*}}(%rip), %xmm0, %xmm0
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetDouble:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; LINUXOSX64-NEXT: retq
%add = fadd double %a, 1.0
@@ -437,7 +437,7 @@ define x86_regcallcc double @test_argRetDouble(double %a) {
; Test regcall when passing/retrieving double
define x86_regcallcc double @test_CallargRetDouble(double %a) {
; X32-LABEL: test_CallargRetDouble:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $24, %esp
; X32-NEXT: vmovups %xmm4, (%esp) # 16-byte Spill
@@ -451,7 +451,7 @@ define x86_regcallcc double @test_CallargRetDouble(double %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRetDouble:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $16, %rsp
@@ -472,7 +472,7 @@ define x86_regcallcc double @test_CallargRetDouble(double %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRetDouble:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $16, %rsp
@@ -497,17 +497,17 @@ define x86_regcallcc double @test_CallargRetDouble(double %a) {
; Test regcall when receiving/returning long double
define x86_regcallcc x86_fp80 @test_argRetf80(x86_fp80 %a0) nounwind {
; X32-LABEL: test_argRetf80:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: fadd %st(0), %st(0)
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetf80:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: fadd %st(0), %st(0)
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetf80:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: fadd %st(0), %st(0)
; LINUXOSX64-NEXT: retq
%r0 = fadd x86_fp80 %a0, %a0
@@ -517,7 +517,7 @@ define x86_regcallcc x86_fp80 @test_argRetf80(x86_fp80 %a0) nounwind {
; Test regcall when passing/retrieving long double
define x86_regcallcc x86_fp80 @test_CallargRetf80(x86_fp80 %a) {
; X32-LABEL: test_CallargRetf80:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: fadd %st(0), %st(0)
; X32-NEXT: calll _test_argRetf80
@@ -526,7 +526,7 @@ define x86_regcallcc x86_fp80 @test_CallargRetf80(x86_fp80 %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRetf80:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
@@ -540,7 +540,7 @@ define x86_regcallcc x86_fp80 @test_CallargRetf80(x86_fp80 %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRetf80:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
@@ -558,17 +558,17 @@ define x86_regcallcc x86_fp80 @test_CallargRetf80(x86_fp80 %a) {
; Test regcall when receiving/returning pointer
define x86_regcallcc [4 x i32]* @test_argRetPointer([4 x i32]* %a) {
; X32-LABEL: test_argRetPointer:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: incl %eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetPointer:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetPointer:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: retq
%b = ptrtoint [4 x i32]* %a to i32
@@ -580,7 +580,7 @@ define x86_regcallcc [4 x i32]* @test_argRetPointer([4 x i32]* %a) {
; Test regcall when passing/retrieving pointer
define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) {
; X32-LABEL: test_CallargRetPointer:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argRetPointer
@@ -589,7 +589,7 @@ define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRetPointer:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: .seh_endprologue
@@ -603,7 +603,7 @@ define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRetPointer:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
@@ -625,17 +625,17 @@ define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) {
; Test regcall when receiving/returning 128 bit vector
define x86_regcallcc <4 x i32> @test_argRet128Vector(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: test_argRet128Vector:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRet128Vector:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRet128Vector:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; LINUXOSX64-NEXT: retq
%d = select <4 x i1> undef , <4 x i32> %a, <4 x i32> %b
@@ -645,7 +645,7 @@ define x86_regcallcc <4 x i32> @test_argRet128Vector(<4 x i32> %a, <4 x i32> %b)
; Test regcall when passing/retrieving 128 bit vector
define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
; X32-LABEL: test_CallargRet128Vector:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $24, %esp
; X32-NEXT: vmovups %xmm4, (%esp) # 16-byte Spill
@@ -659,7 +659,7 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRet128Vector:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $16, %rsp
@@ -680,7 +680,7 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRet128Vector:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $16, %rsp
@@ -704,17 +704,17 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
; Test regcall when receiving/returning 256 bit vector
define x86_regcallcc <8 x i32> @test_argRet256Vector(<8 x i32> %a, <8 x i32> %b) {
; X32-LABEL: test_argRet256Vector:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRet256Vector:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRet256Vector:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; LINUXOSX64-NEXT: retq
%d = select <8 x i1> undef , <8 x i32> %a, <8 x i32> %b
@@ -724,7 +724,7 @@ define x86_regcallcc <8 x i32> @test_argRet256Vector(<8 x i32> %a, <8 x i32> %b)
; Test regcall when passing/retrieving 256 bit vector
define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a) {
; X32-LABEL: test_CallargRet256Vector:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $56, %esp
; X32-NEXT: vmovdqu %ymm0, (%esp) # 32-byte Spill
@@ -737,7 +737,7 @@ define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRet256Vector:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $48, %rsp
@@ -756,7 +756,7 @@ define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRet256Vector:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $48, %rsp
@@ -778,17 +778,17 @@ define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a) {
; Test regcall when receiving/returning 512 bit vector
define x86_regcallcc <16 x i32> @test_argRet512Vector(<16 x i32> %a, <16 x i32> %b) {
; X32-LABEL: test_argRet512Vector:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRet512Vector:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRet512Vector:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; LINUXOSX64-NEXT: retq
%d = select <16 x i1> undef , <16 x i32> %a, <16 x i32> %b
@@ -798,7 +798,7 @@ define x86_regcallcc <16 x i32> @test_argRet512Vector(<16 x i32> %a, <16 x i32>
; Test regcall when passing/retrieving 512 bit vector
define x86_regcallcc <16 x i32> @test_CallargRet512Vector(<16 x i32> %a) {
; X32-LABEL: test_CallargRet512Vector:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esp
; X32-NEXT: subl $120, %esp
; X32-NEXT: vmovdqu64 %zmm0, (%esp) # 64-byte Spill
@@ -811,7 +811,7 @@ define x86_regcallcc <16 x i32> @test_CallargRet512Vector(<16 x i32> %a) {
; X32-NEXT: retl
;
; WIN64-LABEL: test_CallargRet512Vector:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rsp
; WIN64-NEXT: .seh_pushreg 4
; WIN64-NEXT: subq $112, %rsp
@@ -830,7 +830,7 @@ define x86_regcallcc <16 x i32> @test_CallargRet512Vector(<16 x i32> %a) {
; WIN64-NEXT: .seh_endproc
;
; LINUXOSX64-LABEL: test_CallargRet512Vector:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rsp
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: subq $112, %rsp
@@ -852,7 +852,7 @@ define x86_regcallcc <16 x i32> @test_CallargRet512Vector(<16 x i32> %a) {
; Test regcall when running multiple input parameters - callee saved xmms
define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b, <32 x float> %c) nounwind {
; X32-LABEL: testf32_inp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $44, %esp
; X32-NEXT: vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
; X32-NEXT: vmovups %xmm6, (%esp) # 16-byte Spill
@@ -870,7 +870,7 @@ define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b,
; X32-NEXT: retl
;
; WIN64-LABEL: testf32_inp:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: vaddps %zmm2, %zmm0, %zmm6
; WIN64-NEXT: vaddps %zmm3, %zmm1, %zmm7
; WIN64-NEXT: vmulps %zmm2, %zmm0, %zmm0
@@ -882,7 +882,7 @@ define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b,
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: testf32_inp:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vaddps %zmm2, %zmm0, %zmm6
; LINUXOSX64-NEXT: vaddps %zmm3, %zmm1, %zmm7
; LINUXOSX64-NEXT: vmulps %zmm2, %zmm0, %zmm0
@@ -902,7 +902,7 @@ define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b,
; Test regcall when running multiple input parameters - callee saved GPRs
define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %b1, i32 %b2, i32 %b3, i32 %b4, i32 %b5, i32 %b6) nounwind {
; X32-LABEL: testi32_inp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: subl $20, %esp
@@ -954,7 +954,7 @@ define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a
; X32-NEXT: retl
;
; WIN64-LABEL: testi32_inp:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %r13
; WIN64-NEXT: pushq %rbp
; WIN64-NEXT: pushq %rbx
@@ -993,7 +993,7 @@ define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: testi32_inp:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rbp
; LINUXOSX64-NEXT: pushq %rbx
; LINUXOSX64-NEXT: movl %eax, %r10d
@@ -1058,7 +1058,7 @@ define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a
; Test that parameters, overflowing register capacity, are passed through the stack
define x86_regcallcc <32 x float> @testf32_stack(<32 x float> %a0, <32 x float> %b0, <32 x float> %c0, <32 x float> %a1, <32 x float> %b1, <32 x float> %c1, <32 x float> %a2, <32 x float> %b2, <32 x float> %c2) nounwind {
; X32-LABEL: testf32_stack:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-64, %esp
@@ -1084,7 +1084,7 @@ define x86_regcallcc <32 x float> @testf32_stack(<32 x float> %a0, <32 x float>
; X32-NEXT: retl
;
; WIN64-LABEL: testf32_stack:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: pushq %rbp
; WIN64-NEXT: subq $48, %rsp
; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
@@ -1110,7 +1110,7 @@ define x86_regcallcc <32 x float> @testf32_stack(<32 x float> %a0, <32 x float>
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: testf32_stack:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: pushq %rbp
; LINUXOSX64-NEXT: movq %rsp, %rbp
; LINUXOSX64-NEXT: andq $-64, %rsp
@@ -1148,7 +1148,7 @@ define x86_regcallcc <32 x float> @testf32_stack(<32 x float> %a0, <32 x float>
; Test regcall when passing/retrieving mixed types
define x86_regcallcc i32 @test_argRetMixTypes(double, float, i8 signext, i32, i64, i16 signext, i32*) #0 {
; X32-LABEL: test_argRetMixTypes:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
@@ -1176,7 +1176,7 @@ define x86_regcallcc i32 @test_argRetMixTypes(double, float, i8 signext, i32, i6
; X32-NEXT: retl
;
; WIN64-LABEL: test_argRetMixTypes:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; WIN64-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; WIN64-NEXT: vcvtsi2sdl %eax, %xmm2, %xmm1
@@ -1193,7 +1193,7 @@ define x86_regcallcc i32 @test_argRetMixTypes(double, float, i8 signext, i32, i6
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argRetMixTypes:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; LINUXOSX64-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; LINUXOSX64-NEXT: vcvtsi2sdl %eax, %xmm2, %xmm1
@@ -1229,7 +1229,7 @@ define x86_regcallcc i32 @test_argRetMixTypes(double, float, i8 signext, i32, i6
define x86_regcallcc %struct.complex @test_argMultiRet(float, double, i32, i8, i64) local_unnamed_addr #0 {
; X32-LABEL: test_argMultiRet:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vaddsd __real@4014000000000000, %xmm1, %xmm1
; X32-NEXT: movl $4, %eax
; X32-NEXT: movb $7, %cl
@@ -1238,7 +1238,7 @@ define x86_regcallcc %struct.complex @test_argMultiRet(float, double, i32, i8, i
; X32-NEXT: retl
;
; WIN64-LABEL: test_argMultiRet:
-; WIN64: # BB#0:
+; WIN64: # %bb.0:
; WIN64-NEXT: vaddsd __real@{{.*}}(%rip), %xmm1, %xmm1
; WIN64-NEXT: movl $4, %eax
; WIN64-NEXT: movb $7, %cl
@@ -1246,7 +1246,7 @@ define x86_regcallcc %struct.complex @test_argMultiRet(float, double, i32, i8, i
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argMultiRet:
-; LINUXOSX64: # BB#0:
+; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; LINUXOSX64-NEXT: movl $4, %eax
; LINUXOSX64-NEXT: movb $7, %cl
diff --git a/test/CodeGen/X86/avx512-rotate.ll b/test/CodeGen/X86/avx512-rotate.ll
index c2ea0bc4ab7..203092e88d3 100644
--- a/test/CodeGen/X86/avx512-rotate.ll
+++ b/test/CodeGen/X86/avx512-rotate.ll
@@ -14,7 +14,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32>, <16 x i32
define <16 x i32> @test_splat_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; KNL-LABEL: test_splat_rol_v16i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprold $5, %zmm0, %zmm1 {%k1}
; KNL-NEXT: vprold $5, %zmm0, %zmm2 {%k1} {z}
@@ -24,7 +24,7 @@ define <16 x i32> @test_splat_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_rol_v16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprold $5, %zmm0, %zmm1 {%k1}
; SKX-NEXT: vprold $5, %zmm0, %zmm2 {%k1} {z}
@@ -42,7 +42,7 @@ define <16 x i32> @test_splat_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2
define <8 x i64>@test_splat_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; KNL-LABEL: test_splat_rol_v8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprolq $5, %zmm0, %zmm1 {%k1}
; KNL-NEXT: vprolq $5, %zmm0, %zmm2 {%k1} {z}
@@ -52,7 +52,7 @@ define <8 x i64>@test_splat_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_rol_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprolq $5, %zmm0, %zmm1 {%k1}
; SKX-NEXT: vprolq $5, %zmm0, %zmm2 {%k1} {z}
@@ -70,7 +70,7 @@ define <8 x i64>@test_splat_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
define <16 x i32> @test_splat_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; KNL-LABEL: test_splat_ror_v16i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprord $5, %zmm0, %zmm1 {%k1}
; KNL-NEXT: vprord $5, %zmm0, %zmm2 {%k1} {z}
@@ -80,7 +80,7 @@ define <16 x i32> @test_splat_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_ror_v16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprord $5, %zmm0, %zmm1 {%k1}
; SKX-NEXT: vprord $5, %zmm0, %zmm2 {%k1} {z}
@@ -98,7 +98,7 @@ define <16 x i32> @test_splat_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2
define <8 x i64>@test_splat_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; KNL-LABEL: test_splat_ror_v8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprorq $5, %zmm0, %zmm1 {%k1}
; KNL-NEXT: vprorq $5, %zmm0, %zmm2 {%k1} {z}
@@ -108,7 +108,7 @@ define <8 x i64>@test_splat_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_ror_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprorq $5, %zmm0, %zmm1 {%k1}
; SKX-NEXT: vprorq $5, %zmm0, %zmm2 {%k1} {z}
@@ -128,7 +128,7 @@ define <8 x i64>@test_splat_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
define <16 x i32> @test_splat_bounds_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; KNL-LABEL: test_splat_bounds_rol_v16i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprold $1, %zmm0, %zmm1 {%k1}
; KNL-NEXT: vprold $31, %zmm0, %zmm2 {%k1} {z}
@@ -138,7 +138,7 @@ define <16 x i32> @test_splat_bounds_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1,
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_bounds_rol_v16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprold $1, %zmm0, %zmm1 {%k1}
; SKX-NEXT: vprold $31, %zmm0, %zmm2 {%k1} {z}
@@ -156,7 +156,7 @@ define <16 x i32> @test_splat_bounds_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1,
define <8 x i64>@test_splat_bounds_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; KNL-LABEL: test_splat_bounds_rol_v8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprolq $62, %zmm0, %zmm1 {%k1}
; KNL-NEXT: vprolq $1, %zmm0, %zmm2 {%k1} {z}
@@ -166,7 +166,7 @@ define <8 x i64>@test_splat_bounds_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_bounds_rol_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprolq $62, %zmm0, %zmm1 {%k1}
; SKX-NEXT: vprolq $1, %zmm0, %zmm2 {%k1} {z}
@@ -184,7 +184,7 @@ define <8 x i64>@test_splat_bounds_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x
define <16 x i32> @test_splat_bounds_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; KNL-LABEL: test_splat_bounds_ror_v16i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprord $1, %zmm0, %zmm1 {%k1}
; KNL-NEXT: vprord $31, %zmm0, %zmm2 {%k1} {z}
@@ -194,7 +194,7 @@ define <16 x i32> @test_splat_bounds_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1,
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_bounds_ror_v16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprord $1, %zmm0, %zmm1 {%k1}
; SKX-NEXT: vprord $31, %zmm0, %zmm2 {%k1} {z}
@@ -212,7 +212,7 @@ define <16 x i32> @test_splat_bounds_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1,
define <8 x i64>@test_splat_bounds_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; KNL-LABEL: test_splat_bounds_ror_v8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprorq $62, %zmm0, %zmm1 {%k1}
; KNL-NEXT: vprorq $1, %zmm0, %zmm2 {%k1} {z}
@@ -222,7 +222,7 @@ define <8 x i64>@test_splat_bounds_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_bounds_ror_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprorq $62, %zmm0, %zmm1 {%k1}
; SKX-NEXT: vprorq $1, %zmm0, %zmm2 {%k1} {z}
@@ -244,7 +244,7 @@ define <8 x i64>@test_splat_bounds_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x
define <8 x i64> @test_fold_rol_v8i64() {
; CHECK-LABEL: test_fold_rol_v8i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [1,2,4,9223372036854775808,2,4611686018427387904,9223372036854775808,9223372036854775808]
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, <8 x i64> <i64 0, i64 1, i64 2, i64 63, i64 65, i64 65534, i64 65535, i64 -1>, <8 x i64> zeroinitializer, i8 -1)
@@ -253,7 +253,7 @@ define <8 x i64> @test_fold_rol_v8i64() {
define <16 x i32> @test_fold_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1) {
; CHECK-LABEL: test_fold_rol_v16i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT: vprolvd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -264,7 +264,7 @@ define <16 x i32> @test_fold_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1) {
define <8 x i64> @test_fold_ror_v8i64() {
; CHECK-LABEL: test_fold_ror_v8i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1]
; CHECK-NEXT: vprorvq {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -275,7 +275,7 @@ define <8 x i64> @test_fold_ror_v8i64() {
define <16 x i32> @test_fold_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1) {
; CHECK-LABEL: test_fold_ror_v16i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT: vprorvd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-scalarIntrinsics.ll b/test/CodeGen/X86/avx512-scalarIntrinsics.ll
index 20e8b60c114..0286aabd61a 100644
--- a/test/CodeGen/X86/avx512-scalarIntrinsics.ll
+++ b/test/CodeGen/X86/avx512-scalarIntrinsics.ll
@@ -5,7 +5,7 @@
define <4 x float> @test_rsqrt14_ss(<4 x float> %a0) {
; CHECK-LABEL: test_rsqrt14_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1) ;
@@ -14,7 +14,7 @@ define <4 x float> @test_rsqrt14_ss(<4 x float> %a0) {
define <4 x float> @test_rsqrt14_ss_load(<4 x float> %a0, <4 x float>* %a1ptr) {
; CHECK-LABEL: test_rsqrt14_ss_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14ss (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%a1 = load <4 x float>, <4 x float>* %a1ptr
@@ -25,7 +25,7 @@ declare <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float>, <4 x float>, <4 x f
define <4 x float> @test_rcp14_ss(<4 x float> %a0) {
; CHECK-LABEL: test_rcp14_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14ss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1) ;
@@ -34,7 +34,7 @@ define <4 x float> @test_rcp14_ss(<4 x float> %a0) {
define <4 x float> @test_rcp14_ss_load(<4 x float> %a0, <4 x float>* %a1ptr) {
; CHECK-LABEL: test_rcp14_ss_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14ss (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%a1 = load <4 x float>, <4 x float>* %a1ptr
@@ -45,7 +45,7 @@ declare <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float>, <4 x float>, <4 x flo
define <2 x double> @test_rsqrt14_sd(<2 x double> %a0) {
; CHECK-LABEL: test_rsqrt14_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14sd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.rsqrt14.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 -1) ;
@@ -54,7 +54,7 @@ define <2 x double> @test_rsqrt14_sd(<2 x double> %a0) {
define <2 x double> @test_rsqrt14_sd_load(<2 x double> %a0, <2 x double>* %a1ptr) {
; CHECK-LABEL: test_rsqrt14_sd_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14sd (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%a1 = load <2 x double>, <2 x double>* %a1ptr
@@ -65,7 +65,7 @@ declare <2 x double> @llvm.x86.avx512.rsqrt14.sd(<2 x double>, <2 x double>, <2
define <2 x double> @test_rcp14_sd(<2 x double> %a0) {
; CHECK-LABEL: test_rcp14_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14sd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.rcp14.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 -1) ;
@@ -75,7 +75,7 @@ define <2 x double> @test_rcp14_sd(<2 x double> %a0) {
define <2 x double> @test_rcp14_sd_load(<2 x double> %a0, <2 x double>* %a1ptr) {
; CHECK-LABEL: test_rcp14_sd_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14sd (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%a1 = load <2 x double>, <2 x double>* %a1ptr
@@ -87,7 +87,7 @@ declare <2 x double> @llvm.x86.avx512.rcp14.sd(<2 x double>, <2 x double>, <2 x
declare <4 x float> @llvm.x86.avx512.mask.scalef.ss(<4 x float>, <4 x float>,<4 x float>, i8, i32)
define <4 x float>@test_int_x86_avx512_mask_scalef_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; SKX-LABEL: test_int_x86_avx512_mask_scalef_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vscalefss %xmm1, %xmm0, %xmm2 {%k1}
; SKX-NEXT: vscalefss {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -95,7 +95,7 @@ define <4 x float>@test_int_x86_avx512_mask_scalef_ss(<4 x float> %x0, <4 x floa
; SKX-NEXT: retq
;
; KNL-LABEL: test_int_x86_avx512_mask_scalef_ss:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vscalefss %xmm1, %xmm0, %xmm2 {%k1}
; KNL-NEXT: vscalefss {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -109,7 +109,7 @@ define <4 x float>@test_int_x86_avx512_mask_scalef_ss(<4 x float> %x0, <4 x floa
define <4 x float>@test_int_x86_avx512_mask_scalef_ss_load(<4 x float> %x0, <4 x float>* %x1ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_scalef_ss_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vscalefss (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%x1 = load <4 x float>, <4 x float>* %x1ptr
@@ -120,7 +120,7 @@ define <4 x float>@test_int_x86_avx512_mask_scalef_ss_load(<4 x float> %x0, <4 x
declare <2 x double> @llvm.x86.avx512.mask.scalef.sd(<2 x double>, <2 x double>,<2 x double>, i8, i32)
define <2 x double>@test_int_x86_avx512_mask_scalef_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; SKX-LABEL: test_int_x86_avx512_mask_scalef_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vscalefsd %xmm1, %xmm0, %xmm2 {%k1}
; SKX-NEXT: vscalefsd {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -128,7 +128,7 @@ define <2 x double>@test_int_x86_avx512_mask_scalef_sd(<2 x double> %x0, <2 x do
; SKX-NEXT: retq
;
; KNL-LABEL: test_int_x86_avx512_mask_scalef_sd:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vscalefsd %xmm1, %xmm0, %xmm2 {%k1}
; KNL-NEXT: vscalefsd {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -142,7 +142,7 @@ define <2 x double>@test_int_x86_avx512_mask_scalef_sd(<2 x double> %x0, <2 x do
define <2 x double>@test_int_x86_avx512_mask_scalef_sd_load(<2 x double> %x0, <2 x double>* %x1ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_scalef_sd_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vscalefsd (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%x1 = load <2 x double>, <2 x double>* %x1ptr
diff --git a/test/CodeGen/X86/avx512-scalar_mask.ll b/test/CodeGen/X86/avx512-scalar_mask.ll
index f6ee8ff4c0f..e0a91575636 100644
--- a/test/CodeGen/X86/avx512-scalar_mask.ll
+++ b/test/CodeGen/X86/avx512-scalar_mask.ll
@@ -6,7 +6,7 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -16,7 +16,7 @@ define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %
define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -27,7 +27,7 @@ define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float>
; FIXME: we should just return %xmm0 here.
define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
@@ -39,7 +39,7 @@ define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float
; FIXME: we should zero the lower element of xmm0 and return it.
define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
@@ -51,7 +51,7 @@ define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x floa
; FIXME: we should just return %xmm0 here.
define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movb $2, %al
; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
@@ -63,7 +63,7 @@ define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float
; FIXME: we should zero the lower element of xmm0 and return it.
define <4 x float>@test_const2_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movb $2, %al
; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
@@ -74,7 +74,7 @@ define <4 x float>@test_const2_maskz(<4 x float> %v0, <4 x float> %v1, <4 x floa
define <4 x float>@test_const_allone_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const_allone_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 -1, i32 4)
@@ -83,7 +83,7 @@ define <4 x float>@test_const_allone_mask(<4 x float> %v0, <4 x float> %v1, <4 x
define <4 x float>@test_const_allone_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const_allone_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 -1, i32 4)
@@ -92,7 +92,7 @@ define <4 x float>@test_const_allone_maskz(<4 x float> %v0, <4 x float> %v1, <4
define <4 x float>@test_const_3_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const_3_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 3, i32 4)
@@ -101,7 +101,7 @@ define <4 x float>@test_const_3_mask(<4 x float> %v0, <4 x float> %v1, <4 x floa
define <4 x float>@test_const_3_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const_3_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 3, i32 4)
diff --git a/test/CodeGen/X86/avx512-schedule.ll b/test/CodeGen/X86/avx512-schedule.ll
index 7b27630ca5b..3ef36e7e5be 100755
--- a/test/CodeGen/X86/avx512-schedule.ll
+++ b/test/CodeGen/X86/avx512-schedule.ll
@@ -6,12 +6,12 @@
define <8 x double> @addpd512(<8 x double> %y, <8 x double> %x) {
; GENERIC-LABEL: addpd512:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vaddpd %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: addpd512:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vaddpd %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -21,12 +21,12 @@ entry:
define <8 x double> @addpd512fold(<8 x double> %y) {
; GENERIC-LABEL: addpd512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: addpd512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -36,12 +36,12 @@ entry:
define <16 x float> @addps512(<16 x float> %y, <16 x float> %x) {
; GENERIC-LABEL: addps512:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vaddps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: addps512:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vaddps %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -51,12 +51,12 @@ entry:
define <16 x float> @addps512fold(<16 x float> %y) {
; GENERIC-LABEL: addps512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: addps512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -66,12 +66,12 @@ entry:
define <8 x double> @subpd512(<8 x double> %y, <8 x double> %x) {
; GENERIC-LABEL: subpd512:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vsubpd %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: subpd512:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vsubpd %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -81,12 +81,12 @@ entry:
define <8 x double> @subpd512fold(<8 x double> %y, <8 x double>* %x) {
; GENERIC-LABEL: subpd512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vsubpd (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: subpd512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vsubpd (%rdi), %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -97,12 +97,12 @@ entry:
define <16 x float> @subps512(<16 x float> %y, <16 x float> %x) {
; GENERIC-LABEL: subps512:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vsubps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: subps512:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vsubps %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -112,12 +112,12 @@ entry:
define <16 x float> @subps512fold(<16 x float> %y, <16 x float>* %x) {
; GENERIC-LABEL: subps512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vsubps (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: subps512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vsubps (%rdi), %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -128,12 +128,12 @@ entry:
define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; GENERIC-LABEL: imulq512:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmullq %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: imulq512:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %zmm0, %zmm1, %zmm0 # sched: [12:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%z = mul <8 x i64>%x, %y
@@ -142,12 +142,12 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; GENERIC-LABEL: imulq256:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmullq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: imulq256:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %ymm0, %ymm1, %ymm0 # sched: [12:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%z = mul <4 x i64>%x, %y
@@ -156,12 +156,12 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; GENERIC-LABEL: imulq128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmullq %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: imulq128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%z = mul <2 x i64>%x, %y
@@ -170,12 +170,12 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
define <8 x double> @mulpd512(<8 x double> %y, <8 x double> %x) {
; GENERIC-LABEL: mulpd512:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vmulpd %zmm0, %zmm1, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mulpd512:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmulpd %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -185,12 +185,12 @@ entry:
define <8 x double> @mulpd512fold(<8 x double> %y) {
; GENERIC-LABEL: mulpd512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vmulpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mulpd512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmulpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -200,12 +200,12 @@ entry:
define <16 x float> @mulps512(<16 x float> %y, <16 x float> %x) {
; GENERIC-LABEL: mulps512:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vmulps %zmm0, %zmm1, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mulps512:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmulps %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -215,12 +215,12 @@ entry:
define <16 x float> @mulps512fold(<16 x float> %y) {
; GENERIC-LABEL: mulps512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mulps512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -230,12 +230,12 @@ entry:
define <8 x double> @divpd512(<8 x double> %y, <8 x double> %x) {
; GENERIC-LABEL: divpd512:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vdivpd %zmm0, %zmm1, %zmm0 # sched: [24:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: divpd512:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vdivpd %zmm0, %zmm1, %zmm0 # sched: [23:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -245,12 +245,12 @@ entry:
define <8 x double> @divpd512fold(<8 x double> %y) {
; GENERIC-LABEL: divpd512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vdivpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [28:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: divpd512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vdivpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [30:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -260,12 +260,12 @@ entry:
define <16 x float> @divps512(<16 x float> %y, <16 x float> %x) {
; GENERIC-LABEL: divps512:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [24:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: divps512:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [23:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -275,12 +275,12 @@ entry:
define <16 x float> @divps512fold(<16 x float> %y) {
; GENERIC-LABEL: divps512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vdivps {{.*}}(%rip), %zmm0, %zmm0 # sched: [28:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: divps512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vdivps {{.*}}(%rip), %zmm0, %zmm0 # sched: [24:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -290,12 +290,12 @@ entry:
define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; GENERIC-LABEL: vpaddq_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddq %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddq_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%x = add <8 x i64> %i, %j
@@ -304,12 +304,12 @@ define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
; GENERIC-LABEL: vpaddq_fold_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddq (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddq_fold_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq (%rdi), %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%tmp = load <8 x i64>, <8 x i64>* %j, align 4
@@ -319,12 +319,12 @@ define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
; GENERIC-LABEL: vpaddq_broadcast_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddq_broadcast_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -333,12 +333,12 @@ define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
; GENERIC-LABEL: vpaddq_broadcast2_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddq_broadcast2_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%tmp = load i64, i64* %j
@@ -356,12 +356,12 @@ define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; GENERIC-LABEL: vpaddd_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddd %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%x = add <16 x i32> %i, %j
@@ -370,12 +370,12 @@ define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
; GENERIC-LABEL: vpaddd_fold_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddd (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_fold_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd (%rdi), %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%tmp = load <16 x i32>, <16 x i32>* %j, align 4
@@ -385,12 +385,12 @@ define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
; GENERIC-LABEL: vpaddd_broadcast_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_broadcast_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -399,14 +399,14 @@ define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: vpaddd_mask_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_mask_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} # sched: [1:0.33]
@@ -419,14 +419,14 @@ define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %ma
define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: vpaddd_maskz_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_maskz_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
@@ -439,14 +439,14 @@ define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %m
define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: vpaddd_mask_fold_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_mask_fold_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} # sched: [8:0.50]
@@ -460,14 +460,14 @@ define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: vpaddd_mask_broadcast_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_mask_broadcast_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} # sched: [8:0.50]
@@ -480,14 +480,14 @@ define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: vpaddd_maskz_fold_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z} # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_maskz_fold_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -501,14 +501,14 @@ define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: vpaddd_maskz_broadcast_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z} # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpaddd_maskz_broadcast_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -521,12 +521,12 @@ define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; GENERIC-LABEL: vpsubq_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubq %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpsubq_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%x = sub <8 x i64> %i, %j
@@ -535,12 +535,12 @@ define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; GENERIC-LABEL: vpsubd_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubd %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpsubd_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubd %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%x = sub <16 x i32> %i, %j
@@ -549,12 +549,12 @@ define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
; GENERIC-LABEL: vpmulld_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulld %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpmulld_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulld %zmm1, %zmm0, %zmm0 # sched: [8:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
%x = mul <16 x i32> %i, %j
@@ -564,12 +564,12 @@ define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
declare float @sqrtf(float) readnone
define float @sqrtA(float %a) nounwind uwtable readnone ssp {
; GENERIC-LABEL: sqrtA:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [114:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sqrtA:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -580,12 +580,12 @@ entry:
declare double @sqrt(double) readnone
define double @sqrtB(double %a) nounwind uwtable readnone ssp {
; GENERIC-LABEL: sqrtB:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [21:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sqrtB:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [18:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -596,12 +596,12 @@ entry:
declare float @llvm.sqrt.f32(float)
define float @sqrtC(float %a) nounwind {
; GENERIC-LABEL: sqrtC:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [114:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sqrtC:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = call float @llvm.sqrt.f32(float %a)
@@ -611,12 +611,12 @@ define float @sqrtC(float %a) nounwind {
declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
define <16 x float> @sqrtD(<16 x float> %a) nounwind {
; GENERIC-LABEL: sqrtD:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vsqrtps %zmm0, %zmm0 # sched: [14:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sqrtD:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtps %zmm0, %zmm0 # sched: [19:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
@@ -626,12 +626,12 @@ define <16 x float> @sqrtD(<16 x float> %a) nounwind {
declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
define <8 x double> @sqrtE(<8 x double> %a) nounwind {
; GENERIC-LABEL: sqrtE:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vsqrtpd %zmm0, %zmm0 # sched: [14:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sqrtE:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtpd %zmm0, %zmm0 # sched: [31:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
@@ -640,12 +640,12 @@ define <8 x double> @sqrtE(<8 x double> %a) nounwind {
define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
; GENERIC-LABEL: fadd_broadcast:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: fadd_broadcast:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fadd <16 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
@@ -654,12 +654,12 @@ define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
; GENERIC-LABEL: addq_broadcast:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: addq_broadcast:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%b = add <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -668,12 +668,12 @@ define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
; GENERIC-LABEL: orq_broadcast:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: orq_broadcast:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -682,12 +682,12 @@ define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
; GENERIC-LABEL: andd512fold:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vandps (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: andd512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vandps (%rdi), %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -698,12 +698,12 @@ entry:
define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
; GENERIC-LABEL: andqbrst:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: andqbrst:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -716,14 +716,14 @@ entry:
define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
; GENERIC-LABEL: test_mask_vaddps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vaddps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vaddps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vaddps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -738,14 +738,14 @@ define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: test_mask_vmulps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vmulps %zmm2, %zmm1, %zmm0 {%k1} # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vmulps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vmulps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -758,14 +758,14 @@ define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i, <16 x
define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: test_mask_vminps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vminps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vminps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vminps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -779,14 +779,14 @@ define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i, <16 x
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: test_mask_vminpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vminpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -800,14 +800,14 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i, <8 x d
define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: test_mask_vmaxps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vmaxps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vmaxps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vmaxps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -821,14 +821,14 @@ define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i, <16 x
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: test_mask_vmaxpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vmaxpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -842,14 +842,14 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i, <8 x d
define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: test_mask_vsubps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vsubps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -862,14 +862,14 @@ define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i, <16 x
define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
; GENERIC-LABEL: test_mask_vdivps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vdivps %zmm2, %zmm1, %zmm0 {%k1} # sched: [24:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vdivps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vdivps %zmm2, %zmm1, %zmm0 {%k1} # sched: [23:2.00]
@@ -882,14 +882,14 @@ define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i, <16 x
define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i64> %mask1) nounwind readnone {
; GENERIC-LABEL: test_mask_vaddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vaddpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_vaddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vaddpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -902,14 +902,14 @@ define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i, <8 x d
define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j, <8 x i64> %mask1) nounwind readnone {
; GENERIC-LABEL: test_maskz_vaddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_maskz_vaddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [4:0.33]
@@ -922,14 +922,14 @@ define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j, <8 x i6
define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i, <8 x double>* %j, <8 x i64> %mask1) nounwind {
; GENERIC-LABEL: test_mask_fold_vaddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vaddpd (%rdi), %zmm1, %zmm0 {%k1} # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_fold_vaddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vaddpd (%rdi), %zmm1, %zmm0 {%k1} # sched: [11:0.50]
@@ -943,14 +943,14 @@ define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i, <
define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j, <8 x i64> %mask1) nounwind {
; GENERIC-LABEL: test_maskz_fold_vaddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z} # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_maskz_fold_vaddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z} # sched: [11:0.50]
@@ -964,12 +964,12 @@ define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j, <
define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
; GENERIC-LABEL: test_broadcast_vaddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_broadcast_vaddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%tmp = load double, double* %j
@@ -982,7 +982,7 @@ define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind
define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i, double* %j, <8 x i64> %mask1) nounwind {
; GENERIC-LABEL: test_mask_broadcast_vaddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm0, %zmm2, %k1
; GENERIC-NEXT: vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1} # sched: [7:1.00]
@@ -990,7 +990,7 @@ define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mask_broadcast_vaddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm0, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1} # sched: [11:0.50]
@@ -1008,14 +1008,14 @@ define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double>
define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
; GENERIC-LABEL: test_maskz_broadcast_vaddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_maskz_broadcast_vaddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} # sched: [11:0.50]
@@ -1033,12 +1033,12 @@ define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
define <16 x float> @test_fxor(<16 x float> %a) {
; GENERIC-LABEL: test_fxor:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_fxor:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1048,12 +1048,12 @@ define <16 x float> @test_fxor(<16 x float> %a) {
define <8 x float> @test_fxor_8f32(<8 x float> %a) {
; GENERIC-LABEL: test_fxor_8f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps {{.*}}(%rip){1to8}, %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_fxor_8f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to8}, %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%res = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -1062,12 +1062,12 @@ define <8 x float> @test_fxor_8f32(<8 x float> %a) {
define <8 x double> @fabs_v8f64(<8 x double> %p)
; GENERIC-LABEL: fabs_v8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: fabs_v8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
{
@@ -1078,12 +1078,12 @@ declare <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
define <16 x float> @fabs_v16f32(<16 x float> %p)
; GENERIC-LABEL: fabs_v16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: fabs_v16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
{
@@ -1094,7 +1094,7 @@ declare <16 x float> @llvm.fabs.v16f32(<16 x float> %p)
define double @test1(double %a, double %b) nounwind {
; GENERIC-LABEL: test1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vucomisd %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: jne .LBB64_1 # sched: [1:1.00]
; GENERIC-NEXT: jnp .LBB64_2 # sched: [1:1.00]
@@ -1106,7 +1106,7 @@ define double @test1(double %a, double %b) nounwind {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vucomisd %xmm1, %xmm0 # sched: [2:1.00]
; SKX-NEXT: jne .LBB64_1 # sched: [1:0.50]
; SKX-NEXT: jnp .LBB64_2 # sched: [1:0.50]
@@ -1129,10 +1129,10 @@ l2:
define float @test2(float %a, float %b) nounwind {
; GENERIC-LABEL: test2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vucomiss %xmm0, %xmm1 # sched: [2:1.00]
; GENERIC-NEXT: jbe .LBB65_2 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#1: # %l1
+; GENERIC-NEXT: # %bb.1: # %l1
; GENERIC-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
; GENERIC-NEXT: .LBB65_2: # %l2
@@ -1140,10 +1140,10 @@ define float @test2(float %a, float %b) nounwind {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vucomiss %xmm0, %xmm1 # sched: [2:1.00]
; SKX-NEXT: jbe .LBB65_2 # sched: [1:0.50]
-; SKX-NEXT: # BB#1: # %l1
+; SKX-NEXT: # %bb.1: # %l1
; SKX-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
; SKX-NEXT: .LBB65_2: # %l2
@@ -1162,14 +1162,14 @@ l2:
define i32 @test3(float %a, float %b) {
; GENERIC-LABEL: test3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcmpeqss %xmm1, %xmm0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
; GENERIC-NEXT: movzbl %al, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcmpeqss %xmm1, %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: movzbl %al, %eax # sched: [1:0.25]
@@ -1182,12 +1182,12 @@ define i32 @test3(float %a, float %b) {
define float @test5(float %p) #0 {
; GENERIC-LABEL: test5:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vucomiss %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: jne .LBB67_1 # sched: [1:1.00]
; GENERIC-NEXT: jp .LBB67_1 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#2: # %return
+; GENERIC-NEXT: # %bb.2: # %return
; GENERIC-NEXT: retq # sched: [1:1.00]
; GENERIC-NEXT: .LBB67_1: # %if.end
; GENERIC-NEXT: seta %al # sched: [2:1.00]
@@ -1196,12 +1196,12 @@ define float @test5(float %p) #0 {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test5:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vucomiss %xmm1, %xmm0 # sched: [2:1.00]
; SKX-NEXT: jne .LBB67_1 # sched: [1:0.50]
; SKX-NEXT: jp .LBB67_1 # sched: [1:0.50]
-; SKX-NEXT: # BB#2: # %return
+; SKX-NEXT: # %bb.2: # %return
; SKX-NEXT: retq # sched: [7:1.00]
; SKX-NEXT: .LBB67_1: # %if.end
; SKX-NEXT: seta %al # sched: [2:1.00]
@@ -1224,14 +1224,14 @@ return: ; preds = %if.end, %entry
define i32 @test6(i32 %a, i32 %b) {
; GENERIC-LABEL: test6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: xorl %eax, %eax # sched: [1:0.33]
; GENERIC-NEXT: cmpl %esi, %edi # sched: [1:0.33]
; GENERIC-NEXT: sete %al # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKX-NEXT: cmpl %esi, %edi # sched: [1:0.25]
; SKX-NEXT: sete %al # sched: [1:0.50]
@@ -1243,14 +1243,14 @@ define i32 @test6(i32 %a, i32 %b) {
define i32 @test7(double %x, double %y) #2 {
; GENERIC-LABEL: test7:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: xorl %eax, %eax # sched: [1:0.33]
; GENERIC-NEXT: vucomisd %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: setne %al # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test7:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: xorl %eax, %eax # sched: [1:0.25]
; SKX-NEXT: vucomisd %xmm1, %xmm0 # sched: [2:1.00]
; SKX-NEXT: setne %al # sched: [1:0.50]
@@ -1263,7 +1263,7 @@ entry:
define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
; GENERIC-LABEL: test8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: xorl $-2147483648, %esi # imm = 0x80000000
; GENERIC-NEXT: # sched: [1:0.33]
; GENERIC-NEXT: testl %edx, %edx # sched: [1:0.33]
@@ -1275,7 +1275,7 @@ define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: notl %edi # sched: [1:0.25]
; SKX-NEXT: xorl $-2147483648, %esi # imm = 0x80000000
; SKX-NEXT: # sched: [1:0.25]
@@ -1296,10 +1296,10 @@ define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
define i32 @test9(i64 %a) {
; GENERIC-LABEL: test9:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: testb $1, %dil # sched: [1:0.33]
; GENERIC-NEXT: jne .LBB71_2 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#1: # %A
+; GENERIC-NEXT: # %bb.1: # %A
; GENERIC-NEXT: movl $6, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
; GENERIC-NEXT: .LBB71_2: # %B
@@ -1307,10 +1307,10 @@ define i32 @test9(i64 %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test9:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: testb $1, %dil # sched: [1:0.25]
; SKX-NEXT: jne .LBB71_2 # sched: [1:0.50]
-; SKX-NEXT: # BB#1: # %A
+; SKX-NEXT: # %bb.1: # %A
; SKX-NEXT: movl $6, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
; SKX-NEXT: .LBB71_2: # %B
@@ -1327,7 +1327,7 @@ B:
define i32 @test10(i64 %b, i64 %c, i1 %d) {
; GENERIC-LABEL: test10:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl %edx, %eax # sched: [1:0.33]
; GENERIC-NEXT: andb $1, %al # sched: [1:0.33]
; GENERIC-NEXT: cmpq %rsi, %rdi # sched: [1:0.33]
@@ -1336,7 +1336,7 @@ define i32 @test10(i64 %b, i64 %c, i1 %d) {
; GENERIC-NEXT: andb $1, %cl # sched: [1:0.33]
; GENERIC-NEXT: cmpb %cl, %al # sched: [1:0.33]
; GENERIC-NEXT: je .LBB72_1 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#2: # %if.end.i
+; GENERIC-NEXT: # %bb.2: # %if.end.i
; GENERIC-NEXT: movl $6, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
; GENERIC-NEXT: .LBB72_1: # %if.then.i
@@ -1344,7 +1344,7 @@ define i32 @test10(i64 %b, i64 %c, i1 %d) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test10:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl %edx, %eax # sched: [1:0.25]
; SKX-NEXT: andb $1, %al # sched: [1:0.25]
; SKX-NEXT: cmpq %rsi, %rdi # sched: [1:0.25]
@@ -1353,7 +1353,7 @@ define i32 @test10(i64 %b, i64 %c, i1 %d) {
; SKX-NEXT: andb $1, %cl # sched: [1:0.25]
; SKX-NEXT: cmpb %cl, %al # sched: [1:0.25]
; SKX-NEXT: je .LBB72_1 # sched: [1:0.50]
-; SKX-NEXT: # BB#2: # %if.end.i
+; SKX-NEXT: # %bb.2: # %if.end.i
; SKX-NEXT: movl $6, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
; SKX-NEXT: .LBB72_1: # %if.then.i
@@ -1374,12 +1374,12 @@ if.end.i:
define <16 x float> @sitof32(<16 x i32> %a) nounwind {
; GENERIC-LABEL: sitof32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sitof32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <16 x i32> %a to <16 x float>
@@ -1388,12 +1388,12 @@ define <16 x float> @sitof32(<16 x i32> %a) nounwind {
define <8 x double> @sltof864(<8 x i64> %a) {
; GENERIC-LABEL: sltof864:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2pd %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sltof864:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2pd %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <8 x i64> %a to <8 x double>
@@ -1402,12 +1402,12 @@ define <8 x double> @sltof864(<8 x i64> %a) {
define <4 x double> @slto4f64(<4 x i64> %a) {
; GENERIC-LABEL: slto4f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2pd %ymm0, %ymm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: slto4f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2pd %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <4 x i64> %a to <4 x double>
@@ -1416,12 +1416,12 @@ define <4 x double> @slto4f64(<4 x i64> %a) {
define <2 x double> @slto2f64(<2 x i64> %a) {
; GENERIC-LABEL: slto2f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2pd %xmm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: slto2f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2pd %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <2 x i64> %a to <2 x double>
@@ -1430,12 +1430,12 @@ define <2 x double> @slto2f64(<2 x i64> %a) {
define <2 x float> @sltof2f32(<2 x i64> %a) {
; GENERIC-LABEL: sltof2f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2ps %xmm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sltof2f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2ps %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <2 x i64> %a to <2 x float>
@@ -1444,12 +1444,12 @@ define <2 x float> @sltof2f32(<2 x i64> %a) {
define <4 x float> @slto4f32_mem(<4 x i64>* %a) {
; GENERIC-LABEL: slto4f32_mem:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2psy (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: slto4f32_mem:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2psy (%rdi), %xmm0 # sched: [9:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a1 = load <4 x i64>, <4 x i64>* %a, align 8
@@ -1459,12 +1459,12 @@ define <4 x float> @slto4f32_mem(<4 x i64>* %a) {
define <4 x i64> @f64to4sl(<4 x double> %a) {
; GENERIC-LABEL: f64to4sl:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttpd2qq %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to4sl:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2qq %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptosi <4 x double> %a to <4 x i64>
@@ -1473,12 +1473,12 @@ define <4 x i64> @f64to4sl(<4 x double> %a) {
define <4 x i64> @f32to4sl(<4 x float> %a) {
; GENERIC-LABEL: f32to4sl:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttps2qq %xmm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32to4sl:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2qq %xmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptosi <4 x float> %a to <4 x i64>
@@ -1487,13 +1487,13 @@ define <4 x i64> @f32to4sl(<4 x float> %a) {
define <4 x float> @slto4f32(<4 x i64> %a) {
; GENERIC-LABEL: slto4f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2ps %ymm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: slto4f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2ps %ymm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1503,13 +1503,13 @@ define <4 x float> @slto4f32(<4 x i64> %a) {
define <4 x float> @ulto4f32(<4 x i64> %a) {
; GENERIC-LABEL: ulto4f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtuqq2ps %ymm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ulto4f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtuqq2ps %ymm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1519,12 +1519,12 @@ define <4 x float> @ulto4f32(<4 x i64> %a) {
define <8 x double> @ulto8f64(<8 x i64> %a) {
; GENERIC-LABEL: ulto8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtuqq2pd %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ulto8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtuqq2pd %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp <8 x i64> %a to <8 x double>
@@ -1533,13 +1533,13 @@ define <8 x double> @ulto8f64(<8 x i64> %a) {
define <16 x double> @ulto16f64(<16 x i64> %a) {
; GENERIC-LABEL: ulto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtuqq2pd %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: vcvtuqq2pd %zmm1, %zmm1 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ulto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtuqq2pd %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vcvtuqq2pd %zmm1, %zmm1 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1549,12 +1549,12 @@ define <16 x double> @ulto16f64(<16 x i64> %a) {
define <16 x i32> @f64to16si(<16 x float> %a) nounwind {
; GENERIC-LABEL: f64to16si:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttps2dq %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to16si:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2dq %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptosi <16 x float> %a to <16 x i32>
@@ -1563,12 +1563,12 @@ define <16 x i32> @f64to16si(<16 x float> %a) nounwind {
define <16 x i32> @f32to16ui(<16 x float> %a) nounwind {
; GENERIC-LABEL: f32to16ui:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttps2udq %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32to16ui:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2udq %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptoui <16 x float> %a to <16 x i32>
@@ -1577,14 +1577,14 @@ define <16 x i32> @f32to16ui(<16 x float> %a) nounwind {
define <16 x i8> @f32to16uc(<16 x float> %f) {
; GENERIC-LABEL: f32to16uc:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttps2dq %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovdb %zmm0, %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32to16uc:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2dq %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vpmovdb %zmm0, %xmm0 # sched: [4:2.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
@@ -1595,13 +1595,13 @@ define <16 x i8> @f32to16uc(<16 x float> %f) {
define <16 x i16> @f32to16us(<16 x float> %f) {
; GENERIC-LABEL: f32to16us:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttps2dq %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovdw %zmm0, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32to16us:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2dq %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vpmovdw %zmm0, %ymm0 # sched: [4:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1611,12 +1611,12 @@ define <16 x i16> @f32to16us(<16 x float> %f) {
define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
; GENERIC-LABEL: f32to8ui:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttps2udq %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32to8ui:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2udq %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptoui <8 x float> %a to <8 x i32>
@@ -1625,12 +1625,12 @@ define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
; GENERIC-LABEL: f32to4ui:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttps2udq %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32to4ui:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2udq %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptoui <4 x float> %a to <4 x i32>
@@ -1639,12 +1639,12 @@ define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
define <8 x i32> @f64to8ui(<8 x double> %a) nounwind {
; GENERIC-LABEL: f64to8ui:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttpd2udq %zmm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to8ui:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2udq %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptoui <8 x double> %a to <8 x i32>
@@ -1653,14 +1653,14 @@ define <8 x i32> @f64to8ui(<8 x double> %a) nounwind {
define <8 x i16> @f64to8us(<8 x double> %f) {
; GENERIC-LABEL: f64to8us:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttpd2dq %zmm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovdw %ymm0, %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to8us:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2dq %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: vpmovdw %ymm0, %xmm0 # sched: [4:2.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
@@ -1671,14 +1671,14 @@ define <8 x i16> @f64to8us(<8 x double> %f) {
define <8 x i8> @f64to8uc(<8 x double> %f) {
; GENERIC-LABEL: f64to8uc:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttpd2dq %zmm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovdw %ymm0, %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to8uc:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2dq %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: vpmovdw %ymm0, %xmm0 # sched: [4:2.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
@@ -1689,13 +1689,13 @@ define <8 x i8> @f64to8uc(<8 x double> %f) {
define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
; GENERIC-LABEL: f64to4ui:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttpd2udq %ymm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to4ui:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2udq %ymm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1705,12 +1705,12 @@ define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
define <8 x double> @sito8f64(<8 x i32> %a) {
; GENERIC-LABEL: sito8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sito8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <8 x i32> %a to <8 x double>
@@ -1718,18 +1718,18 @@ define <8 x double> @sito8f64(<8 x i32> %a) {
}
define <8 x double> @i32to8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
; GENERIC-LABEL: i32to8f64_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1} # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: i32to8f64_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1} # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
; VLNOBW-LABEL: i32to8f64_mask:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; VLNOBW-NEXT: ret{{[l|q]}}
@@ -1740,18 +1740,18 @@ define <8 x double> @i32to8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwi
}
define <8 x double> @sito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
; GENERIC-LABEL: sito8f64_maskz:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z} # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sito8f64_maskz:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z} # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
; VLNOBW-LABEL: sito8f64_maskz:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; VLNOBW-NEXT: ret{{[l|q]}}
@@ -1763,12 +1763,12 @@ define <8 x double> @sito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
define <8 x i32> @f64to8si(<8 x double> %a) {
; GENERIC-LABEL: f64to8si:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttpd2dq %zmm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to8si:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2dq %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptosi <8 x double> %a to <8 x i32>
@@ -1777,13 +1777,13 @@ define <8 x i32> @f64to8si(<8 x double> %a) {
define <4 x i32> @f64to4si(<4 x double> %a) {
; GENERIC-LABEL: f64to4si:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to4si:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1793,14 +1793,14 @@ define <4 x i32> @f64to4si(<4 x double> %a) {
define <16 x float> @f64to16f32(<16 x double> %b) nounwind {
; GENERIC-LABEL: f64to16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtpd2ps %zmm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vcvtpd2ps %zmm1, %ymm1 # sched: [3:1.00]
; GENERIC-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtpd2ps %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: vcvtpd2ps %zmm1, %ymm1 # sched: [7:1.00]
; SKX-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [3:1.00]
@@ -1811,13 +1811,13 @@ define <16 x float> @f64to16f32(<16 x double> %b) nounwind {
define <4 x float> @f64to4f32(<4 x double> %b) {
; GENERIC-LABEL: f64to4f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to4f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1827,7 +1827,7 @@ define <4 x float> @f64to4f32(<4 x double> %b) {
define <4 x float> @f64to4f32_mask(<4 x double> %b, <4 x i1> %mask) {
; GENERIC-LABEL: f64to4f32_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm1, %xmm1, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vcvtpd2ps %ymm0, %xmm0 {%k1} {z} # sched: [3:1.00]
@@ -1835,7 +1835,7 @@ define <4 x float> @f64to4f32_mask(<4 x double> %b, <4 x i1> %mask) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64to4f32_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vcvtpd2ps %ymm0, %xmm0 {%k1} {z} # sched: [7:1.00]
@@ -1848,12 +1848,12 @@ define <4 x float> @f64to4f32_mask(<4 x double> %b, <4 x i1> %mask) {
define <4 x float> @f64tof32_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
; GENERIC-LABEL: f64tof32_inreg:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtsd2ss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64tof32_inreg:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtsd2ss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%ext = extractelement <2 x double> %a0, i32 0
@@ -1864,12 +1864,12 @@ define <4 x float> @f64tof32_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
define <8 x double> @f32to8f64(<8 x float> %b) nounwind {
; GENERIC-LABEL: f32to8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtps2pd %ymm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32to8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtps2pd %ymm0, %zmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = fpext <8 x float> %b to <8 x double>
@@ -1878,13 +1878,13 @@ define <8 x double> @f32to8f64(<8 x float> %b) nounwind {
define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x double> %a1) {
; GENERIC-LABEL: f32to4f64_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcmpltpd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vcvtps2pd %xmm0, %ymm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32to4f64_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcmpltpd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vcvtps2pd %xmm0, %ymm0 {%k1} {z} # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1896,12 +1896,12 @@ define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x doubl
define <2 x double> @f32tof64_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
; GENERIC-LABEL: f32tof64_inreg:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32tof64_inreg:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%ext = extractelement <4 x float> %a1, i32 0
@@ -1912,12 +1912,12 @@ define <2 x double> @f32tof64_inreg(<2 x double> %a0, <4 x float> %a1) nounwind
define double @sltof64_load(i64* nocapture %e) {
; GENERIC-LABEL: sltof64_load:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vcvtsi2sdq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sltof64_load:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vcvtsi2sdq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -1928,12 +1928,12 @@ entry:
define double @sitof64_load(i32* %e) {
; GENERIC-LABEL: sitof64_load:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vcvtsi2sdl (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sitof64_load:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vcvtsi2sdl (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -1944,12 +1944,12 @@ entry:
define float @sitof32_load(i32* %e) {
; GENERIC-LABEL: sitof32_load:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vcvtsi2ssl (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sitof32_load:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vcvtsi2ssl (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -1960,12 +1960,12 @@ entry:
define float @sltof32_load(i64* %e) {
; GENERIC-LABEL: sltof32_load:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vcvtsi2ssq (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sltof32_load:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vcvtsi2ssq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -1976,14 +1976,14 @@ entry:
define void @f32tof64_loadstore() {
; GENERIC-LABEL: f32tof64_loadstore:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f32tof64_loadstore:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
@@ -1999,14 +1999,14 @@ entry:
define void @f64tof32_loadstore() nounwind uwtable {
; GENERIC-LABEL: f64tof32_loadstore:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [6:0.50]
; GENERIC-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: f64tof32_loadstore:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:0.50]
; SKX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
@@ -2022,12 +2022,12 @@ entry:
define double @long_to_double(i64 %x) {
; GENERIC-LABEL: long_to_double:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovq %rdi, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: long_to_double:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovq %rdi, %xmm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
%res = bitcast i64 %x to double
@@ -2036,12 +2036,12 @@ define double @long_to_double(i64 %x) {
define i64 @double_to_long(double %x) {
; GENERIC-LABEL: double_to_long:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovq %xmm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: double_to_long:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovq %xmm0, %rax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
%res = bitcast double %x to i64
@@ -2050,12 +2050,12 @@ define i64 @double_to_long(double %x) {
define float @int_to_float(i32 %x) {
; GENERIC-LABEL: int_to_float:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovd %edi, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: int_to_float:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovd %edi, %xmm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
%res = bitcast i32 %x to float
@@ -2064,12 +2064,12 @@ define float @int_to_float(i32 %x) {
define i32 @float_to_int(float %x) {
; GENERIC-LABEL: float_to_int:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovd %xmm0, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: float_to_int:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovd %xmm0, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
%res = bitcast float %x to i32
@@ -2078,7 +2078,7 @@ define i32 @float_to_int(float %x) {
define <16 x double> @uito16f64(<16 x i32> %a) nounwind {
; GENERIC-LABEL: uito16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtudq2pd %ymm0, %zmm2 # sched: [4:1.00]
; GENERIC-NEXT: vextractf64x4 $1, %zmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vcvtudq2pd %ymm0, %zmm1 # sched: [4:1.00]
@@ -2086,7 +2086,7 @@ define <16 x double> @uito16f64(<16 x i32> %a) nounwind {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uito16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtudq2pd %ymm0, %zmm2 # sched: [7:1.00]
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vcvtudq2pd %ymm0, %zmm1 # sched: [7:1.00]
@@ -2098,12 +2098,12 @@ define <16 x double> @uito16f64(<16 x i32> %a) nounwind {
define <8 x float> @slto8f32(<8 x i64> %a) {
; GENERIC-LABEL: slto8f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2ps %zmm0, %ymm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: slto8f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2ps %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <8 x i64> %a to <8 x float>
@@ -2112,14 +2112,14 @@ define <8 x float> @slto8f32(<8 x i64> %a) {
define <16 x float> @slto16f32(<16 x i64> %a) {
; GENERIC-LABEL: slto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2ps %zmm0, %ymm0 # sched: [4:1.00]
; GENERIC-NEXT: vcvtqq2ps %zmm1, %ymm1 # sched: [4:1.00]
; GENERIC-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: slto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2ps %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: vcvtqq2ps %zmm1, %ymm1 # sched: [7:1.00]
; SKX-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [3:1.00]
@@ -2130,12 +2130,12 @@ define <16 x float> @slto16f32(<16 x i64> %a) {
define <8 x double> @slto8f64(<8 x i64> %a) {
; GENERIC-LABEL: slto8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2pd %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: slto8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2pd %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <8 x i64> %a to <8 x double>
@@ -2144,13 +2144,13 @@ define <8 x double> @slto8f64(<8 x i64> %a) {
define <16 x double> @slto16f64(<16 x i64> %a) {
; GENERIC-LABEL: slto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtqq2pd %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: vcvtqq2pd %zmm1, %zmm1 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: slto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtqq2pd %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vcvtqq2pd %zmm1, %zmm1 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2160,12 +2160,12 @@ define <16 x double> @slto16f64(<16 x i64> %a) {
define <8 x float> @ulto8f32(<8 x i64> %a) {
; GENERIC-LABEL: ulto8f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtuqq2ps %zmm0, %ymm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ulto8f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtuqq2ps %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp <8 x i64> %a to <8 x float>
@@ -2174,14 +2174,14 @@ define <8 x float> @ulto8f32(<8 x i64> %a) {
define <16 x float> @ulto16f32(<16 x i64> %a) {
; GENERIC-LABEL: ulto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtuqq2ps %zmm0, %ymm0 # sched: [4:1.00]
; GENERIC-NEXT: vcvtuqq2ps %zmm1, %ymm1 # sched: [4:1.00]
; GENERIC-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ulto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtuqq2ps %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: vcvtuqq2ps %zmm1, %ymm1 # sched: [7:1.00]
; SKX-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [3:1.00]
@@ -2192,18 +2192,18 @@ define <16 x float> @ulto16f32(<16 x i64> %a) {
define <8 x double> @uito8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
; GENERIC-LABEL: uito8f64_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1} # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uito8f64_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1} # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
; VLNOBW-LABEL: uito8f64_mask:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; VLNOBW-NEXT: ret{{[l|q]}}
@@ -2214,13 +2214,13 @@ define <8 x double> @uito8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwin
}
define <8 x double> @uito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
; GENERIC-LABEL: uito8f64_maskz:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z} # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uito8f64_maskz:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z} # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2232,12 +2232,12 @@ define <8 x double> @uito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
; GENERIC-LABEL: uito4f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtudq2pd %xmm0, %ymm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uito4f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtudq2pd %xmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp <4 x i32> %a to <4 x double>
@@ -2246,12 +2246,12 @@ define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
define <16 x float> @uito16f32(<16 x i32> %a) nounwind {
; GENERIC-LABEL: uito16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtudq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uito16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtudq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp <16 x i32> %a to <16 x float>
@@ -2260,12 +2260,12 @@ define <16 x float> @uito16f32(<16 x i32> %a) nounwind {
define <8 x double> @uito8f64(<8 x i32> %a) {
; GENERIC-LABEL: uito8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtudq2pd %ymm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uito8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtudq2pd %ymm0, %zmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp <8 x i32> %a to <8 x double>
@@ -2274,12 +2274,12 @@ define <8 x double> @uito8f64(<8 x i32> %a) {
define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
; GENERIC-LABEL: uito8f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtudq2ps %ymm0, %ymm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uito8f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtudq2ps %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp <8 x i32> %a to <8 x float>
@@ -2288,12 +2288,12 @@ define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
; GENERIC-LABEL: uito4f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtudq2ps %xmm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uito4f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtudq2ps %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp <4 x i32> %a to <4 x float>
@@ -2302,12 +2302,12 @@ define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
define i32 @fptosi(float %a) nounwind {
; GENERIC-LABEL: fptosi:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttss2si %xmm0, %eax # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: fptosi:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttss2si %xmm0, %eax # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptosi float %a to i32
@@ -2316,12 +2316,12 @@ define i32 @fptosi(float %a) nounwind {
define i32 @fptoui(float %a) nounwind {
; GENERIC-LABEL: fptoui:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvttss2usi %xmm0, %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: fptoui:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttss2usi %xmm0, %eax
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptoui float %a to i32
@@ -2330,12 +2330,12 @@ define i32 @fptoui(float %a) nounwind {
define float @uitof32(i32 %a) nounwind {
; GENERIC-LABEL: uitof32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtusi2ssl %edi, %xmm0, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uitof32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtusi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp i32 %a to float
@@ -2344,12 +2344,12 @@ define float @uitof32(i32 %a) nounwind {
define double @uitof64(i32 %a) nounwind {
; GENERIC-LABEL: uitof64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtusi2sdl %edi, %xmm0, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uitof64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtusi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = uitofp i32 %a to double
@@ -2358,7 +2358,7 @@ define double @uitof64(i32 %a) nounwind {
define <16 x float> @sbto16f32(<16 x i32> %a) {
; GENERIC-LABEL: sbto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
; GENERIC-NEXT: vpmovm2d %k0, %zmm0
@@ -2366,7 +2366,7 @@ define <16 x float> @sbto16f32(<16 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sbto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpgtd %zmm0, %zmm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %zmm0
@@ -2379,13 +2379,13 @@ define <16 x float> @sbto16f32(<16 x i32> %a) {
define <16 x float> @scto16f32(<16 x i8> %a) {
; GENERIC-LABEL: scto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbd %xmm0, %zmm0
; GENERIC-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: scto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2395,13 +2395,13 @@ define <16 x float> @scto16f32(<16 x i8> %a) {
define <16 x float> @ssto16f32(<16 x i16> %a) {
; GENERIC-LABEL: ssto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd %ymm0, %zmm0
; GENERIC-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ssto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd %ymm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2411,13 +2411,13 @@ define <16 x float> @ssto16f32(<16 x i16> %a) {
define <8 x double> @ssto16f64(<8 x i16> %a) {
; GENERIC-LABEL: ssto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ssto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2427,7 +2427,7 @@ define <8 x double> @ssto16f64(<8 x i16> %a) {
define <8 x double> @scto8f64(<8 x i8> %a) {
; GENERIC-LABEL: scto8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; GENERIC-NEXT: vpslld $24, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrad $24, %ymm0, %ymm0 # sched: [1:1.00]
@@ -2435,7 +2435,7 @@ define <8 x double> @scto8f64(<8 x i8> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: scto8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKX-NEXT: vpslld $24, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsrad $24, %ymm0, %ymm0 # sched: [1:0.50]
@@ -2447,7 +2447,7 @@ define <8 x double> @scto8f64(<8 x i8> %a) {
define <16 x double> @scto16f64(<16 x i8> %a) {
; GENERIC-LABEL: scto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbd %xmm0, %zmm1
; GENERIC-NEXT: vcvtdq2pd %ymm1, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: vextracti64x4 $1, %zmm1, %ymm1 # sched: [1:1.00]
@@ -2455,7 +2455,7 @@ define <16 x double> @scto16f64(<16 x i8> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: scto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm0, %zmm1 # sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm1, %zmm0 # sched: [7:1.00]
; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1 # sched: [3:1.00]
@@ -2467,7 +2467,7 @@ define <16 x double> @scto16f64(<16 x i8> %a) {
define <16 x double> @sbto16f64(<16 x double> %a) {
; GENERIC-LABEL: sbto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: vcmpltpd %zmm1, %zmm2, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vcmpltpd %zmm0, %zmm2, %k1 # sched: [3:1.00]
@@ -2478,7 +2478,7 @@ define <16 x double> @sbto16f64(<16 x double> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sbto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vcmpltpd %zmm1, %zmm2, %k0 # sched: [3:1.00]
; SKX-NEXT: vcmpltpd %zmm0, %zmm2, %k1 # sched: [3:1.00]
@@ -2494,7 +2494,7 @@ define <16 x double> @sbto16f64(<16 x double> %a) {
define <8 x double> @sbto8f64(<8 x double> %a) {
; GENERIC-LABEL: sbto8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpltpd %zmm0, %zmm1, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2d %k0, %ymm0
@@ -2502,7 +2502,7 @@ define <8 x double> @sbto8f64(<8 x double> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sbto8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpltpd %zmm0, %zmm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %ymm0
@@ -2515,7 +2515,7 @@ define <8 x double> @sbto8f64(<8 x double> %a) {
define <8 x float> @sbto8f32(<8 x float> %a) {
; GENERIC-LABEL: sbto8f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpltps %ymm0, %ymm1, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2d %k0, %ymm0
@@ -2523,7 +2523,7 @@ define <8 x float> @sbto8f32(<8 x float> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sbto8f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpltps %ymm0, %ymm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %ymm0
@@ -2536,7 +2536,7 @@ define <8 x float> @sbto8f32(<8 x float> %a) {
define <4 x float> @sbto4f32(<4 x float> %a) {
; GENERIC-LABEL: sbto4f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpltps %xmm0, %xmm1, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2d %k0, %xmm0
@@ -2544,7 +2544,7 @@ define <4 x float> @sbto4f32(<4 x float> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sbto4f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpltps %xmm0, %xmm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %xmm0
@@ -2557,7 +2557,7 @@ define <4 x float> @sbto4f32(<4 x float> %a) {
define <4 x double> @sbto4f64(<4 x double> %a) {
; GENERIC-LABEL: sbto4f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpltpd %ymm0, %ymm1, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2d %k0, %xmm0
@@ -2565,7 +2565,7 @@ define <4 x double> @sbto4f64(<4 x double> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sbto4f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpltpd %ymm0, %ymm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %xmm0
@@ -2578,7 +2578,7 @@ define <4 x double> @sbto4f64(<4 x double> %a) {
define <2 x float> @sbto2f32(<2 x float> %a) {
; GENERIC-LABEL: sbto2f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpltps %xmm0, %xmm1, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2d %k0, %xmm0
@@ -2586,7 +2586,7 @@ define <2 x float> @sbto2f32(<2 x float> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sbto2f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpltps %xmm0, %xmm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %xmm0
@@ -2599,7 +2599,7 @@ define <2 x float> @sbto2f32(<2 x float> %a) {
define <2 x double> @sbto2f64(<2 x double> %a) {
; GENERIC-LABEL: sbto2f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpltpd %xmm0, %xmm1, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2q %k0, %xmm0
@@ -2607,7 +2607,7 @@ define <2 x double> @sbto2f64(<2 x double> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sbto2f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpltpd %xmm0, %xmm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %xmm0
@@ -2620,13 +2620,13 @@ define <2 x double> @sbto2f64(<2 x double> %a) {
define <16 x float> @ucto16f32(<16 x i8> %a) {
; GENERIC-LABEL: ucto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GENERIC-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ucto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2636,14 +2636,14 @@ define <16 x float> @ucto16f32(<16 x i8> %a) {
define <8 x double> @ucto8f64(<8 x i8> %a) {
; GENERIC-LABEL: ucto8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; GENERIC-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ucto8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
@@ -2654,13 +2654,13 @@ define <8 x double> @ucto8f64(<8 x i8> %a) {
define <16 x float> @swto16f32(<16 x i16> %a) {
; GENERIC-LABEL: swto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd %ymm0, %zmm0
; GENERIC-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: swto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd %ymm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2670,13 +2670,13 @@ define <16 x float> @swto16f32(<16 x i16> %a) {
define <8 x double> @swto8f64(<8 x i16> %a) {
; GENERIC-LABEL: swto8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: swto8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2686,7 +2686,7 @@ define <8 x double> @swto8f64(<8 x i16> %a) {
define <16 x double> @swto16f64(<16 x i16> %a) {
; GENERIC-LABEL: swto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd %ymm0, %zmm1
; GENERIC-NEXT: vcvtdq2pd %ymm1, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: vextracti64x4 $1, %zmm1, %ymm1 # sched: [1:1.00]
@@ -2694,7 +2694,7 @@ define <16 x double> @swto16f64(<16 x i16> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: swto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd %ymm0, %zmm1 # sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm1, %zmm0 # sched: [7:1.00]
; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1 # sched: [3:1.00]
@@ -2706,7 +2706,7 @@ define <16 x double> @swto16f64(<16 x i16> %a) {
define <16 x double> @ucto16f64(<16 x i8> %a) {
; GENERIC-LABEL: ucto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GENERIC-NEXT: vcvtdq2pd %ymm1, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: vextracti64x4 $1, %zmm1, %ymm1 # sched: [1:1.00]
@@ -2714,7 +2714,7 @@ define <16 x double> @ucto16f64(<16 x i8> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ucto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm1, %zmm0 # sched: [7:1.00]
; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1 # sched: [3:1.00]
@@ -2726,13 +2726,13 @@ define <16 x double> @ucto16f64(<16 x i8> %a) {
define <16 x float> @uwto16f32(<16 x i16> %a) {
; GENERIC-LABEL: uwto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; GENERIC-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uwto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
; SKX-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2742,13 +2742,13 @@ define <16 x float> @uwto16f32(<16 x i16> %a) {
define <8 x double> @uwto8f64(<8 x i16> %a) {
; GENERIC-LABEL: uwto8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; GENERIC-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uwto8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2758,7 +2758,7 @@ define <8 x double> @uwto8f64(<8 x i16> %a) {
define <16 x double> @uwto16f64(<16 x i16> %a) {
; GENERIC-LABEL: uwto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; GENERIC-NEXT: vcvtdq2pd %ymm1, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: vextracti64x4 $1, %zmm1, %ymm1 # sched: [1:1.00]
@@ -2766,7 +2766,7 @@ define <16 x double> @uwto16f64(<16 x i16> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: uwto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm1, %zmm0 # sched: [7:1.00]
; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1 # sched: [3:1.00]
@@ -2778,12 +2778,12 @@ define <16 x double> @uwto16f64(<16 x i16> %a) {
define <16 x float> @sito16f32(<16 x i32> %a) {
; GENERIC-LABEL: sito16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sito16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b = sitofp <16 x i32> %a to <16 x float>
@@ -2792,7 +2792,7 @@ define <16 x float> @sito16f32(<16 x i32> %a) {
define <16 x double> @sito16f64(<16 x i32> %a) {
; GENERIC-LABEL: sito16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtdq2pd %ymm0, %zmm2 # sched: [4:1.00]
; GENERIC-NEXT: vextractf64x4 $1, %zmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vcvtdq2pd %ymm0, %zmm1 # sched: [4:1.00]
@@ -2800,7 +2800,7 @@ define <16 x double> @sito16f64(<16 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sito16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtdq2pd %ymm0, %zmm2 # sched: [7:1.00]
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vcvtdq2pd %ymm0, %zmm1 # sched: [7:1.00]
@@ -2812,13 +2812,13 @@ define <16 x double> @sito16f64(<16 x i32> %a) {
define <16 x float> @usto16f32(<16 x i16> %a) {
; GENERIC-LABEL: usto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; GENERIC-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: usto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
; SKX-NEXT: vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2828,7 +2828,7 @@ define <16 x float> @usto16f32(<16 x i16> %a) {
define <16 x float> @ubto16f32(<16 x i32> %a) {
; GENERIC-LABEL: ubto16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; GENERIC-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -2836,7 +2836,7 @@ define <16 x float> @ubto16f32(<16 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ubto16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -2849,7 +2849,7 @@ define <16 x float> @ubto16f32(<16 x i32> %a) {
define <16 x double> @ubto16f64(<16 x i32> %a) {
; GENERIC-LABEL: ubto16f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; GENERIC-NEXT: movl {{.*}}(%rip), %eax # sched: [5:0.50]
@@ -2861,7 +2861,7 @@ define <16 x double> @ubto16f64(<16 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ubto16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: movl {{.*}}(%rip), %eax # sched: [5:0.50]
@@ -2878,7 +2878,7 @@ define <16 x double> @ubto16f64(<16 x i32> %a) {
define <8 x float> @ubto8f32(<8 x i32> %a) {
; GENERIC-LABEL: ubto8f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpgtd %ymm0, %ymm1, %k1
; GENERIC-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
@@ -2886,7 +2886,7 @@ define <8 x float> @ubto8f32(<8 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ubto8f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
@@ -2899,7 +2899,7 @@ define <8 x float> @ubto8f32(<8 x i32> %a) {
define <8 x double> @ubto8f64(<8 x i32> %a) {
; GENERIC-LABEL: ubto8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpgtd %ymm0, %ymm1, %k1
; GENERIC-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
@@ -2907,7 +2907,7 @@ define <8 x double> @ubto8f64(<8 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ubto8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
@@ -2920,7 +2920,7 @@ define <8 x double> @ubto8f64(<8 x i32> %a) {
define <4 x float> @ubto4f32(<4 x i32> %a) {
; GENERIC-LABEL: ubto4f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; GENERIC-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
@@ -2928,7 +2928,7 @@ define <4 x float> @ubto4f32(<4 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ubto4f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpgtd %xmm0, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
@@ -2941,7 +2941,7 @@ define <4 x float> @ubto4f32(<4 x i32> %a) {
define <4 x double> @ubto4f64(<4 x i32> %a) {
; GENERIC-LABEL: ubto4f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; GENERIC-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
@@ -2949,7 +2949,7 @@ define <4 x double> @ubto4f64(<4 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ubto4f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpgtd %xmm0, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
@@ -2962,7 +2962,7 @@ define <4 x double> @ubto4f64(<4 x i32> %a) {
define <2 x float> @ubto2f32(<2 x i32> %a) {
; GENERIC-LABEL: ubto2f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.50]
; GENERIC-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
@@ -2971,7 +2971,7 @@ define <2 x float> @ubto2f32(<2 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ubto2f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.33]
; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
@@ -2985,7 +2985,7 @@ define <2 x float> @ubto2f32(<2 x i32> %a) {
define <2 x double> @ubto2f64(<2 x i32> %a) {
; GENERIC-LABEL: ubto2f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.50]
; GENERIC-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
@@ -2994,7 +2994,7 @@ define <2 x double> @ubto2f64(<2 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ubto2f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.33]
; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
@@ -3008,14 +3008,14 @@ define <2 x double> @ubto2f64(<2 x i32> %a) {
define <8 x i16> @zext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x8mem_to_8x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxbw {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x8mem_to_8x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [9:1.00]
@@ -3028,14 +3028,14 @@ define <8 x i16> @zext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <8 x i16> @sext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_8x8mem_to_8x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxbw (%rdi), %xmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x8mem_to_8x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbw (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3049,14 +3049,14 @@ define <8 x i16> @sext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <16 x i16> @zext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_16x8mem_to_16x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x8mem_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [10:1.00]
@@ -3069,14 +3069,14 @@ define <16 x i16> @zext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwi
define <16 x i16> @sext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_16x8mem_to_16x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxbw (%rdi), %ymm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16x8mem_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbw (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3089,12 +3089,12 @@ define <16 x i16> @sext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwi
define <16 x i16> @zext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
; GENERIC-LABEL: zext_16x8_to_16x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x8_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = zext <16 x i8> %a to <16 x i16>
@@ -3103,14 +3103,14 @@ define <16 x i16> @zext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
define <16 x i16> @zext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_16x8_to_16x16_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm1, %k1
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
@@ -3122,12 +3122,12 @@ define <16 x i16> @zext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwi
define <16 x i16> @sext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
; GENERIC-LABEL: sext_16x8_to_16x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16x8_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = sext <16 x i8> %a to <16 x i16>
@@ -3136,14 +3136,14 @@ define <16 x i16> @sext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
define <16 x i16> @sext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_16x8_to_16x16_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm1, %k1
; GENERIC-NEXT: vpmovsxbw %xmm0, %ymm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbw %xmm0, %ymm0 {%k1} {z} # sched: [3:1.00]
@@ -3155,14 +3155,14 @@ define <16 x i16> @sext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwi
define <32 x i16> @zext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_32x8mem_to_32x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %ymm0, %k1
; GENERIC-NEXT: vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_32x8mem_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %ymm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero sched: [10:1.00]
@@ -3175,14 +3175,14 @@ define <32 x i16> @zext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwi
define <32 x i16> @sext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_32x8mem_to_32x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %ymm0, %k1
; GENERIC-NEXT: vpmovsxbw (%rdi), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_32x8mem_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %ymm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbw (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3195,12 +3195,12 @@ define <32 x i16> @sext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwi
define <32 x i16> @zext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
; GENERIC-LABEL: zext_32x8_to_32x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_32x8_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = zext <32 x i8> %a to <32 x i16>
@@ -3209,14 +3209,14 @@ define <32 x i16> @zext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
define <32 x i16> @zext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_32x8_to_32x16_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %ymm1, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %ymm1, %k1
; GENERIC-NEXT: vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_32x8_to_32x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm1, %ymm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %ymm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero sched: [3:1.00]
@@ -3228,12 +3228,12 @@ define <32 x i16> @zext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwi
define <32 x i16> @sext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
; GENERIC-LABEL: sext_32x8_to_32x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbw %ymm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_32x8_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbw %ymm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = sext <32 x i8> %a to <32 x i16>
@@ -3242,14 +3242,14 @@ define <32 x i16> @sext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
define <32 x i16> @sext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_32x8_to_32x16_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %ymm1, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %ymm1, %k1
; GENERIC-NEXT: vpmovsxbw %ymm0, %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_32x8_to_32x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm1, %ymm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %ymm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbw %ymm0, %zmm0 {%k1} {z} # sched: [3:1.00]
@@ -3261,14 +3261,14 @@ define <32 x i16> @sext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwi
define <4 x i32> @zext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_4x8mem_to_4x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_4x8mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxbd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [9:1.00]
@@ -3281,14 +3281,14 @@ define <4 x i32> @zext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind re
define <4 x i32> @sext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_4x8mem_to_4x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbd (%rdi), %xmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x8mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbd (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3301,14 +3301,14 @@ define <4 x i32> @sext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind re
define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x8mem_to_8x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x8mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [10:1.00]
@@ -3321,14 +3321,14 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_8x8mem_to_8x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxbd (%rdi), %ymm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x8mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbd (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3341,14 +3341,14 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <16 x i32> @zext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_16x8mem_to_16x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x8mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero sched: [10:1.00]
@@ -3361,14 +3361,14 @@ define <16 x i32> @zext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwi
define <16 x i32> @sext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_16x8mem_to_16x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxbd (%rdi), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16x8mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbd (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3381,14 +3381,14 @@ define <16 x i32> @sext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwi
define <16 x i32> @zext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_16x8_to_16x32_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm1, %k1
; GENERIC-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x8_to_16x32_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero sched: [3:1.00]
@@ -3400,14 +3400,14 @@ define <16 x i32> @zext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounw
define <16 x i32> @sext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_16x8_to_16x32_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm1, %k1
; GENERIC-NEXT: vpmovsxbd %xmm0, %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16x8_to_16x32_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbd %xmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
@@ -3419,12 +3419,12 @@ define <16 x i32> @sext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounw
define <16 x i32> @zext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
; GENERIC-LABEL: zext_16x8_to_16x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x8_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = zext <16 x i8> %i to <16 x i32>
@@ -3433,12 +3433,12 @@ define <16 x i32> @zext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
define <16 x i32> @sext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
; GENERIC-LABEL: sext_16x8_to_16x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbd %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16x8_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = sext <16 x i8> %i to <16 x i32>
@@ -3447,14 +3447,14 @@ define <16 x i32> @sext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
define <2 x i64> @zext_2x8mem_to_2x64(<2 x i8> *%i , <2 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_2x8mem_to_2x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_2x8mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxbq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [9:1.00]
@@ -3466,14 +3466,14 @@ define <2 x i64> @zext_2x8mem_to_2x64(<2 x i8> *%i , <2 x i1> %mask) nounwind re
}
define <2 x i64> @sext_2x8mem_to_2x64mask(<2 x i8> *%i , <2 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_2x8mem_to_2x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbq (%rdi), %xmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_2x8mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbq (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3485,12 +3485,12 @@ define <2 x i64> @sext_2x8mem_to_2x64mask(<2 x i8> *%i , <2 x i1> %mask) nounwin
}
define <2 x i64> @sext_2x8mem_to_2x64(<2 x i8> *%i) nounwind readnone {
; GENERIC-LABEL: sext_2x8mem_to_2x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_2x8mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq (%rdi), %xmm0 # sched: [6:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <2 x i8>,<2 x i8> *%i,align 1
@@ -3500,14 +3500,14 @@ define <2 x i64> @sext_2x8mem_to_2x64(<2 x i8> *%i) nounwind readnone {
define <4 x i64> @zext_4x8mem_to_4x64(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_4x8mem_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_4x8mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxbq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [10:1.00]
@@ -3520,14 +3520,14 @@ define <4 x i64> @zext_4x8mem_to_4x64(<4 x i8> *%i , <4 x i1> %mask) nounwind re
define <4 x i64> @sext_4x8mem_to_4x64mask(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_4x8mem_to_4x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbq (%rdi), %ymm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x8mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbq (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3540,12 +3540,12 @@ define <4 x i64> @sext_4x8mem_to_4x64mask(<4 x i8> *%i , <4 x i1> %mask) nounwin
define <4 x i64> @sext_4x8mem_to_4x64(<4 x i8> *%i) nounwind readnone {
; GENERIC-LABEL: sext_4x8mem_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbq (%rdi), %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x8mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq (%rdi), %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <4 x i8>,<4 x i8> *%i,align 1
@@ -3555,14 +3555,14 @@ define <4 x i64> @sext_4x8mem_to_4x64(<4 x i8> *%i) nounwind readnone {
define <8 x i64> @zext_8x8mem_to_8x64(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x8mem_to_8x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxbq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x8mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero sched: [10:1.00]
@@ -3575,14 +3575,14 @@ define <8 x i64> @zext_8x8mem_to_8x64(<8 x i8> *%i , <8 x i1> %mask) nounwind re
define <8 x i64> @sext_8x8mem_to_8x64mask(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_8x8mem_to_8x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxbq (%rdi), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x8mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbq (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3595,12 +3595,12 @@ define <8 x i64> @sext_8x8mem_to_8x64mask(<8 x i8> *%i , <8 x i1> %mask) nounwin
define <8 x i64> @sext_8x8mem_to_8x64(<8 x i8> *%i) nounwind readnone {
; GENERIC-LABEL: sext_8x8mem_to_8x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbq (%rdi), %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x8mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq (%rdi), %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <8 x i8>,<8 x i8> *%i,align 1
@@ -3610,14 +3610,14 @@ define <8 x i64> @sext_8x8mem_to_8x64(<8 x i8> *%i) nounwind readnone {
define <4 x i32> @zext_4x16mem_to_4x32(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_4x16mem_to_4x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxwd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_4x16mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxwd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [9:1.00]
@@ -3630,14 +3630,14 @@ define <4 x i32> @zext_4x16mem_to_4x32(<4 x i16> *%i , <4 x i1> %mask) nounwind
define <4 x i32> @sext_4x16mem_to_4x32mask(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_4x16mem_to_4x32mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxwd (%rdi), %xmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x16mem_to_4x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovsxwd (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3650,12 +3650,12 @@ define <4 x i32> @sext_4x16mem_to_4x32mask(<4 x i16> *%i , <4 x i1> %mask) nounw
define <4 x i32> @sext_4x16mem_to_4x32(<4 x i16> *%i) nounwind readnone {
; GENERIC-LABEL: sext_4x16mem_to_4x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x16mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd (%rdi), %xmm0 # sched: [6:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <4 x i16>,<4 x i16> *%i,align 1
@@ -3666,14 +3666,14 @@ define <4 x i32> @sext_4x16mem_to_4x32(<4 x i16> *%i) nounwind readnone {
define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x16mem_to_8x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x16mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [10:1.00]
@@ -3686,14 +3686,14 @@ define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind
define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_8x16mem_to_8x32mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxwd (%rdi), %ymm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x16mem_to_8x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxwd (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3706,12 +3706,12 @@ define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounw
define <8 x i32> @sext_8x16mem_to_8x32(<8 x i16> *%i) nounwind readnone {
; GENERIC-LABEL: sext_8x16mem_to_8x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd (%rdi), %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x16mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd (%rdi), %ymm0 # sched: [9:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <8 x i16>,<8 x i16> *%i,align 1
@@ -3721,14 +3721,14 @@ define <8 x i32> @sext_8x16mem_to_8x32(<8 x i16> *%i) nounwind readnone {
define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x16_to_8x32mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm1, %k1
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x16_to_8x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
@@ -3740,12 +3740,12 @@ define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind
define <8 x i32> @zext_8x16_to_8x32(<8 x i16> %a ) nounwind readnone {
; GENERIC-LABEL: zext_8x16_to_8x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x16_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = zext <8 x i16> %a to <8 x i32>
@@ -3754,14 +3754,14 @@ define <8 x i32> @zext_8x16_to_8x32(<8 x i16> %a ) nounwind readnone {
define <16 x i32> @zext_16x16mem_to_16x32(<16 x i16> *%i , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_16x16mem_to_16x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x16mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [10:1.00]
@@ -3774,14 +3774,14 @@ define <16 x i32> @zext_16x16mem_to_16x32(<16 x i16> *%i , <16 x i1> %mask) noun
define <16 x i32> @sext_16x16mem_to_16x32mask(<16 x i16> *%i , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_16x16mem_to_16x32mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxwd (%rdi), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16x16mem_to_16x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxwd (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3794,12 +3794,12 @@ define <16 x i32> @sext_16x16mem_to_16x32mask(<16 x i16> *%i , <16 x i1> %mask)
define <16 x i32> @sext_16x16mem_to_16x32(<16 x i16> *%i) nounwind readnone {
; GENERIC-LABEL: sext_16x16mem_to_16x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd (%rdi), %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16x16mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd (%rdi), %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <16 x i16>,<16 x i16> *%i,align 1
@@ -3808,14 +3808,14 @@ define <16 x i32> @sext_16x16mem_to_16x32(<16 x i16> *%i) nounwind readnone {
}
define <16 x i32> @zext_16x16_to_16x32mask(<16 x i16> %a , <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_16x16_to_16x32mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm1, %k1
; GENERIC-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x16_to_16x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
@@ -3827,12 +3827,12 @@ define <16 x i32> @zext_16x16_to_16x32mask(<16 x i16> %a , <16 x i1> %mask) noun
define <16 x i32> @zext_16x16_to_16x32(<16 x i16> %a ) nounwind readnone {
; GENERIC-LABEL: zext_16x16_to_16x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x16_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = zext <16 x i16> %a to <16 x i32>
@@ -3841,14 +3841,14 @@ define <16 x i32> @zext_16x16_to_16x32(<16 x i16> %a ) nounwind readnone {
define <2 x i64> @zext_2x16mem_to_2x64(<2 x i16> *%i , <2 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_2x16mem_to_2x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxwq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_2x16mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxwq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [9:1.00]
@@ -3861,14 +3861,14 @@ define <2 x i64> @zext_2x16mem_to_2x64(<2 x i16> *%i , <2 x i1> %mask) nounwind
define <2 x i64> @sext_2x16mem_to_2x64mask(<2 x i16> *%i , <2 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_2x16mem_to_2x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxwq (%rdi), %xmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_2x16mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovsxwq (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3881,12 +3881,12 @@ define <2 x i64> @sext_2x16mem_to_2x64mask(<2 x i16> *%i , <2 x i1> %mask) nounw
define <2 x i64> @sext_2x16mem_to_2x64(<2 x i16> *%i) nounwind readnone {
; GENERIC-LABEL: sext_2x16mem_to_2x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_2x16mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwq (%rdi), %xmm0 # sched: [6:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <2 x i16>,<2 x i16> *%i,align 1
@@ -3896,14 +3896,14 @@ define <2 x i64> @sext_2x16mem_to_2x64(<2 x i16> *%i) nounwind readnone {
define <4 x i64> @zext_4x16mem_to_4x64(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_4x16mem_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_4x16mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxwq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [10:1.00]
@@ -3916,14 +3916,14 @@ define <4 x i64> @zext_4x16mem_to_4x64(<4 x i16> *%i , <4 x i1> %mask) nounwind
define <4 x i64> @sext_4x16mem_to_4x64mask(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_4x16mem_to_4x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxwq (%rdi), %ymm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x16mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovsxwq (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3936,12 +3936,12 @@ define <4 x i64> @sext_4x16mem_to_4x64mask(<4 x i16> *%i , <4 x i1> %mask) nounw
define <4 x i64> @sext_4x16mem_to_4x64(<4 x i16> *%i) nounwind readnone {
; GENERIC-LABEL: sext_4x16mem_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwq (%rdi), %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x16mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwq (%rdi), %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <4 x i16>,<4 x i16> *%i,align 1
@@ -3951,14 +3951,14 @@ define <4 x i64> @sext_4x16mem_to_4x64(<4 x i16> *%i) nounwind readnone {
define <8 x i64> @zext_8x16mem_to_8x64(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x16mem_to_8x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x16mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [10:1.00]
@@ -3971,14 +3971,14 @@ define <8 x i64> @zext_8x16mem_to_8x64(<8 x i16> *%i , <8 x i1> %mask) nounwind
define <8 x i64> @sext_8x16mem_to_8x64mask(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_8x16mem_to_8x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxwq (%rdi), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x16mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxwq (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3991,12 +3991,12 @@ define <8 x i64> @sext_8x16mem_to_8x64mask(<8 x i16> *%i , <8 x i1> %mask) nounw
define <8 x i64> @sext_8x16mem_to_8x64(<8 x i16> *%i) nounwind readnone {
; GENERIC-LABEL: sext_8x16mem_to_8x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwq (%rdi), %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x16mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwq (%rdi), %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <8 x i16>,<8 x i16> *%i,align 1
@@ -4006,14 +4006,14 @@ define <8 x i64> @sext_8x16mem_to_8x64(<8 x i16> *%i) nounwind readnone {
define <8 x i64> @zext_8x16_to_8x64mask(<8 x i16> %a , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x16_to_8x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm1, %k1
; GENERIC-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x16_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
@@ -4025,12 +4025,12 @@ define <8 x i64> @zext_8x16_to_8x64mask(<8 x i16> %a , <8 x i1> %mask) nounwind
define <8 x i64> @zext_8x16_to_8x64(<8 x i16> %a) nounwind readnone {
; GENERIC-LABEL: zext_8x16_to_8x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x16_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%ret = zext <8 x i16> %a to <8 x i64>
@@ -4039,14 +4039,14 @@ define <8 x i64> @zext_8x16_to_8x64(<8 x i16> %a) nounwind readnone {
define <2 x i64> @zext_2x32mem_to_2x64(<2 x i32> *%i , <2 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_2x32mem_to_2x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxdq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_2x32mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxdq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero sched: [9:1.00]
@@ -4059,14 +4059,14 @@ define <2 x i64> @zext_2x32mem_to_2x64(<2 x i32> *%i , <2 x i1> %mask) nounwind
define <2 x i64> @sext_2x32mem_to_2x64mask(<2 x i32> *%i , <2 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_2x32mem_to_2x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxdq (%rdi), %xmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_2x32mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovsxdq (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -4079,12 +4079,12 @@ define <2 x i64> @sext_2x32mem_to_2x64mask(<2 x i32> *%i , <2 x i1> %mask) nounw
define <2 x i64> @sext_2x32mem_to_2x64(<2 x i32> *%i) nounwind readnone {
; GENERIC-LABEL: sext_2x32mem_to_2x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxdq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_2x32mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxdq (%rdi), %xmm0 # sched: [6:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <2 x i32>,<2 x i32> *%i,align 1
@@ -4094,14 +4094,14 @@ define <2 x i64> @sext_2x32mem_to_2x64(<2 x i32> *%i) nounwind readnone {
define <4 x i64> @zext_4x32mem_to_4x64(<4 x i32> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_4x32mem_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_4x32mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [10:1.00]
@@ -4114,14 +4114,14 @@ define <4 x i64> @zext_4x32mem_to_4x64(<4 x i32> *%i , <4 x i1> %mask) nounwind
define <4 x i64> @sext_4x32mem_to_4x64mask(<4 x i32> *%i , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_4x32mem_to_4x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxdq (%rdi), %ymm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x32mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovsxdq (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -4134,12 +4134,12 @@ define <4 x i64> @sext_4x32mem_to_4x64mask(<4 x i32> *%i , <4 x i1> %mask) nounw
define <4 x i64> @sext_4x32mem_to_4x64(<4 x i32> *%i) nounwind readnone {
; GENERIC-LABEL: sext_4x32mem_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxdq (%rdi), %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x32mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxdq (%rdi), %ymm0 # sched: [9:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <4 x i32>,<4 x i32> *%i,align 1
@@ -4149,12 +4149,12 @@ define <4 x i64> @sext_4x32mem_to_4x64(<4 x i32> *%i) nounwind readnone {
define <4 x i64> @sext_4x32_to_4x64(<4 x i32> %a) nounwind readnone {
; GENERIC-LABEL: sext_4x32_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_4x32_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = sext <4 x i32> %a to <4 x i64>
@@ -4163,14 +4163,14 @@ define <4 x i64> @sext_4x32_to_4x64(<4 x i32> %a) nounwind readnone {
define <4 x i64> @zext_4x32_to_4x64mask(<4 x i32> %a , <4 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_4x32_to_4x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm1, %xmm1, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_4x32_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
@@ -4182,14 +4182,14 @@ define <4 x i64> @zext_4x32_to_4x64mask(<4 x i32> %a , <4 x i1> %mask) nounwind
define <8 x i64> @zext_8x32mem_to_8x64(<8 x i32> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x32mem_to_8x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x32mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [10:1.00]
@@ -4202,14 +4202,14 @@ define <8 x i64> @zext_8x32mem_to_8x64(<8 x i32> *%i , <8 x i1> %mask) nounwind
define <8 x i64> @sext_8x32mem_to_8x64mask(<8 x i32> *%i , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: sext_8x32mem_to_8x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k1
; GENERIC-NEXT: vpmovsxdq (%rdi), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x32mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovsxdq (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -4222,12 +4222,12 @@ define <8 x i64> @sext_8x32mem_to_8x64mask(<8 x i32> *%i , <8 x i1> %mask) nounw
define <8 x i64> @sext_8x32mem_to_8x64(<8 x i32> *%i) nounwind readnone {
; GENERIC-LABEL: sext_8x32mem_to_8x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxdq (%rdi), %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x32mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxdq (%rdi), %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load <8 x i32>,<8 x i32> *%i,align 1
@@ -4237,12 +4237,12 @@ define <8 x i64> @sext_8x32mem_to_8x64(<8 x i32> *%i) nounwind readnone {
define <8 x i64> @sext_8x32_to_8x64(<8 x i32> %a) nounwind readnone {
; GENERIC-LABEL: sext_8x32_to_8x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxdq %ymm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8x32_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxdq %ymm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%x = sext <8 x i32> %a to <8 x i64>
@@ -4251,14 +4251,14 @@ define <8 x i64> @sext_8x32_to_8x64(<8 x i32> %a) nounwind readnone {
define <8 x i64> @zext_8x32_to_8x64mask(<8 x i32> %a , <8 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: zext_8x32_to_8x64mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm1, %k1
; GENERIC-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x32_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero sched: [3:1.00]
@@ -4269,12 +4269,12 @@ define <8 x i64> @zext_8x32_to_8x64mask(<8 x i32> %a , <8 x i1> %mask) nounwind
}
define <8 x float> @fptrunc_test(<8 x double> %a) nounwind readnone {
; GENERIC-LABEL: fptrunc_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtpd2ps %zmm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: fptrunc_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtpd2ps %zmm0, %ymm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fptrunc <8 x double> %a to <8 x float>
@@ -4283,12 +4283,12 @@ define <8 x float> @fptrunc_test(<8 x double> %a) nounwind readnone {
define <8 x double> @fpext_test(<8 x float> %a) nounwind readnone {
; GENERIC-LABEL: fpext_test:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtps2pd %ymm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: fpext_test:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtps2pd %ymm0, %zmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = fpext <8 x float> %a to <8 x double>
@@ -4297,13 +4297,13 @@ define <8 x double> @fpext_test(<8 x float> %a) nounwind readnone {
define <16 x i32> @zext_16i1_to_16xi32(i16 %b) {
; GENERIC-LABEL: zext_16i1_to_16xi32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16i1_to_16xi32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4314,13 +4314,13 @@ define <16 x i32> @zext_16i1_to_16xi32(i16 %b) {
define <8 x i64> @zext_8i1_to_8xi64(i8 %b) {
; GENERIC-LABEL: zext_8i1_to_8xi64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8i1_to_8xi64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4331,7 +4331,7 @@ define <8 x i64> @zext_8i1_to_8xi64(i8 %b) {
define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; GENERIC-LABEL: trunc_16i8_to_16i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
@@ -4339,7 +4339,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_16i8_to_16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
@@ -4352,7 +4352,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; GENERIC-LABEL: trunc_16i32_to_16i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax
@@ -4361,7 +4361,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_16i32_to_16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
@@ -4375,7 +4375,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
define <4 x i32> @trunc_4i32_to_4i1(<4 x i32> %a, <4 x i32> %b) {
; GENERIC-LABEL: trunc_4i32_to_4i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpslld $31, %xmm1, %xmm0 # sched: [1:1.00]
@@ -4384,7 +4384,7 @@ define <4 x i32> @trunc_4i32_to_4i1(<4 x i32> %a, <4 x i32> %b) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_4i32_to_4i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpslld $31, %xmm1, %xmm0 # sched: [1:0.50]
@@ -4401,7 +4401,7 @@ define <4 x i32> @trunc_4i32_to_4i1(<4 x i32> %a, <4 x i32> %b) {
define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; GENERIC-LABEL: trunc_8i16_to_8i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
@@ -4409,7 +4409,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_8i16_to_8i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
@@ -4422,13 +4422,13 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; GENERIC-LABEL: sext_8i1_8i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpled %ymm0, %ymm1, %k0
; GENERIC-NEXT: vpmovm2d %k0, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8i1_8i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpled %ymm0, %ymm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4441,7 +4441,7 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define i16 @trunc_i32_to_i1(i32 %a) {
; GENERIC-LABEL: trunc_i32_to_i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movw $-4, %ax # sched: [1:0.33]
; GENERIC-NEXT: kmovd %eax, %k0
; GENERIC-NEXT: kshiftrw $1, %k0, %k0
@@ -4454,7 +4454,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_i32_to_i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movw $-4, %ax # sched: [1:0.25]
; SKX-NEXT: kmovd %eax, %k0 # sched: [1:1.00]
; SKX-NEXT: kshiftrw $1, %k0, %k0 # sched: [3:1.00]
@@ -4473,14 +4473,14 @@ define i16 @trunc_i32_to_i1(i32 %a) {
define <8 x i16> @sext_8i1_8i16(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; GENERIC-LABEL: sext_8i1_8i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
; GENERIC-NEXT: vpmovm2w %k0, %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8i1_8i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper # sched: [4:1.00]
@@ -4492,13 +4492,13 @@ define <8 x i16> @sext_8i1_8i16(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define <16 x i32> @sext_16i1_16i32(<16 x i32> %a1, <16 x i32> %a2) nounwind {
; GENERIC-LABEL: sext_16i1_16i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
; GENERIC-NEXT: vpmovm2d %k0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_16i1_16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %zmm0, %zmm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4509,13 +4509,13 @@ define <16 x i32> @sext_16i1_16i32(<16 x i32> %a1, <16 x i32> %a2) nounwind {
define <8 x i64> @sext_8i1_8i64(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; GENERIC-LABEL: sext_8i1_8i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
; GENERIC-NEXT: vpmovm2q %k0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: sext_8i1_8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4526,14 +4526,14 @@ define <8 x i64> @sext_8i1_8i64(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define void @extload_v8i64(<8 x i8>* %a, <8 x i64>* %res) {
; GENERIC-LABEL: extload_v8i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbq (%rdi), %zmm0
; GENERIC-NEXT: vmovdqa64 %zmm0, (%rsi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: extload_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq (%rdi), %zmm0 # sched: [10:1.00]
; SKX-NEXT: vmovdqa64 %zmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
@@ -4546,7 +4546,7 @@ define void @extload_v8i64(<8 x i8>* %a, <8 x i64>* %res) {
define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: test21:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %zmm2, %zmm2 # sched: [3:1.00]
; GENERIC-NEXT: vpmovb2m %zmm2, %k1
; GENERIC-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -4555,7 +4555,7 @@ define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test21:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %zmm2, %zmm2 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %zmm2, %k1 # sched: [1:1.00]
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -4568,12 +4568,12 @@ define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
define <16 x i16> @shuffle_zext_16x8_to_16x16(<16 x i8> %a) nounwind readnone {
; GENERIC-LABEL: shuffle_zext_16x8_to_16x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: shuffle_zext_16x8_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%1 = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <32 x i32> <i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7, i32 16, i32 8, i32 16, i32 9, i32 16, i32 10, i32 16, i32 11, i32 16, i32 12, i32 16, i32 13, i32 16, i32 14, i32 16, i32 15, i32 16>
@@ -4583,14 +4583,14 @@ define <16 x i16> @shuffle_zext_16x8_to_16x16(<16 x i8> %a) nounwind readnone {
define <16 x i16> @shuffle_zext_16x8_to_16x16_mask(<16 x i8> %a, <16 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: shuffle_zext_16x8_to_16x16_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm1, %k1
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: shuffle_zext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
@@ -4603,12 +4603,12 @@ define <16 x i16> @shuffle_zext_16x8_to_16x16_mask(<16 x i8> %a, <16 x i1> %mask
define <16 x i16> @zext_32x8_to_16x16(<32 x i8> %a) {
; GENERIC-LABEL: zext_32x8_to_16x16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_32x8_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 1, i32 32, i32 2, i32 32, i32 3, i32 32, i32 4, i32 32, i32 5, i32 32, i32 6, i32 32, i32 7, i32 32, i32 8, i32 32, i32 9, i32 32, i32 10, i32 32, i32 11, i32 32, i32 12, i32 32, i32 13, i32 32, i32 14, i32 32, i32 15, i32 32>
@@ -4618,12 +4618,12 @@ define <16 x i16> @zext_32x8_to_16x16(<32 x i8> %a) {
define <8 x i32> @zext_32x8_to_8x32(<32 x i8> %a) {
; GENERIC-LABEL: zext_32x8_to_8x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_32x8_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 4, i32 32, i32 32, i32 32, i32 5, i32 32, i32 32, i32 32, i32 6, i32 32, i32 32, i32 32, i32 7, i32 32, i32 32, i32 32>
@@ -4633,12 +4633,12 @@ define <8 x i32> @zext_32x8_to_8x32(<32 x i8> %a) {
define <4 x i64> @zext_32x8_to_4x64(<32 x i8> %a) {
; GENERIC-LABEL: zext_32x8_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_32x8_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
@@ -4648,12 +4648,12 @@ define <4 x i64> @zext_32x8_to_4x64(<32 x i8> %a) {
define <8 x i32> @zext_16x16_to_8x32(<16 x i16> %a) {
; GENERIC-LABEL: zext_16x16_to_8x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x16_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%1 = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7, i32 16>
@@ -4663,12 +4663,12 @@ define <8 x i32> @zext_16x16_to_8x32(<16 x i16> %a) {
define <4 x i64> @zext_16x16_to_4x64(<16 x i16> %a) {
; GENERIC-LABEL: zext_16x16_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16x16_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%1 = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 2, i32 16, i32 16, i32 16, i32 3, i32 16, i32 16, i32 16>
@@ -4678,12 +4678,12 @@ define <4 x i64> @zext_16x16_to_4x64(<16 x i16> %a) {
define <4 x i64> @zext_8x32_to_4x64(<8 x i32> %a) {
; GENERIC-LABEL: zext_8x32_to_4x64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_8x32_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%1 = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8>
@@ -4693,13 +4693,13 @@ define <4 x i64> @zext_8x32_to_4x64(<8 x i32> %a) {
define <64 x i8> @zext_64xi1_to_64xi8(<64 x i8> %x, <64 x i8> %y) #0 {
; GENERIC-LABEL: zext_64xi1_to_64xi8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; GENERIC-NEXT: vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_64xi1_to_64xi8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqb %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4710,13 +4710,13 @@ define <64 x i8> @zext_64xi1_to_64xi8(<64 x i8> %x, <64 x i8> %y) #0 {
define <32 x i16> @zext_32xi1_to_32xi16(<32 x i16> %x, <32 x i16> %y) #0 {
; GENERIC-LABEL: zext_32xi1_to_32xi16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_32xi1_to_32xi16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4727,13 +4727,13 @@ define <32 x i16> @zext_32xi1_to_32xi16(<32 x i16> %x, <32 x i16> %y) #0 {
define <16 x i16> @zext_16xi1_to_16xi16(<16 x i16> %x, <16 x i16> %y) #0 {
; GENERIC-LABEL: zext_16xi1_to_16xi16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_16xi1_to_16xi16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4745,13 +4745,13 @@ define <16 x i16> @zext_16xi1_to_16xi16(<16 x i16> %x, <16 x i16> %y) #0 {
define <32 x i8> @zext_32xi1_to_32xi8(<32 x i16> %x, <32 x i16> %y) #0 {
; GENERIC-LABEL: zext_32xi1_to_32xi8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_32xi1_to_32xi8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4762,7 +4762,7 @@ define <32 x i8> @zext_32xi1_to_32xi8(<32 x i16> %x, <32 x i16> %y) #0 {
define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
; GENERIC-LABEL: zext_4xi1_to_4x32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] sched: [6:0.50]
; GENERIC-NEXT: vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
@@ -4771,7 +4771,7 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_4xi1_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] sched: [6:0.50]
; SKX-NEXT: vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
@@ -4785,7 +4785,7 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
; GENERIC-LABEL: zext_2xi1_to_2xi64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] sched: [6:0.50]
; GENERIC-NEXT: vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
@@ -4794,7 +4794,7 @@ define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_2xi1_to_2xi64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] sched: [6:0.50]
; SKX-NEXT: vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
@@ -4808,13 +4808,13 @@ define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
define <16 x float> @test_x86_fmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; GENERIC-LABEL: test_x86_fmadd_ps_z:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: vaddps %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmadd_ps_z:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vaddps %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4825,13 +4825,13 @@ define <16 x float> @test_x86_fmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16
define <16 x float> @test_x86_fmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; GENERIC-LABEL: test_x86_fmsub_ps_z:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: vsubps %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmsub_ps_z:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vsubps %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4842,13 +4842,13 @@ define <16 x float> @test_x86_fmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16
define <16 x float> @test_x86_fnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; GENERIC-LABEL: test_x86_fnmadd_ps_z:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: vsubps %zmm0, %zmm2, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fnmadd_ps_z:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vsubps %zmm0, %zmm2, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4859,14 +4859,14 @@ define <16 x float> @test_x86_fnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <1
define <16 x float> @test_x86_fnmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; GENERIC-LABEL: test_x86_fnmsub_ps_z:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vsubps %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fnmsub_ps_z:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vsubps %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
@@ -4882,13 +4882,13 @@ define <16 x float> @test_x86_fnmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <1
define <8 x double> @test_x86_fmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; GENERIC-LABEL: test_x86_fmadd_pd_z:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulpd %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: vaddpd %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmadd_pd_z:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulpd %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vaddpd %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4899,13 +4899,13 @@ define <8 x double> @test_x86_fmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8
define <8 x double> @test_x86_fmsub_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; GENERIC-LABEL: test_x86_fmsub_pd_z:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulpd %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: vsubpd %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmsub_pd_z:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulpd %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vsubpd %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4916,13 +4916,13 @@ define <8 x double> @test_x86_fmsub_pd_z(<8 x double> %a0, <8 x double> %a1, <8
define double @test_x86_fmsub_213(double %a0, double %a1, double %a2) {
; GENERIC-LABEL: test_x86_fmsub_213:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: vsubsd %xmm2, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmsub_213:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vsubsd %xmm2, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4933,13 +4933,13 @@ define double @test_x86_fmsub_213(double %a0, double %a1, double %a2) {
define double @test_x86_fmsub_213_m(double %a0, double %a1, double * %a2_ptr) {
; GENERIC-LABEL: test_x86_fmsub_213_m:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmsub_213_m:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4951,13 +4951,13 @@ define double @test_x86_fmsub_213_m(double %a0, double %a1, double * %a2_ptr) {
define double @test_x86_fmsub_231_m(double %a0, double %a1, double * %a2_ptr) {
; GENERIC-LABEL: test_x86_fmsub_231_m:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmsub_231_m:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4969,13 +4969,13 @@ define double @test_x86_fmsub_231_m(double %a0, double %a1, double * %a2_ptr) {
define <16 x float> @test231_br(<16 x float> %a1, <16 x float> %a2) nounwind {
; GENERIC-LABEL: test231_br:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [9:1.00]
; GENERIC-NEXT: vaddps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test231_br:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4986,13 +4986,13 @@ define <16 x float> @test231_br(<16 x float> %a1, <16 x float> %a2) nounwind {
define <16 x float> @test213_br(<16 x float> %a1, <16 x float> %a2) nounwind {
; GENERIC-LABEL: test213_br:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test213_br:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
; SKX-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5004,7 +5004,7 @@ define <16 x float> @test213_br(<16 x float> %a1, <16 x float> %a2) nounwind {
;mask (a*c+b , a)
define <16 x float> @test_x86_fmadd132_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; GENERIC-LABEL: test_x86_fmadd132_ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm2, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm2, %k1
; GENERIC-NEXT: vmulps (%rdi), %zmm0, %zmm2 # sched: [9:1.00]
@@ -5012,7 +5012,7 @@ define <16 x float> @test_x86_fmadd132_ps(<16 x float> %a0, <16 x float> %a1, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmadd132_ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm2, %k1 # sched: [1:1.00]
; SKX-NEXT: vmulps (%rdi), %zmm0, %zmm2 # sched: [11:0.50]
@@ -5028,7 +5028,7 @@ define <16 x float> @test_x86_fmadd132_ps(<16 x float> %a0, <16 x float> %a1, <1
;mask (a*c+b , b)
define <16 x float> @test_x86_fmadd231_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; GENERIC-LABEL: test_x86_fmadd231_ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm2, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm2, %k1
; GENERIC-NEXT: vmulps (%rdi), %zmm0, %zmm0 # sched: [9:1.00]
@@ -5037,7 +5037,7 @@ define <16 x float> @test_x86_fmadd231_ps(<16 x float> %a0, <16 x float> %a1, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmadd231_ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm2, %k1 # sched: [1:1.00]
; SKX-NEXT: vmulps (%rdi), %zmm0, %zmm0 # sched: [11:0.50]
@@ -5054,7 +5054,7 @@ define <16 x float> @test_x86_fmadd231_ps(<16 x float> %a0, <16 x float> %a1, <1
;mask (b*a+c , b)
define <16 x float> @test_x86_fmadd213_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; GENERIC-LABEL: test_x86_fmadd213_ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm2, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm2, %k1
; GENERIC-NEXT: vmulps %zmm0, %zmm1, %zmm0 # sched: [5:1.00]
@@ -5063,7 +5063,7 @@ define <16 x float> @test_x86_fmadd213_ps(<16 x float> %a0, <16 x float> %a1, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_x86_fmadd213_ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm2, %k1 # sched: [1:1.00]
; SKX-NEXT: vmulps %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
@@ -5079,13 +5079,13 @@ define <16 x float> @test_x86_fmadd213_ps(<16 x float> %a0, <16 x float> %a1, <1
define <16 x i32> @vpandd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; GENERIC-LABEL: vpandd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vpandq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpandd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vpandq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5099,13 +5099,13 @@ entry:
define <16 x i32> @vpandnd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; GENERIC-LABEL: vpandnd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vpandnq %zmm0, %zmm1, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpandnd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vpandnq %zmm0, %zmm1, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5121,13 +5121,13 @@ entry:
define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; GENERIC-LABEL: vpord:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vporq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpord:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vporq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5141,13 +5141,13 @@ entry:
define <16 x i32> @vpxord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; GENERIC-LABEL: vpxord:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpxord:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vpxorq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5161,13 +5161,13 @@ entry:
define <8 x i64> @vpandq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; GENERIC-LABEL: vpandq:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vpandq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpandq:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vpandq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5180,13 +5180,13 @@ entry:
define <8 x i64> @vpandnq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; GENERIC-LABEL: vpandnq:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vpandnq %zmm0, %zmm1, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpandnq:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vpandnq %zmm0, %zmm1, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5200,13 +5200,13 @@ entry:
define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; GENERIC-LABEL: vporq:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vporq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vporq:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vporq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5219,13 +5219,13 @@ entry:
define <8 x i64> @vpxorq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; GENERIC-LABEL: vpxorq:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
; GENERIC-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpxorq:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
; SKX-NEXT: vpxorq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5238,12 +5238,12 @@ entry:
define <64 x i8> @and_v64i8(<64 x i8> %a, <64 x i8> %b) {
; GENERIC-LABEL: and_v64i8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: and_v64i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%res = and <64 x i8> %a, %b
@@ -5252,12 +5252,12 @@ define <64 x i8> @and_v64i8(<64 x i8> %a, <64 x i8> %b) {
define <64 x i8> @andn_v64i8(<64 x i8> %a, <64 x i8> %b) {
; GENERIC-LABEL: andn_v64i8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandnps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: andn_v64i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandnps %zmm0, %zmm1, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b2 = xor <64 x i8> %b, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -5270,12 +5270,12 @@ define <64 x i8> @andn_v64i8(<64 x i8> %a, <64 x i8> %b) {
define <64 x i8> @or_v64i8(<64 x i8> %a, <64 x i8> %b) {
; GENERIC-LABEL: or_v64i8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vorps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: or_v64i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%res = or <64 x i8> %a, %b
@@ -5284,12 +5284,12 @@ define <64 x i8> @or_v64i8(<64 x i8> %a, <64 x i8> %b) {
define <64 x i8> @xor_v64i8(<64 x i8> %a, <64 x i8> %b) {
; GENERIC-LABEL: xor_v64i8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: xor_v64i8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%res = xor <64 x i8> %a, %b
@@ -5298,12 +5298,12 @@ define <64 x i8> @xor_v64i8(<64 x i8> %a, <64 x i8> %b) {
define <32 x i16> @and_v32i16(<32 x i16> %a, <32 x i16> %b) {
; GENERIC-LABEL: and_v32i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: and_v32i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%res = and <32 x i16> %a, %b
@@ -5312,12 +5312,12 @@ define <32 x i16> @and_v32i16(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @andn_v32i16(<32 x i16> %a, <32 x i16> %b) {
; GENERIC-LABEL: andn_v32i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vandnps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: andn_v32i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandnps %zmm0, %zmm1, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%b2 = xor <32 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
@@ -5328,12 +5328,12 @@ define <32 x i16> @andn_v32i16(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @or_v32i16(<32 x i16> %a, <32 x i16> %b) {
; GENERIC-LABEL: or_v32i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vorps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: or_v32i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%res = or <32 x i16> %a, %b
@@ -5342,12 +5342,12 @@ define <32 x i16> @or_v32i16(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @xor_v32i16(<32 x i16> %a, <32 x i16> %b) {
; GENERIC-LABEL: xor_v32i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: xor_v32i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
%res = xor <32 x i16> %a, %b
@@ -5356,14 +5356,14 @@ define <32 x i16> @xor_v32i16(<32 x i16> %a, <32 x i16> %b) {
define <16 x float> @masked_and_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
; GENERIC-LABEL: masked_and_v16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: vaddps %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: masked_and_v16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5381,14 +5381,14 @@ define <16 x float> @masked_and_v16f32(<16 x float> %a, <16 x float> %b, <16 x f
define <16 x float> @masked_or_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
; GENERIC-LABEL: masked_or_v16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: vaddps %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: masked_or_v16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5406,14 +5406,14 @@ define <16 x float> @masked_or_v16f32(<16 x float> %a, <16 x float> %b, <16 x fl
define <16 x float> @masked_xor_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
; GENERIC-LABEL: masked_xor_v16f32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: vaddps %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: masked_xor_v16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5431,14 +5431,14 @@ define <16 x float> @masked_xor_v16f32(<16 x float> %a, <16 x float> %b, <16 x f
define <8 x double> @masked_and_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
; GENERIC-LABEL: masked_and_v8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: vaddpd %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: masked_and_v8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5456,14 +5456,14 @@ define <8 x double> @masked_and_v8f64(<8 x double> %a, <8 x double> %b, <8 x dou
define <8 x double> @masked_or_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
; GENERIC-LABEL: masked_or_v8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: vaddpd %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: masked_or_v8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5481,14 +5481,14 @@ define <8 x double> @masked_or_v8f64(<8 x double> %a, <8 x double> %b, <8 x doub
define <8 x double> @masked_xor_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
; GENERIC-LABEL: masked_xor_v8f64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: vaddpd %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: masked_xor_v8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5506,13 +5506,13 @@ define <8 x double> @masked_xor_v8f64(<8 x double> %a, <8 x double> %b, <8 x dou
define <8 x i64> @test_mm512_mask_and_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
; GENERIC-LABEL: test_mm512_mask_and_epi32:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_and_epi32:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5528,13 +5528,13 @@ entry:
define <8 x i64> @test_mm512_mask_or_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
; GENERIC-LABEL: test_mm512_mask_or_epi32:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_or_epi32:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5550,13 +5550,13 @@ entry:
define <8 x i64> @test_mm512_mask_xor_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
; GENERIC-LABEL: test_mm512_mask_xor_epi32:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vxorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_xor_epi32:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vxorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5572,13 +5572,13 @@ entry:
define <8 x double> @test_mm512_mask_xor_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; GENERIC-LABEL: test_mm512_mask_xor_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vxorpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_xor_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vxorpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5594,13 +5594,13 @@ entry:
define <8 x double> @test_mm512_maskz_xor_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; GENERIC-LABEL: test_mm512_maskz_xor_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vxorpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_maskz_xor_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vxorpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5616,13 +5616,13 @@ entry:
define <16 x float> @test_mm512_mask_xor_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; GENERIC-LABEL: test_mm512_mask_xor_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vxorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_xor_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vxorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5638,13 +5638,13 @@ entry:
define <16 x float> @test_mm512_maskz_xor_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; GENERIC-LABEL: test_mm512_maskz_xor_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vxorps %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_maskz_xor_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vxorps %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5660,13 +5660,13 @@ entry:
define <8 x double> @test_mm512_mask_or_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; GENERIC-LABEL: test_mm512_mask_or_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vorpd %zmm1, %zmm2, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_or_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vorpd %zmm1, %zmm2, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5682,13 +5682,13 @@ entry:
define <8 x double> @test_mm512_maskz_or_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; GENERIC-LABEL: test_mm512_maskz_or_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vorpd %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_maskz_or_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vorpd %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5704,13 +5704,13 @@ entry:
define <16 x float> @test_mm512_mask_or_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; GENERIC-LABEL: test_mm512_mask_or_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vorps %zmm1, %zmm2, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_or_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vorps %zmm1, %zmm2, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5726,13 +5726,13 @@ entry:
define <16 x float> @test_mm512_maskz_or_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; GENERIC-LABEL: test_mm512_maskz_or_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vorps %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_maskz_or_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vorps %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5748,13 +5748,13 @@ entry:
define <8 x double> @test_mm512_mask_and_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; GENERIC-LABEL: test_mm512_mask_and_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandpd %zmm1, %zmm2, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_and_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandpd %zmm1, %zmm2, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5770,13 +5770,13 @@ entry:
define <8 x double> @test_mm512_maskz_and_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; GENERIC-LABEL: test_mm512_maskz_and_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandpd %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_maskz_and_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandpd %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5792,13 +5792,13 @@ entry:
define <16 x float> @test_mm512_mask_and_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; GENERIC-LABEL: test_mm512_mask_and_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandps %zmm1, %zmm2, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_and_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandps %zmm1, %zmm2, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5814,13 +5814,13 @@ entry:
define <16 x float> @test_mm512_maskz_and_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; GENERIC-LABEL: test_mm512_maskz_and_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandps %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_maskz_and_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandps %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5836,13 +5836,13 @@ entry:
define <8 x double> @test_mm512_mask_andnot_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; GENERIC-LABEL: test_mm512_mask_andnot_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandnpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_andnot_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandnpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5859,13 +5859,13 @@ entry:
define <8 x double> @test_mm512_maskz_andnot_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
; GENERIC-LABEL: test_mm512_maskz_andnot_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandnpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_maskz_andnot_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandnpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5882,13 +5882,13 @@ entry:
define <16 x float> @test_mm512_mask_andnot_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; GENERIC-LABEL: test_mm512_mask_andnot_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandnps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_mask_andnot_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandnps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5905,13 +5905,13 @@ entry:
define <16 x float> @test_mm512_maskz_andnot_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
; GENERIC-LABEL: test_mm512_maskz_andnot_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: vandnps %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_maskz_andnot_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: vandnps %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5928,12 +5928,12 @@ entry:
define i32 @mov_test1(float %x) {
; GENERIC-LABEL: mov_test1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovd %xmm0, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovd %xmm0, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
%res = bitcast float %x to i32
@@ -5942,12 +5942,12 @@ define i32 @mov_test1(float %x) {
define <4 x i32> @mov_test2(i32 %x) {
; GENERIC-LABEL: mov_test2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = insertelement <4 x i32>undef, i32 %x, i32 0
@@ -5956,12 +5956,12 @@ define <4 x i32> @mov_test2(i32 %x) {
define <2 x i64> @mov_test3(i64 %x) {
; GENERIC-LABEL: mov_test3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovq %rdi, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovq %rdi, %xmm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = insertelement <2 x i64>undef, i64 %x, i32 0
@@ -5970,12 +5970,12 @@ define <2 x i64> @mov_test3(i64 %x) {
define <4 x i32> @mov_test4(i32* %x) {
; GENERIC-LABEL: mov_test4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%y = load i32, i32* %x
@@ -5985,12 +5985,12 @@ define <4 x i32> @mov_test4(i32* %x) {
define void @mov_test5(float %x, float* %y) {
; GENERIC-LABEL: mov_test5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovss %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
store float %x, float* %y, align 4
@@ -5999,12 +5999,12 @@ define void @mov_test5(float %x, float* %y) {
define void @mov_test6(double %x, double* %y) {
; GENERIC-LABEL: mov_test6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovsd %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovsd %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
store double %x, double* %y, align 8
@@ -6013,12 +6013,12 @@ define void @mov_test6(double %x, double* %y) {
define float @mov_test7(i32* %x) {
; GENERIC-LABEL: mov_test7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%y = load i32, i32* %x
@@ -6028,12 +6028,12 @@ define float @mov_test7(i32* %x) {
define i32 @mov_test8(<4 x i32> %x) {
; GENERIC-LABEL: mov_test8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovd %xmm0, %eax # sched: [2:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovd %xmm0, %eax # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = extractelement <4 x i32> %x, i32 0
@@ -6042,12 +6042,12 @@ define i32 @mov_test8(<4 x i32> %x) {
define i64 @mov_test9(<2 x i64> %x) {
; GENERIC-LABEL: mov_test9:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovq %xmm0, %rax # sched: [2:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test9:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovq %xmm0, %rax # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = extractelement <2 x i64> %x, i32 0
@@ -6056,12 +6056,12 @@ define i64 @mov_test9(<2 x i64> %x) {
define <4 x i32> @mov_test10(i32* %x) {
; GENERIC-LABEL: mov_test10:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test10:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%y = load i32, i32* %x, align 4
@@ -6071,12 +6071,12 @@ define <4 x i32> @mov_test10(i32* %x) {
define <4 x float> @mov_test11(float* %x) {
; GENERIC-LABEL: mov_test11:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test11:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%y = load float, float* %x, align 4
@@ -6086,12 +6086,12 @@ define <4 x float> @mov_test11(float* %x) {
define <2 x double> @mov_test12(double* %x) {
; GENERIC-LABEL: mov_test12:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test12:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%y = load double, double* %x, align 8
@@ -6101,12 +6101,12 @@ define <2 x double> @mov_test12(double* %x) {
define <2 x i64> @mov_test13(i64 %x) {
; GENERIC-LABEL: mov_test13:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovq %rdi, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test13:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovq %rdi, %xmm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = insertelement <2 x i64>zeroinitializer, i64 %x, i32 0
@@ -6115,12 +6115,12 @@ define <2 x i64> @mov_test13(i64 %x) {
define <4 x i32> @mov_test14(i32 %x) {
; GENERIC-LABEL: mov_test14:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test14:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = insertelement <4 x i32>zeroinitializer, i32 %x, i32 0
@@ -6129,12 +6129,12 @@ define <4 x i32> @mov_test14(i32 %x) {
define <4 x i32> @mov_test15(i32* %x) {
; GENERIC-LABEL: mov_test15:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test15:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%y = load i32, i32* %x, align 4
@@ -6144,12 +6144,12 @@ define <4 x i32> @mov_test15(i32* %x) {
define <16 x i32> @mov_test16(i8 * %addr) {
; GENERIC-LABEL: mov_test16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%vaddr = bitcast i8* %addr to <16 x i32>*
@@ -6159,12 +6159,12 @@ define <16 x i32> @mov_test16(i8 * %addr) {
define <16 x i32> @mov_test17(i8 * %addr) {
; GENERIC-LABEL: mov_test17:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test17:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%vaddr = bitcast i8* %addr to <16 x i32>*
@@ -6174,13 +6174,13 @@ define <16 x i32> @mov_test17(i8 * %addr) {
define void @mov_test18(i8 * %addr, <8 x i64> %data) {
; GENERIC-LABEL: mov_test18:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test18:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6191,13 +6191,13 @@ define void @mov_test18(i8 * %addr, <8 x i64> %data) {
define void @mov_test19(i8 * %addr, <16 x i32> %data) {
; GENERIC-LABEL: mov_test19:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test19:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6208,13 +6208,13 @@ define void @mov_test19(i8 * %addr, <16 x i32> %data) {
define void @mov_test20(i8 * %addr, <16 x i32> %data) {
; GENERIC-LABEL: mov_test20:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test20:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6225,12 +6225,12 @@ define void @mov_test20(i8 * %addr, <16 x i32> %data) {
define <8 x i64> @mov_test21(i8 * %addr) {
; GENERIC-LABEL: mov_test21:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test21:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%vaddr = bitcast i8* %addr to <8 x i64>*
@@ -6240,13 +6240,13 @@ define <8 x i64> @mov_test21(i8 * %addr) {
define void @mov_test22(i8 * %addr, <8 x i64> %data) {
; GENERIC-LABEL: mov_test22:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test22:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6257,12 +6257,12 @@ define void @mov_test22(i8 * %addr, <8 x i64> %data) {
define <8 x i64> @mov_test23(i8 * %addr) {
; GENERIC-LABEL: mov_test23:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test23:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%vaddr = bitcast i8* %addr to <8 x i64>*
@@ -6272,13 +6272,13 @@ define <8 x i64> @mov_test23(i8 * %addr) {
define void @mov_test24(i8 * %addr, <8 x double> %data) {
; GENERIC-LABEL: mov_test24:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test24:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6289,12 +6289,12 @@ define void @mov_test24(i8 * %addr, <8 x double> %data) {
define <8 x double> @mov_test25(i8 * %addr) {
; GENERIC-LABEL: mov_test25:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test25:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%vaddr = bitcast i8* %addr to <8 x double>*
@@ -6304,13 +6304,13 @@ define <8 x double> @mov_test25(i8 * %addr) {
define void @mov_test26(i8 * %addr, <16 x float> %data) {
; GENERIC-LABEL: mov_test26:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test26:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6321,12 +6321,12 @@ define void @mov_test26(i8 * %addr, <16 x float> %data) {
define <16 x float> @mov_test27(i8 * %addr) {
; GENERIC-LABEL: mov_test27:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test27:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%vaddr = bitcast i8* %addr to <16 x float>*
@@ -6336,13 +6336,13 @@ define <16 x float> @mov_test27(i8 * %addr) {
define void @mov_test28(i8 * %addr, <8 x double> %data) {
; GENERIC-LABEL: mov_test28:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test28:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6353,12 +6353,12 @@ define void @mov_test28(i8 * %addr, <8 x double> %data) {
define <8 x double> @mov_test29(i8 * %addr) {
; GENERIC-LABEL: mov_test29:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test29:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%vaddr = bitcast i8* %addr to <8 x double>*
@@ -6368,13 +6368,13 @@ define <8 x double> @mov_test29(i8 * %addr) {
define void @mov_test30(i8 * %addr, <16 x float> %data) {
; GENERIC-LABEL: mov_test30:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test30:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6385,12 +6385,12 @@ define void @mov_test30(i8 * %addr, <16 x float> %data) {
define <16 x float> @mov_test31(i8 * %addr) {
; GENERIC-LABEL: mov_test31:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test31:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%vaddr = bitcast i8* %addr to <16 x float>*
@@ -6400,14 +6400,14 @@ define <16 x float> @mov_test31(i8 * %addr) {
define <16 x i32> @mov_test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; GENERIC-LABEL: mov_test32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vmovdqa32 (%rdi), %zmm0 {%k1} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqa32 (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6421,14 +6421,14 @@ define <16 x i32> @mov_test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
define <16 x i32> @mov_test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; GENERIC-LABEL: mov_test33:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test33:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6442,14 +6442,14 @@ define <16 x i32> @mov_test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
define <16 x i32> @mov_test34(i8 * %addr, <16 x i32> %mask1) {
; GENERIC-LABEL: mov_test34:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
; GENERIC-NEXT: vmovdqa32 (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test34:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqa32 (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6463,14 +6463,14 @@ define <16 x i32> @mov_test34(i8 * %addr, <16 x i32> %mask1) {
define <16 x i32> @mov_test35(i8 * %addr, <16 x i32> %mask1) {
; GENERIC-LABEL: mov_test35:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
; GENERIC-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test35:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6484,14 +6484,14 @@ define <16 x i32> @mov_test35(i8 * %addr, <16 x i32> %mask1) {
define <8 x i64> @mov_test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
; GENERIC-LABEL: mov_test36:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm0 {%k1} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test36:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqa64 (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6505,14 +6505,14 @@ define <8 x i64> @mov_test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
define <8 x i64> @mov_test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
; GENERIC-LABEL: mov_test37:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test37:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6526,14 +6526,14 @@ define <8 x i64> @mov_test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
define <8 x i64> @mov_test38(i8 * %addr, <8 x i64> %mask1) {
; GENERIC-LABEL: mov_test38:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test38:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqa64 (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6547,14 +6547,14 @@ define <8 x i64> @mov_test38(i8 * %addr, <8 x i64> %mask1) {
define <8 x i64> @mov_test39(i8 * %addr, <8 x i64> %mask1) {
; GENERIC-LABEL: mov_test39:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test39:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6568,14 +6568,14 @@ define <8 x i64> @mov_test39(i8 * %addr, <8 x i64> %mask1) {
define <16 x float> @mov_test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
; GENERIC-LABEL: mov_test40:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovaps (%rdi), %zmm0 {%k1} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test40:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovaps (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6589,14 +6589,14 @@ define <16 x float> @mov_test40(i8 * %addr, <16 x float> %old, <16 x float> %mas
define <16 x float> @mov_test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
; GENERIC-LABEL: mov_test41:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovups (%rdi), %zmm0 {%k1} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test41:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vcmpneq_oqps %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovups (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6610,14 +6610,14 @@ define <16 x float> @mov_test41(i8 * %addr, <16 x float> %old, <16 x float> %mas
define <16 x float> @mov_test42(i8 * %addr, <16 x float> %mask1) {
; GENERIC-LABEL: mov_test42:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovaps (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test42:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovaps (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6631,14 +6631,14 @@ define <16 x float> @mov_test42(i8 * %addr, <16 x float> %mask1) {
define <16 x float> @mov_test43(i8 * %addr, <16 x float> %mask1) {
; GENERIC-LABEL: mov_test43:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovups (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test43:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpneq_oqps %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovups (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6652,14 +6652,14 @@ define <16 x float> @mov_test43(i8 * %addr, <16 x float> %mask1) {
define <8 x double> @mov_test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
; GENERIC-LABEL: mov_test44:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovapd (%rdi), %zmm0 {%k1} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test44:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovapd (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6673,14 +6673,14 @@ define <8 x double> @mov_test44(i8 * %addr, <8 x double> %old, <8 x double> %mas
define <8 x double> @mov_test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
; GENERIC-LABEL: mov_test45:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovupd (%rdi), %zmm0 {%k1} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test45:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vcmpneq_oqpd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovupd (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6694,14 +6694,14 @@ define <8 x double> @mov_test45(i8 * %addr, <8 x double> %old, <8 x double> %mas
define <8 x double> @mov_test46(i8 * %addr, <8 x double> %mask1) {
; GENERIC-LABEL: mov_test46:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovapd (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test46:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovapd (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6715,14 +6715,14 @@ define <8 x double> @mov_test46(i8 * %addr, <8 x double> %mask1) {
define <8 x double> @mov_test47(i8 * %addr, <8 x double> %mask1) {
; GENERIC-LABEL: mov_test47:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovupd (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mov_test47:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vcmpneq_oqpd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovupd (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6736,7 +6736,7 @@ define <8 x double> @mov_test47(i8 * %addr, <8 x double> %mask1) {
define i16 @mask16(i16 %x) {
; GENERIC-LABEL: mask16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: knotw %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
@@ -6744,7 +6744,7 @@ define i16 @mask16(i16 %x) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
@@ -6758,14 +6758,14 @@ define i16 @mask16(i16 %x) {
define i32 @mask16_zext(i16 %x) {
; GENERIC-LABEL: mask16_zext:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: knotw %k0, %k0
; GENERIC-NEXT: kmovw %k0, %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask16_zext:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovw %k0, %eax # sched: [3:1.00]
@@ -6779,7 +6779,7 @@ define i32 @mask16_zext(i16 %x) {
define i8 @mask8(i8 %x) {
; GENERIC-LABEL: mask8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: knotb %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
@@ -6787,7 +6787,7 @@ define i8 @mask8(i8 %x) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
@@ -6801,14 +6801,14 @@ define i8 @mask8(i8 %x) {
define i32 @mask8_zext(i8 %x) {
; GENERIC-LABEL: mask8_zext:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: knotb %k0, %k0
; GENERIC-NEXT: kmovb %k0, %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask8_zext:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovb %k0, %eax # sched: [3:1.00]
@@ -6822,14 +6822,14 @@ define i32 @mask8_zext(i8 %x) {
define void @mask16_mem(i16* %ptr) {
; GENERIC-LABEL: mask16_mem:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovw (%rdi), %k0
; GENERIC-NEXT: knotw %k0, %k0
; GENERIC-NEXT: kmovw %k0, (%rdi)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask16_mem:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovw %k0, (%rdi) # sched: [1:1.00]
@@ -6844,14 +6844,14 @@ define void @mask16_mem(i16* %ptr) {
define void @mask8_mem(i8* %ptr) {
; GENERIC-LABEL: mask8_mem:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovb (%rdi), %k0
; GENERIC-NEXT: knotb %k0, %k0
; GENERIC-NEXT: kmovb %k0, (%rdi)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask8_mem:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -6866,7 +6866,7 @@ define void @mask8_mem(i8* %ptr) {
define i16 @mand16(i16 %x, i16 %y) {
; GENERIC-LABEL: mand16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl %edi, %eax # sched: [1:0.33]
; GENERIC-NEXT: xorl %esi, %eax # sched: [1:0.33]
; GENERIC-NEXT: andl %esi, %edi # sched: [1:0.33]
@@ -6875,7 +6875,7 @@ define i16 @mand16(i16 %x, i16 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mand16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKX-NEXT: xorl %esi, %eax # sched: [1:0.25]
; SKX-NEXT: andl %esi, %edi # sched: [1:0.25]
@@ -6893,7 +6893,7 @@ define i16 @mand16(i16 %x, i16 %y) {
define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; GENERIC-LABEL: mand16_mem:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovw (%rdi), %k0
; GENERIC-NEXT: kmovw (%rsi), %k1
; GENERIC-NEXT: kandw %k1, %k0, %k2
@@ -6904,7 +6904,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mand16_mem:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: kmovw (%rsi), %k1 # sched: [7:1.00]
; SKX-NEXT: kandw %k1, %k0, %k2 # sched: [1:1.00]
@@ -6924,7 +6924,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
define i8 @shuf_test1(i16 %v) nounwind {
; GENERIC-LABEL: shuf_test1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kshiftrw $8, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
@@ -6932,7 +6932,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: shuf_test1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kshiftrw $8, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
@@ -6946,7 +6946,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
; GENERIC-LABEL: zext_test1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; GENERIC-NEXT: kshiftlw $10, %k0, %k0
; GENERIC-NEXT: kshiftrw $15, %k0, %k0
@@ -6956,7 +6956,7 @@ define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_test1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kshiftlw $10, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kshiftrw $15, %k0, %k0 # sched: [3:1.00]
@@ -6972,7 +6972,7 @@ define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; GENERIC-LABEL: zext_test2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; GENERIC-NEXT: kshiftlw $10, %k0, %k0
; GENERIC-NEXT: kshiftrw $15, %k0, %k0
@@ -6983,7 +6983,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_test2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kshiftlw $10, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kshiftrw $15, %k0, %k0 # sched: [3:1.00]
@@ -7000,7 +7000,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; GENERIC-LABEL: zext_test3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; GENERIC-NEXT: kshiftlw $10, %k0, %k0
; GENERIC-NEXT: kshiftrw $15, %k0, %k0
@@ -7011,7 +7011,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: zext_test3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kshiftlw $10, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kshiftrw $15, %k0, %k0 # sched: [3:1.00]
@@ -7028,7 +7028,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
define i8 @conv1(<8 x i1>* %R) {
; GENERIC-LABEL: conv1:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: kxnorw %k0, %k0, %k0
; GENERIC-NEXT: kmovb %k0, (%rdi)
; GENERIC-NEXT: movb $-2, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
@@ -7036,7 +7036,7 @@ define i8 @conv1(<8 x i1>* %R) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: conv1:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kxnorw %k0, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovb %k0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: movb $-2, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
@@ -7054,7 +7054,7 @@ entry:
define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) {
; GENERIC-LABEL: test4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; GENERIC-NEXT: vpcmpgtq %ymm3, %ymm2, %k1
; GENERIC-NEXT: kandnw %k0, %k1, %k0
@@ -7063,7 +7063,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpgtq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: kandnw %k0, %k1, %k0 # sched: [1:1.00]
@@ -7079,7 +7079,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
define <2 x i64> @vcmp_test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) {
; GENERIC-LABEL: vcmp_test5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtq %xmm0, %xmm1, %k0
; GENERIC-NEXT: vpcmpgtq %xmm3, %xmm2, %k1
; GENERIC-NEXT: kandnw %k1, %k0, %k0
@@ -7087,7 +7087,7 @@ define <2 x i64> @vcmp_test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vcmp_test5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtq %xmm0, %xmm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpgtq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: kandnw %k1, %k0, %k0 # sched: [1:1.00]
@@ -7113,7 +7113,7 @@ false:
}
define void @vcmp_test7(<8 x i1> %mask) {
; GENERIC-LABEL: vcmp_test7:
-; GENERIC: # BB#0: # %allocas
+; GENERIC: # %bb.0: # %allocas
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0
; GENERIC-NEXT: movb $85, %al # sched: [1:0.33]
@@ -7123,7 +7123,7 @@ define void @vcmp_test7(<8 x i1> %mask) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vcmp_test7:
-; SKX: # BB#0: # %allocas
+; SKX: # %bb.0: # %allocas
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: movb $85, %al # sched: [1:0.25]
@@ -7145,11 +7145,11 @@ false:
}
define <16 x i8> @vcmp_test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; GENERIC-LABEL: vcmp_test8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: cmpl %esi, %edi # sched: [1:0.33]
; GENERIC-NEXT: jg .LBB386_1 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#2:
+; GENERIC-NEXT: # %bb.2:
; GENERIC-NEXT: vpcmpltud %zmm2, %zmm1, %k0
; GENERIC-NEXT: vpmovm2b %k0, %xmm0
; GENERIC-NEXT: vzeroupper
@@ -7161,11 +7161,11 @@ define <16 x i8> @vcmp_test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vcmp_test8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: cmpl %esi, %edi # sched: [1:0.25]
; SKX-NEXT: jg .LBB386_1 # sched: [1:0.50]
-; SKX-NEXT: # BB#2:
+; SKX-NEXT: # %bb.2:
; SKX-NEXT: vpcmpltud %zmm2, %zmm1, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2b %k0, %xmm0
; SKX-NEXT: vzeroupper # sched: [4:1.00]
@@ -7184,10 +7184,10 @@ define <16 x i8> @vcmp_test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
}
define <16 x i1> @vpmov_test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; GENERIC-LABEL: vpmov_test9:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cmpl %esi, %edi # sched: [1:0.33]
; GENERIC-NEXT: jg .LBB387_1 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#2:
+; GENERIC-NEXT: # %bb.2:
; GENERIC-NEXT: vpsllw $7, %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: jmp .LBB387_3 # sched: [1:1.00]
; GENERIC-NEXT: .LBB387_1:
@@ -7198,10 +7198,10 @@ define <16 x i1> @vpmov_test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vpmov_test9:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cmpl %esi, %edi # sched: [1:0.25]
; SKX-NEXT: jg .LBB387_1 # sched: [1:0.50]
-; SKX-NEXT: # BB#2:
+; SKX-NEXT: # %bb.2:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm0 # sched: [1:0.50]
; SKX-NEXT: jmp .LBB387_3 # sched: [1:0.50]
; SKX-NEXT: .LBB387_1:
@@ -7221,10 +7221,10 @@ define <16 x i1> @vpmov_test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
define <4 x i1> @vmov_test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
; GENERIC-LABEL: vmov_test11:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cmpl %esi, %edi # sched: [1:0.33]
; GENERIC-NEXT: jg .LBB389_1 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#2:
+; GENERIC-NEXT: # %bb.2:
; GENERIC-NEXT: vpslld $31, %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: jmp .LBB389_3 # sched: [1:1.00]
; GENERIC-NEXT: .LBB389_1:
@@ -7235,10 +7235,10 @@ define <4 x i1> @vmov_test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test11:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cmpl %esi, %edi # sched: [1:0.25]
; SKX-NEXT: jg .LBB389_1 # sched: [1:0.50]
-; SKX-NEXT: # BB#2:
+; SKX-NEXT: # %bb.2:
; SKX-NEXT: vpslld $31, %xmm1, %xmm0 # sched: [1:0.50]
; SKX-NEXT: jmp .LBB389_3 # sched: [1:0.50]
; SKX-NEXT: .LBB389_1:
@@ -7254,12 +7254,12 @@ define <4 x i1> @vmov_test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
define i32 @vmov_test12(i32 %x, i32 %y) {
; GENERIC-LABEL: vmov_test12:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl %edi, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test12:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
%a = bitcast i16 21845 to <16 x i1>
@@ -7270,12 +7270,12 @@ define i32 @vmov_test12(i32 %x, i32 %y) {
define i32 @vmov_test13(i32 %x, i32 %y) {
; GENERIC-LABEL: vmov_test13:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl %esi, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test13:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl %esi, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
%a = bitcast i16 21845 to <16 x i1>
@@ -7291,7 +7291,7 @@ define i32 @vmov_test13(i32 %x, i32 %y) {
define <16 x i1> @vmov_test15(i32 %x, i32 %y) {
; GENERIC-LABEL: vmov_test15:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cmpl %esi, %edi # sched: [1:0.33]
; GENERIC-NEXT: movw $21845, %ax # imm = 0x5555
; GENERIC-NEXT: # sched: [1:0.33]
@@ -7302,7 +7302,7 @@ define <16 x i1> @vmov_test15(i32 %x, i32 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test15:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cmpl %esi, %edi # sched: [1:0.25]
; SKX-NEXT: movw $21845, %ax # imm = 0x5555
; SKX-NEXT: # sched: [1:0.25]
@@ -7321,7 +7321,7 @@ define <16 x i1> @vmov_test15(i32 %x, i32 %y) {
define <64 x i8> @vmov_test16(i64 %x) {
;
; GENERIC-LABEL: vmov_test16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovq %rdi, %k0
; GENERIC-NEXT: movb $1, %al # sched: [1:0.33]
; GENERIC-NEXT: kmovd %eax, %k1
@@ -7337,7 +7337,7 @@ define <64 x i8> @vmov_test16(i64 %x) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovq %rdi, %k0 # sched: [1:1.00]
; SKX-NEXT: movb $1, %al # sched: [1:0.25]
; SKX-NEXT: kmovd %eax, %k1 # sched: [1:1.00]
@@ -7360,7 +7360,7 @@ define <64 x i8> @vmov_test16(i64 %x) {
define <64 x i8> @vmov_test17(i64 %x, i32 %y, i32 %z) {
;
; GENERIC-LABEL: vmov_test17:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovq %rdi, %k0
; GENERIC-NEXT: cmpl %edx, %esi # sched: [1:0.33]
; GENERIC-NEXT: setg %al # sched: [1:0.50]
@@ -7377,7 +7377,7 @@ define <64 x i8> @vmov_test17(i64 %x, i32 %y, i32 %z) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test17:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovq %rdi, %k0 # sched: [1:1.00]
; SKX-NEXT: cmpl %edx, %esi # sched: [1:0.25]
; SKX-NEXT: setg %al # sched: [1:0.50]
@@ -7401,7 +7401,7 @@ define <64 x i8> @vmov_test17(i64 %x, i32 %y, i32 %z) {
define <8 x i1> @vmov_test18(i8 %a, i16 %y) {
; GENERIC-LABEL: vmov_test18:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k1
; GENERIC-NEXT: kmovd %esi, %k2
; GENERIC-NEXT: kshiftlw $7, %k2, %k0
@@ -7422,7 +7422,7 @@ define <8 x i1> @vmov_test18(i8 %a, i16 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test18:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: kmovd %esi, %k2 # sched: [1:1.00]
; SKX-NEXT: kshiftlw $7, %k2, %k0 # sched: [3:1.00]
@@ -7451,14 +7451,14 @@ define <8 x i1> @vmov_test18(i8 %a, i16 %y) {
}
define <32 x i16> @vmov_test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnone {
; GENERIC-LABEL: vmov_test21:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %ymm1, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %ymm1, %k1
; GENERIC-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test21:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm1, %ymm1 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %ymm1, %k1 # sched: [1:1.00]
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -7469,14 +7469,14 @@ define <32 x i16> @vmov_test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnon
define void @vmov_test22(<4 x i1> %a, <4 x i1>* %addr) {
; GENERIC-LABEL: vmov_test22:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovb %k0, (%rdi)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test22:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -7487,14 +7487,14 @@ define void @vmov_test22(<4 x i1> %a, <4 x i1>* %addr) {
define void @vmov_test23(<2 x i1> %a, <2 x i1>* %addr) {
; GENERIC-LABEL: vmov_test23:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovb %k0, (%rdi)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vmov_test23:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -7505,7 +7505,7 @@ define void @vmov_test23(<2 x i1> %a, <2 x i1>* %addr) {
define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; GENERIC-LABEL: store_v1i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kxnorw %k0, %k0, %k1
; GENERIC-NEXT: kxorw %k1, %k0, %k0
@@ -7513,7 +7513,7 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_v1i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kxnorw %k0, %k0, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
@@ -7526,7 +7526,7 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
; GENERIC-LABEL: store_v2i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: knotw %k0, %k0
@@ -7534,7 +7534,7 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_v2i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
@@ -7547,7 +7547,7 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
; GENERIC-LABEL: store_v4i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vptestmd %xmm0, %xmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: knotw %k0, %k0
@@ -7555,7 +7555,7 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_v4i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
@@ -7568,7 +7568,7 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
; GENERIC-LABEL: store_v8i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0
; GENERIC-NEXT: knotb %k0, %k0
@@ -7576,7 +7576,7 @@ define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_v8i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
@@ -7589,7 +7589,7 @@ define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
; GENERIC-LABEL: store_v16i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k0
; GENERIC-NEXT: knotw %k0, %k0
@@ -7597,7 +7597,7 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_v16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
@@ -7623,14 +7623,14 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
define void @f1(i32 %c) {
; GENERIC-LABEL: f1:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: movzbl {{.*}}(%rip), %edi # sched: [5:0.50]
; GENERIC-NEXT: xorl $1, %edi # sched: [1:0.33]
; GENERIC-NEXT: movb %dil, {{.*}}(%rip) # sched: [5:1.00]
; GENERIC-NEXT: jmp f2 # TAILCALL
;
; SKX-LABEL: f1:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: movzbl {{.*}}(%rip), %edi # sched: [5:0.50]
; SKX-NEXT: xorl $1, %edi # sched: [1:0.25]
; SKX-NEXT: movb %dil, {{.*}}(%rip) # sched: [1:1.00]
@@ -7648,13 +7648,13 @@ declare void @f2(i32) #1
define void @store_i16_i1(i16 %x, i1 *%y) {
; GENERIC-LABEL: store_i16_i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andl $1, %edi # sched: [1:0.33]
; GENERIC-NEXT: movb %dil, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_i16_i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: andl $1, %edi # sched: [1:0.25]
; SKX-NEXT: movb %dil, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7665,13 +7665,13 @@ define void @store_i16_i1(i16 %x, i1 *%y) {
define void @store_i8_i1(i8 %x, i1 *%y) {
; GENERIC-LABEL: store_i8_i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andl $1, %edi # sched: [1:0.33]
; GENERIC-NEXT: movb %dil, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_i8_i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: andl $1, %edi # sched: [1:0.25]
; SKX-NEXT: movb %dil, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7682,7 +7682,7 @@ define void @store_i8_i1(i8 %x, i1 *%y) {
define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
; GENERIC-LABEL: test_build_vec_v32i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl $1497715861, %eax # imm = 0x59455495
; GENERIC-NEXT: # sched: [1:0.33]
; GENERIC-NEXT: kmovd %eax, %k1
@@ -7690,7 +7690,7 @@ define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_build_vec_v32i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl $1497715861, %eax # imm = 0x59455495
; SKX-NEXT: # sched: [1:0.25]
; SKX-NEXT: kmovd %eax, %k1 # sched: [1:1.00]
@@ -7702,12 +7702,12 @@ define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
define <64 x i8> @test_build_vec_v64i1(<64 x i8> %x) {
; GENERIC-LABEL: test_build_vec_v64i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 = zero,zero,zmm0[2],zero,zero,zero,zmm0[6],zero,zmm0[8],zero,zmm0[10],zero,zmm0[12],zero,zero,zmm0[15],zero,zero,zmm0[18],zero,zmm0[20],zero,zmm0[22],zero,zmm0[24],zero,zero,zmm0[27],zero,zero,zmm0[30],zero,zmm0[32],zero,zmm0[34],zero,zero,zero,zmm0[38],zero,zmm0[40],zero,zero,zmm0[43,44],zero,zmm0[46],zero,zmm0[48],zero,zmm0[50],zero,zero,zero,zmm0[54],zero,zmm0[56],zero,zero,zmm0[59,60],zero,zmm0[62],zero sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_build_vec_v64i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb {{.*#+}} zmm0 = zero,zero,zmm0[2],zero,zero,zero,zmm0[6],zero,zmm0[8],zero,zmm0[10],zero,zmm0[12],zero,zero,zmm0[15],zero,zero,zmm0[18],zero,zmm0[20],zero,zmm0[22],zero,zmm0[24],zero,zero,zmm0[27],zero,zero,zmm0[30],zero,zmm0[32],zero,zmm0[34],zero,zero,zero,zmm0[38],zero,zmm0[40],zero,zero,zmm0[43,44],zero,zmm0[46],zero,zmm0[48],zero,zmm0[50],zero,zero,zero,zmm0[54],zero,zmm0[56],zero,zero,zmm0[59,60],zero,zmm0[62],zero sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%ret = select <64 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false>, <64 x i8> %x, <64 x i8> zeroinitializer
@@ -7716,14 +7716,14 @@ define <64 x i8> @test_build_vec_v64i1(<64 x i8> %x) {
define void @ktest_1(<8 x double> %in, double * %base) {
; GENERIC-LABEL: ktest_1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovupd (%rdi), %zmm1 # sched: [4:0.50]
; GENERIC-NEXT: vcmpltpd %zmm0, %zmm1, %k1 # sched: [3:1.00]
; GENERIC-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} # sched: [4:0.50]
; GENERIC-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: ktestb %k0, %k0
; GENERIC-NEXT: je .LBB410_2 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#1: # %L1
+; GENERIC-NEXT: # %bb.1: # %L1
; GENERIC-NEXT: vmovapd %zmm0, (%rdi)
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
@@ -7733,14 +7733,14 @@ define void @ktest_1(<8 x double> %in, double * %base) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ktest_1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovupd (%rdi), %zmm1 # sched: [8:0.50]
; SKX-NEXT: vcmpltpd %zmm0, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} # sched: [8:0.50]
; SKX-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} # sched: [3:1.00]
; SKX-NEXT: ktestb %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: je .LBB410_2 # sched: [1:0.50]
-; SKX-NEXT: # BB#1: # %L1
+; SKX-NEXT: # %bb.1: # %L1
; SKX-NEXT: vmovapd %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7778,7 +7778,7 @@ End:
define void @ktest_2(<32 x float> %in, float * %base) {
;
; GENERIC-LABEL: ktest_2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovups (%rdi), %zmm2 # sched: [4:0.50]
; GENERIC-NEXT: vmovups 64(%rdi), %zmm3 # sched: [4:0.50]
; GENERIC-NEXT: vcmpltps %zmm0, %zmm2, %k1 # sched: [3:1.00]
@@ -7792,7 +7792,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; GENERIC-NEXT: kord %k1, %k0, %k0
; GENERIC-NEXT: ktestd %k0, %k0
; GENERIC-NEXT: je .LBB411_2 # sched: [1:1.00]
-; GENERIC-NEXT: # BB#1: # %L1
+; GENERIC-NEXT: # %bb.1: # %L1
; GENERIC-NEXT: vmovaps %zmm0, (%rdi)
; GENERIC-NEXT: vmovaps %zmm1, 64(%rdi)
; GENERIC-NEXT: vzeroupper
@@ -7804,7 +7804,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: ktest_2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %zmm2 # sched: [8:0.50]
; SKX-NEXT: vmovups 64(%rdi), %zmm3 # sched: [8:0.50]
; SKX-NEXT: vcmpltps %zmm0, %zmm2, %k1 # sched: [3:1.00]
@@ -7818,7 +7818,7 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; SKX-NEXT: kord %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: ktestd %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: je .LBB411_2 # sched: [1:0.50]
-; SKX-NEXT: # BB#1: # %L1
+; SKX-NEXT: # %bb.1: # %L1
; SKX-NEXT: vmovaps %zmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: vmovaps %zmm1, 64(%rdi) # sched: [1:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
@@ -7857,13 +7857,13 @@ End:
define <8 x i64> @load_8i1(<8 x i1>* %a) {
; GENERIC-LABEL: load_8i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovb (%rdi), %k0
; GENERIC-NEXT: vpmovm2q %k0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: load_8i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7874,13 +7874,13 @@ define <8 x i64> @load_8i1(<8 x i1>* %a) {
define <16 x i32> @load_16i1(<16 x i1>* %a) {
; GENERIC-LABEL: load_16i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovw (%rdi), %k0
; GENERIC-NEXT: vpmovm2d %k0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: load_16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7891,13 +7891,13 @@ define <16 x i32> @load_16i1(<16 x i1>* %a) {
define <2 x i16> @load_2i1(<2 x i1>* %a) {
; GENERIC-LABEL: load_2i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovb (%rdi), %k0
; GENERIC-NEXT: vpmovm2q %k0, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: load_2i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7908,13 +7908,13 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
define <4 x i16> @load_4i1(<4 x i1>* %a) {
; GENERIC-LABEL: load_4i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovb (%rdi), %k0
; GENERIC-NEXT: vpmovm2d %k0, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: load_4i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7925,13 +7925,13 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
define <32 x i16> @load_32i1(<32 x i1>* %a) {
; GENERIC-LABEL: load_32i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd (%rdi), %k0
; GENERIC-NEXT: vpmovm2w %k0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: load_32i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7942,13 +7942,13 @@ define <32 x i16> @load_32i1(<32 x i1>* %a) {
define <64 x i8> @load_64i1(<64 x i1>* %a) {
; GENERIC-LABEL: load_64i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovq (%rdi), %k0
; GENERIC-NEXT: vpmovm2b %k0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: load_64i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovq (%rdi), %k0 # sched: [7:1.00]
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7959,14 +7959,14 @@ define <64 x i8> @load_64i1(<64 x i1>* %a) {
define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
; GENERIC-LABEL: store_8i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0
; GENERIC-NEXT: kmovb %k0, (%rdi)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_8i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -7977,14 +7977,14 @@ define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
; GENERIC-LABEL: store_8i1_1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0
; GENERIC-NEXT: kmovb %k0, (%rdi)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_8i1_1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -7996,14 +7996,14 @@ define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
; GENERIC-LABEL: store_16i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k0
; GENERIC-NEXT: kmovw %k0, (%rdi)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovw %k0, (%rdi) # sched: [1:1.00]
@@ -8014,7 +8014,7 @@ define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
; GENERIC-LABEL: store_32i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %ymm0, %k0
; GENERIC-NEXT: kmovd %k0, (%rdi)
@@ -8022,7 +8022,7 @@ define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_32i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %ymm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, (%rdi) # sched: [1:1.00]
@@ -8034,7 +8034,7 @@ define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
; GENERIC-LABEL: store_32i1_1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $15, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovw2m %zmm0, %k0
; GENERIC-NEXT: kmovd %k0, (%rdi)
@@ -8042,7 +8042,7 @@ define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_32i1_1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %zmm0, %zmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %zmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, (%rdi) # sched: [1:1.00]
@@ -8057,7 +8057,7 @@ define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
;
; GENERIC-LABEL: store_64i1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw $7, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovb2m %zmm0, %k0
; GENERIC-NEXT: kmovq %k0, (%rdi)
@@ -8065,7 +8065,7 @@ define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: store_64i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %zmm0, %zmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %zmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovq %k0, (%rdi) # sched: [1:1.00]
@@ -8077,7 +8077,7 @@ define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
; GENERIC-LABEL: test_bitcast_v8i1_zext:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; GENERIC-NEXT: kmovb %k0, %eax
@@ -8086,7 +8086,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_bitcast_v8i1_zext:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovb %k0, %eax # sched: [3:1.00]
@@ -8103,7 +8103,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
; GENERIC-LABEL: test_bitcast_v16i1_zext:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; GENERIC-NEXT: kmovw %k0, %eax
@@ -8112,7 +8112,7 @@ define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_bitcast_v16i1_zext:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovw %k0, %eax # sched: [3:1.00]
@@ -8128,7 +8128,7 @@ define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
define i16 @test_v16i1_add(i16 %x, i16 %y) {
; GENERIC-LABEL: test_v16i1_add:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kxorw %k1, %k0, %k0
@@ -8137,7 +8137,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_add:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
@@ -8153,7 +8153,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; GENERIC-LABEL: test_v16i1_sub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kxorw %k1, %k0, %k0
@@ -8162,7 +8162,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_sub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
@@ -8178,7 +8178,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; GENERIC-LABEL: test_v16i1_mul:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kandw %k1, %k0, %k0
@@ -8187,7 +8187,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_mul:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
@@ -8203,7 +8203,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
define i8 @test_v8i1_add(i8 %x, i8 %y) {
; GENERIC-LABEL: test_v8i1_add:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kxorb %k1, %k0, %k0
@@ -8212,7 +8212,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_add:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
@@ -8228,7 +8228,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; GENERIC-LABEL: test_v8i1_sub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kxorb %k1, %k0, %k0
@@ -8237,7 +8237,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_sub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
@@ -8253,7 +8253,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; GENERIC-LABEL: test_v8i1_mul:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kandb %k1, %k0, %k0
@@ -8262,7 +8262,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_mul:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
@@ -8278,12 +8278,12 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
define <16 x i32> @_inreg16xi32(i32 %a) {
; GENERIC-LABEL: _inreg16xi32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastd %edi, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _inreg16xi32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastd %edi, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = insertelement <16 x i32> undef, i32 %a, i32 0
@@ -8293,12 +8293,12 @@ define <16 x i32> @_inreg16xi32(i32 %a) {
define <8 x i64> @_inreg8xi64(i64 %a) {
; GENERIC-LABEL: _inreg8xi64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastq %rdi, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _inreg8xi64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastq %rdi, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = insertelement <8 x i64> undef, i64 %a, i32 0
@@ -8308,12 +8308,12 @@ define <8 x i64> @_inreg8xi64(i64 %a) {
define <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
; GENERIC-LABEL: _ss16xfloat_v4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _ss16xfloat_v4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> zeroinitializer
@@ -8322,12 +8322,12 @@ define <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
define <16 x float> @_inreg16xfloat(float %a) {
; GENERIC-LABEL: _inreg16xfloat:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _inreg16xfloat:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = insertelement <16 x float> undef, float %a, i32 0
@@ -8337,7 +8337,7 @@ define <16 x float> @_inreg16xfloat(float %a) {
define <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
; GENERIC-LABEL: _ss16xfloat_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
@@ -8345,7 +8345,7 @@ define <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %m
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _ss16xfloat_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vbroadcastss %xmm0, %zmm1 {%k1} # sched: [3:1.00]
@@ -8360,14 +8360,14 @@ define <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %m
define <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
; GENERIC-LABEL: _ss16xfloat_maskz:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _ss16xfloat_maskz:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
@@ -8381,12 +8381,12 @@ define <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
; GENERIC-LABEL: _ss16xfloat_load:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss (%rdi), %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _ss16xfloat_load:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load float, float* %a.ptr
@@ -8397,14 +8397,14 @@ define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
; GENERIC-LABEL: _ss16xfloat_mask_load:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vbroadcastss (%rdi), %zmm0 {%k1}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _ss16xfloat_mask_load:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -8419,14 +8419,14 @@ define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16
define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
; GENERIC-LABEL: _ss16xfloat_maskz_load:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
; GENERIC-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _ss16xfloat_maskz_load:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -8441,12 +8441,12 @@ define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1)
define <8 x double> @_inreg8xdouble(double %a) {
; GENERIC-LABEL: _inreg8xdouble:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastsd %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _inreg8xdouble:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = insertelement <8 x double> undef, double %a, i32 0
@@ -8456,7 +8456,7 @@ define <8 x double> @_inreg8xdouble(double %a) {
define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
; GENERIC-LABEL: _sd8xdouble_mask:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -8464,7 +8464,7 @@ define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %m
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _sd8xdouble_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1} # sched: [3:1.00]
@@ -8479,14 +8479,14 @@ define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %m
define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
; GENERIC-LABEL: _sd8xdouble_maskz:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _sd8xdouble_maskz:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
@@ -8500,12 +8500,12 @@ define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
; GENERIC-LABEL: _sd8xdouble_load:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastsd (%rdi), %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _sd8xdouble_load:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastsd (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
%a = load double, double* %a.ptr
@@ -8516,14 +8516,14 @@ define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
; GENERIC-LABEL: _sd8xdouble_mask_load:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _sd8xdouble_mask_load:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -8538,14 +8538,14 @@ define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8
define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
; GENERIC-LABEL: _sd8xdouble_maskz_load:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpneqd %ymm1, %ymm0, %k1
; GENERIC-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _sd8xdouble_maskz_load:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpneqd %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -8560,12 +8560,12 @@ define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1)
define <16 x i32> @_xmm16xi32(<16 x i32> %a) {
; GENERIC-LABEL: _xmm16xi32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _xmm16xi32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> zeroinitializer
@@ -8574,12 +8574,12 @@ define <16 x i32> @_xmm16xi32(<16 x i32> %a) {
define <16 x float> @_xmm16xfloat(<16 x float> %a) {
; GENERIC-LABEL: _xmm16xfloat:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _xmm16xfloat:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> zeroinitializer
@@ -8588,7 +8588,7 @@ define <16 x float> @_xmm16xfloat(<16 x float> %a) {
define <16 x i32> @test_vbroadcast() {
; GENERIC-LABEL: test_vbroadcast:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vxorps %xmm0, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vcmpunordps %zmm0, %zmm0, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2d %k0, %zmm0
@@ -8597,7 +8597,7 @@ define <16 x i32> @test_vbroadcast() {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_vbroadcast:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vxorps %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vcmpunordps %zmm0, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %zmm0
@@ -8616,12 +8616,12 @@ entry:
; IR generated will produce broadcasts at the end.
define <8 x double> @test_set1_pd(double %d) #2 {
; GENERIC-LABEL: test_set1_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vbroadcastsd %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_set1_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -8638,12 +8638,12 @@ entry:
define <8 x i64> @test_set1_epi64(i64 %d) #2 {
; GENERIC-LABEL: test_set1_epi64:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpbroadcastq %rdi, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_set1_epi64:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpbroadcastq %rdi, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -8660,12 +8660,12 @@ entry:
define <16 x float> @test_set1_ps(float %f) #2 {
; GENERIC-LABEL: test_set1_ps:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_set1_ps:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -8690,12 +8690,12 @@ entry:
define <16 x i32> @test_set1_epi32(i32 %f) #2 {
; GENERIC-LABEL: test_set1_epi32:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vpbroadcastd %edi, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_set1_epi32:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vpbroadcastd %edi, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -8722,12 +8722,12 @@ entry:
; Verify that the IR generated will produce the broadcast at the end.
define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a) {
; GENERIC-LABEL: test_mm512_broadcastsd_pd:
-; GENERIC: # BB#0: # %entry
+; GENERIC: # %bb.0: # %entry
; GENERIC-NEXT: vbroadcastsd %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_mm512_broadcastsd_pd:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
entry:
@@ -8745,12 +8745,12 @@ entry:
define <16 x float> @suff_test1(<8 x float>%a) {
; GENERIC-LABEL: suff_test1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: suff_test1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x float> %a, <8 x float> undef, <16 x i32> zeroinitializer
@@ -8759,12 +8759,12 @@ define <16 x float> @suff_test1(<8 x float>%a) {
define <8 x double> @suff_test2(<4 x double>%a) {
; GENERIC-LABEL: suff_test2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastsd %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: suff_test2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %a, <4 x double> undef, <8 x i32> zeroinitializer
@@ -8773,12 +8773,12 @@ define <8 x double> @suff_test2(<4 x double>%a) {
define <64 x i8> @_invec32xi8(<32 x i8>%a) {
; GENERIC-LABEL: _invec32xi8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastb %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _invec32xi8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastb %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <32 x i8> %a, <32 x i8> undef, <64 x i32> zeroinitializer
@@ -8787,12 +8787,12 @@ define <64 x i8> @_invec32xi8(<32 x i8>%a) {
define <32 x i16> @_invec16xi16(<16 x i16>%a) {
; GENERIC-LABEL: _invec16xi16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastw %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _invec16xi16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastw %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i16> %a, <16 x i16> undef, <32 x i32> zeroinitializer
@@ -8801,12 +8801,12 @@ define <32 x i16> @_invec16xi16(<16 x i16>%a) {
define <16 x i32> @_invec8xi32(<8 x i32>%a) {
; GENERIC-LABEL: _invec8xi32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _invec8xi32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i32> %a, <8 x i32> undef, <16 x i32> zeroinitializer
@@ -8815,12 +8815,12 @@ define <16 x i32> @_invec8xi32(<8 x i32>%a) {
define <8 x i64> @_invec4xi64(<4 x i64>%a) {
; GENERIC-LABEL: _invec4xi64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastsd %xmm0, %zmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: _invec4xi64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x i64> %a, <4 x i64> undef, <8 x i32> zeroinitializer
@@ -8830,7 +8830,7 @@ define <8 x i64> @_invec4xi64(<4 x i64>%a) {
declare void @func_f32(float)
define <16 x float> @broadcast_ss_spill(float %x) {
; GENERIC-LABEL: broadcast_ss_spill:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: subq $24, %rsp # sched: [1:0.33]
; GENERIC-NEXT: .cfi_def_cfa_offset 32
; GENERIC-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
@@ -8842,7 +8842,7 @@ define <16 x float> @broadcast_ss_spill(float %x) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: broadcast_ss_spill:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: subq $24, %rsp # sched: [1:0.25]
; SKX-NEXT: .cfi_def_cfa_offset 32
; SKX-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
@@ -8863,7 +8863,7 @@ define <16 x float> @broadcast_ss_spill(float %x) {
declare void @func_f64(double)
define <8 x double> @broadcast_sd_spill(double %x) {
; GENERIC-LABEL: broadcast_sd_spill:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: subq $24, %rsp # sched: [1:0.33]
; GENERIC-NEXT: .cfi_def_cfa_offset 32
; GENERIC-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
@@ -8875,7 +8875,7 @@ define <8 x double> @broadcast_sd_spill(double %x) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: broadcast_sd_spill:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: subq $24, %rsp # sched: [1:0.25]
; SKX-NEXT: .cfi_def_cfa_offset 32
; SKX-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
diff --git a/test/CodeGen/X86/avx512-select.ll b/test/CodeGen/X86/avx512-select.ll
index b73e307c868..c05601a263d 100644
--- a/test/CodeGen/X86/avx512-select.ll
+++ b/test/CodeGen/X86/avx512-select.ll
@@ -4,22 +4,22 @@
define <16 x i32> @select00(i32 %a, <16 x i32> %b) nounwind {
; X86-LABEL: select00:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: cmpl $255, {{[0-9]+}}(%esp)
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-NEXT: je .LBB0_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: vmovdqa64 %zmm0, %zmm1
; X86-NEXT: .LBB0_2:
; X86-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; X86-NEXT: retl
;
; X64-LABEL: select00:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: cmpl $255, %edi
; X64-NEXT: je .LBB0_2
-; X64-NEXT: # BB#1:
+; X64-NEXT: # %bb.1:
; X64-NEXT: vmovdqa64 %zmm0, %zmm1
; X64-NEXT: .LBB0_2:
; X64-NEXT: vpxorq %zmm1, %zmm0, %zmm0
@@ -32,22 +32,22 @@ define <16 x i32> @select00(i32 %a, <16 x i32> %b) nounwind {
define <8 x i64> @select01(i32 %a, <8 x i64> %b) nounwind {
; X86-LABEL: select01:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: cmpl $255, {{[0-9]+}}(%esp)
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-NEXT: je .LBB1_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: vmovdqa64 %zmm0, %zmm1
; X86-NEXT: .LBB1_2:
; X86-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; X86-NEXT: retl
;
; X64-LABEL: select01:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: cmpl $255, %edi
; X64-NEXT: je .LBB1_2
-; X64-NEXT: # BB#1:
+; X64-NEXT: # %bb.1:
; X64-NEXT: vmovdqa64 %zmm0, %zmm1
; X64-NEXT: .LBB1_2:
; X64-NEXT: vpxorq %zmm1, %zmm0, %zmm0
@@ -60,7 +60,7 @@ define <8 x i64> @select01(i32 %a, <8 x i64> %b) nounwind {
define float @select02(float %a, float %b, float %c, float %eps) {
; X86-LABEL: select02:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: vucomiss {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
@@ -70,7 +70,7 @@ define float @select02(float %a, float %b, float %c, float %eps) {
; X86-NEXT: retl
;
; X64-LABEL: select02:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcmpless %xmm0, %xmm3, %k1
; X64-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
; X64-NEXT: vmovaps %xmm1, %xmm0
@@ -82,7 +82,7 @@ define float @select02(float %a, float %b, float %c, float %eps) {
define double @select03(double %a, double %b, double %c, double %eps) {
; X86-LABEL: select03:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: vucomisd {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
@@ -92,7 +92,7 @@ define double @select03(double %a, double %b, double %c, double %eps) {
; X86-NEXT: retl
;
; X64-LABEL: select03:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcmplesd %xmm0, %xmm3, %k1
; X64-NEXT: vmovsd %xmm2, %xmm0, %xmm1 {%k1}
; X64-NEXT: vmovapd %xmm1, %xmm0
@@ -104,7 +104,7 @@ define double @select03(double %a, double %b, double %c, double %eps) {
define <16 x double> @select04(<16 x double> %a, <16 x double> %b) {
; X86-LABEL: select04:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %ebp, -8
@@ -118,7 +118,7 @@ define <16 x double> @select04(<16 x double> %a, <16 x double> %b) {
; X86-NEXT: retl
;
; X64-LABEL: select04:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %zmm3, %zmm1
; X64-NEXT: retq
%sel = select <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x double> %a, <16 x double> %b
@@ -127,13 +127,13 @@ define <16 x double> @select04(<16 x double> %a, <16 x double> %b) {
define i8 @select05(i8 %a.0, i8 %m) {
; X86-LABEL: select05:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: orb {{[0-9]+}}(%esp), %al
; X86-NEXT: retl
;
; X64-LABEL: select05:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orl %esi, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -146,7 +146,7 @@ define i8 @select05(i8 %a.0, i8 %m) {
define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; X86-LABEL: select05_mem:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzbl (%ecx), %ecx
@@ -159,7 +159,7 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; X86-NEXT: retl
;
; X64-LABEL: select05_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl (%rsi), %eax
; X64-NEXT: kmovw %eax, %k0
; X64-NEXT: movzbl (%rdi), %eax
@@ -177,13 +177,13 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
define i8 @select06(i8 %a.0, i8 %m) {
; X86-LABEL: select06:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: andb {{[0-9]+}}(%esp), %al
; X86-NEXT: retl
;
; X64-LABEL: select06:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl %esi, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -196,7 +196,7 @@ define i8 @select06(i8 %a.0, i8 %m) {
define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; X86-LABEL: select06_mem:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzbl (%ecx), %ecx
@@ -209,7 +209,7 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; X86-NEXT: retl
;
; X64-LABEL: select06_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl (%rsi), %eax
; X64-NEXT: kmovw %eax, %k0
; X64-NEXT: movzbl (%rdi), %eax
@@ -226,7 +226,7 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
}
define i8 @select07(i8 %a.0, i8 %b.0, i8 %m) {
; X86-LABEL: select07:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k0
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
@@ -241,7 +241,7 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8 %m) {
; X86-NEXT: retl
;
; X64-LABEL: select07:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edx, %k0
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: kmovw %esi, %k2
@@ -261,13 +261,13 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8 %m) {
define i64 @pr30249() {
; X86-LABEL: pr30249:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $2, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: pr30249:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $2, %eax
; X64-NEXT: retq
%v = select i1 undef , i64 1, i64 2
@@ -276,7 +276,7 @@ define i64 @pr30249() {
define double @pr30561_f64(double %b, double %a, i1 %c) {
; X86-LABEL: pr30561_f64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
@@ -285,7 +285,7 @@ define double @pr30561_f64(double %b, double %a, i1 %c) {
; X86-NEXT: retl
;
; X64-LABEL: pr30561_f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; X64-NEXT: retq
@@ -295,7 +295,7 @@ define double @pr30561_f64(double %b, double %a, i1 %c) {
define float @pr30561_f32(float %b, float %a, i1 %c) {
; X86-LABEL: pr30561_f32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
@@ -304,7 +304,7 @@ define float @pr30561_f32(float %b, float %a, i1 %c) {
; X86-NEXT: retl
;
; X64-LABEL: pr30561_f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; X64-NEXT: retq
@@ -314,7 +314,7 @@ define float @pr30561_f32(float %b, float %a, i1 %c) {
define <16 x i16> @pr31515(<16 x i1> %a, <16 x i1> %b, <16 x i16> %c) nounwind {
; X86-LABEL: pr31515:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vpmovsxbd %xmm1, %zmm1
; X86-NEXT: vpslld $31, %zmm1, %zmm1
; X86-NEXT: vpmovsxbd %xmm0, %zmm0
@@ -327,7 +327,7 @@ define <16 x i16> @pr31515(<16 x i1> %a, <16 x i1> %b, <16 x i16> %c) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: pr31515:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbd %xmm1, %zmm1
; X64-NEXT: vpslld $31, %zmm1, %zmm1
; X64-NEXT: vpmovsxbd %xmm0, %zmm0
diff --git a/test/CodeGen/X86/avx512-shift.ll b/test/CodeGen/X86/avx512-shift.ll
index c18efd922c3..cbc601b71da 100644
--- a/test/CodeGen/X86/avx512-shift.ll
+++ b/test/CodeGen/X86/avx512-shift.ll
@@ -4,7 +4,7 @@
define <16 x i32> @shift_16_i32(<16 x i32> %a) {
; CHECK-LABEL: shift_16_i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrld $1, %zmm0, %zmm0
; CHECK-NEXT: vpslld $12, %zmm0, %zmm0
; CHECK-NEXT: vpsrad $12, %zmm0, %zmm0
@@ -17,7 +17,7 @@ define <16 x i32> @shift_16_i32(<16 x i32> %a) {
define <8 x i64> @shift_8_i64(<8 x i64> %a) {
; CHECK-LABEL: shift_8_i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlq $1, %zmm0, %zmm0
; CHECK-NEXT: vpsllq $12, %zmm0, %zmm0
; CHECK-NEXT: vpsraq $12, %zmm0, %zmm0
@@ -30,7 +30,7 @@ define <8 x i64> @shift_8_i64(<8 x i64> %a) {
define <4 x i64> @shift_4_i64(<4 x i64> %a) {
; KNL-LABEL: shift_4_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsrlq $1, %ymm0, %ymm0
; KNL-NEXT: vpsllq $12, %ymm0, %ymm0
; KNL-NEXT: vpsraq $12, %zmm0, %zmm0
@@ -38,7 +38,7 @@ define <4 x i64> @shift_4_i64(<4 x i64> %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: shift_4_i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlq $1, %ymm0, %ymm0
; SKX-NEXT: vpsllq $12, %ymm0, %ymm0
; SKX-NEXT: vpsraq $12, %ymm0, %ymm0
@@ -51,7 +51,7 @@ define <4 x i64> @shift_4_i64(<4 x i64> %a) {
define <8 x i64> @variable_shl4(<8 x i64> %x, <8 x i64> %y) {
; CHECK-LABEL: variable_shl4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%k = shl <8 x i64> %x, %y
@@ -60,7 +60,7 @@ define <8 x i64> @variable_shl4(<8 x i64> %x, <8 x i64> %y) {
define <16 x i32> @variable_shl5(<16 x i32> %x, <16 x i32> %y) {
; CHECK-LABEL: variable_shl5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%k = shl <16 x i32> %x, %y
@@ -69,7 +69,7 @@ define <16 x i32> @variable_shl5(<16 x i32> %x, <16 x i32> %y) {
define <16 x i32> @variable_srl0(<16 x i32> %x, <16 x i32> %y) {
; CHECK-LABEL: variable_srl0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%k = lshr <16 x i32> %x, %y
@@ -78,7 +78,7 @@ define <16 x i32> @variable_srl0(<16 x i32> %x, <16 x i32> %y) {
define <8 x i64> @variable_srl2(<8 x i64> %x, <8 x i64> %y) {
; CHECK-LABEL: variable_srl2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%k = lshr <8 x i64> %x, %y
@@ -87,7 +87,7 @@ define <8 x i64> @variable_srl2(<8 x i64> %x, <8 x i64> %y) {
define <16 x i32> @variable_sra1(<16 x i32> %x, <16 x i32> %y) {
; CHECK-LABEL: variable_sra1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%k = ashr <16 x i32> %x, %y
@@ -96,7 +96,7 @@ define <16 x i32> @variable_sra1(<16 x i32> %x, <16 x i32> %y) {
define <8 x i64> @variable_sra2(<8 x i64> %x, <8 x i64> %y) {
; CHECK-LABEL: variable_sra2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%k = ashr <8 x i64> %x, %y
@@ -105,7 +105,7 @@ define <8 x i64> @variable_sra2(<8 x i64> %x, <8 x i64> %y) {
define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
; KNL-LABEL: variable_sra3:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpsravq %zmm1, %zmm0, %zmm0
@@ -113,7 +113,7 @@ define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsravq %ymm1, %ymm0, %ymm0
; SKX-NEXT: retq
%k = ashr <4 x i64> %x, %y
@@ -122,7 +122,7 @@ define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
define <8 x i16> @variable_sra4(<8 x i16> %x, <8 x i16> %y) {
; KNL-LABEL: variable_sra4:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; KNL-NEXT: vpmovsxwd %xmm0, %ymm0
; KNL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -131,7 +131,7 @@ define <8 x i16> @variable_sra4(<8 x i16> %x, <8 x i16> %y) {
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsravw %xmm1, %xmm0, %xmm0
; SKX-NEXT: retq
%k = ashr <8 x i16> %x, %y
@@ -140,7 +140,7 @@ define <8 x i16> @variable_sra4(<8 x i16> %x, <8 x i16> %y) {
define <16 x i32> @variable_sra01_load(<16 x i32> %x, <16 x i32>* %y) {
; CHECK-LABEL: variable_sra01_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsravd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%y1 = load <16 x i32>, <16 x i32>* %y
@@ -150,7 +150,7 @@ define <16 x i32> @variable_sra01_load(<16 x i32> %x, <16 x i32>* %y) {
define <16 x i32> @variable_shl1_load(<16 x i32> %x, <16 x i32>* %y) {
; CHECK-LABEL: variable_shl1_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%y1 = load <16 x i32>, <16 x i32>* %y
@@ -160,7 +160,7 @@ define <16 x i32> @variable_shl1_load(<16 x i32> %x, <16 x i32>* %y) {
define <16 x i32> @variable_srl0_load(<16 x i32> %x, <16 x i32>* %y) {
; CHECK-LABEL: variable_srl0_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%y1 = load <16 x i32>, <16 x i32>* %y
@@ -170,7 +170,7 @@ define <16 x i32> @variable_srl0_load(<16 x i32> %x, <16 x i32>* %y) {
define <8 x i64> @variable_srl3_load(<8 x i64> %x, <8 x i64>* %y) {
; CHECK-LABEL: variable_srl3_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%y1 = load <8 x i64>, <8 x i64>* %y
diff --git a/test/CodeGen/X86/avx512-shuffle-schedule.ll b/test/CodeGen/X86/avx512-shuffle-schedule.ll
index 7bca8d32d84..db7b9cb2502 100755
--- a/test/CodeGen/X86/avx512-shuffle-schedule.ll
+++ b/test/CodeGen/X86/avx512-shuffle-schedule.ll
@@ -6,13 +6,13 @@
define <16 x i16> @test_16xi16_perm_mask0(<16 x i16> %vec) {
; GENERIC-LABEL: test_16xi16_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:0.50]
; GENERIC-NEXT: vpermw %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:0.50]
; SKX-NEXT: vpermw %ymm0, %ymm1, %ymm0 # sched: [6:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -21,7 +21,7 @@ define <16 x i16> @test_16xi16_perm_mask0(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm3 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm4, %ymm2, %k1
@@ -30,7 +30,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm3 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -45,7 +45,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %ve
define <16 x i16> @test_masked_z_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -53,7 +53,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -66,7 +66,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %
}
define <16 x i16> @test_masked_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm3 = [4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm4, %ymm2, %k1
@@ -75,7 +75,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -90,7 +90,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %ve
define <16 x i16> @test_masked_z_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -98,7 +98,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -111,7 +111,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %
}
define <16 x i16> @test_masked_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm3 = [11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm4, %ymm2, %k1
@@ -120,7 +120,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm3 = [11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -135,7 +135,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %ve
define <16 x i16> @test_masked_z_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -143,7 +143,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -156,13 +156,13 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %
}
define <16 x i16> @test_16xi16_perm_mask3(<16 x i16> %vec) {
; GENERIC-LABEL: test_16xi16_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:0.50]
; GENERIC-NEXT: vpermw %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:0.50]
; SKX-NEXT: vpermw %ymm0, %ymm1, %ymm0 # sched: [6:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -171,7 +171,7 @@ define <16 x i16> @test_16xi16_perm_mask3(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm3 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm4, %ymm2, %k1
@@ -180,7 +180,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm3 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -195,7 +195,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %ve
define <16 x i16> @test_masked_z_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -203,7 +203,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -216,13 +216,13 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %
}
define <16 x i16> @test_16xi16_perm_mem_mask0(<16 x i16>* %vp) {
; GENERIC-LABEL: test_16xi16_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm0 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] sched: [7:0.50]
; GENERIC-NEXT: vpermw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm0 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] sched: [7:0.50]
; SKX-NEXT: vpermw (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -232,7 +232,7 @@ define <16 x i16> @test_16xi16_perm_mem_mask0(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -240,7 +240,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -255,7 +255,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm0, %k1
@@ -263,7 +263,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -278,7 +278,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i1
define <16 x i16> @test_masked_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -286,7 +286,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -301,7 +301,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm0, %k1
@@ -309,7 +309,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -324,7 +324,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i1
define <16 x i16> @test_masked_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -332,7 +332,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -347,7 +347,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm0, %k1
@@ -355,7 +355,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -370,13 +370,13 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i1
define <16 x i16> @test_16xi16_perm_mem_mask3(<16 x i16>* %vp) {
; GENERIC-LABEL: test_16xi16_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm0 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] sched: [7:0.50]
; GENERIC-NEXT: vpermw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm0 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] sched: [7:0.50]
; SKX-NEXT: vpermw (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -386,7 +386,7 @@ define <16 x i16> @test_16xi16_perm_mem_mask3(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -394,7 +394,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -409,7 +409,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm0, %k1
@@ -417,7 +417,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -432,13 +432,13 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i1
define <32 x i16> @test_32xi16_perm_mask0(<32 x i16> %vec) {
; GENERIC-LABEL: test_32xi16_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] sched: [4:0.50]
; GENERIC-NEXT: vpermw %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] sched: [8:0.50]
; SKX-NEXT: vpermw %zmm0, %zmm1, %zmm0 # sched: [6:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -447,7 +447,7 @@ define <32 x i16> @test_32xi16_perm_mask0(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm3 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm4, %zmm2, %k1
@@ -456,7 +456,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -471,7 +471,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %ve
define <32 x i16> @test_masked_z_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -479,7 +479,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -492,7 +492,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %
}
define <32 x i16> @test_masked_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,8,7,30,11,9,11,30,20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12,22,13,21,1,14,8,5,16] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm4, %zmm2, %k1
@@ -501,7 +501,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,8,7,30,11,9,11,30,20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12,22,13,21,1,14,8,5,16] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -516,7 +516,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %ve
define <32 x i16> @test_masked_z_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,8,7,30,11,9,11,30,20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12,22,13,21,1,14,8,5,16] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -524,7 +524,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,8,7,30,11,9,11,30,20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12,22,13,21,1,14,8,5,16] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -537,7 +537,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %
}
define <32 x i16> @test_masked_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm3 = [15,17,24,28,15,9,14,25,28,25,6,31,20,2,23,31,12,21,10,6,22,0,26,16,3,3,20,27,8,31,3,27] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm4, %zmm2, %k1
@@ -546,7 +546,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [15,17,24,28,15,9,14,25,28,25,6,31,20,2,23,31,12,21,10,6,22,0,26,16,3,3,20,27,8,31,3,27] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -561,7 +561,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %ve
define <32 x i16> @test_masked_z_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,17,24,28,15,9,14,25,28,25,6,31,20,2,23,31,12,21,10,6,22,0,26,16,3,3,20,27,8,31,3,27] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -569,7 +569,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,17,24,28,15,9,14,25,28,25,6,31,20,2,23,31,12,21,10,6,22,0,26,16,3,3,20,27,8,31,3,27] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -582,13 +582,13 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %
}
define <32 x i16> @test_32xi16_perm_mask3(<32 x i16> %vec) {
; GENERIC-LABEL: test_32xi16_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] sched: [4:0.50]
; GENERIC-NEXT: vpermw %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] sched: [8:0.50]
; SKX-NEXT: vpermw %zmm0, %zmm1, %zmm0 # sched: [6:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -597,7 +597,7 @@ define <32 x i16> @test_32xi16_perm_mask3(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm3 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm4, %zmm2, %k1
@@ -606,7 +606,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -621,7 +621,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %ve
define <32 x i16> @test_masked_z_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -629,7 +629,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -642,13 +642,13 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %
}
define <32 x i16> @test_32xi16_perm_mem_mask0(<32 x i16>* %vp) {
; GENERIC-LABEL: test_32xi16_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm0 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] sched: [4:0.50]
; GENERIC-NEXT: vpermw (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm0 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] sched: [8:0.50]
; SKX-NEXT: vpermw (%rdi), %zmm0, %zmm0 # sched: [13:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -658,7 +658,7 @@ define <32 x i16> @test_32xi16_perm_mem_mask0(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -666,7 +666,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -681,7 +681,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -689,7 +689,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -704,7 +704,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i1
define <32 x i16> @test_masked_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,20,2,2,23,1,0,12,16,14,15,18,21,13,11,31,8,24,13,11,2,27,22,28,14,21,3,12,6,1,30,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -712,7 +712,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,20,2,2,23,1,0,12,16,14,15,18,21,13,11,31,8,24,13,11,2,27,22,28,14,21,3,12,6,1,30,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -727,7 +727,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [31,20,2,2,23,1,0,12,16,14,15,18,21,13,11,31,8,24,13,11,2,27,22,28,14,21,3,12,6,1,30,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -735,7 +735,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [31,20,2,2,23,1,0,12,16,14,15,18,21,13,11,31,8,24,13,11,2,27,22,28,14,21,3,12,6,1,30,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -750,7 +750,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i1
define <32 x i16> @test_masked_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [4,6,12,17,4,31,31,4,12,21,28,15,29,10,15,15,21,6,19,7,10,30,28,26,1,4,8,25,26,18,22,25] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -758,7 +758,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [4,6,12,17,4,31,31,4,12,21,28,15,29,10,15,15,21,6,19,7,10,30,28,26,1,4,8,25,26,18,22,25] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -773,7 +773,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,6,12,17,4,31,31,4,12,21,28,15,29,10,15,15,21,6,19,7,10,30,28,26,1,4,8,25,26,18,22,25] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -781,7 +781,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,6,12,17,4,31,31,4,12,21,28,15,29,10,15,15,21,6,19,7,10,30,28,26,1,4,8,25,26,18,22,25] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -796,13 +796,13 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i1
define <32 x i16> @test_32xi16_perm_mem_mask3(<32 x i16>* %vp) {
; GENERIC-LABEL: test_32xi16_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] sched: [4:0.50]
; GENERIC-NEXT: vpermw (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] sched: [8:0.50]
; SKX-NEXT: vpermw (%rdi), %zmm0, %zmm0 # sched: [13:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -812,7 +812,7 @@ define <32 x i16> @test_32xi16_perm_mem_mask3(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -820,7 +820,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -835,7 +835,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -843,7 +843,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -858,13 +858,13 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i1
define <8 x i32> @test_8xi32_perm_mask0(<8 x i32> %vec) {
; GENERIC-LABEL: test_8xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm1 = [4,2,0,6,7,2,3,6] sched: [7:0.50]
; GENERIC-NEXT: vpermps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm1 = [4,2,0,6,7,2,3,6] sched: [7:0.50]
; SKX-NEXT: vpermps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -873,7 +873,7 @@ define <8 x i32> @test_8xi32_perm_mask0(<8 x i32> %vec) {
}
define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm3 = [4,2,0,6,7,2,3,6] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -882,7 +882,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,2,0,6,7,2,3,6] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -897,7 +897,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [4,2,0,6,7,2,3,6] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -905,7 +905,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mask
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [4,2,0,6,7,2,3,6] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -918,7 +918,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm3 = [0,5,1,2,6,0,0,3] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -927,7 +927,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,5,1,2,6,0,0,3] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -942,7 +942,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,1,2,6,0,0,3] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -950,7 +950,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mask
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,1,2,6,0,0,3] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -963,7 +963,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm3 = [3,6,5,5,1,7,3,4] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -972,7 +972,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm3 = [3,6,5,5,1,7,3,4] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -987,7 +987,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [3,6,5,5,1,7,3,4] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -995,7 +995,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mask
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [3,6,5,5,1,7,3,4] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -1008,13 +1008,13 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_8xi32_perm_mask3(<8 x i32> %vec) {
; GENERIC-LABEL: test_8xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm1 = [3,0,3,1,0,4,5,0] sched: [7:0.50]
; GENERIC-NEXT: vpermps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm1 = [3,0,3,1,0,4,5,0] sched: [7:0.50]
; SKX-NEXT: vpermps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1023,7 +1023,7 @@ define <8 x i32> @test_8xi32_perm_mask3(<8 x i32> %vec) {
}
define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm3 = [3,0,3,1,0,4,5,0] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -1032,7 +1032,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm3 = [3,0,3,1,0,4,5,0] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -1047,7 +1047,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [3,0,3,1,0,4,5,0] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -1055,7 +1055,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mask
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [3,0,3,1,0,4,5,0] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -1068,13 +1068,13 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_8xi32_perm_mem_mask0(<8 x i32>* %vp) {
; GENERIC-LABEL: test_8xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm0 = [3,7,4,3,5,2,0,5] sched: [7:0.50]
; GENERIC-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm0 = [3,7,4,3,5,2,0,5] sched: [7:0.50]
; SKX-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1084,7 +1084,7 @@ define <8 x i32> @test_8xi32_perm_mem_mask0(<8 x i32>* %vp) {
}
define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [3,7,4,3,5,2,0,5] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -1092,7 +1092,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [3,7,4,3,5,2,0,5] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -1107,7 +1107,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [3,7,4,3,5,2,0,5] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -1115,7 +1115,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [3,7,4,3,5,2,0,5] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -1130,7 +1130,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [4,6,1,7,6,7,6,5] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -1138,7 +1138,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [4,6,1,7,6,7,6,5] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -1153,7 +1153,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [4,6,1,7,6,7,6,5] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -1161,7 +1161,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,6,1,7,6,7,6,5] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -1176,7 +1176,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [6,4,6,1,6,3,6,3] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -1184,7 +1184,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [6,4,6,1,6,3,6,3] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -1199,7 +1199,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [6,4,6,1,6,3,6,3] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -1207,7 +1207,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [6,4,6,1,6,3,6,3] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -1222,13 +1222,13 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_8xi32_perm_mem_mask3(<8 x i32>* %vp) {
; GENERIC-LABEL: test_8xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm0 = [6,0,0,7,3,7,7,5] sched: [7:0.50]
; GENERIC-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm0 = [6,0,0,7,3,7,7,5] sched: [7:0.50]
; SKX-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1238,7 +1238,7 @@ define <8 x i32> @test_8xi32_perm_mem_mask3(<8 x i32>* %vp) {
}
define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm2 = [6,0,0,7,3,7,7,5] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -1246,7 +1246,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm2 = [6,0,0,7,3,7,7,5] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -1261,7 +1261,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa {{.*#+}} ymm1 = [6,0,0,7,3,7,7,5] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -1269,7 +1269,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = [6,0,0,7,3,7,7,5] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -1284,13 +1284,13 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %
define <16 x i32> @test_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] sched: [4:0.50]
; GENERIC-NEXT: vpermps %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] sched: [8:0.50]
; SKX-NEXT: vpermps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1299,7 +1299,7 @@ define <16 x i32> @test_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %mask) {
}
define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm3 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -1308,7 +1308,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -1323,7 +1323,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm2 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1331,7 +1331,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -1344,7 +1344,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm3 = [10,0,14,15,11,1,1,5,0,5,0,15,13,1,14,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -1353,7 +1353,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [10,0,14,15,11,1,1,5,0,5,0,15,13,1,14,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -1368,7 +1368,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm2 = [10,0,14,15,11,1,1,5,0,5,0,15,13,1,14,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1376,7 +1376,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [10,0,14,15,11,1,1,5,0,5,0,15,13,1,14,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -1389,7 +1389,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm3 = [3,10,15,1,0,5,0,9,13,2,1,5,15,2,15,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -1398,7 +1398,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [3,10,15,1,0,5,0,9,13,2,1,5,15,2,15,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -1413,7 +1413,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,10,15,1,0,5,0,9,13,2,1,5,15,2,15,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1421,7 +1421,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,10,15,1,0,5,0,9,13,2,1,5,15,2,15,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -1434,13 +1434,13 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_16xi32_perm_mask3(<16 x i32> %vec) {
; GENERIC-LABEL: test_16xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] sched: [4:0.50]
; GENERIC-NEXT: vpermps %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] sched: [8:0.50]
; SKX-NEXT: vpermps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1449,7 +1449,7 @@ define <16 x i32> @test_16xi32_perm_mask3(<16 x i32> %vec) {
}
define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm3 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -1458,7 +1458,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm3 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -1473,7 +1473,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm2 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1481,7 +1481,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -1494,13 +1494,13 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_16xi32_perm_mem_mask0(<16 x i32>* %vp) {
; GENERIC-LABEL: test_16xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] sched: [4:0.50]
; GENERIC-NEXT: vpermps (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] sched: [8:0.50]
; SKX-NEXT: vpermps (%rdi), %zmm0, %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1510,7 +1510,7 @@ define <16 x i32> @test_16xi32_perm_mem_mask0(<16 x i32>* %vp) {
}
define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1518,7 +1518,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -1533,7 +1533,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -1541,7 +1541,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -1556,7 +1556,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm2 = [11,5,3,4,7,15,12,4,8,11,12,7,6,12,6,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1564,7 +1564,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [11,5,3,4,7,15,12,4,8,11,12,7,6,12,6,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -1579,7 +1579,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm1 = [11,5,3,4,7,15,12,4,8,11,12,7,6,12,6,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -1587,7 +1587,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm1 = [11,5,3,4,7,15,12,4,8,11,12,7,6,12,6,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -1602,7 +1602,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm2 = [7,14,2,7,10,7,3,0,11,9,0,4,12,10,8,2] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1610,7 +1610,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [7,14,2,7,10,7,3,0,11,9,0,4,12,10,8,2] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -1625,7 +1625,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm1 = [7,14,2,7,10,7,3,0,11,9,0,4,12,10,8,2] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -1633,7 +1633,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm1 = [7,14,2,7,10,7,3,0,11,9,0,4,12,10,8,2] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -1648,13 +1648,13 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_16xi32_perm_mem_mask3(<16 x i32>* %vp) {
; GENERIC-LABEL: test_16xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm0 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] sched: [4:0.50]
; GENERIC-NEXT: vpermps (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm0 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] sched: [8:0.50]
; SKX-NEXT: vpermps (%rdi), %zmm0, %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -1664,7 +1664,7 @@ define <16 x i32> @test_16xi32_perm_mem_mask3(<16 x i32>* %vp) {
}
define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm2 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1672,7 +1672,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -1687,7 +1687,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa32 {{.*#+}} zmm1 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -1695,7 +1695,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm1 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -1710,12 +1710,12 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i3
define <4 x i64> @test_4xi64_perm_mask0(<4 x i64> %vec) {
; GENERIC-LABEL: test_4xi64_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,0,3,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,0,3,1] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
@@ -1723,7 +1723,7 @@ define <4 x i64> @test_4xi64_perm_mask0(<4 x i64> %vec) {
}
define <4 x i64> @test_masked_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xi64_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,0,3,1] sched: [1:1.00]
@@ -1731,7 +1731,7 @@ define <4 x i64> @test_masked_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi64_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,0,3,1] sched: [3:1.00]
@@ -1745,14 +1745,14 @@ define <4 x i64> @test_masked_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %vec2,
define <4 x i64> @test_masked_z_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xi64_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,0,3,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi64_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,0,3,1] sched: [3:1.00]
@@ -1764,7 +1764,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %mask
}
define <4 x i64> @test_masked_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xi64_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[1,2,0,3] sched: [1:1.00]
@@ -1772,7 +1772,7 @@ define <4 x i64> @test_masked_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi64_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[1,2,0,3] sched: [3:1.00]
@@ -1786,14 +1786,14 @@ define <4 x i64> @test_masked_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %vec2,
define <4 x i64> @test_masked_z_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xi64_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,0,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi64_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,0,3] sched: [3:1.00]
@@ -1805,7 +1805,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %mask
}
define <4 x i64> @test_masked_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xi64_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,2,2,1] sched: [1:1.00]
@@ -1813,7 +1813,7 @@ define <4 x i64> @test_masked_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi64_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,2,2,1] sched: [3:1.00]
@@ -1827,14 +1827,14 @@ define <4 x i64> @test_masked_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %vec2,
define <4 x i64> @test_masked_z_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xi64_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,2,2,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi64_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,2,2,1] sched: [3:1.00]
@@ -1846,12 +1846,12 @@ define <4 x i64> @test_masked_z_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %mask
}
define <4 x i64> @test_4xi64_perm_mask3(<4 x i64> %vec) {
; GENERIC-LABEL: test_4xi64_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 3>
@@ -1859,7 +1859,7 @@ define <4 x i64> @test_4xi64_perm_mask3(<4 x i64> %vec) {
}
define <4 x i64> @test_masked_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xi64_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,3] sched: [1:1.00]
@@ -1867,7 +1867,7 @@ define <4 x i64> @test_masked_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi64_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,3] sched: [3:1.00]
@@ -1881,14 +1881,14 @@ define <4 x i64> @test_masked_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %vec2,
define <4 x i64> @test_masked_z_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xi64_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi64_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,3] sched: [3:1.00]
@@ -1900,12 +1900,12 @@ define <4 x i64> @test_masked_z_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %mask
}
define <4 x i64> @test_4xi64_perm_mem_mask0(<4 x i64>* %vp) {
; GENERIC-LABEL: test_4xi64_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = mem[2,1,2,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = mem[2,1,2,0] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <4 x i64>, <4 x i64>* %vp
@@ -1914,14 +1914,14 @@ define <4 x i64> @test_4xi64_perm_mem_mask0(<4 x i64>* %vp) {
}
define <4 x i64> @test_masked_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xi64_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,1,2,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi64_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,1,2,0] sched: [10:1.00]
@@ -1935,14 +1935,14 @@ define <4 x i64> @test_masked_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %ve
define <4 x i64> @test_masked_z_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xi64_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,1,2,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi64_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,1,2,0] sched: [10:1.00]
@@ -1956,14 +1956,14 @@ define <4 x i64> @test_masked_z_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %
define <4 x i64> @test_masked_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xi64_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,1,1,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi64_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,1,1,1] sched: [10:1.00]
@@ -1977,14 +1977,14 @@ define <4 x i64> @test_masked_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %ve
define <4 x i64> @test_masked_z_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xi64_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,1,1,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi64_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,1,1,1] sched: [10:1.00]
@@ -1998,14 +1998,14 @@ define <4 x i64> @test_masked_z_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %
define <4 x i64> @test_masked_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xi64_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[0,1,2,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi64_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[0,1,2,0] sched: [10:1.00]
@@ -2019,14 +2019,14 @@ define <4 x i64> @test_masked_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %ve
define <4 x i64> @test_masked_z_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xi64_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi64_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,0] sched: [10:1.00]
@@ -2040,12 +2040,12 @@ define <4 x i64> @test_masked_z_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %
define <4 x i64> @test_4xi64_perm_mem_mask3(<4 x i64>* %vp) {
; GENERIC-LABEL: test_4xi64_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = mem[2,0,1,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = mem[2,0,1,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <4 x i64>, <4 x i64>* %vp
@@ -2054,14 +2054,14 @@ define <4 x i64> @test_4xi64_perm_mem_mask3(<4 x i64>* %vp) {
}
define <4 x i64> @test_masked_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xi64_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,0,1,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi64_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,0,1,3] sched: [10:1.00]
@@ -2075,14 +2075,14 @@ define <4 x i64> @test_masked_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %ve
define <4 x i64> @test_masked_z_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xi64_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,0,1,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi64_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,0,1,3] sched: [10:1.00]
@@ -2096,13 +2096,13 @@ define <4 x i64> @test_masked_z_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %
define <8 x i64> @test_8xi64_perm_mask0(<8 x i64> %vec) {
; GENERIC-LABEL: test_8xi64_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,7,6,5,5,1,6] sched: [4:0.50]
; GENERIC-NEXT: vpermpd %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,7,6,5,5,1,6] sched: [8:0.50]
; SKX-NEXT: vpermpd %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2111,7 +2111,7 @@ define <8 x i64> @test_8xi64_perm_mask0(<8 x i64> %vec) {
}
define <8 x i64> @test_masked_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,4,7,6,5,5,1,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -2120,7 +2120,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,4,7,6,5,5,1,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -2135,7 +2135,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %vec2,
define <8 x i64> @test_masked_z_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,4,7,6,5,5,1,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2143,7 +2143,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %mask
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,4,7,6,5,5,1,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2156,7 +2156,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %mask
}
define <8 x i64> @test_masked_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_imm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[1,0,1,1,5,4,5,5] sched: [1:1.00]
@@ -2164,7 +2164,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_imm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[1,0,1,1,5,4,5,5] sched: [3:1.00]
@@ -2178,14 +2178,14 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_imm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,1,1,5,4,5,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_imm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,1,1,5,4,5,5] sched: [3:1.00]
@@ -2197,7 +2197,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %
}
define <8 x i64> @test_masked_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,3,7,3,3,5,4,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -2206,7 +2206,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,3,7,3,3,5,4,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -2221,7 +2221,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %vec2,
define <8 x i64> @test_masked_z_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,3,7,3,3,5,4,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2229,7 +2229,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %mask
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,3,7,3,3,5,4,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2242,12 +2242,12 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %mask
}
define <8 x i64> @test_8xi64_perm_imm_mask3(<8 x i64> %vec) {
; GENERIC-LABEL: test_8xi64_perm_imm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,1,3,1,7,5,7,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_perm_imm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,1,3,1,7,5,7,5] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 3, i32 1, i32 3, i32 1, i32 7, i32 5, i32 7, i32 5>
@@ -2255,7 +2255,7 @@ define <8 x i64> @test_8xi64_perm_imm_mask3(<8 x i64> %vec) {
}
define <8 x i64> @test_masked_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_imm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,1,7,5,7,5] sched: [1:1.00]
@@ -2263,7 +2263,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_imm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,1,7,5,7,5] sched: [3:1.00]
@@ -2277,14 +2277,14 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_imm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,1,7,5,7,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_imm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,1,7,5,7,5] sched: [3:1.00]
@@ -2296,7 +2296,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %
}
define <8 x i64> @test_masked_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm3 = [6,3,1,1,7,4,0,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -2305,7 +2305,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [6,3,1,1,7,4,0,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -2320,7 +2320,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %vec2,
define <8 x i64> @test_masked_z_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [6,3,1,1,7,4,0,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2328,7 +2328,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %mask
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [6,3,1,1,7,4,0,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2341,7 +2341,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %mask
}
define <8 x i64> @test_masked_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_imm_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[0,0,0,0,4,4,4,4] sched: [1:1.00]
@@ -2349,7 +2349,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_imm_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[0,0,0,0,4,4,4,4] sched: [3:1.00]
@@ -2363,14 +2363,14 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_imm_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_imm_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4] sched: [3:1.00]
@@ -2382,13 +2382,13 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %
}
define <8 x i64> @test_8xi64_perm_mask6(<8 x i64> %vec) {
; GENERIC-LABEL: test_8xi64_perm_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [5,1,4,4,5,4,2,7] sched: [4:0.50]
; GENERIC-NEXT: vpermpd %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_perm_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [5,1,4,4,5,4,2,7] sched: [8:0.50]
; SKX-NEXT: vpermpd %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2397,7 +2397,7 @@ define <8 x i64> @test_8xi64_perm_mask6(<8 x i64> %vec) {
}
define <8 x i64> @test_masked_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm3 = [5,1,4,4,5,4,2,7] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -2406,7 +2406,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [5,1,4,4,5,4,2,7] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -2421,7 +2421,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %vec2,
define <8 x i64> @test_masked_z_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,1,4,4,5,4,2,7] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2429,7 +2429,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %mask
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,1,4,4,5,4,2,7] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2442,7 +2442,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %mask
}
define <8 x i64> @test_masked_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_imm_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,3,3,3,7,7,7,7] sched: [1:1.00]
@@ -2450,7 +2450,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_imm_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,3,3,3,7,7,7,7] sched: [3:1.00]
@@ -2464,14 +2464,14 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_imm_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,3,3,7,7,7,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_imm_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,3,3,7,7,7,7] sched: [3:1.00]
@@ -2483,13 +2483,13 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %
}
define <8 x i64> @test_8xi64_perm_mem_mask0(<8 x i64>* %vp) {
; GENERIC-LABEL: test_8xi64_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm0 = [5,1,6,5,7,3,7,3] sched: [4:0.50]
; GENERIC-NEXT: vpermpd (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm0 = [5,1,6,5,7,3,7,3] sched: [8:0.50]
; SKX-NEXT: vpermpd (%rdi), %zmm0, %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2499,7 +2499,7 @@ define <8 x i64> @test_8xi64_perm_mem_mask0(<8 x i64>* %vp) {
}
define <8 x i64> @test_masked_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,1,6,5,7,3,7,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2507,7 +2507,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,1,6,5,7,3,7,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2522,7 +2522,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,1,6,5,7,3,7,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -2530,7 +2530,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,1,6,5,7,3,7,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -2545,14 +2545,14 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %
define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_imm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[1,1,1,0,5,5,5,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_imm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[1,1,1,0,5,5,5,4] sched: [10:1.00]
@@ -2566,14 +2566,14 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_imm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[1,1,1,0,5,5,5,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_imm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[1,1,1,0,5,5,5,4] sched: [10:1.00]
@@ -2587,7 +2587,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i6
define <8 x i64> @test_masked_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,2,1,4,1,1,5,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2595,7 +2595,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,2,1,4,1,1,5,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2610,7 +2610,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,2,1,4,1,1,5,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -2618,7 +2618,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,2,1,4,1,1,5,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -2633,12 +2633,12 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %
define <8 x i64> @test_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp) {
; GENERIC-LABEL: test_8xi64_perm_imm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 = mem[1,3,1,1,5,7,5,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_perm_imm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} zmm0 = mem[1,3,1,1,5,7,5,5] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <8 x i64>, <8 x i64>* %vp
@@ -2647,14 +2647,14 @@ define <8 x i64> @test_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp) {
}
define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_imm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[1,3,1,1,5,7,5,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_imm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[1,3,1,1,5,7,5,5] sched: [10:1.00]
@@ -2668,14 +2668,14 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_imm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[1,3,1,1,5,7,5,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_imm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[1,3,1,1,5,7,5,5] sched: [10:1.00]
@@ -2689,7 +2689,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i6
define <8 x i64> @test_masked_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,0,7,0,3,5,0,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2697,7 +2697,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,0,7,0,3,5,0,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2712,7 +2712,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,0,7,0,3,5,0,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -2720,7 +2720,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,0,7,0,3,5,0,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -2735,14 +2735,14 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %
define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_imm_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[3,1,0,0,7,5,4,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_imm_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[3,1,0,0,7,5,4,4] sched: [10:1.00]
@@ -2756,14 +2756,14 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_imm_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[3,1,0,0,7,5,4,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_imm_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[3,1,0,0,7,5,4,4] sched: [10:1.00]
@@ -2777,13 +2777,13 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i6
define <8 x i64> @test_8xi64_perm_mem_mask6(<8 x i64>* %vp) {
; GENERIC-LABEL: test_8xi64_perm_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm0 = [0,6,3,7,3,0,3,6] sched: [4:0.50]
; GENERIC-NEXT: vpermpd (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_perm_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm0 = [0,6,3,7,3,0,3,6] sched: [8:0.50]
; SKX-NEXT: vpermpd (%rdi), %zmm0, %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2793,7 +2793,7 @@ define <8 x i64> @test_8xi64_perm_mem_mask6(<8 x i64>* %vp) {
}
define <8 x i64> @test_masked_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,6,3,7,3,0,3,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2801,7 +2801,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,6,3,7,3,0,3,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2816,7 +2816,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,6,3,7,3,0,3,6] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -2824,7 +2824,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,6,3,7,3,0,3,6] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -2839,14 +2839,14 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %
define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xi64_perm_imm_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[3,0,0,1,7,4,4,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi64_perm_imm_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[3,0,0,1,7,4,4,5] sched: [10:1.00]
@@ -2860,14 +2860,14 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xi64_perm_imm_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[3,0,0,1,7,4,4,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi64_perm_imm_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[3,0,0,1,7,4,4,5] sched: [10:1.00]
@@ -2881,13 +2881,13 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i6
define <8 x float> @test_8xfloat_perm_mask0(<8 x float> %vec) {
; GENERIC-LABEL: test_8xfloat_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm1 = [3,4,2,4,1,2,3,4] sched: [7:0.50]
; GENERIC-NEXT: vpermps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm1 = [3,4,2,4,1,2,3,4] sched: [7:0.50]
; SKX-NEXT: vpermps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -2896,7 +2896,7 @@ define <8 x float> @test_8xfloat_perm_mask0(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xfloat_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm3 = [3,4,2,4,1,2,3,4] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -2905,7 +2905,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xfloat_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm3 = [3,4,2,4,1,2,3,4] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -2920,7 +2920,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xfloat_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm2 = [3,4,2,4,1,2,3,4] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -2928,7 +2928,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec, <8 x i32>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xfloat_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm2 = [3,4,2,4,1,2,3,4] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -2941,7 +2941,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec, <8 x i32>
}
define <8 x float> @test_masked_8xfloat_perm_mask1(<8 x float> %vec, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xfloat_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm3 = [4,2,1,0,6,0,5,1] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -2950,7 +2950,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask1(<8 x float> %vec, <8 x float>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xfloat_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm3 = [4,2,1,0,6,0,5,1] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -2965,7 +2965,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask1(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask1(<8 x float> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xfloat_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm2 = [4,2,1,0,6,0,5,1] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -2973,7 +2973,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask1(<8 x float> %vec, <8 x i64>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xfloat_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm2 = [4,2,1,0,6,0,5,1] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -2986,7 +2986,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask1(<8 x float> %vec, <8 x i64>
}
define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xfloat_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm3 = [2,5,5,5,4,6,0,5] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -2995,7 +2995,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xfloat_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm3 = [2,5,5,5,4,6,0,5] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -3010,7 +3010,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xfloat_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm2 = [2,5,5,5,4,6,0,5] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -3018,7 +3018,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec, <8 x i32>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xfloat_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm2 = [2,5,5,5,4,6,0,5] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -3031,13 +3031,13 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec, <8 x i32>
}
define <8 x float> @test_8xfloat_perm_mask3(<8 x float> %vec) {
; GENERIC-LABEL: test_8xfloat_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm1 = [0,5,2,5,5,5,1,6] sched: [7:0.50]
; GENERIC-NEXT: vpermps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm1 = [0,5,2,5,5,5,1,6] sched: [7:0.50]
; SKX-NEXT: vpermps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -3046,7 +3046,7 @@ define <8 x float> @test_8xfloat_perm_mask3(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_perm_mask3(<8 x float> %vec, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xfloat_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm3 = [0,5,2,5,5,5,1,6] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -3055,7 +3055,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask3(<8 x float> %vec, <8 x float>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xfloat_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm3 = [0,5,2,5,5,5,1,6] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm2, %k1 # sched: [3:1.00]
@@ -3070,7 +3070,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask3(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask3(<8 x float> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xfloat_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm2 = [0,5,2,5,5,5,1,6] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -3078,7 +3078,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask3(<8 x float> %vec, <8 x i32>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xfloat_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm2 = [0,5,2,5,5,5,1,6] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -3091,13 +3091,13 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask3(<8 x float> %vec, <8 x i32>
}
define <8 x float> @test_8xfloat_perm_mem_mask0(<8 x float>* %vp) {
; GENERIC-LABEL: test_8xfloat_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm0 = [5,2,1,6,4,2,4,0] sched: [7:0.50]
; GENERIC-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm0 = [5,2,1,6,4,2,4,0] sched: [7:0.50]
; SKX-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -3107,7 +3107,7 @@ define <8 x float> @test_8xfloat_perm_mem_mask0(<8 x float>* %vp) {
}
define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xfloat_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm2 = [5,2,1,6,4,2,4,0] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -3115,7 +3115,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x fl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xfloat_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm2 = [5,2,1,6,4,2,4,0] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -3130,7 +3130,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xfloat_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm1 = [5,2,1,6,4,2,4,0] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -3138,7 +3138,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xfloat_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm1 = [5,2,1,6,4,2,4,0] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -3153,7 +3153,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xfloat_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm2 = [1,3,7,4,0,6,6,6] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -3161,7 +3161,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x fl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xfloat_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm2 = [1,3,7,4,0,6,6,6] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -3176,7 +3176,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xfloat_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm1 = [1,3,7,4,0,6,6,6] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -3184,7 +3184,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xfloat_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm1 = [1,3,7,4,0,6,6,6] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -3199,7 +3199,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xfloat_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm2 = [4,5,1,5,6,6,2,4] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -3207,7 +3207,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x fl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xfloat_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm2 = [4,5,1,5,6,6,2,4] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -3222,7 +3222,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xfloat_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm1 = [4,5,1,5,6,6,2,4] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -3230,7 +3230,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xfloat_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm1 = [4,5,1,5,6,6,2,4] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -3245,13 +3245,13 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x
define <8 x float> @test_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm0 = [5,7,0,6,4,2,3,0] sched: [7:0.50]
; GENERIC-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm0 = [5,7,0,6,4,2,3,0] sched: [7:0.50]
; SKX-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -3261,7 +3261,7 @@ define <8 x float> @test_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x i32> %mas
}
define <8 x float> @test_masked_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_8xfloat_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm2 = [5,7,0,6,4,2,3,0] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -3269,7 +3269,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x fl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xfloat_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm2 = [5,7,0,6,4,2,3,0] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -3284,7 +3284,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_8xfloat_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} ymm1 = [5,7,0,6,4,2,3,0] sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -3292,7 +3292,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xfloat_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} ymm1 = [5,7,0,6,4,2,3,0] sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -3307,13 +3307,13 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x
define <16 x float> @test_16xfloat_perm_mask0(<16 x float> %vec) {
; GENERIC-LABEL: test_16xfloat_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] sched: [4:0.50]
; GENERIC-NEXT: vpermps %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] sched: [8:0.50]
; SKX-NEXT: vpermps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -3322,7 +3322,7 @@ define <16 x float> @test_16xfloat_perm_mask0(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xfloat_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm3 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -3331,7 +3331,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x fl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xfloat_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm3 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -3346,7 +3346,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xfloat_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm2 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -3354,7 +3354,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec, <16 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xfloat_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -3367,7 +3367,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec, <16 x
}
define <16 x float> @test_masked_16xfloat_perm_mask1(<16 x float> %vec, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xfloat_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm3 = [11,10,4,10,4,5,8,11,2,0,10,0,0,3,10,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -3376,7 +3376,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask1(<16 x float> %vec, <16 x fl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xfloat_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm3 = [11,10,4,10,4,5,8,11,2,0,10,0,0,3,10,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -3391,7 +3391,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask1(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask1(<16 x float> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xfloat_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm2 = [11,10,4,10,4,5,8,11,2,0,10,0,0,3,10,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -3399,7 +3399,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask1(<16 x float> %vec, <16 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xfloat_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [11,10,4,10,4,5,8,11,2,0,10,0,0,3,10,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -3412,7 +3412,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask1(<16 x float> %vec, <16 x
}
define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xfloat_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm3 = [0,15,6,14,3,6,5,2,5,15,11,6,6,4,8,11] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -3421,7 +3421,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x fl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xfloat_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm3 = [0,15,6,14,3,6,5,2,5,15,11,6,6,4,8,11] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -3436,7 +3436,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xfloat_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm2 = [0,15,6,14,3,6,5,2,5,15,11,6,6,4,8,11] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -3444,7 +3444,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec, <16 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xfloat_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [0,15,6,14,3,6,5,2,5,15,11,6,6,4,8,11] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -3457,13 +3457,13 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec, <16 x
}
define <16 x float> @test_16xfloat_perm_mask3(<16 x float> %vec) {
; GENERIC-LABEL: test_16xfloat_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] sched: [4:0.50]
; GENERIC-NEXT: vpermps %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] sched: [8:0.50]
; SKX-NEXT: vpermps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -3472,7 +3472,7 @@ define <16 x float> @test_16xfloat_perm_mask3(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_perm_mask3(<16 x float> %vec, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xfloat_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm3 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -3481,7 +3481,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask3(<16 x float> %vec, <16 x fl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xfloat_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm3 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -3496,7 +3496,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask3(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask3(<16 x float> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xfloat_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm2 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -3504,7 +3504,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask3(<16 x float> %vec, <16 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xfloat_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -3517,13 +3517,13 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask3(<16 x float> %vec, <16 x
}
define <16 x float> @test_16xfloat_perm_mem_mask0(<16 x float>* %vp) {
; GENERIC-LABEL: test_16xfloat_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm0 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] sched: [4:0.50]
; GENERIC-NEXT: vpermps (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm0 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] sched: [8:0.50]
; SKX-NEXT: vpermps (%rdi), %zmm0, %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -3533,7 +3533,7 @@ define <16 x float> @test_16xfloat_perm_mem_mask0(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xfloat_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm2 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -3541,7 +3541,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xfloat_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -3556,7 +3556,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xfloat_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -3564,7 +3564,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xfloat_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -3579,7 +3579,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp, <1
define <16 x float> @test_masked_16xfloat_perm_mem_mask1(<16 x float>* %vp, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xfloat_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm2 = [4,2,3,5,11,6,4,7,6,4,14,8,15,12,9,4] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -3587,7 +3587,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask1(<16 x float>* %vp, <16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xfloat_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [4,2,3,5,11,6,4,7,6,4,14,8,15,12,9,4] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -3602,7 +3602,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask1(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask1(<16 x float>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xfloat_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [4,2,3,5,11,6,4,7,6,4,14,8,15,12,9,4] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -3610,7 +3610,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask1(<16 x float>* %vp, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xfloat_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [4,2,3,5,11,6,4,7,6,4,14,8,15,12,9,4] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -3625,7 +3625,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask1(<16 x float>* %vp, <1
define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xfloat_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm2 = [10,7,11,6,7,0,11,0,10,9,12,4,10,3,8,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -3633,7 +3633,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xfloat_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [10,7,11,6,7,0,11,0,10,9,12,4,10,3,8,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -3648,7 +3648,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xfloat_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,11,6,7,0,11,0,10,9,12,4,10,3,8,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -3656,7 +3656,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xfloat_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,11,6,7,0,11,0,10,9,12,4,10,3,8,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -3671,13 +3671,13 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp, <1
define <16 x float> @test_16xfloat_perm_mem_mask3(<16 x float>* %vp) {
; GENERIC-LABEL: test_16xfloat_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm0 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] sched: [4:0.50]
; GENERIC-NEXT: vpermps (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm0 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] sched: [8:0.50]
; SKX-NEXT: vpermps (%rdi), %zmm0, %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -3687,7 +3687,7 @@ define <16 x float> @test_16xfloat_perm_mem_mask3(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_perm_mem_mask3(<16 x float>* %vp, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_16xfloat_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm2 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -3695,7 +3695,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask3(<16 x float>* %vp, <16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xfloat_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -3710,7 +3710,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask3(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask3(<16 x float>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_16xfloat_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -3718,7 +3718,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask3(<16 x float>* %vp, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xfloat_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -3733,12 +3733,12 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask3(<16 x float>* %vp, <1
define <4 x double> @test_4xdouble_perm_mask0(<4 x double> %vec) {
; GENERIC-LABEL: test_4xdouble_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,2] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,2] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 2>
@@ -3746,7 +3746,7 @@ define <4 x double> @test_4xdouble_perm_mask0(<4 x double> %vec) {
}
define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xdouble_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,2] sched: [1:1.00]
@@ -3754,7 +3754,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x dou
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xdouble_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,2] sched: [3:1.00]
@@ -3768,14 +3768,14 @@ define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask0(<4 x double> %vec, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xdouble_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,2] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xdouble_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,2] sched: [3:1.00]
@@ -3787,7 +3787,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask0(<4 x double> %vec, <4 x i
}
define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xdouble_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,0,0,0] sched: [1:1.00]
@@ -3795,7 +3795,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x dou
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xdouble_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,0,0,0] sched: [3:1.00]
@@ -3809,14 +3809,14 @@ define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask1(<4 x double> %vec, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xdouble_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,0,0] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xdouble_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,0,0] sched: [3:1.00]
@@ -3828,7 +3828,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask1(<4 x double> %vec, <4 x i
}
define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xdouble_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[0,3,3,1] sched: [1:1.00]
@@ -3836,7 +3836,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x dou
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xdouble_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[0,3,3,1] sched: [3:1.00]
@@ -3850,14 +3850,14 @@ define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask2(<4 x double> %vec, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xdouble_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,3,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xdouble_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,3,1] sched: [3:1.00]
@@ -3869,12 +3869,12 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask2(<4 x double> %vec, <4 x i
}
define <4 x double> @test_4xdouble_perm_mask3(<4 x double> %vec) {
; GENERIC-LABEL: test_4xdouble_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,2] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,2] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 2>
@@ -3882,7 +3882,7 @@ define <4 x double> @test_4xdouble_perm_mask3(<4 x double> %vec) {
}
define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xdouble_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,2] sched: [1:1.00]
@@ -3890,7 +3890,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x dou
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xdouble_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,2] sched: [3:1.00]
@@ -3904,14 +3904,14 @@ define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask3(<4 x double> %vec, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xdouble_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,2] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xdouble_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,2] sched: [3:1.00]
@@ -3923,12 +3923,12 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask3(<4 x double> %vec, <4 x i
}
define <4 x double> @test_4xdouble_perm_mem_mask0(<4 x double>* %vp) {
; GENERIC-LABEL: test_4xdouble_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = mem[0,0,2,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = mem[0,0,2,0] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <4 x double>, <4 x double>* %vp
@@ -3937,14 +3937,14 @@ define <4 x double> @test_4xdouble_perm_mem_mask0(<4 x double>* %vp) {
}
define <4 x double> @test_masked_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xdouble_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[0,0,2,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xdouble_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[0,0,2,0] sched: [10:1.00]
@@ -3958,14 +3958,14 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xdouble_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xdouble_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,0] sched: [10:1.00]
@@ -3979,14 +3979,14 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4
define <4 x double> @test_masked_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xdouble_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[0,2,3,2] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xdouble_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[0,2,3,2] sched: [10:1.00]
@@ -4000,14 +4000,14 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xdouble_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[0,2,3,2] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xdouble_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[0,2,3,2] sched: [10:1.00]
@@ -4021,14 +4021,14 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4
define <4 x double> @test_masked_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xdouble_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[3,1,1,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xdouble_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[3,1,1,1] sched: [10:1.00]
@@ -4042,14 +4042,14 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xdouble_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[3,1,1,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xdouble_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[3,1,1,1] sched: [10:1.00]
@@ -4063,12 +4063,12 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4
define <4 x double> @test_4xdouble_perm_mem_mask3(<4 x double>* %vp) {
; GENERIC-LABEL: test_4xdouble_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = mem[3,2,3,2] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = mem[3,2,3,2] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <4 x double>, <4 x double>* %vp
@@ -4077,14 +4077,14 @@ define <4 x double> @test_4xdouble_perm_mem_mask3(<4 x double>* %vp) {
}
define <4 x double> @test_masked_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_4xdouble_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[3,2,3,2] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xdouble_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[3,2,3,2] sched: [10:1.00]
@@ -4098,14 +4098,14 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_4xdouble_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,2] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xdouble_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,2] sched: [10:1.00]
@@ -4119,13 +4119,13 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4
define <8 x double> @test_8xdouble_perm_mask0(<8 x double> %vec) {
; GENERIC-LABEL: test_8xdouble_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [5,7,4,2,7,4,3,4] sched: [4:0.50]
; GENERIC-NEXT: vpermpd %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [5,7,4,2,7,4,3,4] sched: [8:0.50]
; SKX-NEXT: vpermpd %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4134,7 +4134,7 @@ define <8 x double> @test_8xdouble_perm_mask0(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm3 = [5,7,4,2,7,4,3,4] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -4143,7 +4143,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x dou
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm3 = [5,7,4,2,7,4,3,4] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -4158,7 +4158,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm2 = [5,7,4,2,7,4,3,4] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -4166,7 +4166,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec, <8 x i
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm2 = [5,7,4,2,7,4,3,4] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -4179,7 +4179,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec, <8 x i
}
define <8 x double> @test_masked_8xdouble_perm_imm_mask1(<8 x double> %vec, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_imm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,2,7,4,4,6] sched: [1:1.00]
@@ -4187,7 +4187,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask1(<8 x double> %vec, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_imm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,2,7,4,4,6] sched: [3:1.00]
@@ -4201,14 +4201,14 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask1(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_imm_mask1(<8 x double> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_imm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,0,2,7,4,4,6] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_imm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,0,2,7,4,4,6] sched: [3:1.00]
@@ -4220,7 +4220,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mask1(<8 x double> %vec, <8
}
define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm3 = [7,5,5,5,3,5,1,7] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -4229,7 +4229,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x dou
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm3 = [7,5,5,5,3,5,1,7] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -4244,7 +4244,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm2 = [7,5,5,5,3,5,1,7] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -4252,7 +4252,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec, <8 x i
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm2 = [7,5,5,5,3,5,1,7] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -4265,12 +4265,12 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec, <8 x i
}
define <8 x double> @test_8xdouble_perm_imm_mask3(<8 x double> %vec) {
; GENERIC-LABEL: test_8xdouble_perm_imm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,3,3,0,5,7,7,4] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_perm_imm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,3,3,0,5,7,7,4] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 1, i32 3, i32 3, i32 0, i32 5, i32 7, i32 7, i32 4>
@@ -4278,7 +4278,7 @@ define <8 x double> @test_8xdouble_perm_imm_mask3(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_perm_imm_mask3(<8 x double> %vec, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_imm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[1,3,3,0,5,7,7,4] sched: [1:1.00]
@@ -4286,7 +4286,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask3(<8 x double> %vec, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_imm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[1,3,3,0,5,7,7,4] sched: [3:1.00]
@@ -4300,14 +4300,14 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask3(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_imm_mask3(<8 x double> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_imm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,3,3,0,5,7,7,4] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_imm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,3,3,0,5,7,7,4] sched: [3:1.00]
@@ -4319,7 +4319,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mask3(<8 x double> %vec, <8
}
define <8 x double> @test_masked_8xdouble_perm_mask4(<8 x double> %vec, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm3 = [3,5,3,4,6,5,7,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -4328,7 +4328,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask4(<8 x double> %vec, <8 x dou
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm3 = [3,5,3,4,6,5,7,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -4343,7 +4343,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask4(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask4(<8 x double> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm2 = [3,5,3,4,6,5,7,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -4351,7 +4351,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask4(<8 x double> %vec, <8 x i
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm2 = [3,5,3,4,6,5,7,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -4364,7 +4364,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask4(<8 x double> %vec, <8 x i
}
define <8 x double> @test_masked_8xdouble_perm_imm_mask5(<8 x double> %vec, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_imm_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,3,2,3,7,7,6,7] sched: [1:1.00]
@@ -4372,7 +4372,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask5(<8 x double> %vec, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_imm_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,3,2,3,7,7,6,7] sched: [3:1.00]
@@ -4386,14 +4386,14 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask5(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_imm_mask5(<8 x double> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_imm_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,2,3,7,7,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_imm_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,2,3,7,7,6,7] sched: [3:1.00]
@@ -4405,13 +4405,13 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mask5(<8 x double> %vec, <8
}
define <8 x double> @test_8xdouble_perm_mask6(<8 x double> %vec) {
; GENERIC-LABEL: test_8xdouble_perm_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm1 = [2,7,6,4,0,0,0,2] sched: [4:0.50]
; GENERIC-NEXT: vpermpd %zmm0, %zmm1, %zmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_perm_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [2,7,6,4,0,0,0,2] sched: [8:0.50]
; SKX-NEXT: vpermpd %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4420,7 +4420,7 @@ define <8 x double> @test_8xdouble_perm_mask6(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_perm_mask6(<8 x double> %vec, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm3 = [2,7,6,4,0,0,0,2] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -4429,7 +4429,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask6(<8 x double> %vec, <8 x dou
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm3 = [2,7,6,4,0,0,0,2] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm2, %k1 # sched: [3:1.00]
@@ -4444,7 +4444,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask6(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask6(<8 x double> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm2 = [2,7,6,4,0,0,0,2] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -4452,7 +4452,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask6(<8 x double> %vec, <8 x i
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm2 = [2,7,6,4,0,0,0,2] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -4465,7 +4465,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask6(<8 x double> %vec, <8 x i
}
define <8 x double> @test_masked_8xdouble_perm_imm_mask7(<8 x double> %vec, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_imm_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,2,7,5,7,6] sched: [1:1.00]
@@ -4473,7 +4473,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask7(<8 x double> %vec, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_imm_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,2,7,5,7,6] sched: [3:1.00]
@@ -4487,14 +4487,14 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask7(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_imm_mask7(<8 x double> %vec, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_imm_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,2,7,5,7,6] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_imm_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,2,7,5,7,6] sched: [3:1.00]
@@ -4506,13 +4506,13 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mask7(<8 x double> %vec, <8
}
define <8 x double> @test_8xdouble_perm_mem_mask0(<8 x double>* %vp) {
; GENERIC-LABEL: test_8xdouble_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm0 = [0,3,4,0,4,2,0,1] sched: [4:0.50]
; GENERIC-NEXT: vpermpd (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm0 = [0,3,4,0,4,2,0,1] sched: [8:0.50]
; SKX-NEXT: vpermpd (%rdi), %zmm0, %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4522,7 +4522,7 @@ define <8 x double> @test_8xdouble_perm_mem_mask0(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm2 = [0,3,4,0,4,2,0,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -4530,7 +4530,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm2 = [0,3,4,0,4,2,0,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -4545,7 +4545,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm1 = [0,3,4,0,4,2,0,1] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -4553,7 +4553,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm1 = [0,3,4,0,4,2,0,1] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -4568,14 +4568,14 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8
define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_imm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[0,2,0,3,4,6,4,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_imm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[0,2,0,3,4,6,4,7] sched: [10:1.00]
@@ -4589,14 +4589,14 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp,
define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[0,2,0,3,4,6,4,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[0,2,0,3,4,6,4,7] sched: [10:1.00]
@@ -4610,7 +4610,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp
define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm2 = [6,7,2,7,7,6,2,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -4618,7 +4618,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm2 = [6,7,2,7,7,6,2,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -4633,7 +4633,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm1 = [6,7,2,7,7,6,2,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -4641,7 +4641,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm1 = [6,7,2,7,7,6,2,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -4656,12 +4656,12 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8
define <8 x double> @test_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp) {
; GENERIC-LABEL: test_8xdouble_perm_imm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 = mem[2,1,1,0,6,5,5,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_perm_imm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} zmm0 = mem[2,1,1,0,6,5,5,4] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <8 x double>, <8 x double>* %vp
@@ -4670,14 +4670,14 @@ define <8 x double> @test_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_imm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[2,1,1,0,6,5,5,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_imm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[2,1,1,0,6,5,5,4] sched: [10:1.00]
@@ -4691,14 +4691,14 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp,
define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[2,1,1,0,6,5,5,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[2,1,1,0,6,5,5,4] sched: [10:1.00]
@@ -4712,7 +4712,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp
define <8 x double> @test_masked_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm2 = [1,1,3,5,6,0,6,0] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -4720,7 +4720,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm2 = [1,1,3,5,6,0,6,0] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -4735,7 +4735,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm1 = [1,1,3,5,6,0,6,0] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -4743,7 +4743,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm1 = [1,1,3,5,6,0,6,0] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -4758,14 +4758,14 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8
define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_imm_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[2,2,2,3,6,6,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_imm_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[2,2,2,3,6,6,6,7] sched: [10:1.00]
@@ -4779,14 +4779,14 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp,
define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[2,2,2,3,6,6,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[2,2,2,3,6,6,6,7] sched: [10:1.00]
@@ -4800,13 +4800,13 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp
define <8 x double> @test_8xdouble_perm_mem_mask6(<8 x double>* %vp) {
; GENERIC-LABEL: test_8xdouble_perm_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovaps {{.*#+}} zmm0 = [2,4,0,4,6,1,2,5] sched: [4:0.50]
; GENERIC-NEXT: vpermpd (%rdi), %zmm0, %zmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_perm_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps {{.*#+}} zmm0 = [2,4,0,4,6,1,2,5] sched: [8:0.50]
; SKX-NEXT: vpermpd (%rdi), %zmm0, %zmm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -4816,7 +4816,7 @@ define <8 x double> @test_8xdouble_perm_mem_mask6(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm2 = [2,4,0,4,6,1,2,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -4824,7 +4824,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm2 = [2,4,0,4,6,1,2,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -4839,7 +4839,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovapd {{.*#+}} zmm1 = [2,4,0,4,6,1,2,5] sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -4847,7 +4847,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd {{.*#+}} zmm1 = [2,4,0,4,6,1,2,5] sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -4862,14 +4862,14 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8
define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_8xdouble_perm_imm_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[0,3,2,0,4,7,6,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xdouble_perm_imm_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[0,3,2,0,4,7,6,4] sched: [10:1.00]
@@ -4883,14 +4883,14 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp,
define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp, <8 x i64> %mask) {
; GENERIC-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[0,3,2,0,4,7,6,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[0,3,2,0,4,7,6,4] sched: [10:1.00]
@@ -4904,12 +4904,12 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp
define <16 x i8> @test_16xi8_perm_mask0(<16 x i8> %vec) {
; GENERIC-LABEL: test_16xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> <i32 8, i32 6, i32 12, i32 4, i32 7, i32 9, i32 14, i32 8, i32 4, i32 12, i32 9, i32 4, i32 14, i32 15, i32 12, i32 14>
@@ -4917,7 +4917,7 @@ define <16 x i8> @test_16xi8_perm_mask0(<16 x i8> %vec) {
}
define <16 x i8> @test_masked_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %vec2, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_16xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [5:1.00]
@@ -4925,7 +4925,7 @@ define <16 x i8> @test_masked_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:1.00]
@@ -4939,14 +4939,14 @@ define <16 x i8> @test_masked_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %vec2,
define <16 x i8> @test_masked_z_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_16xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] sched: [7:1.00]
@@ -4958,7 +4958,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %mask
}
define <16 x i8> @test_masked_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %vec2, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_16xi8_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] sched: [5:1.00]
@@ -4966,7 +4966,7 @@ define <16 x i8> @test_masked_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi8_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] sched: [7:1.00]
@@ -4980,14 +4980,14 @@ define <16 x i8> @test_masked_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %vec2,
define <16 x i8> @test_masked_z_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_16xi8_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi8_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] sched: [7:1.00]
@@ -4999,7 +4999,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %mask
}
define <16 x i8> @test_masked_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %vec2, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_16xi8_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] sched: [5:1.00]
@@ -5007,7 +5007,7 @@ define <16 x i8> @test_masked_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi8_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] sched: [7:1.00]
@@ -5021,14 +5021,14 @@ define <16 x i8> @test_masked_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %vec2,
define <16 x i8> @test_masked_z_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_16xi8_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi8_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] sched: [7:1.00]
@@ -5040,12 +5040,12 @@ define <16 x i8> @test_masked_z_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %mask
}
define <16 x i8> @test_16xi8_perm_mask3(<16 x i8> %vec) {
; GENERIC-LABEL: test_16xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> <i32 1, i32 5, i32 8, i32 14, i32 1, i32 8, i32 11, i32 8, i32 13, i32 8, i32 15, i32 9, i32 9, i32 7, i32 9, i32 6>
@@ -5053,7 +5053,7 @@ define <16 x i8> @test_16xi8_perm_mask3(<16 x i8> %vec) {
}
define <16 x i8> @test_masked_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %vec2, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_16xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [5:1.00]
@@ -5061,7 +5061,7 @@ define <16 x i8> @test_masked_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:1.00]
@@ -5075,14 +5075,14 @@ define <16 x i8> @test_masked_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %vec2,
define <16 x i8> @test_masked_z_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_16xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] sched: [7:1.00]
@@ -5094,13 +5094,13 @@ define <16 x i8> @test_masked_z_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %mask
}
define <16 x i8> @test_16xi8_perm_mem_mask0(<16 x i8>* %vp) {
; GENERIC-LABEL: test_16xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5110,7 +5110,7 @@ define <16 x i8> @test_16xi8_perm_mem_mask0(<16 x i8>* %vp) {
}
define <16 x i8> @test_masked_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %vec2, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_16xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm2 # sched: [6:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm3, %xmm1, %k1
@@ -5118,7 +5118,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm2 # sched: [6:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm3, %xmm1, %k1 # sched: [3:1.00]
@@ -5133,7 +5133,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %ve
define <16 x i8> @test_masked_z_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_16xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm1 # sched: [6:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm2, %xmm0, %k1
@@ -5141,7 +5141,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm1 # sched: [6:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm2, %xmm0, %k1 # sched: [3:1.00]
@@ -5156,7 +5156,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %
define <16 x i8> @test_masked_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %vec2, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_16xi8_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm2 # sched: [6:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm3, %xmm1, %k1
@@ -5164,7 +5164,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi8_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm2 # sched: [6:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm3, %xmm1, %k1 # sched: [3:1.00]
@@ -5179,7 +5179,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %ve
define <16 x i8> @test_masked_z_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_16xi8_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm1 # sched: [6:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm2, %xmm0, %k1
@@ -5187,7 +5187,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi8_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm1 # sched: [6:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm2, %xmm0, %k1 # sched: [3:1.00]
@@ -5202,7 +5202,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %
define <16 x i8> @test_masked_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %vec2, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_16xi8_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm2 # sched: [6:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm3, %xmm1, %k1
@@ -5210,7 +5210,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi8_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm2 # sched: [6:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm3, %xmm1, %k1 # sched: [3:1.00]
@@ -5225,7 +5225,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %ve
define <16 x i8> @test_masked_z_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_16xi8_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm1 # sched: [6:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm2, %xmm0, %k1
@@ -5233,7 +5233,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi8_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm1 # sched: [6:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm2, %xmm0, %k1 # sched: [3:1.00]
@@ -5248,13 +5248,13 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %
define <16 x i8> @test_16xi8_perm_mem_mask3(<16 x i8>* %vp) {
; GENERIC-LABEL: test_16xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5264,7 +5264,7 @@ define <16 x i8> @test_16xi8_perm_mem_mask3(<16 x i8>* %vp) {
}
define <16 x i8> @test_masked_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %vec2, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_16xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm2 # sched: [6:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm3, %xmm1, %k1
@@ -5272,7 +5272,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm2 # sched: [6:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm3, %xmm1, %k1 # sched: [3:1.00]
@@ -5287,7 +5287,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %ve
define <16 x i8> @test_masked_z_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_16xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %xmm1 # sched: [6:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %xmm2, %xmm0, %k1
@@ -5295,7 +5295,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm1 # sched: [6:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %xmm2, %xmm0, %k1 # sched: [3:1.00]
@@ -5310,12 +5310,12 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %
define <32 x i8> @test_32xi8_perm_mask0(<32 x i8> %vec) {
; GENERIC-LABEL: test_32xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> <i32 8, i32 0, i32 1, i32 15, i32 3, i32 5, i32 11, i32 13, i32 14, i32 2, i32 10, i32 15, i32 0, i32 10, i32 13, i32 5, i32 20, i32 25, i32 23, i32 18, i32 23, i32 22, i32 25, i32 24, i32 20, i32 21, i32 29, i32 20, i32 24, i32 16, i32 27, i32 21>
@@ -5323,7 +5323,7 @@ define <32 x i8> @test_32xi8_perm_mask0(<32 x i8> %vec) {
}
define <32 x i8> @test_masked_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %vec2, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_32xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] sched: [5:1.00]
@@ -5331,7 +5331,7 @@ define <32 x i8> @test_masked_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] sched: [8:1.00]
@@ -5345,14 +5345,14 @@ define <32 x i8> @test_masked_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %vec2,
define <32 x i8> @test_masked_z_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_32xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] sched: [8:1.00]
@@ -5364,7 +5364,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %mask
}
define <32 x i8> @test_masked_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %vec2, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_32xi8_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[0,4,3,15,5,4,5,15,10,9,11,6,6,10,0,3,21,19,26,22,30,25,22,22,27,22,26,16,23,20,18,24] sched: [5:1.00]
@@ -5372,7 +5372,7 @@ define <32 x i8> @test_masked_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi8_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[0,4,3,15,5,4,5,15,10,9,11,6,6,10,0,3,21,19,26,22,30,25,22,22,27,22,26,16,23,20,18,24] sched: [8:1.00]
@@ -5386,14 +5386,14 @@ define <32 x i8> @test_masked_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %vec2,
define <32 x i8> @test_masked_z_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_32xi8_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[0,4,3,15,5,4,5,15,10,9,11,6,6,10,0,3,21,19,26,22,30,25,22,22,27,22,26,16,23,20,18,24] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi8_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[0,4,3,15,5,4,5,15,10,9,11,6,6,10,0,3,21,19,26,22,30,25,22,22,27,22,26,16,23,20,18,24] sched: [8:1.00]
@@ -5405,7 +5405,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %mask
}
define <32 x i8> @test_masked_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %vec2, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_32xi8_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[7,8,12,14,7,4,7,12,14,12,3,15,10,1,11,15,22,26,21,19,27,16,29,24,17,17,26,29,20,31,17,29] sched: [5:1.00]
@@ -5413,7 +5413,7 @@ define <32 x i8> @test_masked_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi8_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[7,8,12,14,7,4,7,12,14,12,3,15,10,1,11,15,22,26,21,19,27,16,29,24,17,17,26,29,20,31,17,29] sched: [8:1.00]
@@ -5427,14 +5427,14 @@ define <32 x i8> @test_masked_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %vec2,
define <32 x i8> @test_masked_z_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_32xi8_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[7,8,12,14,7,4,7,12,14,12,3,15,10,1,11,15,22,26,21,19,27,16,29,24,17,17,26,29,20,31,17,29] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi8_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[7,8,12,14,7,4,7,12,14,12,3,15,10,1,11,15,22,26,21,19,27,16,29,24,17,17,26,29,20,31,17,29] sched: [8:1.00]
@@ -5446,12 +5446,12 @@ define <32 x i8> @test_masked_z_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %mask
}
define <32 x i8> @test_32xi8_perm_mask3(<32 x i8> %vec) {
; GENERIC-LABEL: test_32xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> <i32 6, i32 1, i32 4, i32 7, i32 12, i32 13, i32 2, i32 8, i32 10, i32 5, i32 13, i32 4, i32 0, i32 0, i32 10, i32 8, i32 31, i32 31, i32 30, i32 16, i32 27, i32 27, i32 26, i32 27, i32 30, i32 26, i32 21, i32 24, i32 19, i32 25, i32 16, i32 18>
@@ -5459,7 +5459,7 @@ define <32 x i8> @test_32xi8_perm_mask3(<32 x i8> %vec) {
}
define <32 x i8> @test_masked_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %vec2, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_32xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] sched: [5:1.00]
@@ -5467,7 +5467,7 @@ define <32 x i8> @test_masked_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] sched: [8:1.00]
@@ -5481,14 +5481,14 @@ define <32 x i8> @test_masked_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %vec2,
define <32 x i8> @test_masked_z_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_32xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] sched: [8:1.00]
@@ -5500,13 +5500,13 @@ define <32 x i8> @test_masked_z_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %mask
}
define <32 x i8> @test_32xi8_perm_mem_mask0(<32 x i8>* %vp) {
; GENERIC-LABEL: test_32xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[9,0,2,15,4,6,8,4,7,3,0,2,8,1,6,5,22,17,30,23,29,31,21,23,27,22,20,27,30,30,26,22] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[9,0,2,15,4,6,8,4,7,3,0,2,8,1,6,5,22,17,30,23,29,31,21,23,27,22,20,27,30,30,26,22] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5516,7 +5516,7 @@ define <32 x i8> @test_32xi8_perm_mem_mask0(<32 x i8>* %vp) {
}
define <32 x i8> @test_masked_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %vec2, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_32xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm2 # sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm3, %ymm1, %k1
@@ -5524,7 +5524,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm2 # sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -5539,7 +5539,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %ve
define <32 x i8> @test_masked_z_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_32xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm1 # sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm2, %ymm0, %k1
@@ -5547,7 +5547,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm1 # sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -5562,7 +5562,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %
define <32 x i8> @test_masked_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %vec2, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_32xi8_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm2 # sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm3, %ymm1, %k1
@@ -5570,7 +5570,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi8_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm2 # sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -5585,7 +5585,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %ve
define <32 x i8> @test_masked_z_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_32xi8_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm1 # sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm2, %ymm0, %k1
@@ -5593,7 +5593,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi8_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm1 # sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -5608,7 +5608,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %
define <32 x i8> @test_masked_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %vec2, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_32xi8_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm2 # sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm3, %ymm1, %k1
@@ -5616,7 +5616,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi8_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm2 # sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -5631,7 +5631,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %ve
define <32 x i8> @test_masked_z_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_32xi8_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm1 # sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm2, %ymm0, %k1
@@ -5639,7 +5639,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi8_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm1 # sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -5654,13 +5654,13 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %
define <32 x i8> @test_32xi8_perm_mem_mask3(<32 x i8>* %vp) {
; GENERIC-LABEL: test_32xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm0 # sched: [7:0.50]
; GENERIC-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,1,13,0,3,0,0,13,5,2,2,10,15,8,14,8,25,26,28,28,31,27,30,19,24,25,29,23,28,22,25,29] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,1,13,0,3,0,0,13,5,2,2,10,15,8,14,8,25,26,28,28,31,27,30,19,24,25,29,23,28,22,25,29] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5670,7 +5670,7 @@ define <32 x i8> @test_32xi8_perm_mem_mask3(<32 x i8>* %vp) {
}
define <32 x i8> @test_masked_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %vec2, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_32xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm2 # sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm3, %ymm1, %k1
@@ -5678,7 +5678,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm2 # sched: [7:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm3, %ymm1, %k1 # sched: [3:1.00]
@@ -5693,7 +5693,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %ve
define <32 x i8> @test_masked_z_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_32xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa (%rdi), %ymm1 # sched: [7:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %ymm2, %ymm0, %k1
@@ -5701,7 +5701,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %ymm1 # sched: [7:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %ymm2, %ymm0, %k1 # sched: [3:1.00]
@@ -5716,12 +5716,12 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %
define <64 x i8> @test_64xi8_perm_mask0(<64 x i8> %vec) {
; GENERIC-LABEL: test_64xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_64xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> <i32 8, i32 4, i32 1, i32 13, i32 15, i32 4, i32 6, i32 12, i32 0, i32 10, i32 2, i32 4, i32 13, i32 0, i32 0, i32 6, i32 23, i32 29, i32 27, i32 26, i32 18, i32 31, i32 22, i32 25, i32 22, i32 16, i32 23, i32 18, i32 16, i32 25, i32 26, i32 17, i32 40, i32 37, i32 38, i32 44, i32 39, i32 46, i32 41, i32 39, i32 42, i32 37, i32 33, i32 42, i32 41, i32 44, i32 34, i32 46, i32 60, i32 62, i32 61, i32 58, i32 60, i32 56, i32 60, i32 51, i32 60, i32 55, i32 60, i32 55, i32 60, i32 49, i32 48, i32 62>
@@ -5729,7 +5729,7 @@ define <64 x i8> @test_64xi8_perm_mask0(<64 x i8> %vec) {
}
define <64 x i8> @test_masked_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %vec2, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_64xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] sched: [5:1.00]
@@ -5737,7 +5737,7 @@ define <64 x i8> @test_masked_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_64xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] sched: [8:1.00]
@@ -5751,14 +5751,14 @@ define <64 x i8> @test_masked_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %vec2,
define <64 x i8> @test_masked_z_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_64xi8_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_64xi8_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] sched: [8:1.00]
@@ -5770,7 +5770,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %mask
}
define <64 x i8> @test_masked_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %vec2, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_64xi8_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[7,14,15,10,9,3,1,13,14,12,11,6,4,1,6,9,30,30,22,17,28,27,16,23,26,16,30,31,27,17,17,21,32,37,32,47,45,33,46,35,35,42,47,33,32,37,32,41,61,50,49,53,63,50,63,53,55,52,62,63,58,50,63,49] sched: [5:1.00]
@@ -5778,7 +5778,7 @@ define <64 x i8> @test_masked_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_64xi8_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[7,14,15,10,9,3,1,13,14,12,11,6,4,1,6,9,30,30,22,17,28,27,16,23,26,16,30,31,27,17,17,21,32,37,32,47,45,33,46,35,35,42,47,33,32,37,32,41,61,50,49,53,63,50,63,53,55,52,62,63,58,50,63,49] sched: [8:1.00]
@@ -5792,14 +5792,14 @@ define <64 x i8> @test_masked_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %vec2,
define <64 x i8> @test_masked_z_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_64xi8_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[7,14,15,10,9,3,1,13,14,12,11,6,4,1,6,9,30,30,22,17,28,27,16,23,26,16,30,31,27,17,17,21,32,37,32,47,45,33,46,35,35,42,47,33,32,37,32,41,61,50,49,53,63,50,63,53,55,52,62,63,58,50,63,49] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_64xi8_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[7,14,15,10,9,3,1,13,14,12,11,6,4,1,6,9,30,30,22,17,28,27,16,23,26,16,30,31,27,17,17,21,32,37,32,47,45,33,46,35,35,42,47,33,32,37,32,41,61,50,49,53,63,50,63,53,55,52,62,63,58,50,63,49] sched: [8:1.00]
@@ -5811,7 +5811,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %mask
}
define <64 x i8> @test_masked_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %vec2, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_64xi8_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[9,2,14,15,12,5,3,12,4,6,0,2,0,1,1,6,24,27,18,22,26,17,23,21,31,16,22,22,27,21,19,20,39,47,44,36,40,43,44,39,38,44,38,35,39,46,34,39,58,55,51,48,59,57,48,52,60,58,56,50,59,55,58,60] sched: [5:1.00]
@@ -5819,7 +5819,7 @@ define <64 x i8> @test_masked_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_64xi8_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[9,2,14,15,12,5,3,12,4,6,0,2,0,1,1,6,24,27,18,22,26,17,23,21,31,16,22,22,27,21,19,20,39,47,44,36,40,43,44,39,38,44,38,35,39,46,34,39,58,55,51,48,59,57,48,52,60,58,56,50,59,55,58,60] sched: [8:1.00]
@@ -5833,14 +5833,14 @@ define <64 x i8> @test_masked_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %vec2,
define <64 x i8> @test_masked_z_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_64xi8_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[9,2,14,15,12,5,3,12,4,6,0,2,0,1,1,6,24,27,18,22,26,17,23,21,31,16,22,22,27,21,19,20,39,47,44,36,40,43,44,39,38,44,38,35,39,46,34,39,58,55,51,48,59,57,48,52,60,58,56,50,59,55,58,60] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_64xi8_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[9,2,14,15,12,5,3,12,4,6,0,2,0,1,1,6,24,27,18,22,26,17,23,21,31,16,22,22,27,21,19,20,39,47,44,36,40,43,44,39,38,44,38,35,39,46,34,39,58,55,51,48,59,57,48,52,60,58,56,50,59,55,58,60] sched: [8:1.00]
@@ -5852,12 +5852,12 @@ define <64 x i8> @test_masked_z_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %mask
}
define <64 x i8> @test_64xi8_perm_mask3(<64 x i8> %vec) {
; GENERIC-LABEL: test_64xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_64xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> <i32 3, i32 12, i32 4, i32 15, i32 1, i32 14, i32 0, i32 4, i32 8, i32 9, i32 6, i32 1, i32 4, i32 4, i32 12, i32 14, i32 25, i32 16, i32 28, i32 20, i32 21, i32 24, i32 19, i32 30, i32 18, i32 22, i32 20, i32 24, i32 25, i32 26, i32 24, i32 22, i32 42, i32 38, i32 44, i32 44, i32 36, i32 37, i32 42, i32 34, i32 43, i32 38, i32 41, i32 34, i32 42, i32 37, i32 39, i32 38, i32 55, i32 59, i32 53, i32 58, i32 48, i32 52, i32 59, i32 48, i32 57, i32 48, i32 55, i32 62, i32 48, i32 56, i32 49, i32 61>
@@ -5865,7 +5865,7 @@ define <64 x i8> @test_64xi8_perm_mask3(<64 x i8> %vec) {
}
define <64 x i8> @test_masked_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %vec2, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_64xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] sched: [5:1.00]
@@ -5873,7 +5873,7 @@ define <64 x i8> @test_masked_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_64xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] sched: [8:1.00]
@@ -5887,14 +5887,14 @@ define <64 x i8> @test_masked_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %vec2,
define <64 x i8> @test_masked_z_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_64xi8_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_64xi8_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] sched: [8:1.00]
@@ -5906,13 +5906,13 @@ define <64 x i8> @test_masked_z_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %mask
}
define <64 x i8> @test_64xi8_perm_mem_mask0(<64 x i8>* %vp) {
; GENERIC-LABEL: test_64xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0,9,15,13,11,11,3,12,4,1,7,5,2,6,14,6,23,27,24,18,30,23,28,22,28,22,19,19,31,25,16,22,35,33,34,32,42,34,41,41,43,40,36,46,37,39,42,40,63,63,62,62,57,55,59,51,52,48,50,48,58,50,60,58] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_64xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0,9,15,13,11,11,3,12,4,1,7,5,2,6,14,6,23,27,24,18,30,23,28,22,28,22,19,19,31,25,16,22,35,33,34,32,42,34,41,41,43,40,36,46,37,39,42,40,63,63,62,62,57,55,59,51,52,48,50,48,58,50,60,58] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -5922,7 +5922,7 @@ define <64 x i8> @test_64xi8_perm_mem_mask0(<64 x i8>* %vp) {
}
define <64 x i8> @test_masked_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %vec2, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_64xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm2 # sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm3, %zmm1, %k1
@@ -5930,7 +5930,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_64xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm2 # sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -5945,7 +5945,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %ve
define <64 x i8> @test_masked_z_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_64xi8_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm2, %zmm0, %k1
@@ -5953,7 +5953,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_64xi8_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm1 # sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -5968,7 +5968,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %
define <64 x i8> @test_masked_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %vec2, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_64xi8_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm2 # sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm3, %zmm1, %k1
@@ -5976,7 +5976,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_64xi8_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm2 # sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -5991,7 +5991,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %ve
define <64 x i8> @test_masked_z_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_64xi8_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm2, %zmm0, %k1
@@ -5999,7 +5999,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_64xi8_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm1 # sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -6014,7 +6014,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %
define <64 x i8> @test_masked_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %vec2, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_64xi8_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm2 # sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm3, %zmm1, %k1
@@ -6022,7 +6022,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_64xi8_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm2 # sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -6037,7 +6037,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %ve
define <64 x i8> @test_masked_z_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_64xi8_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm2, %zmm0, %k1
@@ -6045,7 +6045,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_64xi8_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm1 # sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -6060,13 +6060,13 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %
define <64 x i8> @test_64xi8_perm_mem_mask3(<64 x i8>* %vp) {
; GENERIC-LABEL: test_64xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm0 # sched: [4:0.50]
; GENERIC-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[4,9,11,13,12,6,0,0,11,15,5,7,11,10,4,10,20,21,24,27,18,16,26,16,16,19,26,17,16,31,22,30,35,38,37,34,37,47,43,38,38,36,40,43,42,39,32,46,54,54,48,50,61,56,59,50,53,61,61,51,48,60,50,60] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_64xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm0 # sched: [8:0.50]
; SKX-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[4,9,11,13,12,6,0,0,11,15,5,7,11,10,4,10,20,21,24,27,18,16,26,16,16,19,26,17,16,31,22,30,35,38,37,34,37,47,43,38,38,36,40,43,42,39,32,46,54,54,48,50,61,56,59,50,53,61,61,51,48,60,50,60] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -6076,7 +6076,7 @@ define <64 x i8> @test_64xi8_perm_mem_mask3(<64 x i8>* %vp) {
}
define <64 x i8> @test_masked_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %vec2, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_64xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm2 # sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm3, %zmm1, %k1
@@ -6084,7 +6084,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_64xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm2 # sched: [8:0.50]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -6099,7 +6099,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %ve
define <64 x i8> @test_masked_z_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %mask) {
; GENERIC-LABEL: test_masked_z_64xi8_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovdqa64 (%rdi), %zmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqb %zmm2, %zmm0, %k1
@@ -6107,7 +6107,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_64xi8_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa64 (%rdi), %zmm1 # sched: [8:0.50]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqb %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -6122,12 +6122,12 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %
define <8 x i16> @test_8xi16_perm_high_mask0(<8 x i16> %vec) {
; GENERIC-LABEL: test_8xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,6] sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,6] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 7, i32 6>
@@ -6135,7 +6135,7 @@ define <8 x i16> @test_8xi16_perm_high_mask0(<8 x i16> %vec) {
}
define <8 x i16> @test_masked_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,6,5,7,6] sched: [1:1.00]
@@ -6143,7 +6143,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16> %v
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,6,5,7,6] sched: [1:1.00]
@@ -6157,14 +6157,14 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16> %v
define <8 x i16> @test_masked_z_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,6,5,7,6] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,6,5,7,6] sched: [1:1.00]
@@ -6176,7 +6176,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16>
}
define <8 x i16> @test_masked_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[0,3,0,0,4,5,6,7] sched: [1:1.00]
@@ -6184,7 +6184,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[0,3,0,0,4,5,6,7] sched: [1:1.00]
@@ -6198,14 +6198,14 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %ve
define <8 x i16> @test_masked_z_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,3,0,0,4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,3,0,0,4,5,6,7] sched: [1:1.00]
@@ -6217,7 +6217,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %
}
define <8 x i16> @test_masked_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,5,4,4,5] sched: [1:1.00]
@@ -6225,7 +6225,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16> %v
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,5,4,4,5] sched: [1:1.00]
@@ -6239,14 +6239,14 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16> %v
define <8 x i16> @test_masked_z_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,5,4,4,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,5,4,4,5] sched: [1:1.00]
@@ -6258,12 +6258,12 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16>
}
define <8 x i16> @test_8xi16_perm_low_mask3(<8 x i16> %vec) {
; GENERIC-LABEL: test_8xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,1,1,4,5,6,7] sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,1,1,4,5,6,7] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> <i32 2, i32 1, i32 1, i32 1, i32 4, i32 5, i32 6, i32 7>
@@ -6271,7 +6271,7 @@ define <8 x i16> @test_8xi16_perm_low_mask3(<8 x i16> %vec) {
}
define <8 x i16> @test_masked_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[2,1,1,1,4,5,6,7] sched: [1:1.00]
@@ -6279,7 +6279,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[2,1,1,1,4,5,6,7] sched: [1:1.00]
@@ -6293,14 +6293,14 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %ve
define <8 x i16> @test_masked_z_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1,1,1,4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1,1,1,4,5,6,7] sched: [1:1.00]
@@ -6312,7 +6312,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %
}
define <8 x i16> @test_masked_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_high_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,5,5,7,6] sched: [1:1.00]
@@ -6320,7 +6320,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16> %v
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_high_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,5,5,7,6] sched: [1:1.00]
@@ -6334,14 +6334,14 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16> %v
define <8 x i16> @test_masked_z_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_high_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,5,5,7,6] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_high_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,5,5,7,6] sched: [1:1.00]
@@ -6353,7 +6353,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16>
}
define <8 x i16> @test_masked_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_low_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[3,3,2,1,4,5,6,7] sched: [1:1.00]
@@ -6361,7 +6361,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_low_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[3,3,2,1,4,5,6,7] sched: [1:1.00]
@@ -6375,14 +6375,14 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %ve
define <8 x i16> @test_masked_z_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_low_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3,2,1,4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_low_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3,2,1,4,5,6,7] sched: [1:1.00]
@@ -6394,12 +6394,12 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %
}
define <8 x i16> @test_8xi16_perm_high_mask6(<8 x i16> %vec) {
; GENERIC-LABEL: test_8xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,5] sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,5] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 6, i32 5>
@@ -6407,7 +6407,7 @@ define <8 x i16> @test_8xi16_perm_high_mask6(<8 x i16> %vec) {
}
define <8 x i16> @test_masked_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,6,5,6,5] sched: [1:1.00]
@@ -6415,7 +6415,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16> %v
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,6,5,6,5] sched: [1:1.00]
@@ -6429,14 +6429,14 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16> %v
define <8 x i16> @test_masked_z_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,6,5,6,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,6,5,6,5] sched: [1:1.00]
@@ -6448,7 +6448,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16>
}
define <8 x i16> @test_masked_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_low_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[1,0,2,0,4,5,6,7] sched: [1:1.00]
@@ -6456,7 +6456,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_low_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[1,0,2,0,4,5,6,7] sched: [1:1.00]
@@ -6470,14 +6470,14 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %ve
define <8 x i16> @test_masked_z_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_low_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,2,0,4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_low_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,2,0,4,5,6,7] sched: [1:1.00]
@@ -6489,12 +6489,12 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %
}
define <8 x i16> @test_8xi16_perm_high_mem_mask0(<8 x i16>* %vp) {
; GENERIC-LABEL: test_8xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,7,4,6] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,7,4,6] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <8 x i16>, <8 x i16>* %vp
@@ -6503,14 +6503,14 @@ define <8 x i16> @test_8xi16_perm_high_mem_mask0(<8 x i16>* %vp) {
}
define <8 x i16> @test_masked_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,7,4,6] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,7,4,6] sched: [7:1.00]
@@ -6524,14 +6524,14 @@ define <8 x i16> @test_masked_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i16
define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,7,4,6] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,7,4,6] sched: [7:1.00]
@@ -6545,14 +6545,14 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i
define <8 x i16> @test_masked_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[1,3,3,2,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[1,3,3,2,4,5,6,7] sched: [7:1.00]
@@ -6566,14 +6566,14 @@ define <8 x i16> @test_masked_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i16>
define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[1,3,3,2,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[1,3,3,2,4,5,6,7] sched: [7:1.00]
@@ -6587,14 +6587,14 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i1
define <8 x i16> @test_masked_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,6,6,5,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,6,6,5,7] sched: [7:1.00]
@@ -6608,14 +6608,14 @@ define <8 x i16> @test_masked_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i16
define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,6,6,5,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,6,6,5,7] sched: [7:1.00]
@@ -6629,12 +6629,12 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i
define <8 x i16> @test_8xi16_perm_low_mem_mask3(<8 x i16>* %vp) {
; GENERIC-LABEL: test_8xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 = mem[3,1,2,0,4,5,6,7] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 = mem[3,1,2,0,4,5,6,7] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <8 x i16>, <8 x i16>* %vp
@@ -6643,14 +6643,14 @@ define <8 x i16> @test_8xi16_perm_low_mem_mask3(<8 x i16>* %vp) {
}
define <8 x i16> @test_masked_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[3,1,2,0,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[3,1,2,0,4,5,6,7] sched: [7:1.00]
@@ -6664,14 +6664,14 @@ define <8 x i16> @test_masked_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i16>
define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[3,1,2,0,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[3,1,2,0,4,5,6,7] sched: [7:1.00]
@@ -6685,14 +6685,14 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i1
define <8 x i16> @test_masked_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_high_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,6,7,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_high_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,6,7,5] sched: [7:1.00]
@@ -6706,14 +6706,14 @@ define <8 x i16> @test_masked_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i16
define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_high_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,6,7,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_high_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,6,7,5] sched: [7:1.00]
@@ -6727,14 +6727,14 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i
define <8 x i16> @test_masked_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_low_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[2,1,3,2,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_low_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[2,1,3,2,4,5,6,7] sched: [7:1.00]
@@ -6748,14 +6748,14 @@ define <8 x i16> @test_masked_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i16>
define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_low_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[2,1,3,2,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_low_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[2,1,3,2,4,5,6,7] sched: [7:1.00]
@@ -6769,12 +6769,12 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i1
define <8 x i16> @test_8xi16_perm_high_mem_mask6(<8 x i16>* %vp) {
; GENERIC-LABEL: test_8xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,4,4,4] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,4,4,4] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <8 x i16>, <8 x i16>* %vp
@@ -6783,14 +6783,14 @@ define <8 x i16> @test_8xi16_perm_high_mem_mask6(<8 x i16>* %vp) {
}
define <8 x i16> @test_masked_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,4,4,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,4,4,4] sched: [7:1.00]
@@ -6804,14 +6804,14 @@ define <8 x i16> @test_masked_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i16
define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,4,4,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,4,4,4] sched: [7:1.00]
@@ -6825,14 +6825,14 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i
define <8 x i16> @test_masked_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_8xi16_perm_low_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[0,3,3,1,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_8xi16_perm_low_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[0,3,3,1,4,5,6,7] sched: [7:1.00]
@@ -6846,14 +6846,14 @@ define <8 x i16> @test_masked_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i16>
define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_8xi16_perm_low_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[0,3,3,1,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_8xi16_perm_low_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[0,3,3,1,4,5,6,7] sched: [7:1.00]
@@ -6867,12 +6867,12 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i1
define <16 x i16> @test_16xi16_perm_high_mask0(<16 x i16> %vec) {
; GENERIC-LABEL: test_16xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 6, i32 4, i32 8, i32 9, i32 10, i32 11, i32 12, i32 12, i32 14, i32 12>
@@ -6880,7 +6880,7 @@ define <16 x i16> @test_16xi16_perm_high_mask0(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] sched: [1:1.00]
@@ -6888,7 +6888,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] sched: [1:1.00]
@@ -6902,14 +6902,14 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i16
define <16 x i16> @test_masked_z_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] sched: [1:1.00]
@@ -6921,7 +6921,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i
}
define <16 x i16> @test_masked_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[0,2,3,2,4,5,6,7,8,10,11,10,12,13,14,15] sched: [1:1.00]
@@ -6929,7 +6929,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[0,2,3,2,4,5,6,7,8,10,11,10,12,13,14,15] sched: [1:1.00]
@@ -6943,14 +6943,14 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,2,3,2,4,5,6,7,8,10,11,10,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,2,3,2,4,5,6,7,8,10,11,10,12,13,14,15] sched: [1:1.00]
@@ -6962,7 +6962,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i1
}
define <16 x i16> @test_masked_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,7,5,5,5,8,9,10,11,15,13,13,13] sched: [1:1.00]
@@ -6970,7 +6970,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,7,5,5,5,8,9,10,11,15,13,13,13] sched: [1:1.00]
@@ -6984,14 +6984,14 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i16
define <16 x i16> @test_masked_z_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,7,5,5,5,8,9,10,11,15,13,13,13] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,7,5,5,5,8,9,10,11,15,13,13,13] sched: [1:1.00]
@@ -7003,12 +7003,12 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i
}
define <16 x i16> @test_16xi16_perm_low_mask3(<16 x i16> %vec) {
; GENERIC-LABEL: test_16xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 11, i32 10, i32 12, i32 13, i32 14, i32 15>
@@ -7016,7 +7016,7 @@ define <16 x i16> @test_16xi16_perm_low_mask3(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] sched: [1:1.00]
@@ -7024,7 +7024,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] sched: [1:1.00]
@@ -7038,14 +7038,14 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] sched: [1:1.00]
@@ -7057,7 +7057,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i1
}
define <16 x i16> @test_masked_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_high_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15] sched: [1:1.00]
@@ -7065,7 +7065,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_high_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15] sched: [1:1.00]
@@ -7079,14 +7079,14 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i16
define <16 x i16> @test_masked_z_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_high_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_high_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15] sched: [1:1.00]
@@ -7098,7 +7098,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i
}
define <16 x i16> @test_masked_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_low_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,0,4,5,6,7,11,11,11,8,12,13,14,15] sched: [1:1.00]
@@ -7106,7 +7106,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_low_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,0,4,5,6,7,11,11,11,8,12,13,14,15] sched: [1:1.00]
@@ -7120,14 +7120,14 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_low_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,0,4,5,6,7,11,11,11,8,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_low_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,0,4,5,6,7,11,11,11,8,12,13,14,15] sched: [1:1.00]
@@ -7139,12 +7139,12 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i1
}
define <16 x i16> @test_16xi16_perm_high_mask6(<16 x i16> %vec) {
; GENERIC-LABEL: test_16xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 7, i32 6, i32 5, i32 8, i32 9, i32 10, i32 11, i32 14, i32 15, i32 14, i32 13>
@@ -7152,7 +7152,7 @@ define <16 x i16> @test_16xi16_perm_high_mask6(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] sched: [1:1.00]
@@ -7160,7 +7160,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] sched: [1:1.00]
@@ -7174,14 +7174,14 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i16
define <16 x i16> @test_masked_z_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] sched: [1:1.00]
@@ -7193,7 +7193,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i
}
define <16 x i16> @test_masked_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_low_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,2,1,2,4,5,6,7,11,10,9,10,12,13,14,15] sched: [1:1.00]
@@ -7201,7 +7201,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_low_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,2,1,2,4,5,6,7,11,10,9,10,12,13,14,15] sched: [1:1.00]
@@ -7215,14 +7215,14 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_low_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,1,2,4,5,6,7,11,10,9,10,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_low_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,1,2,4,5,6,7,11,10,9,10,12,13,14,15] sched: [1:1.00]
@@ -7234,12 +7234,12 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i1
}
define <16 x i16> @test_16xi16_perm_high_mem_mask0(<16 x i16>* %vp) {
; GENERIC-LABEL: test_16xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <16 x i16>, <16 x i16>* %vp
@@ -7248,14 +7248,14 @@ define <16 x i16> @test_16xi16_perm_high_mem_mask0(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] sched: [6:1.00]
@@ -7269,14 +7269,14 @@ define <16 x i16> @test_masked_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] sched: [6:1.00]
@@ -7290,14 +7290,14 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[1,3,3,0,4,5,6,7,9,11,11,8,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[1,3,3,0,4,5,6,7,9,11,11,8,12,13,14,15] sched: [6:1.00]
@@ -7311,14 +7311,14 @@ define <16 x i16> @test_masked_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[1,3,3,0,4,5,6,7,9,11,11,8,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[1,3,3,0,4,5,6,7,9,11,11,8,12,13,14,15] sched: [6:1.00]
@@ -7332,14 +7332,14 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,5,6,5,6,8,9,10,11,13,14,13,14] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,5,6,5,6,8,9,10,11,13,14,13,14] sched: [6:1.00]
@@ -7353,14 +7353,14 @@ define <16 x i16> @test_masked_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,5,6,5,6,8,9,10,11,13,14,13,14] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,5,6,5,6,8,9,10,11,13,14,13,14] sched: [6:1.00]
@@ -7374,12 +7374,12 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16
define <16 x i16> @test_16xi16_perm_low_mem_mask3(<16 x i16>* %vp) {
; GENERIC-LABEL: test_16xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <16 x i16>, <16 x i16>* %vp
@@ -7388,14 +7388,14 @@ define <16 x i16> @test_16xi16_perm_low_mem_mask3(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] sched: [6:1.00]
@@ -7409,14 +7409,14 @@ define <16 x i16> @test_masked_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] sched: [6:1.00]
@@ -7430,14 +7430,14 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_high_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,7,7,6,7,8,9,10,11,15,15,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_high_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,7,7,6,7,8,9,10,11,15,15,14,15] sched: [6:1.00]
@@ -7451,14 +7451,14 @@ define <16 x i16> @test_masked_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_high_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,7,7,6,7,8,9,10,11,15,15,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_high_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,7,7,6,7,8,9,10,11,15,15,14,15] sched: [6:1.00]
@@ -7472,14 +7472,14 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_low_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[1,3,3,2,4,5,6,7,9,11,11,10,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_low_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[1,3,3,2,4,5,6,7,9,11,11,10,12,13,14,15] sched: [6:1.00]
@@ -7493,14 +7493,14 @@ define <16 x i16> @test_masked_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_low_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[1,3,3,2,4,5,6,7,9,11,11,10,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_low_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[1,3,3,2,4,5,6,7,9,11,11,10,12,13,14,15] sched: [6:1.00]
@@ -7514,12 +7514,12 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16
define <16 x i16> @test_16xi16_perm_high_mem_mask6(<16 x i16>* %vp) {
; GENERIC-LABEL: test_16xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <16 x i16>, <16 x i16>* %vp
@@ -7528,14 +7528,14 @@ define <16 x i16> @test_16xi16_perm_high_mem_mask6(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] sched: [6:1.00]
@@ -7549,14 +7549,14 @@ define <16 x i16> @test_masked_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] sched: [6:1.00]
@@ -7570,14 +7570,14 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_16xi16_perm_low_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[3,1,3,2,4,5,6,7,11,9,11,10,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_16xi16_perm_low_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[3,1,3,2,4,5,6,7,11,9,11,10,12,13,14,15] sched: [6:1.00]
@@ -7591,14 +7591,14 @@ define <16 x i16> @test_masked_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_16xi16_perm_low_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[3,1,3,2,4,5,6,7,11,9,11,10,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_16xi16_perm_low_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[3,1,3,2,4,5,6,7,11,9,11,10,12,13,14,15] sched: [6:1.00]
@@ -7612,12 +7612,12 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16
define <32 x i16> @test_32xi16_perm_high_mask0(<32 x i16> %vec) {
; GENERIC-LABEL: test_32xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 4, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 12, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 20, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 28>
@@ -7625,7 +7625,7 @@ define <32 x i16> @test_32xi16_perm_high_mask0(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] sched: [1:1.00]
@@ -7633,7 +7633,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] sched: [1:1.00]
@@ -7647,14 +7647,14 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i16
define <32 x i16> @test_masked_z_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] sched: [1:1.00]
@@ -7666,7 +7666,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i
}
define <32 x i16> @test_masked_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[2,1,0,0,4,5,6,7,10,9,8,8,12,13,14,15,18,17,16,16,20,21,22,23,26,25,24,24,28,29,30,31] sched: [1:1.00]
@@ -7674,7 +7674,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[2,1,0,0,4,5,6,7,10,9,8,8,12,13,14,15,18,17,16,16,20,21,22,23,26,25,24,24,28,29,30,31] sched: [1:1.00]
@@ -7688,14 +7688,14 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,0,0,4,5,6,7,10,9,8,8,12,13,14,15,18,17,16,16,20,21,22,23,26,25,24,24,28,29,30,31] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,0,0,4,5,6,7,10,9,8,8,12,13,14,15,18,17,16,16,20,21,22,23,26,25,24,24,28,29,30,31] sched: [1:1.00]
@@ -7707,7 +7707,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i1
}
define <32 x i16> @test_masked_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,6,4,7,8,9,10,11,12,14,12,15,16,17,18,19,20,22,20,23,24,25,26,27,28,30,28,31] sched: [1:1.00]
@@ -7715,7 +7715,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,6,4,7,8,9,10,11,12,14,12,15,16,17,18,19,20,22,20,23,24,25,26,27,28,30,28,31] sched: [1:1.00]
@@ -7729,14 +7729,14 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i16
define <32 x i16> @test_masked_z_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,6,4,7,8,9,10,11,12,14,12,15,16,17,18,19,20,22,20,23,24,25,26,27,28,30,28,31] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,6,4,7,8,9,10,11,12,14,12,15,16,17,18,19,20,22,20,23,24,25,26,27,28,30,28,31] sched: [1:1.00]
@@ -7748,12 +7748,12 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i
}
define <32 x i16> @test_32xi16_perm_low_mask3(<32 x i16> %vec) {
; GENERIC-LABEL: test_32xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> <i32 3, i32 3, i32 1, i32 3, i32 4, i32 5, i32 6, i32 7, i32 11, i32 11, i32 9, i32 11, i32 12, i32 13, i32 14, i32 15, i32 19, i32 19, i32 17, i32 19, i32 20, i32 21, i32 22, i32 23, i32 27, i32 27, i32 25, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -7761,7 +7761,7 @@ define <32 x i16> @test_32xi16_perm_low_mask3(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] sched: [1:1.00]
@@ -7769,7 +7769,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] sched: [1:1.00]
@@ -7783,14 +7783,14 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] sched: [1:1.00]
@@ -7802,7 +7802,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i1
}
define <32 x i16> @test_masked_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_high_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,7,7,5,6,8,9,10,11,15,15,13,14,16,17,18,19,23,23,21,22,24,25,26,27,31,31,29,30] sched: [1:1.00]
@@ -7810,7 +7810,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_high_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,7,7,5,6,8,9,10,11,15,15,13,14,16,17,18,19,23,23,21,22,24,25,26,27,31,31,29,30] sched: [1:1.00]
@@ -7824,14 +7824,14 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i16
define <32 x i16> @test_masked_z_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_high_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,7,7,5,6,8,9,10,11,15,15,13,14,16,17,18,19,23,23,21,22,24,25,26,27,31,31,29,30] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_high_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,7,7,5,6,8,9,10,11,15,15,13,14,16,17,18,19,23,23,21,22,24,25,26,27,31,31,29,30] sched: [1:1.00]
@@ -7843,7 +7843,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i
}
define <32 x i16> @test_masked_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_low_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[2,1,1,0,4,5,6,7,10,9,9,8,12,13,14,15,18,17,17,16,20,21,22,23,26,25,25,24,28,29,30,31] sched: [1:1.00]
@@ -7851,7 +7851,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_low_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[2,1,1,0,4,5,6,7,10,9,9,8,12,13,14,15,18,17,17,16,20,21,22,23,26,25,25,24,28,29,30,31] sched: [1:1.00]
@@ -7865,14 +7865,14 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_low_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,1,0,4,5,6,7,10,9,9,8,12,13,14,15,18,17,17,16,20,21,22,23,26,25,25,24,28,29,30,31] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_low_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,1,0,4,5,6,7,10,9,9,8,12,13,14,15,18,17,17,16,20,21,22,23,26,25,25,24,28,29,30,31] sched: [1:1.00]
@@ -7884,12 +7884,12 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i1
}
define <32 x i16> @test_32xi16_perm_high_mask6(<32 x i16> %vec) {
; GENERIC-LABEL: test_32xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 5, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 12, i32 13, i32 14, i32 16, i32 17, i32 18, i32 19, i32 20, i32 20, i32 21, i32 22, i32 24, i32 25, i32 26, i32 27, i32 28, i32 28, i32 29, i32 30>
@@ -7897,7 +7897,7 @@ define <32 x i16> @test_32xi16_perm_high_mask6(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] sched: [1:1.00]
@@ -7905,7 +7905,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] sched: [1:1.00]
@@ -7919,14 +7919,14 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i16
define <32 x i16> @test_masked_z_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_high_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_high_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] sched: [1:1.00]
@@ -7938,7 +7938,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i
}
define <32 x i16> @test_masked_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_low_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,0,3,0,4,5,6,7,11,8,11,8,12,13,14,15,19,16,19,16,20,21,22,23,27,24,27,24,28,29,30,31] sched: [1:1.00]
@@ -7946,7 +7946,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i16>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_low_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,0,3,0,4,5,6,7,11,8,11,8,12,13,14,15,19,16,19,16,20,21,22,23,27,24,27,24,28,29,30,31] sched: [1:1.00]
@@ -7960,14 +7960,14 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_low_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,3,0,4,5,6,7,11,8,11,8,12,13,14,15,19,16,19,16,20,21,22,23,27,24,27,24,28,29,30,31] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_low_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,3,0,4,5,6,7,11,8,11,8,12,13,14,15,19,16,19,16,20,21,22,23,27,24,27,24,28,29,30,31] sched: [1:1.00]
@@ -7979,12 +7979,12 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i1
}
define <32 x i16> @test_32xi16_perm_high_mem_mask0(<32 x i16>* %vp) {
; GENERIC-LABEL: test_32xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -7993,14 +7993,14 @@ define <32 x i16> @test_32xi16_perm_high_mem_mask0(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] sched: [8:1.00]
@@ -8014,14 +8014,14 @@ define <32 x i16> @test_masked_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] sched: [8:1.00]
@@ -8035,14 +8035,14 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15,17,17,19,19,20,21,22,23,25,25,27,27,28,29,30,31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15,17,17,19,19,20,21,22,23,25,25,27,27,28,29,30,31] sched: [8:1.00]
@@ -8056,14 +8056,14 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15,17,17,19,19,20,21,22,23,25,25,27,27,28,29,30,31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15,17,17,19,19,20,21,22,23,25,25,27,27,28,29,30,31] sched: [8:1.00]
@@ -8077,14 +8077,14 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,7,6,4,8,9,10,11,12,15,14,12,16,17,18,19,20,23,22,20,24,25,26,27,28,31,30,28] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,7,6,4,8,9,10,11,12,15,14,12,16,17,18,19,20,23,22,20,24,25,26,27,28,31,30,28] sched: [8:1.00]
@@ -8098,14 +8098,14 @@ define <32 x i16> @test_masked_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,7,6,4,8,9,10,11,12,15,14,12,16,17,18,19,20,23,22,20,24,25,26,27,28,31,30,28] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,7,6,4,8,9,10,11,12,15,14,12,16,17,18,19,20,23,22,20,24,25,26,27,28,31,30,28] sched: [8:1.00]
@@ -8119,12 +8119,12 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32
define <32 x i16> @test_32xi16_perm_low_mem_mask3(<32 x i16>* %vp) {
; GENERIC-LABEL: test_32xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -8133,14 +8133,14 @@ define <32 x i16> @test_32xi16_perm_low_mem_mask3(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] sched: [8:1.00]
@@ -8154,14 +8154,14 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] sched: [8:1.00]
@@ -8175,14 +8175,14 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_high_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,7,4,6,5,8,9,10,11,15,12,14,13,16,17,18,19,23,20,22,21,24,25,26,27,31,28,30,29] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_high_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,7,4,6,5,8,9,10,11,15,12,14,13,16,17,18,19,23,20,22,21,24,25,26,27,31,28,30,29] sched: [8:1.00]
@@ -8196,14 +8196,14 @@ define <32 x i16> @test_masked_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_high_mem_mask4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,7,4,6,5,8,9,10,11,15,12,14,13,16,17,18,19,23,20,22,21,24,25,26,27,31,28,30,29] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_high_mem_mask4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,7,4,6,5,8,9,10,11,15,12,14,13,16,17,18,19,23,20,22,21,24,25,26,27,31,28,30,29] sched: [8:1.00]
@@ -8217,7 +8217,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_low_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufd {{.*#+}} zmm2 = mem[0,0,2,3,4,4,6,7,8,8,10,11,12,12,14,15] sched: [5:1.00]
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -8225,7 +8225,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_low_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} zmm2 = mem[0,0,2,3,4,4,6,7,8,8,10,11,12,12,14,15] sched: [8:1.00]
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1 # sched: [3:1.00]
@@ -8240,7 +8240,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_low_mem_mask5:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufd {{.*#+}} zmm1 = mem[0,0,2,3,4,4,6,7,8,8,10,11,12,12,14,15] sched: [5:1.00]
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -8248,7 +8248,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_low_mem_mask5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} zmm1 = mem[0,0,2,3,4,4,6,7,8,8,10,11,12,12,14,15] sched: [8:1.00]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm0, %k1 # sched: [3:1.00]
@@ -8263,12 +8263,12 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32
define <32 x i16> @test_32xi16_perm_high_mem_mask6(<32 x i16>* %vp) {
; GENERIC-LABEL: test_32xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_32xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -8277,14 +8277,14 @@ define <32 x i16> @test_32xi16_perm_high_mem_mask6(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] sched: [8:1.00]
@@ -8298,14 +8298,14 @@ define <32 x i16> @test_masked_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_high_mem_mask6:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_high_mem_mask6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] sched: [8:1.00]
@@ -8319,14 +8319,14 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_32xi16_perm_low_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[3,1,3,0,4,5,6,7,11,9,11,8,12,13,14,15,19,17,19,16,20,21,22,23,27,25,27,24,28,29,30,31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_32xi16_perm_low_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[3,1,3,0,4,5,6,7,11,9,11,8,12,13,14,15,19,17,19,16,20,21,22,23,27,25,27,24,28,29,30,31] sched: [8:1.00]
@@ -8340,14 +8340,14 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32 x i16> %mask) {
; GENERIC-LABEL: test_masked_z_32xi16_perm_low_mem_mask7:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[3,1,3,0,4,5,6,7,11,9,11,8,12,13,14,15,19,17,19,16,20,21,22,23,27,25,27,24,28,29,30,31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_32xi16_perm_low_mem_mask7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[3,1,3,0,4,5,6,7,11,9,11,8,12,13,14,15,19,17,19,16,20,21,22,23,27,25,27,24,28,29,30,31] sched: [8:1.00]
@@ -8361,12 +8361,12 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32
define <4 x i32> @test_4xi32_perm_mask0(<4 x i32> %vec) {
; GENERIC-LABEL: test_4xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,3,0] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,3,0] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 3, i32 0>
@@ -8374,7 +8374,7 @@ define <4 x i32> @test_4xi32_perm_mask0(<4 x i32> %vec) {
}
define <4 x i32> @test_masked_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_4xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[2,3,3,0] sched: [1:1.00]
@@ -8382,7 +8382,7 @@ define <4 x i32> @test_masked_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[2,3,3,0] sched: [1:1.00]
@@ -8396,14 +8396,14 @@ define <4 x i32> @test_masked_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %vec2,
define <4 x i32> @test_masked_z_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_4xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[2,3,3,0] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[2,3,3,0] sched: [1:1.00]
@@ -8415,7 +8415,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %mask
}
define <4 x i32> @test_masked_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_4xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,0,2,0] sched: [1:1.00]
@@ -8423,7 +8423,7 @@ define <4 x i32> @test_masked_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,0,2,0] sched: [1:1.00]
@@ -8437,14 +8437,14 @@ define <4 x i32> @test_masked_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %vec2,
define <4 x i32> @test_masked_z_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_4xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,2,0] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,2,0] sched: [1:1.00]
@@ -8456,7 +8456,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %mask
}
define <4 x i32> @test_masked_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_4xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[3,0,1,0] sched: [1:1.00]
@@ -8464,7 +8464,7 @@ define <4 x i32> @test_masked_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[3,0,1,0] sched: [1:1.00]
@@ -8478,14 +8478,14 @@ define <4 x i32> @test_masked_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %vec2,
define <4 x i32> @test_masked_z_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_4xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[3,0,1,0] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[3,0,1,0] sched: [1:1.00]
@@ -8497,12 +8497,12 @@ define <4 x i32> @test_masked_z_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %mask
}
define <4 x i32> @test_4xi32_perm_mask3(<4 x i32> %vec) {
; GENERIC-LABEL: test_4xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,0,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,0,3] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 3>
@@ -8510,7 +8510,7 @@ define <4 x i32> @test_4xi32_perm_mask3(<4 x i32> %vec) {
}
define <4 x i32> @test_masked_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_4xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,1,0,3] sched: [1:1.00]
@@ -8518,7 +8518,7 @@ define <4 x i32> @test_masked_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,1,0,3] sched: [1:1.00]
@@ -8532,14 +8532,14 @@ define <4 x i32> @test_masked_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %vec2,
define <4 x i32> @test_masked_z_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_4xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,0,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,0,3] sched: [1:1.00]
@@ -8551,12 +8551,12 @@ define <4 x i32> @test_masked_z_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %mask
}
define <4 x i32> @test_4xi32_perm_mem_mask0(<4 x i32>* %vp) {
; GENERIC-LABEL: test_4xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,3,3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,3,3] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <4 x i32>, <4 x i32>* %vp
@@ -8565,14 +8565,14 @@ define <4 x i32> @test_4xi32_perm_mem_mask0(<4 x i32>* %vp) {
}
define <4 x i32> @test_masked_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_4xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[0,1,3,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[0,1,3,3] sched: [7:1.00]
@@ -8586,14 +8586,14 @@ define <4 x i32> @test_masked_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %ve
define <4 x i32> @test_masked_z_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_4xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[0,1,3,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[0,1,3,3] sched: [7:1.00]
@@ -8607,14 +8607,14 @@ define <4 x i32> @test_masked_z_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %
define <4 x i32> @test_masked_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_4xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[2,2,3,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[2,2,3,1] sched: [7:1.00]
@@ -8628,14 +8628,14 @@ define <4 x i32> @test_masked_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %ve
define <4 x i32> @test_masked_z_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_4xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[2,2,3,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[2,2,3,1] sched: [7:1.00]
@@ -8649,14 +8649,14 @@ define <4 x i32> @test_masked_z_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %
define <4 x i32> @test_masked_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_4xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[0,3,0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[0,3,0,1] sched: [7:1.00]
@@ -8670,14 +8670,14 @@ define <4 x i32> @test_masked_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %ve
define <4 x i32> @test_masked_z_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_4xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[0,3,0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[0,3,0,1] sched: [7:1.00]
@@ -8691,12 +8691,12 @@ define <4 x i32> @test_masked_z_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %
define <4 x i32> @test_4xi32_perm_mem_mask3(<4 x i32>* %vp) {
; GENERIC-LABEL: test_4xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,1,0] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,1,0] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <4 x i32>, <4 x i32>* %vp
@@ -8705,14 +8705,14 @@ define <4 x i32> @test_4xi32_perm_mem_mask3(<4 x i32>* %vp) {
}
define <4 x i32> @test_masked_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_4xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[1,0,1,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_4xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[1,0,1,0] sched: [7:1.00]
@@ -8726,14 +8726,14 @@ define <4 x i32> @test_masked_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %ve
define <4 x i32> @test_masked_z_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %mask) {
; GENERIC-LABEL: test_masked_z_4xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[1,0,1,0] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_masked_z_4xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm1, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[1,0,1,0] sched: [7:1.00]
@@ -8747,12 +8747,12 @@ define <4 x i32> @test_masked_z_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %
define <8 x i32> @test2_8xi32_perm_mask0(<8 x i32> %vec) {
; GENERIC-LABEL: test2_8xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,1,0,6,7,5,4] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,1,0,6,7,5,4] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 1, i32 0, i32 6, i32 7, i32 5, i32 4>
@@ -8760,7 +8760,7 @@ define <8 x i32> @test2_8xi32_perm_mask0(<8 x i32> %vec) {
}
define <8 x i32> @test2_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_8xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[2,3,1,0,6,7,5,4] sched: [1:1.00]
@@ -8768,7 +8768,7 @@ define <8 x i32> @test2_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_8xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[2,3,1,0,6,7,5,4] sched: [1:1.00]
@@ -8782,14 +8782,14 @@ define <8 x i32> @test2_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test2_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_8xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,1,0,6,7,5,4] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_8xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,1,0,6,7,5,4] sched: [1:1.00]
@@ -8801,7 +8801,7 @@ define <8 x i32> @test2_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mas
}
define <8 x i32> @test2_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_8xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[0,3,3,3,4,7,7,7] sched: [1:1.00]
@@ -8809,7 +8809,7 @@ define <8 x i32> @test2_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_8xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[0,3,3,3,4,7,7,7] sched: [1:1.00]
@@ -8823,14 +8823,14 @@ define <8 x i32> @test2_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test2_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_8xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,3,3,4,7,7,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_8xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,3,3,4,7,7,7] sched: [1:1.00]
@@ -8842,7 +8842,7 @@ define <8 x i32> @test2_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mas
}
define <8 x i32> @test2_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_8xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[1,2,0,3,5,6,4,7] sched: [1:1.00]
@@ -8850,7 +8850,7 @@ define <8 x i32> @test2_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_8xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[1,2,0,3,5,6,4,7] sched: [1:1.00]
@@ -8864,14 +8864,14 @@ define <8 x i32> @test2_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test2_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_8xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,0,3,5,6,4,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_8xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,0,3,5,6,4,7] sched: [1:1.00]
@@ -8883,12 +8883,12 @@ define <8 x i32> @test2_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mas
}
define <8 x i32> @test2_8xi32_perm_mask3(<8 x i32> %vec) {
; GENERIC-LABEL: test2_8xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,3,1,0,5,7,5,4] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,3,1,0,5,7,5,4] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 1, i32 0, i32 5, i32 7, i32 5, i32 4>
@@ -8896,7 +8896,7 @@ define <8 x i32> @test2_8xi32_perm_mask3(<8 x i32> %vec) {
}
define <8 x i32> @test2_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_8xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[1,3,1,0,5,7,5,4] sched: [1:1.00]
@@ -8904,7 +8904,7 @@ define <8 x i32> @test2_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_8xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[1,3,1,0,5,7,5,4] sched: [1:1.00]
@@ -8918,14 +8918,14 @@ define <8 x i32> @test2_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test2_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_8xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,3,1,0,5,7,5,4] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_8xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,3,1,0,5,7,5,4] sched: [1:1.00]
@@ -8937,12 +8937,12 @@ define <8 x i32> @test2_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mas
}
define <8 x i32> @test2_8xi32_perm_mem_mask0(<8 x i32>* %vp) {
; GENERIC-LABEL: test2_8xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} ymm0 = mem[1,0,2,0,5,4,6,4] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} ymm0 = mem[1,0,2,0,5,4,6,4] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <8 x i32>, <8 x i32>* %vp
@@ -8951,14 +8951,14 @@ define <8 x i32> @test2_8xi32_perm_mem_mask0(<8 x i32>* %vp) {
}
define <8 x i32> @test2_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_8xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[1,0,2,0,5,4,6,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_8xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[1,0,2,0,5,4,6,4] sched: [8:1.00]
@@ -8972,14 +8972,14 @@ define <8 x i32> @test2_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %v
define <8 x i32> @test2_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_8xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[1,0,2,0,5,4,6,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_8xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[1,0,2,0,5,4,6,4] sched: [8:1.00]
@@ -8993,14 +8993,14 @@ define <8 x i32> @test2_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32>
define <8 x i32> @test2_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_8xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[0,3,2,0,4,7,6,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_8xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[0,3,2,0,4,7,6,4] sched: [8:1.00]
@@ -9014,14 +9014,14 @@ define <8 x i32> @test2_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %v
define <8 x i32> @test2_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_8xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[0,3,2,0,4,7,6,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_8xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[0,3,2,0,4,7,6,4] sched: [8:1.00]
@@ -9035,14 +9035,14 @@ define <8 x i32> @test2_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32>
define <8 x i32> @test2_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_8xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[3,2,3,1,7,6,7,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_8xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[3,2,3,1,7,6,7,5] sched: [8:1.00]
@@ -9056,14 +9056,14 @@ define <8 x i32> @test2_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %v
define <8 x i32> @test2_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_8xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,1,7,6,7,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_8xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,1,7,6,7,5] sched: [8:1.00]
@@ -9077,12 +9077,12 @@ define <8 x i32> @test2_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32>
define <8 x i32> @test2_8xi32_perm_mem_mask3(<8 x i32>* %vp) {
; GENERIC-LABEL: test2_8xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} ymm0 = mem[3,2,0,0,7,6,4,4] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} ymm0 = mem[3,2,0,0,7,6,4,4] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <8 x i32>, <8 x i32>* %vp
@@ -9091,14 +9091,14 @@ define <8 x i32> @test2_8xi32_perm_mem_mask3(<8 x i32>* %vp) {
}
define <8 x i32> @test2_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_8xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[3,2,0,0,7,6,4,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_8xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[3,2,0,0,7,6,4,4] sched: [8:1.00]
@@ -9112,14 +9112,14 @@ define <8 x i32> @test2_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %v
define <8 x i32> @test2_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_8xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,0,0,7,6,4,4] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_8xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,0,0,7,6,4,4] sched: [8:1.00]
@@ -9133,12 +9133,12 @@ define <8 x i32> @test2_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32>
define <16 x i32> @test2_16xi32_perm_mask0(<16 x i32> %vec) {
; GENERIC-LABEL: test2_16xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_16xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 3, i32 1, i32 3, i32 0, i32 7, i32 5, i32 7, i32 4, i32 11, i32 9, i32 11, i32 8, i32 15, i32 13, i32 15, i32 12>
@@ -9146,7 +9146,7 @@ define <16 x i32> @test2_16xi32_perm_mask0(<16 x i32> %vec) {
}
define <16 x i32> @test2_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_16xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] sched: [1:1.00]
@@ -9154,7 +9154,7 @@ define <16 x i32> @test2_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %v
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_16xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] sched: [1:1.00]
@@ -9168,14 +9168,14 @@ define <16 x i32> @test2_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %v
define <16 x i32> @test2_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_16xi32_perm_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_16xi32_perm_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] sched: [1:1.00]
@@ -9187,7 +9187,7 @@ define <16 x i32> @test2_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32>
}
define <16 x i32> @test2_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_16xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[2,0,3,0,6,4,7,4,10,8,11,8,14,12,15,12] sched: [1:1.00]
@@ -9195,7 +9195,7 @@ define <16 x i32> @test2_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %v
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_16xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[2,0,3,0,6,4,7,4,10,8,11,8,14,12,15,12] sched: [1:1.00]
@@ -9209,14 +9209,14 @@ define <16 x i32> @test2_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %v
define <16 x i32> @test2_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_16xi32_perm_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,3,0,6,4,7,4,10,8,11,8,14,12,15,12] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_16xi32_perm_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,3,0,6,4,7,4,10,8,11,8,14,12,15,12] sched: [1:1.00]
@@ -9228,7 +9228,7 @@ define <16 x i32> @test2_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32>
}
define <16 x i32> @test2_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_16xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[1,3,3,0,5,7,7,4,9,11,11,8,13,15,15,12] sched: [1:1.00]
@@ -9236,7 +9236,7 @@ define <16 x i32> @test2_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %v
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_16xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[1,3,3,0,5,7,7,4,9,11,11,8,13,15,15,12] sched: [1:1.00]
@@ -9250,14 +9250,14 @@ define <16 x i32> @test2_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %v
define <16 x i32> @test2_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_16xi32_perm_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,3,3,0,5,7,7,4,9,11,11,8,13,15,15,12] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_16xi32_perm_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,3,3,0,5,7,7,4,9,11,11,8,13,15,15,12] sched: [1:1.00]
@@ -9269,12 +9269,12 @@ define <16 x i32> @test2_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32>
}
define <16 x i32> @test2_16xi32_perm_mask3(<16 x i32> %vec) {
; GENERIC-LABEL: test2_16xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_16xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 3, i32 2, i32 0, i32 3, i32 7, i32 6, i32 4, i32 7, i32 11, i32 10, i32 8, i32 11, i32 15, i32 14, i32 12, i32 15>
@@ -9282,7 +9282,7 @@ define <16 x i32> @test2_16xi32_perm_mask3(<16 x i32> %vec) {
}
define <16 x i32> @test2_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_16xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] sched: [1:1.00]
@@ -9290,7 +9290,7 @@ define <16 x i32> @test2_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %v
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_16xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] sched: [1:1.00]
@@ -9304,14 +9304,14 @@ define <16 x i32> @test2_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %v
define <16 x i32> @test2_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_16xi32_perm_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_16xi32_perm_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] sched: [1:1.00]
@@ -9323,12 +9323,12 @@ define <16 x i32> @test2_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32>
}
define <16 x i32> @test2_16xi32_perm_mem_mask0(<16 x i32>* %vp) {
; GENERIC-LABEL: test2_16xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} zmm0 = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_16xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} zmm0 = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -9337,14 +9337,14 @@ define <16 x i32> @test2_16xi32_perm_mem_mask0(<16 x i32>* %vp) {
}
define <16 x i32> @test2_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_16xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_16xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] sched: [8:1.00]
@@ -9358,14 +9358,14 @@ define <16 x i32> @test2_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32
define <16 x i32> @test2_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_16xi32_perm_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_16xi32_perm_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] sched: [8:1.00]
@@ -9379,14 +9379,14 @@ define <16 x i32> @test2_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i
define <16 x i32> @test2_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_16xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[1,0,0,2,5,4,4,6,9,8,8,10,13,12,12,14] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_16xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[1,0,0,2,5,4,4,6,9,8,8,10,13,12,12,14] sched: [8:1.00]
@@ -9400,14 +9400,14 @@ define <16 x i32> @test2_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32
define <16 x i32> @test2_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_16xi32_perm_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,0,2,5,4,4,6,9,8,8,10,13,12,12,14] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_16xi32_perm_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,0,2,5,4,4,6,9,8,8,10,13,12,12,14] sched: [8:1.00]
@@ -9421,14 +9421,14 @@ define <16 x i32> @test2_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i
define <16 x i32> @test2_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_16xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[2,0,1,2,6,4,5,6,10,8,9,10,14,12,13,14] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_16xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[2,0,1,2,6,4,5,6,10,8,9,10,14,12,13,14] sched: [8:1.00]
@@ -9442,14 +9442,14 @@ define <16 x i32> @test2_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32
define <16 x i32> @test2_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_16xi32_perm_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[2,0,1,2,6,4,5,6,10,8,9,10,14,12,13,14] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_16xi32_perm_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[2,0,1,2,6,4,5,6,10,8,9,10,14,12,13,14] sched: [8:1.00]
@@ -9463,12 +9463,12 @@ define <16 x i32> @test2_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i
define <16 x i32> @test2_16xi32_perm_mem_mask3(<16 x i32>* %vp) {
; GENERIC-LABEL: test2_16xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermilps {{.*#+}} zmm0 = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_16xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} zmm0 = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -9477,14 +9477,14 @@ define <16 x i32> @test2_16xi32_perm_mem_mask3(<16 x i32>* %vp) {
}
define <16 x i32> @test2_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_16xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_16xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] sched: [8:1.00]
@@ -9498,14 +9498,14 @@ define <16 x i32> @test2_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32
define <16 x i32> @test2_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %mask) {
; GENERIC-LABEL: test2_masked_z_16xi32_perm_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; GENERIC-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_masked_z_16xi32_perm_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] sched: [8:1.00]
@@ -9519,12 +9519,12 @@ define <16 x i32> @test2_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i
define <8 x float> @test2_8xfloat_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2) {
; GENERIC-LABEL: test2_8xfloat_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
@@ -9532,7 +9532,7 @@ define <8 x float> @test2_8xfloat_shuff_mask0(<8 x float> %vec1, <8 x float> %ve
}
define <8 x float> @test2_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test2_8xfloat_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
@@ -9540,7 +9540,7 @@ define <8 x float> @test2_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x flo
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -9554,14 +9554,14 @@ define <8 x float> @test2_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x flo
define <8 x float> @test2_8xfloat_zero_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_8xfloat_zero_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_zero_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -9573,7 +9573,7 @@ define <8 x float> @test2_8xfloat_zero_masked_shuff_mask0(<8 x float> %vec1, <8
}
define <8 x float> @test2_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test2_8xfloat_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
@@ -9581,7 +9581,7 @@ define <8 x float> @test2_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x flo
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -9595,14 +9595,14 @@ define <8 x float> @test2_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x flo
define <8 x float> @test2_8xfloat_zero_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_8xfloat_zero_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_zero_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -9614,7 +9614,7 @@ define <8 x float> @test2_8xfloat_zero_masked_shuff_mask1(<8 x float> %vec1, <8
}
define <8 x float> @test2_8xfloat_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test2_8xfloat_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [1:1.00]
@@ -9622,7 +9622,7 @@ define <8 x float> @test2_8xfloat_masked_shuff_mask2(<8 x float> %vec1, <8 x flo
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [3:1.00]
@@ -9636,14 +9636,14 @@ define <8 x float> @test2_8xfloat_masked_shuff_mask2(<8 x float> %vec1, <8 x flo
define <8 x float> @test2_8xfloat_zero_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test2_8xfloat_zero_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_zero_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [3:1.00]
@@ -9655,12 +9655,12 @@ define <8 x float> @test2_8xfloat_zero_masked_shuff_mask2(<8 x float> %vec1, <8
}
define <8 x float> @test2_8xfloat_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2) {
; GENERIC-LABEL: test2_8xfloat_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
@@ -9668,7 +9668,7 @@ define <8 x float> @test2_8xfloat_shuff_mask3(<8 x float> %vec1, <8 x float> %ve
}
define <8 x float> @test2_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test2_8xfloat_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
@@ -9676,7 +9676,7 @@ define <8 x float> @test2_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x flo
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test2_8xfloat_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -9690,14 +9690,14 @@ define <8 x float> @test2_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x flo
define <8 x float> @test_8xfloat_zero_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -9709,12 +9709,12 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask3(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) {
; GENERIC-LABEL: test_8xfloat_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -9723,7 +9723,7 @@ define <8 x float> @test_8xfloat_shuff_mem_mask0(<8 x float> %vec1, <8 x float>*
}
define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [5:1.00]
@@ -9731,7 +9731,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [10:1.00]
@@ -9746,14 +9746,14 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [10:1.00]
@@ -9767,7 +9767,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask0(<8 x float> %vec1,
define <8 x float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [5:1.00]
@@ -9775,7 +9775,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [10:1.00]
@@ -9790,14 +9790,14 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [10:1.00]
@@ -9811,7 +9811,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask1(<8 x float> %vec1,
define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
@@ -9819,7 +9819,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -9834,14 +9834,14 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -9855,12 +9855,12 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask2(<8 x float> %vec1,
define <8 x float> @test_8xfloat_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) {
; GENERIC-LABEL: test_8xfloat_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -9869,7 +9869,7 @@ define <8 x float> @test_8xfloat_shuff_mem_mask3(<8 x float> %vec1, <8 x float>*
}
define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
@@ -9877,7 +9877,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -9892,14 +9892,14 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -9913,12 +9913,12 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask3(<8 x float> %vec1,
define <16 x float> @test_16xfloat_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,0,1],zmm1[2,3,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,0,1],zmm1[2,3,6,7] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31>
@@ -9926,7 +9926,7 @@ define <16 x float> @test_16xfloat_shuff_mask0(<16 x float> %vec1, <16 x float>
}
define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[12,13,14,15,0,1,2,3],zmm1[4,5,6,7,12,13,14,15] sched: [1:1.00]
@@ -9934,7 +9934,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[12,13,14,15,0,1,2,3],zmm1[4,5,6,7,12,13,14,15] sched: [3:1.00]
@@ -9948,14 +9948,14 @@ define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,0,1,2,3],zmm1[4,5,6,7,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,0,1,2,3],zmm1[4,5,6,7,12,13,14,15] sched: [3:1.00]
@@ -9967,7 +9967,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask0(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[0,1,2,3,8,9,10,11],zmm1[0,1,2,3,12,13,14,15] sched: [1:1.00]
@@ -9975,7 +9975,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[0,1,2,3,8,9,10,11],zmm1[0,1,2,3,12,13,14,15] sched: [3:1.00]
@@ -9989,14 +9989,14 @@ define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,8,9,10,11],zmm1[0,1,2,3,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,8,9,10,11],zmm1[0,1,2,3,12,13,14,15] sched: [3:1.00]
@@ -10008,7 +10008,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask1(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[12,13,14,15,4,5,6,7],zmm1[0,1,2,3,4,5,6,7] sched: [1:1.00]
@@ -10016,7 +10016,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[12,13,14,15,4,5,6,7],zmm1[0,1,2,3,4,5,6,7] sched: [3:1.00]
@@ -10030,14 +10030,14 @@ define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,4,5,6,7],zmm1[0,1,2,3,4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,4,5,6,7],zmm1[0,1,2,3,4,5,6,7] sched: [3:1.00]
@@ -10049,12 +10049,12 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask2(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2) {
; GENERIC-LABEL: test_16xfloat_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,6,7],zmm1[0,1,4,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,6,7],zmm1[0,1,4,5] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27>
@@ -10062,7 +10062,7 @@ define <16 x float> @test_16xfloat_shuff_mask3(<16 x float> %vec1, <16 x float>
}
define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,12,13,14,15],zmm1[0,1,2,3,8,9,10,11] sched: [1:1.00]
@@ -10070,7 +10070,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,12,13,14,15],zmm1[0,1,2,3,8,9,10,11] sched: [3:1.00]
@@ -10084,14 +10084,14 @@ define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,12,13,14,15],zmm1[0,1,2,3,8,9,10,11] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,12,13,14,15],zmm1[0,1,2,3,8,9,10,11] sched: [3:1.00]
@@ -10103,12 +10103,12 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask3(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) {
; GENERIC-LABEL: test_16xfloat_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,4,5],mem[4,5,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,4,5],mem[4,5,2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -10117,7 +10117,7 @@ define <16 x float> @test_16xfloat_shuff_mem_mask0(<16 x float> %vec1, <16 x flo
}
define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[12,13,14,15,8,9,10,11],mem[8,9,10,11,4,5,6,7] sched: [5:1.00]
@@ -10125,7 +10125,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[12,13,14,15,8,9,10,11],mem[8,9,10,11,4,5,6,7] sched: [10:1.00]
@@ -10140,14 +10140,14 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,8,9,10,11],mem[8,9,10,11,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,8,9,10,11],mem[8,9,10,11,4,5,6,7] sched: [10:1.00]
@@ -10161,7 +10161,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask0(<16 x float> %vec
define <16 x float> @test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,4,5,6,7] sched: [5:1.00]
@@ -10169,7 +10169,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,4,5,6,7] sched: [10:1.00]
@@ -10184,14 +10184,14 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,4,5,6,7] sched: [10:1.00]
@@ -10205,7 +10205,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask1(<16 x float> %vec
define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,0,1,2,3],mem[8,9,10,11,8,9,10,11] sched: [5:1.00]
@@ -10213,7 +10213,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,0,1,2,3],mem[8,9,10,11,8,9,10,11] sched: [10:1.00]
@@ -10228,14 +10228,14 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,0,1,2,3],mem[8,9,10,11,8,9,10,11] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,0,1,2,3],mem[8,9,10,11,8,9,10,11] sched: [10:1.00]
@@ -10249,12 +10249,12 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask2(<16 x float> %vec
define <16 x float> @test_16xfloat_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
; GENERIC-LABEL: test_16xfloat_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[6,7,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[6,7,6,7] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -10263,7 +10263,7 @@ define <16 x float> @test_16xfloat_shuff_mem_mask3(<16 x float> %vec1, <16 x flo
}
define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,0,1,2,3],mem[12,13,14,15,12,13,14,15] sched: [5:1.00]
@@ -10271,7 +10271,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <1
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,0,1,2,3],mem[12,13,14,15,12,13,14,15] sched: [10:1.00]
@@ -10286,14 +10286,14 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,0,1,2,3],mem[12,13,14,15,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,0,1,2,3],mem[12,13,14,15,12,13,14,15] sched: [10:1.00]
@@ -10307,12 +10307,12 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask3(<16 x float> %vec
define <4 x double> @test_4xdouble_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2) {
; GENERIC-LABEL: test_4xdouble_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -10320,7 +10320,7 @@ define <4 x double> @test_4xdouble_shuff_mask0(<4 x double> %vec1, <4 x double>
}
define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
@@ -10328,7 +10328,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x d
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
@@ -10342,14 +10342,14 @@ define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
@@ -10361,7 +10361,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask0(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
@@ -10369,7 +10369,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x d
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
@@ -10383,14 +10383,14 @@ define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
@@ -10402,7 +10402,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask1(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
@@ -10410,7 +10410,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> %vec1, <4 x d
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
@@ -10424,14 +10424,14 @@ define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
@@ -10443,12 +10443,12 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask2(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2) {
; GENERIC-LABEL: test_4xdouble_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
@@ -10456,7 +10456,7 @@ define <4 x double> @test_4xdouble_shuff_mask3(<4 x double> %vec1, <4 x double>
}
define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
@@ -10464,7 +10464,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x d
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
@@ -10478,14 +10478,14 @@ define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
@@ -10497,12 +10497,12 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask3(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
; GENERIC-LABEL: test_4xdouble_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -10511,7 +10511,7 @@ define <4 x double> @test_4xdouble_shuff_mem_mask0(<4 x double> %vec1, <4 x doub
}
define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3] sched: [5:1.00]
@@ -10519,7 +10519,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3] sched: [10:1.00]
@@ -10534,14 +10534,14 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3] sched: [10:1.00]
@@ -10555,7 +10555,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask0(<4 x double> %vec
define <4 x double> @test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1] sched: [5:1.00]
@@ -10563,7 +10563,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1] sched: [10:1.00]
@@ -10578,14 +10578,14 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1] sched: [10:1.00]
@@ -10599,7 +10599,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask1(<4 x double> %vec
define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1] sched: [5:1.00]
@@ -10607,7 +10607,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1] sched: [10:1.00]
@@ -10622,14 +10622,14 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1] sched: [10:1.00]
@@ -10643,12 +10643,12 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask2(<4 x double> %vec
define <4 x double> @test_4xdouble_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
; GENERIC-LABEL: test_4xdouble_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -10657,7 +10657,7 @@ define <4 x double> @test_4xdouble_shuff_mem_mask3(<4 x double> %vec1, <4 x doub
}
define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3] sched: [5:1.00]
@@ -10665,7 +10665,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3] sched: [10:1.00]
@@ -10680,14 +10680,14 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3] sched: [10:1.00]
@@ -10701,12 +10701,12 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask3(<4 x double> %vec
define <8 x double> @test_8xdouble_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2) {
; GENERIC-LABEL: test_8xdouble_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,2,3],zmm1[6,7,0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,2,3],zmm1[6,7,0,1] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 6, i32 7, i32 2, i32 3, i32 14, i32 15, i32 8, i32 9>
@@ -10714,7 +10714,7 @@ define <8 x double> @test_8xdouble_shuff_mask0(<8 x double> %vec1, <8 x double>
}
define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,2,3],zmm1[6,7,0,1] sched: [1:1.00]
@@ -10722,7 +10722,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x d
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,2,3],zmm1[6,7,0,1] sched: [3:1.00]
@@ -10736,14 +10736,14 @@ define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,2,3],zmm1[6,7,0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,2,3],zmm1[6,7,0,1] sched: [3:1.00]
@@ -10755,7 +10755,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask0(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[0,1,4,5],zmm1[0,1,4,5] sched: [1:1.00]
@@ -10763,7 +10763,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x d
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[0,1,4,5],zmm1[0,1,4,5] sched: [3:1.00]
@@ -10777,14 +10777,14 @@ define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,4,5],zmm1[0,1,4,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,4,5],zmm1[0,1,4,5] sched: [3:1.00]
@@ -10796,7 +10796,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask1(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,4,5],zmm1[4,5,0,1] sched: [1:1.00]
@@ -10804,7 +10804,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x d
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,4,5],zmm1[4,5,0,1] sched: [3:1.00]
@@ -10818,14 +10818,14 @@ define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,4,5],zmm1[4,5,0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,4,5],zmm1[4,5,0,1] sched: [3:1.00]
@@ -10837,12 +10837,12 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask2(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2) {
; GENERIC-LABEL: test_8xdouble_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,4,5],zmm1[4,5,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,4,5],zmm1[4,5,2,3] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 4, i32 5, i32 4, i32 5, i32 12, i32 13, i32 10, i32 11>
@@ -10850,7 +10850,7 @@ define <8 x double> @test_8xdouble_shuff_mask3(<8 x double> %vec1, <8 x double>
}
define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,4,5],zmm1[4,5,2,3] sched: [1:1.00]
@@ -10858,7 +10858,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x d
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,4,5],zmm1[4,5,2,3] sched: [3:1.00]
@@ -10872,14 +10872,14 @@ define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,4,5],zmm1[4,5,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,4,5],zmm1[4,5,2,3] sched: [3:1.00]
@@ -10891,12 +10891,12 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask3(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
; GENERIC-LABEL: test_8xdouble_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,0,1],mem[0,1,0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,0,1],mem[0,1,0,1] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -10905,7 +10905,7 @@ define <8 x double> @test_8xdouble_shuff_mem_mask0(<8 x double> %vec1, <8 x doub
}
define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[6,7,0,1],mem[0,1,0,1] sched: [5:1.00]
@@ -10913,7 +10913,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[6,7,0,1],mem[0,1,0,1] sched: [10:1.00]
@@ -10928,14 +10928,14 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,0,1],mem[0,1,0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,0,1],mem[0,1,0,1] sched: [10:1.00]
@@ -10949,7 +10949,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask0(<8 x double> %vec
define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[6,7,6,7],mem[0,1,2,3] sched: [5:1.00]
@@ -10957,7 +10957,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[6,7,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -10972,14 +10972,14 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,6,7],mem[0,1,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -10993,7 +10993,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask1(<8 x double> %vec
define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3],mem[0,1,4,5] sched: [5:1.00]
@@ -11001,7 +11001,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3],mem[0,1,4,5] sched: [10:1.00]
@@ -11016,14 +11016,14 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],mem[0,1,4,5] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],mem[0,1,4,5] sched: [10:1.00]
@@ -11037,12 +11037,12 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask2(<8 x double> %vec
define <8 x double> @test_8xdouble_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
; GENERIC-LABEL: test_8xdouble_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[4,5,0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[4,5,0,1] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -11051,7 +11051,7 @@ define <8 x double> @test_8xdouble_shuff_mem_mask3(<8 x double> %vec1, <8 x doub
}
define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[4,5,0,1] sched: [5:1.00]
@@ -11059,7 +11059,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[4,5,0,1] sched: [10:1.00]
@@ -11074,14 +11074,14 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[4,5,0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[4,5,0,1] sched: [10:1.00]
@@ -11095,12 +11095,12 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask3(<8 x double> %vec
define <8 x i32> @test_8xi32_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2) {
; GENERIC-LABEL: test_8xi32_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
@@ -11108,7 +11108,7 @@ define <8 x i32> @test_8xi32_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2) {
}
define <8 x i32> @test_8xi32_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [1:1.00]
@@ -11116,7 +11116,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [3:1.00]
@@ -11130,14 +11130,14 @@ define <8 x i32> @test_8xi32_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2
define <8 x i32> @test_8xi32_zero_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_zero_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_zero_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [3:1.00]
@@ -11149,7 +11149,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32>
}
define <8 x i32> @test_8xi32_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
@@ -11157,7 +11157,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -11171,14 +11171,14 @@ define <8 x i32> @test_8xi32_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2
define <8 x i32> @test_8xi32_zero_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_zero_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_zero_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -11190,7 +11190,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32>
}
define <8 x i32> @test_8xi32_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [1:1.00]
@@ -11198,7 +11198,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [3:1.00]
@@ -11212,14 +11212,14 @@ define <8 x i32> @test_8xi32_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2
define <8 x i32> @test_8xi32_zero_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_zero_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_zero_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7] sched: [3:1.00]
@@ -11231,12 +11231,12 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32>
}
define <8 x i32> @test_8xi32_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2) {
; GENERIC-LABEL: test_8xi32_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
@@ -11244,7 +11244,7 @@ define <8 x i32> @test_8xi32_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2) {
}
define <8 x i32> @test_8xi32_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
@@ -11252,7 +11252,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -11266,14 +11266,14 @@ define <8 x i32> @test_8xi32_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2
define <8 x i32> @test_8xi32_zero_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_zero_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_zero_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3] sched: [3:1.00]
@@ -11285,12 +11285,12 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32>
}
define <8 x i32> @test_8xi32_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p) {
; GENERIC-LABEL: test_8xi32_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x i32>, <8 x i32>* %vec2p
@@ -11299,7 +11299,7 @@ define <8 x i32> @test_8xi32_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p)
}
define <8 x i32> @test_8xi32_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [5:1.00]
@@ -11307,7 +11307,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [10:1.00]
@@ -11322,14 +11322,14 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>*
define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_zero_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_zero_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7] sched: [10:1.00]
@@ -11343,7 +11343,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i
define <8 x i32> @test_8xi32_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
@@ -11351,7 +11351,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -11366,14 +11366,14 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>*
define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_zero_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_zero_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -11387,7 +11387,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i
define <8 x i32> @test_8xi32_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
@@ -11395,7 +11395,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -11410,14 +11410,14 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>*
define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_zero_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_zero_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -11431,12 +11431,12 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i
define <8 x i32> @test_8xi32_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p) {
; GENERIC-LABEL: test_8xi32_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x i32>, <8 x i32>* %vec2p
@@ -11445,7 +11445,7 @@ define <8 x i32> @test_8xi32_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p)
}
define <8 x i32> @test_8xi32_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
@@ -11453,7 +11453,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -11468,14 +11468,14 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>*
define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xi32_zero_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi32_zero_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3] sched: [10:1.00]
@@ -11489,12 +11489,12 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i
define <16 x i32> @test_16xi32_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2) {
; GENERIC-LABEL: test_16xi32_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm1[2,3,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm1[2,3,6,7] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31>
@@ -11502,7 +11502,7 @@ define <16 x i32> @test_16xi32_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2) {
}
define <16 x i32> @test_16xi32_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,4,5,6,7],zmm1[4,5,6,7,12,13,14,15] sched: [1:1.00]
@@ -11510,7 +11510,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,4,5,6,7],zmm1[4,5,6,7,12,13,14,15] sched: [3:1.00]
@@ -11524,14 +11524,14 @@ define <16 x i32> @test_16xi32_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %
define <16 x i32> @test_16xi32_zero_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_zero_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],zmm1[4,5,6,7,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_zero_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],zmm1[4,5,6,7,12,13,14,15] sched: [3:1.00]
@@ -11543,7 +11543,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mask0(<16 x i32> %vec1, <16 x i
}
define <16 x i32> @test_16xi32_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,8,9,10,11],zmm1[8,9,10,11,4,5,6,7] sched: [1:1.00]
@@ -11551,7 +11551,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,8,9,10,11],zmm1[8,9,10,11,4,5,6,7] sched: [3:1.00]
@@ -11565,14 +11565,14 @@ define <16 x i32> @test_16xi32_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %
define <16 x i32> @test_16xi32_zero_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_zero_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,8,9,10,11],zmm1[8,9,10,11,4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_zero_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,8,9,10,11],zmm1[8,9,10,11,4,5,6,7] sched: [3:1.00]
@@ -11584,7 +11584,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mask1(<16 x i32> %vec1, <16 x i
}
define <16 x i32> @test_16xi32_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,8,9,10,11],zmm1[0,1,2,3,0,1,2,3] sched: [1:1.00]
@@ -11592,7 +11592,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,8,9,10,11],zmm1[0,1,2,3,0,1,2,3] sched: [3:1.00]
@@ -11606,14 +11606,14 @@ define <16 x i32> @test_16xi32_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %
define <16 x i32> @test_16xi32_zero_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_zero_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,8,9,10,11],zmm1[0,1,2,3,0,1,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_zero_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,8,9,10,11],zmm1[0,1,2,3,0,1,2,3] sched: [3:1.00]
@@ -11625,12 +11625,12 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mask2(<16 x i32> %vec1, <16 x i
}
define <16 x i32> @test_16xi32_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2) {
; GENERIC-LABEL: test_16xi32_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],zmm1[4,5,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],zmm1[4,5,2,3] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 24, i32 25, i32 26, i32 27, i32 20, i32 21, i32 22, i32 23>
@@ -11638,7 +11638,7 @@ define <16 x i32> @test_16xi32_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2) {
}
define <16 x i32> @test_16xi32_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,0,1,2,3],zmm1[8,9,10,11,4,5,6,7] sched: [1:1.00]
@@ -11646,7 +11646,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,0,1,2,3],zmm1[8,9,10,11,4,5,6,7] sched: [3:1.00]
@@ -11660,14 +11660,14 @@ define <16 x i32> @test_16xi32_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %
define <16 x i32> @test_16xi32_zero_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_zero_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,0,1,2,3],zmm1[8,9,10,11,4,5,6,7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_zero_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,0,1,2,3],zmm1[8,9,10,11,4,5,6,7] sched: [3:1.00]
@@ -11679,12 +11679,12 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mask3(<16 x i32> %vec1, <16 x i
}
define <16 x i32> @test_16xi32_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p) {
; GENERIC-LABEL: test_16xi32_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,2,3],mem[4,5,0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,2,3],mem[4,5,0,1] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <16 x i32>, <16 x i32>* %vec2p
@@ -11693,7 +11693,7 @@ define <16 x i32> @test_16xi32_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %ve
}
define <16 x i32> @test_16xi32_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,0,1,2,3] sched: [5:1.00]
@@ -11701,7 +11701,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,0,1,2,3] sched: [10:1.00]
@@ -11716,14 +11716,14 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i3
define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_zero_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,0,1,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_zero_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,0,1,2,3] sched: [10:1.00]
@@ -11737,7 +11737,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask0(<16 x i32> %vec1, <16
define <16 x i32> @test_16xi32_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,4,5,6,7],mem[0,1,2,3,8,9,10,11] sched: [5:1.00]
@@ -11745,7 +11745,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,4,5,6,7],mem[0,1,2,3,8,9,10,11] sched: [10:1.00]
@@ -11760,14 +11760,14 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i3
define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_zero_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],mem[0,1,2,3,8,9,10,11] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_zero_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],mem[0,1,2,3,8,9,10,11] sched: [10:1.00]
@@ -11781,7 +11781,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask1(<16 x i32> %vec1, <16
define <16 x i32> @test_16xi32_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,8,9,10,11],mem[12,13,14,15,12,13,14,15] sched: [5:1.00]
@@ -11789,7 +11789,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,8,9,10,11],mem[12,13,14,15,12,13,14,15] sched: [10:1.00]
@@ -11804,14 +11804,14 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i3
define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_zero_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,8,9,10,11],mem[12,13,14,15,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_zero_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,8,9,10,11],mem[12,13,14,15,12,13,14,15] sched: [10:1.00]
@@ -11825,12 +11825,12 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask2(<16 x i32> %vec1, <16
define <16 x i32> @test_16xi32_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p) {
; GENERIC-LABEL: test_16xi32_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],mem[2,3,6,7] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],mem[2,3,6,7] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <16 x i32>, <16 x i32>* %vec2p
@@ -11839,7 +11839,7 @@ define <16 x i32> @test_16xi32_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %ve
}
define <16 x i32> @test_16xi32_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,4,5,6,7],mem[4,5,6,7,12,13,14,15] sched: [5:1.00]
@@ -11847,7 +11847,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,4,5,6,7],mem[4,5,6,7,12,13,14,15] sched: [10:1.00]
@@ -11862,14 +11862,14 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i3
define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xi32_zero_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],mem[4,5,6,7,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xi32_zero_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],mem[4,5,6,7,12,13,14,15] sched: [10:1.00]
@@ -11883,12 +11883,12 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask3(<16 x i32> %vec1, <16
define <4 x i64> @test_4xi64_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2) {
; GENERIC-LABEL: test_4xi64_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -11896,7 +11896,7 @@ define <4 x i64> @test_4xi64_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2) {
}
define <4 x i64> @test_4xi64_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
@@ -11904,7 +11904,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
@@ -11918,14 +11918,14 @@ define <4 x i64> @test_4xi64_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2
define <4 x i64> @test_4xi64_zero_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_zero_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_zero_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
@@ -11937,7 +11937,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64>
}
define <4 x i64> @test_4xi64_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
@@ -11945,7 +11945,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
@@ -11959,14 +11959,14 @@ define <4 x i64> @test_4xi64_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2
define <4 x i64> @test_4xi64_zero_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_zero_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_zero_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
@@ -11978,7 +11978,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64>
}
define <4 x i64> @test_4xi64_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
@@ -11986,7 +11986,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
@@ -12000,14 +12000,14 @@ define <4 x i64> @test_4xi64_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2
define <4 x i64> @test_4xi64_zero_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_zero_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_zero_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
@@ -12019,12 +12019,12 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64>
}
define <4 x i64> @test_4xi64_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2) {
; GENERIC-LABEL: test_4xi64_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
@@ -12032,7 +12032,7 @@ define <4 x i64> @test_4xi64_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2) {
}
define <4 x i64> @test_4xi64_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
@@ -12040,7 +12040,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
@@ -12054,14 +12054,14 @@ define <4 x i64> @test_4xi64_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2
define <4 x i64> @test_4xi64_zero_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_zero_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_zero_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3] sched: [3:1.00]
@@ -12073,12 +12073,12 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64>
}
define <4 x i64> @test_4xi64_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p) {
; GENERIC-LABEL: test_4xi64_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x i64>, <4 x i64>* %vec2p
@@ -12087,7 +12087,7 @@ define <4 x i64> @test_4xi64_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p)
}
define <4 x i64> @test_4xi64_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3] sched: [5:1.00]
@@ -12095,7 +12095,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3] sched: [10:1.00]
@@ -12110,14 +12110,14 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>*
define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_zero_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_zero_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3] sched: [10:1.00]
@@ -12131,7 +12131,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i
define <4 x i64> @test_4xi64_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1] sched: [5:1.00]
@@ -12139,7 +12139,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1] sched: [10:1.00]
@@ -12154,14 +12154,14 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>*
define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_zero_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_zero_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1] sched: [10:1.00]
@@ -12175,7 +12175,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i
define <4 x i64> @test_4xi64_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1] sched: [5:1.00]
@@ -12183,7 +12183,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1] sched: [10:1.00]
@@ -12198,14 +12198,14 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>*
define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_zero_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_zero_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1] sched: [10:1.00]
@@ -12219,12 +12219,12 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i
define <4 x i64> @test_4xi64_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p) {
; GENERIC-LABEL: test_4xi64_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x i64>, <4 x i64>* %vec2p
@@ -12233,7 +12233,7 @@ define <4 x i64> @test_4xi64_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p)
}
define <4 x i64> @test_4xi64_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3] sched: [5:1.00]
@@ -12241,7 +12241,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3] sched: [10:1.00]
@@ -12256,14 +12256,14 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>*
define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xi64_zero_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xi64_zero_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3] sched: [10:1.00]
@@ -12277,12 +12277,12 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i
define <8 x i64> @test_8xi64_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2) {
; GENERIC-LABEL: test_8xi64_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,4,5],zmm1[4,5,4,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,4,5],zmm1[4,5,4,5] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> <i32 4, i32 5, i32 4, i32 5, i32 12, i32 13, i32 12, i32 13>
@@ -12290,7 +12290,7 @@ define <8 x i64> @test_8xi64_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2) {
}
define <8 x i64> @test_8xi64_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,4,5],zmm1[4,5,4,5] sched: [1:1.00]
@@ -12298,7 +12298,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,4,5],zmm1[4,5,4,5] sched: [3:1.00]
@@ -12312,14 +12312,14 @@ define <8 x i64> @test_8xi64_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2
define <8 x i64> @test_8xi64_zero_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_zero_masked_shuff_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,4,5],zmm1[4,5,4,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_zero_masked_shuff_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,4,5],zmm1[4,5,4,5] sched: [3:1.00]
@@ -12331,7 +12331,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64>
}
define <8 x i64> @test_8xi64_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,4,5],zmm1[2,3,4,5] sched: [1:1.00]
@@ -12339,7 +12339,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,4,5],zmm1[2,3,4,5] sched: [3:1.00]
@@ -12353,14 +12353,14 @@ define <8 x i64> @test_8xi64_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2
define <8 x i64> @test_8xi64_zero_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_zero_masked_shuff_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,4,5],zmm1[2,3,4,5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_zero_masked_shuff_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,4,5],zmm1[2,3,4,5] sched: [3:1.00]
@@ -12372,7 +12372,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64>
}
define <8 x i64> @test_8xi64_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[0,1,4,5],zmm1[0,1,0,1] sched: [1:1.00]
@@ -12380,7 +12380,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[0,1,4,5],zmm1[0,1,0,1] sched: [3:1.00]
@@ -12394,14 +12394,14 @@ define <8 x i64> @test_8xi64_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2
define <8 x i64> @test_8xi64_zero_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_zero_masked_shuff_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,4,5],zmm1[0,1,0,1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_zero_masked_shuff_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,4,5],zmm1[0,1,0,1] sched: [3:1.00]
@@ -12413,12 +12413,12 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64>
}
define <8 x i64> @test_8xi64_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2) {
; GENERIC-LABEL: test_8xi64_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,6,7],zmm1[4,5,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,6,7],zmm1[4,5,2,3] sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> <i32 2, i32 3, i32 6, i32 7, i32 12, i32 13, i32 10, i32 11>
@@ -12426,7 +12426,7 @@ define <8 x i64> @test_8xi64_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2) {
}
define <8 x i64> @test_8xi64_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[2,3,6,7],zmm1[4,5,2,3] sched: [1:1.00]
@@ -12434,7 +12434,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[2,3,6,7],zmm1[4,5,2,3] sched: [3:1.00]
@@ -12448,14 +12448,14 @@ define <8 x i64> @test_8xi64_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2
define <8 x i64> @test_8xi64_zero_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_zero_masked_shuff_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,6,7],zmm1[4,5,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_zero_masked_shuff_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,6,7],zmm1[4,5,2,3] sched: [3:1.00]
@@ -12467,12 +12467,12 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64>
}
define <8 x i64> @test_8xi64_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p) {
; GENERIC-LABEL: test_8xi64_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],mem[4,5,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],mem[4,5,2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x i64>, <8 x i64>* %vec2p
@@ -12481,7 +12481,7 @@ define <8 x i64> @test_8xi64_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p)
}
define <8 x i64> @test_8xi64_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,2,3],mem[4,5,2,3] sched: [5:1.00]
@@ -12489,7 +12489,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,2,3],mem[4,5,2,3] sched: [10:1.00]
@@ -12504,14 +12504,14 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>*
define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_zero_masked_shuff_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,2,3],mem[4,5,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_zero_masked_shuff_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,2,3],mem[4,5,2,3] sched: [10:1.00]
@@ -12525,7 +12525,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i
define <8 x i64> @test_8xi64_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[0,1,0,1] sched: [5:1.00]
@@ -12533,7 +12533,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[0,1,0,1] sched: [10:1.00]
@@ -12548,14 +12548,14 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>*
define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_zero_masked_shuff_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[0,1,0,1] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_zero_masked_shuff_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[0,1,0,1] sched: [10:1.00]
@@ -12569,7 +12569,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i
define <8 x i64> @test_8xi64_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[4,5,0,1],mem[2,3,2,3] sched: [5:1.00]
@@ -12577,7 +12577,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[4,5,0,1],mem[2,3,2,3] sched: [10:1.00]
@@ -12592,14 +12592,14 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>*
define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_zero_masked_shuff_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,0,1],mem[2,3,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_zero_masked_shuff_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,0,1],mem[2,3,2,3] sched: [10:1.00]
@@ -12613,12 +12613,12 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i
define <8 x i64> @test_8xi64_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p) {
; GENERIC-LABEL: test_8xi64_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[6,7,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[6,7,2,3] sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x i64>, <8 x i64>* %vec2p
@@ -12627,7 +12627,7 @@ define <8 x i64> @test_8xi64_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p)
}
define <8 x i64> @test_8xi64_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[6,7,2,3] sched: [5:1.00]
@@ -12635,7 +12635,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>*
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[6,7,2,3] sched: [10:1.00]
@@ -12650,14 +12650,14 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>*
define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xi64_zero_masked_shuff_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[6,7,2,3] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xi64_zero_masked_shuff_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[6,7,2,3] sched: [10:1.00]
@@ -12671,12 +12671,12 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i
define <4 x float> @test_4xfloat_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2) {
; GENERIC-LABEL: test_4xfloat_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -12684,7 +12684,7 @@ define <4 x float> @test_4xfloat_unpack_low_mask0(<4 x float> %vec1, <4 x float>
}
define <4 x float> @test_4xfloat_masked_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [3:1.00]
@@ -12692,7 +12692,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask0(<4 x float> %vec1, <4 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
@@ -12706,14 +12706,14 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask0(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
@@ -12725,7 +12725,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask0(<4 x float> %vec1,
}
define <4 x float> @test_4xfloat_masked_unpack_low_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [3:1.00]
@@ -12733,7 +12733,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask1(<4 x float> %vec1, <4 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
@@ -12747,14 +12747,14 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask1(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
@@ -12766,7 +12766,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask1(<4 x float> %vec1,
}
define <4 x float> @test_4xfloat_masked_unpack_low_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [3:1.00]
@@ -12774,7 +12774,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask2(<4 x float> %vec1, <4 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
@@ -12788,14 +12788,14 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask2(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
@@ -12807,12 +12807,12 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask2(<4 x float> %vec1,
}
define <4 x float> @test_4xfloat_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2) {
; GENERIC-LABEL: test_4xfloat_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -12820,7 +12820,7 @@ define <4 x float> @test_4xfloat_unpack_low_mask3(<4 x float> %vec1, <4 x float>
}
define <4 x float> @test_4xfloat_masked_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [3:1.00]
@@ -12828,7 +12828,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask3(<4 x float> %vec1, <4 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
@@ -12842,14 +12842,14 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask3(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
@@ -12861,12 +12861,12 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask3(<4 x float> %vec1,
}
define <4 x float> @test_4xfloat_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) {
; GENERIC-LABEL: test_4xfloat_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -12875,7 +12875,7 @@ define <4 x float> @test_4xfloat_unpack_low_mem_mask0(<4 x float> %vec1, <4 x fl
}
define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -12883,7 +12883,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask0(<4 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -12898,14 +12898,14 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask0(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -12919,7 +12919,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask0(<4 x float> %v
define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -12927,7 +12927,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask1(<4 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -12942,14 +12942,14 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask1(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -12963,7 +12963,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask1(<4 x float> %v
define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -12971,7 +12971,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask2(<4 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -12986,14 +12986,14 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask2(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -13007,12 +13007,12 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask2(<4 x float> %v
define <4 x float> @test_4xfloat_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) {
; GENERIC-LABEL: test_4xfloat_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -13021,7 +13021,7 @@ define <4 x float> @test_4xfloat_unpack_low_mem_mask3(<4 x float> %vec1, <4 x fl
}
define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -13029,7 +13029,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask3(<4 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -13044,14 +13044,14 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask3(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
@@ -13065,12 +13065,12 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask3(<4 x float> %v
define <8 x float> @test_8xfloat_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2) {
; GENERIC-LABEL: test_8xfloat_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -13078,7 +13078,7 @@ define <8 x float> @test_8xfloat_unpack_low_mask0(<8 x float> %vec1, <8 x float>
}
define <8 x float> @test_8xfloat_masked_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [3:1.00]
@@ -13086,7 +13086,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask0(<8 x float> %vec1, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
@@ -13100,14 +13100,14 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask0(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
@@ -13119,7 +13119,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask0(<8 x float> %vec1,
}
define <8 x float> @test_8xfloat_masked_unpack_low_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [3:1.00]
@@ -13127,7 +13127,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask1(<8 x float> %vec1, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
@@ -13141,14 +13141,14 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask1(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
@@ -13160,7 +13160,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask1(<8 x float> %vec1,
}
define <8 x float> @test_8xfloat_masked_unpack_low_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [3:1.00]
@@ -13168,7 +13168,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask2(<8 x float> %vec1, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
@@ -13182,14 +13182,14 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask2(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
@@ -13201,12 +13201,12 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask2(<8 x float> %vec1,
}
define <8 x float> @test_8xfloat_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2) {
; GENERIC-LABEL: test_8xfloat_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -13214,7 +13214,7 @@ define <8 x float> @test_8xfloat_unpack_low_mask3(<8 x float> %vec1, <8 x float>
}
define <8 x float> @test_8xfloat_masked_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [3:1.00]
@@ -13222,7 +13222,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask3(<8 x float> %vec1, <8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
@@ -13236,14 +13236,14 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask3(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
@@ -13255,12 +13255,12 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask3(<8 x float> %vec1,
}
define <8 x float> @test_8xfloat_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) {
; GENERIC-LABEL: test_8xfloat_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -13269,7 +13269,7 @@ define <8 x float> @test_8xfloat_unpack_low_mem_mask0(<8 x float> %vec1, <8 x fl
}
define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
@@ -13277,7 +13277,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask0(<8 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
@@ -13292,14 +13292,14 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask0(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
@@ -13313,7 +13313,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask0(<8 x float> %v
define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
@@ -13321,7 +13321,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask1(<8 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
@@ -13336,14 +13336,14 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask1(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
@@ -13357,7 +13357,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask1(<8 x float> %v
define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
@@ -13365,7 +13365,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask2(<8 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
@@ -13380,14 +13380,14 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask2(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
@@ -13401,12 +13401,12 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask2(<8 x float> %v
define <8 x float> @test_8xfloat_unpack_low_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) {
; GENERIC-LABEL: test_8xfloat_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -13415,7 +13415,7 @@ define <8 x float> @test_8xfloat_unpack_low_mem_mask3(<8 x float> %vec1, <8 x fl
}
define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
@@ -13423,7 +13423,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask3(<8 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
@@ -13438,14 +13438,14 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask3(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
@@ -13459,12 +13459,12 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask3(<8 x float> %v
define <16 x float> @test_16xfloat_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2) {
; GENERIC-LABEL: test_16xfloat_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -13472,7 +13472,7 @@ define <16 x float> @test_16xfloat_unpack_low_mask0(<16 x float> %vec1, <16 x fl
}
define <16 x float> @test_16xfloat_masked_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
@@ -13480,7 +13480,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask0(<16 x float> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
@@ -13494,14 +13494,14 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask0(<16 x float> %vec1, <
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
@@ -13513,7 +13513,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask0(<16 x float> %ve
}
define <16 x float> @test_16xfloat_masked_unpack_low_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
@@ -13521,7 +13521,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask1(<16 x float> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
@@ -13535,14 +13535,14 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask1(<16 x float> %vec1, <
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
@@ -13554,7 +13554,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask1(<16 x float> %ve
}
define <16 x float> @test_16xfloat_masked_unpack_low_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
@@ -13562,7 +13562,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask2(<16 x float> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
@@ -13576,14 +13576,14 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask2(<16 x float> %vec1, <
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
@@ -13595,12 +13595,12 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask2(<16 x float> %ve
}
define <16 x float> @test_16xfloat_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2) {
; GENERIC-LABEL: test_16xfloat_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -13608,7 +13608,7 @@ define <16 x float> @test_16xfloat_unpack_low_mask3(<16 x float> %vec1, <16 x fl
}
define <16 x float> @test_16xfloat_masked_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
@@ -13616,7 +13616,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask3(<16 x float> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
@@ -13630,14 +13630,14 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask3(<16 x float> %vec1, <
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] sched: [1:1.00]
@@ -13649,12 +13649,12 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask3(<16 x float> %ve
}
define <16 x float> @test_16xfloat_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) {
; GENERIC-LABEL: test_16xfloat_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -13663,7 +13663,7 @@ define <16 x float> @test_16xfloat_unpack_low_mem_mask0(<16 x float> %vec1, <16
}
define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
@@ -13671,7 +13671,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask0(<16 x float> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
@@ -13686,14 +13686,14 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask0(<16 x float> %vec
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
@@ -13707,7 +13707,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask0(<16 x float>
define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
@@ -13715,7 +13715,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask1(<16 x float> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
@@ -13730,14 +13730,14 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask1(<16 x float> %vec
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
@@ -13751,7 +13751,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask1(<16 x float>
define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
@@ -13759,7 +13759,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask2(<16 x float> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
@@ -13774,14 +13774,14 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask2(<16 x float> %vec
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
@@ -13795,12 +13795,12 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask2(<16 x float>
define <16 x float> @test_16xfloat_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
; GENERIC-LABEL: test_16xfloat_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -13809,7 +13809,7 @@ define <16 x float> @test_16xfloat_unpack_low_mem_mask3(<16 x float> %vec1, <16
}
define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
@@ -13817,7 +13817,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask3(<16 x float> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
@@ -13832,14 +13832,14 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask3(<16 x float> %vec
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] sched: [8:1.00]
@@ -13853,12 +13853,12 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask3(<16 x float>
define <2 x double> @test_2xdouble_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2) {
; GENERIC-LABEL: test_2xdouble_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 0, i32 2>
@@ -13866,7 +13866,7 @@ define <2 x double> @test_2xdouble_unpack_low_mask0(<2 x double> %vec1, <2 x dou
}
define <2 x double> @test_2xdouble_masked_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0] sched: [3:1.00]
@@ -13874,7 +13874,7 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mask0(<2 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0] sched: [1:1.00]
@@ -13888,14 +13888,14 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mask0(<2 x double> %vec1, <
define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_zero_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_zero_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0] sched: [1:1.00]
@@ -13907,7 +13907,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask0(<2 x double> %ve
}
define <2 x double> @test_2xdouble_masked_unpack_low_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0] sched: [3:1.00]
@@ -13915,7 +13915,7 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mask1(<2 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0] sched: [1:1.00]
@@ -13929,14 +13929,14 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mask1(<2 x double> %vec1, <
define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_zero_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_zero_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0] sched: [1:1.00]
@@ -13948,12 +13948,12 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask1(<2 x double> %ve
}
define <2 x double> @test_2xdouble_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) {
; GENERIC-LABEL: test_2xdouble_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <2 x double>, <2 x double>* %vec2p
@@ -13962,7 +13962,7 @@ define <2 x double> @test_2xdouble_unpack_low_mem_mask0(<2 x double> %vec1, <2 x
}
define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0] sched: [7:1.00]
@@ -13970,7 +13970,7 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask0(<2 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0] sched: [7:1.00]
@@ -13985,14 +13985,14 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask0(<2 x double> %vec
define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_zero_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_zero_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0] sched: [7:1.00]
@@ -14006,7 +14006,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask0(<2 x double>
define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0] sched: [7:1.00]
@@ -14014,7 +14014,7 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask1(<2 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0] sched: [7:1.00]
@@ -14029,14 +14029,14 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask1(<2 x double> %vec
define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_zero_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_zero_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0] sched: [7:1.00]
@@ -14050,12 +14050,12 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask1(<2 x double>
define <4 x double> @test_4xdouble_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2) {
; GENERIC-LABEL: test_4xdouble_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -14063,7 +14063,7 @@ define <4 x double> @test_4xdouble_unpack_low_mask0(<4 x double> %vec1, <4 x dou
}
define <4 x double> @test_4xdouble_masked_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [3:1.00]
@@ -14071,7 +14071,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask0(<4 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
@@ -14085,14 +14085,14 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask0(<4 x double> %vec1, <
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
@@ -14104,7 +14104,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask0(<4 x double> %ve
}
define <4 x double> @test_4xdouble_masked_unpack_low_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [3:1.00]
@@ -14112,7 +14112,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask1(<4 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
@@ -14126,14 +14126,14 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask1(<4 x double> %vec1, <
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
@@ -14145,7 +14145,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask1(<4 x double> %ve
}
define <4 x double> @test_4xdouble_masked_unpack_low_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [3:1.00]
@@ -14153,7 +14153,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask2(<4 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
@@ -14167,14 +14167,14 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask2(<4 x double> %vec1, <
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
@@ -14186,12 +14186,12 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask2(<4 x double> %ve
}
define <4 x double> @test_4xdouble_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2) {
; GENERIC-LABEL: test_4xdouble_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -14199,7 +14199,7 @@ define <4 x double> @test_4xdouble_unpack_low_mask3(<4 x double> %vec1, <4 x dou
}
define <4 x double> @test_4xdouble_masked_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [3:1.00]
@@ -14207,7 +14207,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask3(<4 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
@@ -14221,14 +14221,14 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask3(<4 x double> %vec1, <
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
@@ -14240,12 +14240,12 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask3(<4 x double> %ve
}
define <4 x double> @test_4xdouble_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
; GENERIC-LABEL: test_4xdouble_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -14254,7 +14254,7 @@ define <4 x double> @test_4xdouble_unpack_low_mem_mask0(<4 x double> %vec1, <4 x
}
define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
@@ -14262,7 +14262,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask0(<4 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
@@ -14277,14 +14277,14 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask0(<4 x double> %vec
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
@@ -14298,7 +14298,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask0(<4 x double>
define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
@@ -14306,7 +14306,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask1(<4 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
@@ -14321,14 +14321,14 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask1(<4 x double> %vec
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
@@ -14342,7 +14342,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask1(<4 x double>
define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
@@ -14350,7 +14350,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask2(<4 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
@@ -14365,14 +14365,14 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask2(<4 x double> %vec
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
@@ -14386,12 +14386,12 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask2(<4 x double>
define <4 x double> @test_4xdouble_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
; GENERIC-LABEL: test_4xdouble_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -14400,7 +14400,7 @@ define <4 x double> @test_4xdouble_unpack_low_mem_mask3(<4 x double> %vec1, <4 x
}
define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
@@ -14408,7 +14408,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask3(<4 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
@@ -14423,14 +14423,14 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask3(<4 x double> %vec
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
@@ -14444,12 +14444,12 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask3(<4 x double>
define <8 x double> @test_8xdouble_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2) {
; GENERIC-LABEL: test_8xdouble_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -14457,7 +14457,7 @@ define <8 x double> @test_8xdouble_unpack_low_mask0(<8 x double> %vec1, <8 x dou
}
define <8 x double> @test_8xdouble_masked_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
@@ -14465,7 +14465,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask0(<8 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
@@ -14479,14 +14479,14 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask0(<8 x double> %vec1, <
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_low_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_low_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
@@ -14498,7 +14498,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask0(<8 x double> %ve
}
define <8 x double> @test_8xdouble_masked_unpack_low_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
@@ -14506,7 +14506,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask1(<8 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
@@ -14520,14 +14520,14 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask1(<8 x double> %vec1, <
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_low_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_low_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
@@ -14539,7 +14539,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask1(<8 x double> %ve
}
define <8 x double> @test_8xdouble_masked_unpack_low_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
@@ -14547,7 +14547,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask2(<8 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
@@ -14561,14 +14561,14 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask2(<8 x double> %vec1, <
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_low_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_low_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
@@ -14580,12 +14580,12 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask2(<8 x double> %ve
}
define <8 x double> @test_8xdouble_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2) {
; GENERIC-LABEL: test_8xdouble_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -14593,7 +14593,7 @@ define <8 x double> @test_8xdouble_unpack_low_mask3(<8 x double> %vec1, <8 x dou
}
define <8 x double> @test_8xdouble_masked_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
@@ -14601,7 +14601,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask3(<8 x double> %vec1, <
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
@@ -14615,14 +14615,14 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask3(<8 x double> %vec1, <
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_low_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_low_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] sched: [1:1.00]
@@ -14634,12 +14634,12 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask3(<8 x double> %ve
}
define <8 x double> @test_8xdouble_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
; GENERIC-LABEL: test_8xdouble_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -14648,7 +14648,7 @@ define <8 x double> @test_8xdouble_unpack_low_mem_mask0(<8 x double> %vec1, <8 x
}
define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
@@ -14656,7 +14656,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask0(<8 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
@@ -14671,14 +14671,14 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask0(<8 x double> %vec
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
@@ -14692,7 +14692,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask0(<8 x double>
define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
@@ -14700,7 +14700,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask1(<8 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
@@ -14715,14 +14715,14 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask1(<8 x double> %vec
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
@@ -14736,7 +14736,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask1(<8 x double>
define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
@@ -14744,7 +14744,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask2(<8 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
@@ -14759,14 +14759,14 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask2(<8 x double> %vec
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
@@ -14780,12 +14780,12 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask2(<8 x double>
define <8 x double> @test_8xdouble_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
; GENERIC-LABEL: test_8xdouble_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -14794,7 +14794,7 @@ define <8 x double> @test_8xdouble_unpack_low_mem_mask3(<8 x double> %vec1, <8 x
}
define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
@@ -14802,7 +14802,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask3(<8 x double> %vec
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
@@ -14817,14 +14817,14 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask3(<8 x double> %vec
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] sched: [8:1.00]
@@ -14838,12 +14838,12 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask3(<8 x double>
define <4 x float> @test_4xfloat_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2) {
; GENERIC-LABEL: test_4xfloat_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -14851,7 +14851,7 @@ define <4 x float> @test_4xfloat_unpack_high_mask0(<4 x float> %vec1, <4 x float
}
define <4 x float> @test_4xfloat_masked_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [3:1.00]
@@ -14859,7 +14859,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask0(<4 x float> %vec1, <4
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
@@ -14873,14 +14873,14 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask0(<4 x float> %vec1, <4
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
@@ -14892,7 +14892,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask0(<4 x float> %vec1
}
define <4 x float> @test_4xfloat_masked_unpack_high_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [3:1.00]
@@ -14900,7 +14900,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask1(<4 x float> %vec1, <4
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
@@ -14914,14 +14914,14 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask1(<4 x float> %vec1, <4
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
@@ -14933,7 +14933,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask1(<4 x float> %vec1
}
define <4 x float> @test_4xfloat_masked_unpack_high_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [3:1.00]
@@ -14941,7 +14941,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask2(<4 x float> %vec1, <4
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
@@ -14955,14 +14955,14 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask2(<4 x float> %vec1, <4
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
@@ -14974,12 +14974,12 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask2(<4 x float> %vec1
}
define <4 x float> @test_4xfloat_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2) {
; GENERIC-LABEL: test_4xfloat_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -14987,7 +14987,7 @@ define <4 x float> @test_4xfloat_unpack_high_mask3(<4 x float> %vec1, <4 x float
}
define <4 x float> @test_4xfloat_masked_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [3:1.00]
@@ -14995,7 +14995,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask3(<4 x float> %vec1, <4
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
@@ -15009,14 +15009,14 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask3(<4 x float> %vec1, <4
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
@@ -15028,12 +15028,12 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask3(<4 x float> %vec1
}
define <4 x float> @test_4xfloat_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) {
; GENERIC-LABEL: test_4xfloat_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -15042,7 +15042,7 @@ define <4 x float> @test_4xfloat_unpack_high_mem_mask0(<4 x float> %vec1, <4 x f
}
define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15050,7 +15050,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask0(<4 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15065,14 +15065,14 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask0(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15086,7 +15086,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask0(<4 x float> %
define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15094,7 +15094,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask1(<4 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15109,14 +15109,14 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask1(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15130,7 +15130,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask1(<4 x float> %
define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15138,7 +15138,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask2(<4 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15153,14 +15153,14 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask2(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15174,12 +15174,12 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask2(<4 x float> %
define <4 x float> @test_4xfloat_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) {
; GENERIC-LABEL: test_4xfloat_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -15188,7 +15188,7 @@ define <4 x float> @test_4xfloat_unpack_high_mem_mask3(<4 x float> %vec1, <4 x f
}
define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15196,7 +15196,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask3(<4 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15211,14 +15211,14 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask3(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x i32> %mask) {
; GENERIC-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
@@ -15232,12 +15232,12 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask3(<4 x float> %
define <8 x float> @test_8xfloat_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2) {
; GENERIC-LABEL: test_8xfloat_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -15245,7 +15245,7 @@ define <8 x float> @test_8xfloat_unpack_high_mask0(<8 x float> %vec1, <8 x float
}
define <8 x float> @test_8xfloat_masked_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [3:1.00]
@@ -15253,7 +15253,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask0(<8 x float> %vec1, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
@@ -15267,14 +15267,14 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask0(<8 x float> %vec1, <8
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
@@ -15286,7 +15286,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask0(<8 x float> %vec1
}
define <8 x float> @test_8xfloat_masked_unpack_high_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [3:1.00]
@@ -15294,7 +15294,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask1(<8 x float> %vec1, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
@@ -15308,14 +15308,14 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask1(<8 x float> %vec1, <8
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
@@ -15327,7 +15327,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask1(<8 x float> %vec1
}
define <8 x float> @test_8xfloat_masked_unpack_high_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [3:1.00]
@@ -15335,7 +15335,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask2(<8 x float> %vec1, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
@@ -15349,14 +15349,14 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask2(<8 x float> %vec1, <8
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
@@ -15368,12 +15368,12 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask2(<8 x float> %vec1
}
define <8 x float> @test_8xfloat_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2) {
; GENERIC-LABEL: test_8xfloat_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -15381,7 +15381,7 @@ define <8 x float> @test_8xfloat_unpack_high_mask3(<8 x float> %vec1, <8 x float
}
define <8 x float> @test_8xfloat_masked_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [3:1.00]
@@ -15389,7 +15389,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask3(<8 x float> %vec1, <8
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
@@ -15403,14 +15403,14 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask3(<8 x float> %vec1, <8
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
@@ -15422,12 +15422,12 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask3(<8 x float> %vec1
}
define <8 x float> @test_8xfloat_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) {
; GENERIC-LABEL: test_8xfloat_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -15436,7 +15436,7 @@ define <8 x float> @test_8xfloat_unpack_high_mem_mask0(<8 x float> %vec1, <8 x f
}
define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
@@ -15444,7 +15444,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask0(<8 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
@@ -15459,14 +15459,14 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask0(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
@@ -15480,7 +15480,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask0(<8 x float> %
define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
@@ -15488,7 +15488,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask1(<8 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
@@ -15503,14 +15503,14 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask1(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
@@ -15524,7 +15524,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask1(<8 x float> %
define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
@@ -15532,7 +15532,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask2(<8 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
@@ -15547,14 +15547,14 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask2(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
@@ -15568,12 +15568,12 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask2(<8 x float> %
define <8 x float> @test_8xfloat_unpack_high_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) {
; GENERIC-LABEL: test_8xfloat_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -15582,7 +15582,7 @@ define <8 x float> @test_8xfloat_unpack_high_mem_mask3(<8 x float> %vec1, <8 x f
}
define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
@@ -15590,7 +15590,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask3(<8 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
@@ -15605,14 +15605,14 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask3(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x i32> %mask) {
; GENERIC-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
@@ -15626,12 +15626,12 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask3(<8 x float> %
define <16 x float> @test_16xfloat_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2) {
; GENERIC-LABEL: test_16xfloat_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -15639,7 +15639,7 @@ define <16 x float> @test_16xfloat_unpack_high_mask0(<16 x float> %vec1, <16 x f
}
define <16 x float> @test_16xfloat_masked_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
@@ -15647,7 +15647,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask0(<16 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
@@ -15661,14 +15661,14 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask0(<16 x float> %vec1,
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
@@ -15680,7 +15680,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask0(<16 x float> %v
}
define <16 x float> @test_16xfloat_masked_unpack_high_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
@@ -15688,7 +15688,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask1(<16 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
@@ -15702,14 +15702,14 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask1(<16 x float> %vec1,
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
@@ -15721,7 +15721,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask1(<16 x float> %v
}
define <16 x float> @test_16xfloat_masked_unpack_high_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
@@ -15729,7 +15729,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask2(<16 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
@@ -15743,14 +15743,14 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask2(<16 x float> %vec1,
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
@@ -15762,12 +15762,12 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask2(<16 x float> %v
}
define <16 x float> @test_16xfloat_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2) {
; GENERIC-LABEL: test_16xfloat_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -15775,7 +15775,7 @@ define <16 x float> @test_16xfloat_unpack_high_mask3(<16 x float> %vec1, <16 x f
}
define <16 x float> @test_16xfloat_masked_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
@@ -15783,7 +15783,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask3(<16 x float> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
@@ -15797,14 +15797,14 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask3(<16 x float> %vec1,
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] sched: [1:1.00]
@@ -15816,12 +15816,12 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask3(<16 x float> %v
}
define <16 x float> @test_16xfloat_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) {
; GENERIC-LABEL: test_16xfloat_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -15830,7 +15830,7 @@ define <16 x float> @test_16xfloat_unpack_high_mem_mask0(<16 x float> %vec1, <16
}
define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
@@ -15838,7 +15838,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask0(<16 x float> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
@@ -15853,14 +15853,14 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask0(<16 x float> %ve
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
@@ -15874,7 +15874,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask0(<16 x float
define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
@@ -15882,7 +15882,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask1(<16 x float> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
@@ -15897,14 +15897,14 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask1(<16 x float> %ve
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
@@ -15918,7 +15918,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask1(<16 x float
define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
@@ -15926,7 +15926,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask2(<16 x float> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
@@ -15941,14 +15941,14 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask2(<16 x float> %ve
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
@@ -15962,12 +15962,12 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask2(<16 x float
define <16 x float> @test_16xfloat_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
; GENERIC-LABEL: test_16xfloat_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -15976,7 +15976,7 @@ define <16 x float> @test_16xfloat_unpack_high_mem_mask3(<16 x float> %vec1, <16
}
define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
@@ -15984,7 +15984,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask3(<16 x float> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
@@ -15999,14 +15999,14 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask3(<16 x float> %ve
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x i32> %mask) {
; GENERIC-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] sched: [8:1.00]
@@ -16020,12 +16020,12 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask3(<16 x float
define <2 x double> @test_2xdouble_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2) {
; GENERIC-LABEL: test_2xdouble_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
@@ -16033,7 +16033,7 @@ define <2 x double> @test_2xdouble_unpack_high_mask0(<2 x double> %vec1, <2 x do
}
define <2 x double> @test_2xdouble_masked_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[1] sched: [3:1.00]
@@ -16041,7 +16041,7 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mask0(<2 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[1] sched: [1:1.00]
@@ -16055,14 +16055,14 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mask0(<2 x double> %vec1,
define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_zero_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_zero_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1] sched: [1:1.00]
@@ -16074,7 +16074,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask0(<2 x double> %v
}
define <2 x double> @test_2xdouble_masked_unpack_high_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm4, %xmm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[1] sched: [3:1.00]
@@ -16082,7 +16082,7 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mask1(<2 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm4, %xmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[1] sched: [1:1.00]
@@ -16096,14 +16096,14 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mask1(<2 x double> %vec1,
define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_zero_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_zero_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1] sched: [1:1.00]
@@ -16115,12 +16115,12 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask1(<2 x double> %v
}
define <2 x double> @test_2xdouble_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) {
; GENERIC-LABEL: test_2xdouble_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <2 x double>, <2 x double>* %vec2p
@@ -16129,7 +16129,7 @@ define <2 x double> @test_2xdouble_unpack_high_mem_mask0(<2 x double> %vec1, <2
}
define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[1] sched: [7:1.00]
@@ -16137,7 +16137,7 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask0(<2 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[1] sched: [7:1.00]
@@ -16152,14 +16152,14 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask0(<2 x double> %ve
define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_zero_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_zero_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[1] sched: [7:1.00]
@@ -16173,7 +16173,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask0(<2 x double
define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm3, %xmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[1] sched: [7:1.00]
@@ -16181,7 +16181,7 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask1(<2 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm3, %xmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[1] sched: [7:1.00]
@@ -16196,14 +16196,14 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask1(<2 x double> %ve
define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x i64> %mask) {
; GENERIC-LABEL: test_2xdouble_zero_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_2xdouble_zero_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[1] sched: [7:1.00]
@@ -16217,12 +16217,12 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask1(<2 x double
define <4 x double> @test_4xdouble_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2) {
; GENERIC-LABEL: test_4xdouble_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -16230,7 +16230,7 @@ define <4 x double> @test_4xdouble_unpack_high_mask0(<4 x double> %vec1, <4 x do
}
define <4 x double> @test_4xdouble_masked_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [3:1.00]
@@ -16238,7 +16238,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask0(<4 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
@@ -16252,14 +16252,14 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask0(<4 x double> %vec1,
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
@@ -16271,7 +16271,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask0(<4 x double> %v
}
define <4 x double> @test_4xdouble_masked_unpack_high_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [3:1.00]
@@ -16279,7 +16279,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask1(<4 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
@@ -16293,14 +16293,14 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask1(<4 x double> %vec1,
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
@@ -16312,7 +16312,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask1(<4 x double> %v
}
define <4 x double> @test_4xdouble_masked_unpack_high_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [3:1.00]
@@ -16320,7 +16320,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask2(<4 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
@@ -16334,14 +16334,14 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask2(<4 x double> %vec1,
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
@@ -16353,12 +16353,12 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask2(<4 x double> %v
}
define <4 x double> @test_4xdouble_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2) {
; GENERIC-LABEL: test_4xdouble_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -16366,7 +16366,7 @@ define <4 x double> @test_4xdouble_unpack_high_mask3(<4 x double> %vec1, <4 x do
}
define <4 x double> @test_4xdouble_masked_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [3:1.00]
@@ -16374,7 +16374,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask3(<4 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm4, %ymm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
@@ -16388,14 +16388,14 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask3(<4 x double> %vec1,
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
@@ -16407,12 +16407,12 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask3(<4 x double> %v
}
define <4 x double> @test_4xdouble_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
; GENERIC-LABEL: test_4xdouble_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -16421,7 +16421,7 @@ define <4 x double> @test_4xdouble_unpack_high_mem_mask0(<4 x double> %vec1, <4
}
define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
@@ -16429,7 +16429,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask0(<4 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
@@ -16444,14 +16444,14 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask0(<4 x double> %ve
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
@@ -16465,7 +16465,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask0(<4 x double
define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
@@ -16473,7 +16473,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask1(<4 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
@@ -16488,14 +16488,14 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask1(<4 x double> %ve
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
@@ -16509,7 +16509,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask1(<4 x double
define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
@@ -16517,7 +16517,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask2(<4 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
@@ -16532,14 +16532,14 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask2(<4 x double> %ve
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
@@ -16553,12 +16553,12 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask2(<4 x double
define <4 x double> @test_4xdouble_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
; GENERIC-LABEL: test_4xdouble_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -16567,7 +16567,7 @@ define <4 x double> @test_4xdouble_unpack_high_mem_mask3(<4 x double> %vec1, <4
}
define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
@@ -16575,7 +16575,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask3(<4 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm3, %ymm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
@@ -16590,14 +16590,14 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask3(<4 x double> %ve
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x i64> %mask) {
; GENERIC-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %ymm2, %ymm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
@@ -16611,12 +16611,12 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask3(<4 x double
define <8 x double> @test_8xdouble_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2) {
; GENERIC-LABEL: test_8xdouble_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -16624,7 +16624,7 @@ define <8 x double> @test_8xdouble_unpack_high_mask0(<8 x double> %vec1, <8 x do
}
define <8 x double> @test_8xdouble_masked_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
@@ -16632,7 +16632,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask0(<8 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
@@ -16646,14 +16646,14 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask0(<8 x double> %vec1,
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_high_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_high_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
@@ -16665,7 +16665,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask0(<8 x double> %v
}
define <8 x double> @test_8xdouble_masked_unpack_high_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
@@ -16673,7 +16673,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask1(<8 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
@@ -16687,14 +16687,14 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask1(<8 x double> %vec1,
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_high_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_high_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
@@ -16706,7 +16706,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask1(<8 x double> %v
}
define <8 x double> @test_8xdouble_masked_unpack_high_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
@@ -16714,7 +16714,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask2(<8 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
@@ -16728,14 +16728,14 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask2(<8 x double> %vec1,
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_high_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_high_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
@@ -16747,12 +16747,12 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask2(<8 x double> %v
}
define <8 x double> @test_8xdouble_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2) {
; GENERIC-LABEL: test_8xdouble_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -16760,7 +16760,7 @@ define <8 x double> @test_8xdouble_unpack_high_mask3(<8 x double> %vec1, <8 x do
}
define <8 x double> @test_8xdouble_masked_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
@@ -16768,7 +16768,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask3(<8 x double> %vec1,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
@@ -16782,14 +16782,14 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask3(<8 x double> %vec1,
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_high_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_high_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] sched: [1:1.00]
@@ -16801,12 +16801,12 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask3(<8 x double> %v
}
define <8 x double> @test_8xdouble_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
; GENERIC-LABEL: test_8xdouble_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -16815,7 +16815,7 @@ define <8 x double> @test_8xdouble_unpack_high_mem_mask0(<8 x double> %vec1, <8
}
define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
@@ -16823,7 +16823,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask0(<8 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
@@ -16838,14 +16838,14 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask0(<8 x double> %ve
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask0:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask0:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
@@ -16859,7 +16859,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask0(<8 x double
define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
@@ -16867,7 +16867,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask1(<8 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
@@ -16882,14 +16882,14 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask1(<8 x double> %ve
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
@@ -16903,7 +16903,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask1(<8 x double
define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
@@ -16911,7 +16911,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask2(<8 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
@@ -16926,14 +16926,14 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask2(<8 x double> %ve
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
@@ -16947,12 +16947,12 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask2(<8 x double
define <8 x double> @test_8xdouble_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
; GENERIC-LABEL: test_8xdouble_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -16961,7 +16961,7 @@ define <8 x double> @test_8xdouble_unpack_high_mem_mask3(<8 x double> %vec1, <8
}
define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
@@ -16969,7 +16969,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask3(<8 x double> %ve
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
@@ -16984,14 +16984,14 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask3(<8 x double> %ve
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x i64> %mask) {
; GENERIC-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask3:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; GENERIC-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
; SKX-NEXT: vpcmpeqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7] sched: [8:1.00]
diff --git a/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll b/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll
index 5d67dd5a88b..1d477940c6e 100644
--- a/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll
+++ b/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll
@@ -3,7 +3,7 @@
define <4 x double> @test_double_to_4(double %s) {
; CHECK-LABEL: test_double_to_4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: retq
%vec = insertelement <2 x double> undef, double %s, i32 0
@@ -12,7 +12,7 @@ define <4 x double> @test_double_to_4(double %s) {
}
define <4 x double> @test_masked_double_to_4_mask0(double %s, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_4_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1}
@@ -27,7 +27,7 @@ define <4 x double> @test_masked_double_to_4_mask0(double %s, <4 x double> %defa
define <4 x double> @test_masked_z_double_to_4_mask0(double %s, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_4_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z}
@@ -40,7 +40,7 @@ define <4 x double> @test_masked_z_double_to_4_mask0(double %s, <4 x double> %ma
}
define <4 x double> @test_masked_double_to_4_mask1(double %s, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_4_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1}
@@ -55,7 +55,7 @@ define <4 x double> @test_masked_double_to_4_mask1(double %s, <4 x double> %defa
define <4 x double> @test_masked_z_double_to_4_mask1(double %s, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_4_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z}
@@ -68,7 +68,7 @@ define <4 x double> @test_masked_z_double_to_4_mask1(double %s, <4 x double> %ma
}
define <4 x double> @test_masked_double_to_4_mask2(double %s, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_4_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1}
@@ -83,7 +83,7 @@ define <4 x double> @test_masked_double_to_4_mask2(double %s, <4 x double> %defa
define <4 x double> @test_masked_z_double_to_4_mask2(double %s, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_4_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z}
@@ -96,7 +96,7 @@ define <4 x double> @test_masked_z_double_to_4_mask2(double %s, <4 x double> %ma
}
define <4 x double> @test_masked_double_to_4_mask3(double %s, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_4_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1}
@@ -111,7 +111,7 @@ define <4 x double> @test_masked_double_to_4_mask3(double %s, <4 x double> %defa
define <4 x double> @test_masked_z_double_to_4_mask3(double %s, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_4_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z}
@@ -124,7 +124,7 @@ define <4 x double> @test_masked_z_double_to_4_mask3(double %s, <4 x double> %ma
}
define <8 x double> @test_double_to_8(double %s) {
; CHECK-LABEL: test_double_to_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x double> undef, double %s, i32 0
@@ -133,7 +133,7 @@ define <8 x double> @test_double_to_8(double %s) {
}
define <8 x double> @test_masked_double_to_8_mask0(double %s, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -148,7 +148,7 @@ define <8 x double> @test_masked_double_to_8_mask0(double %s, <8 x double> %defa
define <8 x double> @test_masked_z_double_to_8_mask0(double %s, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
@@ -161,7 +161,7 @@ define <8 x double> @test_masked_z_double_to_8_mask0(double %s, <8 x double> %ma
}
define <8 x double> @test_masked_double_to_8_mask1(double %s, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -176,7 +176,7 @@ define <8 x double> @test_masked_double_to_8_mask1(double %s, <8 x double> %defa
define <8 x double> @test_masked_z_double_to_8_mask1(double %s, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
@@ -189,7 +189,7 @@ define <8 x double> @test_masked_z_double_to_8_mask1(double %s, <8 x double> %ma
}
define <8 x double> @test_masked_double_to_8_mask2(double %s, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -204,7 +204,7 @@ define <8 x double> @test_masked_double_to_8_mask2(double %s, <8 x double> %defa
define <8 x double> @test_masked_z_double_to_8_mask2(double %s, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
@@ -217,7 +217,7 @@ define <8 x double> @test_masked_z_double_to_8_mask2(double %s, <8 x double> %ma
}
define <8 x double> @test_masked_double_to_8_mask3(double %s, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -232,7 +232,7 @@ define <8 x double> @test_masked_double_to_8_mask3(double %s, <8 x double> %defa
define <8 x double> @test_masked_z_double_to_8_mask3(double %s, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
@@ -245,7 +245,7 @@ define <8 x double> @test_masked_z_double_to_8_mask3(double %s, <8 x double> %ma
}
define <4 x float> @test_float_to_4(float %s) {
; CHECK-LABEL: test_float_to_4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x float> undef, float %s, i32 0
@@ -254,7 +254,7 @@ define <4 x float> @test_float_to_4(float %s) {
}
define <4 x float> @test_masked_float_to_4_mask0(float %s, <4 x float> %default, <4 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_4_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1}
@@ -269,7 +269,7 @@ define <4 x float> @test_masked_float_to_4_mask0(float %s, <4 x float> %default,
define <4 x float> @test_masked_z_float_to_4_mask0(float %s, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_4_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z}
@@ -282,7 +282,7 @@ define <4 x float> @test_masked_z_float_to_4_mask0(float %s, <4 x float> %mask)
}
define <4 x float> @test_masked_float_to_4_mask1(float %s, <4 x float> %default, <4 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_4_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1}
@@ -297,7 +297,7 @@ define <4 x float> @test_masked_float_to_4_mask1(float %s, <4 x float> %default,
define <4 x float> @test_masked_z_float_to_4_mask1(float %s, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_4_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z}
@@ -310,7 +310,7 @@ define <4 x float> @test_masked_z_float_to_4_mask1(float %s, <4 x float> %mask)
}
define <4 x float> @test_masked_float_to_4_mask2(float %s, <4 x float> %default, <4 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_4_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1}
@@ -325,7 +325,7 @@ define <4 x float> @test_masked_float_to_4_mask2(float %s, <4 x float> %default,
define <4 x float> @test_masked_z_float_to_4_mask2(float %s, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_4_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z}
@@ -338,7 +338,7 @@ define <4 x float> @test_masked_z_float_to_4_mask2(float %s, <4 x float> %mask)
}
define <4 x float> @test_masked_float_to_4_mask3(float %s, <4 x float> %default, <4 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_4_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1}
@@ -353,7 +353,7 @@ define <4 x float> @test_masked_float_to_4_mask3(float %s, <4 x float> %default,
define <4 x float> @test_masked_z_float_to_4_mask3(float %s, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_4_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z}
@@ -366,7 +366,7 @@ define <4 x float> @test_masked_z_float_to_4_mask3(float %s, <4 x float> %mask)
}
define <8 x float> @test_float_to_8(float %s) {
; CHECK-LABEL: test_float_to_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: retq
%vec = insertelement <2 x float> undef, float %s, i32 0
@@ -375,7 +375,7 @@ define <8 x float> @test_float_to_8(float %s) {
}
define <8 x float> @test_masked_float_to_8_mask0(float %s, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1}
@@ -390,7 +390,7 @@ define <8 x float> @test_masked_float_to_8_mask0(float %s, <8 x float> %default,
define <8 x float> @test_masked_z_float_to_8_mask0(float %s, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z}
@@ -403,7 +403,7 @@ define <8 x float> @test_masked_z_float_to_8_mask0(float %s, <8 x float> %mask)
}
define <8 x float> @test_masked_float_to_8_mask1(float %s, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1}
@@ -418,7 +418,7 @@ define <8 x float> @test_masked_float_to_8_mask1(float %s, <8 x float> %default,
define <8 x float> @test_masked_z_float_to_8_mask1(float %s, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z}
@@ -431,7 +431,7 @@ define <8 x float> @test_masked_z_float_to_8_mask1(float %s, <8 x float> %mask)
}
define <8 x float> @test_masked_float_to_8_mask2(float %s, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1}
@@ -446,7 +446,7 @@ define <8 x float> @test_masked_float_to_8_mask2(float %s, <8 x float> %default,
define <8 x float> @test_masked_z_float_to_8_mask2(float %s, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z}
@@ -459,7 +459,7 @@ define <8 x float> @test_masked_z_float_to_8_mask2(float %s, <8 x float> %mask)
}
define <8 x float> @test_masked_float_to_8_mask3(float %s, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1}
@@ -474,7 +474,7 @@ define <8 x float> @test_masked_float_to_8_mask3(float %s, <8 x float> %default,
define <8 x float> @test_masked_z_float_to_8_mask3(float %s, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z}
@@ -487,7 +487,7 @@ define <8 x float> @test_masked_z_float_to_8_mask3(float %s, <8 x float> %mask)
}
define <16 x float> @test_float_to_16(float %s) {
; CHECK-LABEL: test_float_to_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %zmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x float> undef, float %s, i32 0
@@ -496,7 +496,7 @@ define <16 x float> @test_float_to_16(float %s) {
}
define <16 x float> @test_masked_float_to_16_mask0(float %s, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_16_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
@@ -511,7 +511,7 @@ define <16 x float> @test_masked_float_to_16_mask0(float %s, <16 x float> %defau
define <16 x float> @test_masked_z_float_to_16_mask0(float %s, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_16_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
@@ -524,7 +524,7 @@ define <16 x float> @test_masked_z_float_to_16_mask0(float %s, <16 x float> %mas
}
define <16 x float> @test_masked_float_to_16_mask1(float %s, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_16_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
@@ -539,7 +539,7 @@ define <16 x float> @test_masked_float_to_16_mask1(float %s, <16 x float> %defau
define <16 x float> @test_masked_z_float_to_16_mask1(float %s, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_16_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
@@ -552,7 +552,7 @@ define <16 x float> @test_masked_z_float_to_16_mask1(float %s, <16 x float> %mas
}
define <16 x float> @test_masked_float_to_16_mask2(float %s, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_16_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
@@ -567,7 +567,7 @@ define <16 x float> @test_masked_float_to_16_mask2(float %s, <16 x float> %defau
define <16 x float> @test_masked_z_float_to_16_mask2(float %s, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_16_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
@@ -580,7 +580,7 @@ define <16 x float> @test_masked_z_float_to_16_mask2(float %s, <16 x float> %mas
}
define <16 x float> @test_masked_float_to_16_mask3(float %s, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_16_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
@@ -595,7 +595,7 @@ define <16 x float> @test_masked_float_to_16_mask3(float %s, <16 x float> %defau
define <16 x float> @test_masked_z_float_to_16_mask3(float %s, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_16_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
@@ -608,7 +608,7 @@ define <16 x float> @test_masked_z_float_to_16_mask3(float %s, <16 x float> %mas
}
define <4 x double> @test_double_to_4_mem(double* %p) {
; CHECK-LABEL: test_double_to_4_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0
; CHECK-NEXT: retq
%s = load double, double* %p
@@ -618,7 +618,7 @@ define <4 x double> @test_double_to_4_mem(double* %p) {
}
define <4 x double> @test_masked_double_to_4_mem_mask0(double* %p, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_4_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1}
@@ -633,7 +633,7 @@ define <4 x double> @test_masked_double_to_4_mem_mask0(double* %p, <4 x double>
define <4 x double> @test_masked_z_double_to_4_mem_mask0(double* %p, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_4_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} {z}
@@ -647,7 +647,7 @@ define <4 x double> @test_masked_z_double_to_4_mem_mask0(double* %p, <4 x double
}
define <4 x double> @test_masked_double_to_4_mem_mask1(double* %p, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_4_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1}
@@ -662,7 +662,7 @@ define <4 x double> @test_masked_double_to_4_mem_mask1(double* %p, <4 x double>
define <4 x double> @test_masked_z_double_to_4_mem_mask1(double* %p, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_4_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} {z}
@@ -676,7 +676,7 @@ define <4 x double> @test_masked_z_double_to_4_mem_mask1(double* %p, <4 x double
}
define <4 x double> @test_masked_double_to_4_mem_mask2(double* %p, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_4_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1}
@@ -691,7 +691,7 @@ define <4 x double> @test_masked_double_to_4_mem_mask2(double* %p, <4 x double>
define <4 x double> @test_masked_z_double_to_4_mem_mask2(double* %p, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_4_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} {z}
@@ -705,7 +705,7 @@ define <4 x double> @test_masked_z_double_to_4_mem_mask2(double* %p, <4 x double
}
define <4 x double> @test_masked_double_to_4_mem_mask3(double* %p, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_4_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1}
@@ -720,7 +720,7 @@ define <4 x double> @test_masked_double_to_4_mem_mask3(double* %p, <4 x double>
define <4 x double> @test_masked_z_double_to_4_mem_mask3(double* %p, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_4_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} {z}
@@ -734,7 +734,7 @@ define <4 x double> @test_masked_z_double_to_4_mem_mask3(double* %p, <4 x double
}
define <8 x double> @test_double_to_8_mem(double* %p) {
; CHECK-LABEL: test_double_to_8_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0
; CHECK-NEXT: retq
%s = load double, double* %p
@@ -744,7 +744,7 @@ define <8 x double> @test_double_to_8_mem(double* %p) {
}
define <8 x double> @test_masked_double_to_8_mem_mask0(double* %p, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
@@ -759,7 +759,7 @@ define <8 x double> @test_masked_double_to_8_mem_mask0(double* %p, <8 x double>
define <8 x double> @test_masked_z_double_to_8_mem_mask0(double* %p, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
@@ -773,7 +773,7 @@ define <8 x double> @test_masked_z_double_to_8_mem_mask0(double* %p, <8 x double
}
define <8 x double> @test_masked_double_to_8_mem_mask1(double* %p, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
@@ -788,7 +788,7 @@ define <8 x double> @test_masked_double_to_8_mem_mask1(double* %p, <8 x double>
define <8 x double> @test_masked_z_double_to_8_mem_mask1(double* %p, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
@@ -802,7 +802,7 @@ define <8 x double> @test_masked_z_double_to_8_mem_mask1(double* %p, <8 x double
}
define <8 x double> @test_masked_double_to_8_mem_mask2(double* %p, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
@@ -817,7 +817,7 @@ define <8 x double> @test_masked_double_to_8_mem_mask2(double* %p, <8 x double>
define <8 x double> @test_masked_z_double_to_8_mem_mask2(double* %p, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
@@ -831,7 +831,7 @@ define <8 x double> @test_masked_z_double_to_8_mem_mask2(double* %p, <8 x double
}
define <8 x double> @test_masked_double_to_8_mem_mask3(double* %p, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_double_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
@@ -846,7 +846,7 @@ define <8 x double> @test_masked_double_to_8_mem_mask3(double* %p, <8 x double>
define <8 x double> @test_masked_z_double_to_8_mem_mask3(double* %p, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_double_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
@@ -860,7 +860,7 @@ define <8 x double> @test_masked_z_double_to_8_mem_mask3(double* %p, <8 x double
}
define <4 x float> @test_float_to_4_mem(float* %p) {
; CHECK-LABEL: test_float_to_4_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
; CHECK-NEXT: retq
%s = load float, float* %p
@@ -870,7 +870,7 @@ define <4 x float> @test_float_to_4_mem(float* %p) {
}
define <4 x float> @test_masked_float_to_4_mem_mask0(float* %p, <4 x float> %default, <4 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_4_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1}
@@ -885,7 +885,7 @@ define <4 x float> @test_masked_float_to_4_mem_mask0(float* %p, <4 x float> %def
define <4 x float> @test_masked_z_float_to_4_mem_mask0(float* %p, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_4_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} {z}
@@ -899,7 +899,7 @@ define <4 x float> @test_masked_z_float_to_4_mem_mask0(float* %p, <4 x float> %m
}
define <4 x float> @test_masked_float_to_4_mem_mask1(float* %p, <4 x float> %default, <4 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_4_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1}
@@ -914,7 +914,7 @@ define <4 x float> @test_masked_float_to_4_mem_mask1(float* %p, <4 x float> %def
define <4 x float> @test_masked_z_float_to_4_mem_mask1(float* %p, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_4_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} {z}
@@ -928,7 +928,7 @@ define <4 x float> @test_masked_z_float_to_4_mem_mask1(float* %p, <4 x float> %m
}
define <4 x float> @test_masked_float_to_4_mem_mask2(float* %p, <4 x float> %default, <4 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_4_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1}
@@ -943,7 +943,7 @@ define <4 x float> @test_masked_float_to_4_mem_mask2(float* %p, <4 x float> %def
define <4 x float> @test_masked_z_float_to_4_mem_mask2(float* %p, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_4_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} {z}
@@ -957,7 +957,7 @@ define <4 x float> @test_masked_z_float_to_4_mem_mask2(float* %p, <4 x float> %m
}
define <4 x float> @test_masked_float_to_4_mem_mask3(float* %p, <4 x float> %default, <4 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_4_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1}
@@ -972,7 +972,7 @@ define <4 x float> @test_masked_float_to_4_mem_mask3(float* %p, <4 x float> %def
define <4 x float> @test_masked_z_float_to_4_mem_mask3(float* %p, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_4_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} {z}
@@ -986,7 +986,7 @@ define <4 x float> @test_masked_z_float_to_4_mem_mask3(float* %p, <4 x float> %m
}
define <8 x float> @test_float_to_8_mem(float* %p) {
; CHECK-LABEL: test_float_to_8_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0
; CHECK-NEXT: retq
%s = load float, float* %p
@@ -996,7 +996,7 @@ define <8 x float> @test_float_to_8_mem(float* %p) {
}
define <8 x float> @test_masked_float_to_8_mem_mask0(float* %p, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1}
@@ -1011,7 +1011,7 @@ define <8 x float> @test_masked_float_to_8_mem_mask0(float* %p, <8 x float> %def
define <8 x float> @test_masked_z_float_to_8_mem_mask0(float* %p, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} {z}
@@ -1025,7 +1025,7 @@ define <8 x float> @test_masked_z_float_to_8_mem_mask0(float* %p, <8 x float> %m
}
define <8 x float> @test_masked_float_to_8_mem_mask1(float* %p, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1}
@@ -1040,7 +1040,7 @@ define <8 x float> @test_masked_float_to_8_mem_mask1(float* %p, <8 x float> %def
define <8 x float> @test_masked_z_float_to_8_mem_mask1(float* %p, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} {z}
@@ -1054,7 +1054,7 @@ define <8 x float> @test_masked_z_float_to_8_mem_mask1(float* %p, <8 x float> %m
}
define <8 x float> @test_masked_float_to_8_mem_mask2(float* %p, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1}
@@ -1069,7 +1069,7 @@ define <8 x float> @test_masked_float_to_8_mem_mask2(float* %p, <8 x float> %def
define <8 x float> @test_masked_z_float_to_8_mem_mask2(float* %p, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} {z}
@@ -1083,7 +1083,7 @@ define <8 x float> @test_masked_z_float_to_8_mem_mask2(float* %p, <8 x float> %m
}
define <8 x float> @test_masked_float_to_8_mem_mask3(float* %p, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1}
@@ -1098,7 +1098,7 @@ define <8 x float> @test_masked_float_to_8_mem_mask3(float* %p, <8 x float> %def
define <8 x float> @test_masked_z_float_to_8_mem_mask3(float* %p, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} {z}
@@ -1112,7 +1112,7 @@ define <8 x float> @test_masked_z_float_to_8_mem_mask3(float* %p, <8 x float> %m
}
define <16 x float> @test_float_to_16_mem(float* %p) {
; CHECK-LABEL: test_float_to_16_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0
; CHECK-NEXT: retq
%s = load float, float* %p
@@ -1122,7 +1122,7 @@ define <16 x float> @test_float_to_16_mem(float* %p) {
}
define <16 x float> @test_masked_float_to_16_mem_mask0(float* %p, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_16_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1}
@@ -1137,7 +1137,7 @@ define <16 x float> @test_masked_float_to_16_mem_mask0(float* %p, <16 x float> %
define <16 x float> @test_masked_z_float_to_16_mem_mask0(float* %p, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_16_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z}
@@ -1151,7 +1151,7 @@ define <16 x float> @test_masked_z_float_to_16_mem_mask0(float* %p, <16 x float>
}
define <16 x float> @test_masked_float_to_16_mem_mask1(float* %p, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_16_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1}
@@ -1166,7 +1166,7 @@ define <16 x float> @test_masked_float_to_16_mem_mask1(float* %p, <16 x float> %
define <16 x float> @test_masked_z_float_to_16_mem_mask1(float* %p, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_16_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z}
@@ -1180,7 +1180,7 @@ define <16 x float> @test_masked_z_float_to_16_mem_mask1(float* %p, <16 x float>
}
define <16 x float> @test_masked_float_to_16_mem_mask2(float* %p, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_16_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1}
@@ -1195,7 +1195,7 @@ define <16 x float> @test_masked_float_to_16_mem_mask2(float* %p, <16 x float> %
define <16 x float> @test_masked_z_float_to_16_mem_mask2(float* %p, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_16_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z}
@@ -1209,7 +1209,7 @@ define <16 x float> @test_masked_z_float_to_16_mem_mask2(float* %p, <16 x float>
}
define <16 x float> @test_masked_float_to_16_mem_mask3(float* %p, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_float_to_16_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1}
@@ -1224,7 +1224,7 @@ define <16 x float> @test_masked_float_to_16_mem_mask3(float* %p, <16 x float> %
define <16 x float> @test_masked_z_float_to_16_mem_mask3(float* %p, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_float_to_16_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z}
diff --git a/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll b/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll
index a8533a6f7a1..b31302d51ff 100644
--- a/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll
+++ b/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll
@@ -3,7 +3,7 @@
define <16 x i8> @test_i8_to_16(i8 %s) {
; CHECK-LABEL: test_i8_to_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb %edi, %xmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i8> undef, i8 %s, i32 0
@@ -12,7 +12,7 @@ define <16 x i8> @test_i8_to_16(i8 %s) {
}
define <16 x i8> @test_masked_i8_to_16_mask0(i8 %s, <16 x i8> %default, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_16_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1}
@@ -26,7 +26,7 @@ define <16 x i8> @test_masked_i8_to_16_mask0(i8 %s, <16 x i8> %default, <16 x i8
define <16 x i8> @test_masked_z_i8_to_16_mask0(i8 %s, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_16_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} {z}
@@ -39,7 +39,7 @@ define <16 x i8> @test_masked_z_i8_to_16_mask0(i8 %s, <16 x i8> %mask) {
}
define <16 x i8> @test_masked_i8_to_16_mask1(i8 %s, <16 x i8> %default, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_16_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1}
@@ -53,7 +53,7 @@ define <16 x i8> @test_masked_i8_to_16_mask1(i8 %s, <16 x i8> %default, <16 x i8
define <16 x i8> @test_masked_z_i8_to_16_mask1(i8 %s, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_16_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} {z}
@@ -66,7 +66,7 @@ define <16 x i8> @test_masked_z_i8_to_16_mask1(i8 %s, <16 x i8> %mask) {
}
define <16 x i8> @test_masked_i8_to_16_mask2(i8 %s, <16 x i8> %default, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_16_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1}
@@ -80,7 +80,7 @@ define <16 x i8> @test_masked_i8_to_16_mask2(i8 %s, <16 x i8> %default, <16 x i8
define <16 x i8> @test_masked_z_i8_to_16_mask2(i8 %s, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_16_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} {z}
@@ -93,7 +93,7 @@ define <16 x i8> @test_masked_z_i8_to_16_mask2(i8 %s, <16 x i8> %mask) {
}
define <16 x i8> @test_masked_i8_to_16_mask3(i8 %s, <16 x i8> %default, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_16_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1}
@@ -107,7 +107,7 @@ define <16 x i8> @test_masked_i8_to_16_mask3(i8 %s, <16 x i8> %default, <16 x i8
define <16 x i8> @test_masked_z_i8_to_16_mask3(i8 %s, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_16_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} {z}
@@ -120,7 +120,7 @@ define <16 x i8> @test_masked_z_i8_to_16_mask3(i8 %s, <16 x i8> %mask) {
}
define <32 x i8> @test_i8_to_32(i8 %s) {
; CHECK-LABEL: test_i8_to_32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb %edi, %ymm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i8> undef, i8 %s, i32 0
@@ -129,7 +129,7 @@ define <32 x i8> @test_i8_to_32(i8 %s) {
}
define <32 x i8> @test_masked_i8_to_32_mask0(i8 %s, <32 x i8> %default, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1}
@@ -143,7 +143,7 @@ define <32 x i8> @test_masked_i8_to_32_mask0(i8 %s, <32 x i8> %default, <32 x i8
define <32 x i8> @test_masked_z_i8_to_32_mask0(i8 %s, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} {z}
@@ -156,7 +156,7 @@ define <32 x i8> @test_masked_z_i8_to_32_mask0(i8 %s, <32 x i8> %mask) {
}
define <32 x i8> @test_masked_i8_to_32_mask1(i8 %s, <32 x i8> %default, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1}
@@ -170,7 +170,7 @@ define <32 x i8> @test_masked_i8_to_32_mask1(i8 %s, <32 x i8> %default, <32 x i8
define <32 x i8> @test_masked_z_i8_to_32_mask1(i8 %s, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} {z}
@@ -183,7 +183,7 @@ define <32 x i8> @test_masked_z_i8_to_32_mask1(i8 %s, <32 x i8> %mask) {
}
define <32 x i8> @test_masked_i8_to_32_mask2(i8 %s, <32 x i8> %default, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1}
@@ -197,7 +197,7 @@ define <32 x i8> @test_masked_i8_to_32_mask2(i8 %s, <32 x i8> %default, <32 x i8
define <32 x i8> @test_masked_z_i8_to_32_mask2(i8 %s, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} {z}
@@ -210,7 +210,7 @@ define <32 x i8> @test_masked_z_i8_to_32_mask2(i8 %s, <32 x i8> %mask) {
}
define <32 x i8> @test_masked_i8_to_32_mask3(i8 %s, <32 x i8> %default, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1}
@@ -224,7 +224,7 @@ define <32 x i8> @test_masked_i8_to_32_mask3(i8 %s, <32 x i8> %default, <32 x i8
define <32 x i8> @test_masked_z_i8_to_32_mask3(i8 %s, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} {z}
@@ -237,7 +237,7 @@ define <32 x i8> @test_masked_z_i8_to_32_mask3(i8 %s, <32 x i8> %mask) {
}
define <64 x i8> @test_i8_to_64(i8 %s) {
; CHECK-LABEL: test_i8_to_64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb %edi, %zmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i8> undef, i8 %s, i32 0
@@ -246,7 +246,7 @@ define <64 x i8> @test_i8_to_64(i8 %s) {
}
define <64 x i8> @test_masked_i8_to_64_mask0(i8 %s, <64 x i8> %default, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_64_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1}
@@ -260,7 +260,7 @@ define <64 x i8> @test_masked_i8_to_64_mask0(i8 %s, <64 x i8> %default, <64 x i8
define <64 x i8> @test_masked_z_i8_to_64_mask0(i8 %s, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_64_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} {z}
@@ -273,7 +273,7 @@ define <64 x i8> @test_masked_z_i8_to_64_mask0(i8 %s, <64 x i8> %mask) {
}
define <64 x i8> @test_masked_i8_to_64_mask1(i8 %s, <64 x i8> %default, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_64_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1}
@@ -287,7 +287,7 @@ define <64 x i8> @test_masked_i8_to_64_mask1(i8 %s, <64 x i8> %default, <64 x i8
define <64 x i8> @test_masked_z_i8_to_64_mask1(i8 %s, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_64_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} {z}
@@ -300,7 +300,7 @@ define <64 x i8> @test_masked_z_i8_to_64_mask1(i8 %s, <64 x i8> %mask) {
}
define <64 x i8> @test_masked_i8_to_64_mask2(i8 %s, <64 x i8> %default, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_64_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1}
@@ -314,7 +314,7 @@ define <64 x i8> @test_masked_i8_to_64_mask2(i8 %s, <64 x i8> %default, <64 x i8
define <64 x i8> @test_masked_z_i8_to_64_mask2(i8 %s, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_64_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} {z}
@@ -327,7 +327,7 @@ define <64 x i8> @test_masked_z_i8_to_64_mask2(i8 %s, <64 x i8> %mask) {
}
define <64 x i8> @test_masked_i8_to_64_mask3(i8 %s, <64 x i8> %default, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_64_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1}
@@ -341,7 +341,7 @@ define <64 x i8> @test_masked_i8_to_64_mask3(i8 %s, <64 x i8> %default, <64 x i8
define <64 x i8> @test_masked_z_i8_to_64_mask3(i8 %s, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_64_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} {z}
@@ -354,7 +354,7 @@ define <64 x i8> @test_masked_z_i8_to_64_mask3(i8 %s, <64 x i8> %mask) {
}
define <8 x i16> @test_i16_to_8(i16 %s) {
; CHECK-LABEL: test_i16_to_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw %edi, %xmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i16> undef, i16 %s, i32 0
@@ -363,7 +363,7 @@ define <8 x i16> @test_i16_to_8(i16 %s) {
}
define <8 x i16> @test_masked_i16_to_8_mask0(i16 %s, <8 x i16> %default, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1}
@@ -377,7 +377,7 @@ define <8 x i16> @test_masked_i16_to_8_mask0(i16 %s, <8 x i16> %default, <8 x i1
define <8 x i16> @test_masked_z_i16_to_8_mask0(i16 %s, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} {z}
@@ -390,7 +390,7 @@ define <8 x i16> @test_masked_z_i16_to_8_mask0(i16 %s, <8 x i16> %mask) {
}
define <8 x i16> @test_masked_i16_to_8_mask1(i16 %s, <8 x i16> %default, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1}
@@ -404,7 +404,7 @@ define <8 x i16> @test_masked_i16_to_8_mask1(i16 %s, <8 x i16> %default, <8 x i1
define <8 x i16> @test_masked_z_i16_to_8_mask1(i16 %s, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} {z}
@@ -417,7 +417,7 @@ define <8 x i16> @test_masked_z_i16_to_8_mask1(i16 %s, <8 x i16> %mask) {
}
define <8 x i16> @test_masked_i16_to_8_mask2(i16 %s, <8 x i16> %default, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1}
@@ -431,7 +431,7 @@ define <8 x i16> @test_masked_i16_to_8_mask2(i16 %s, <8 x i16> %default, <8 x i1
define <8 x i16> @test_masked_z_i16_to_8_mask2(i16 %s, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} {z}
@@ -444,7 +444,7 @@ define <8 x i16> @test_masked_z_i16_to_8_mask2(i16 %s, <8 x i16> %mask) {
}
define <8 x i16> @test_masked_i16_to_8_mask3(i16 %s, <8 x i16> %default, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1}
@@ -458,7 +458,7 @@ define <8 x i16> @test_masked_i16_to_8_mask3(i16 %s, <8 x i16> %default, <8 x i1
define <8 x i16> @test_masked_z_i16_to_8_mask3(i16 %s, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} {z}
@@ -471,7 +471,7 @@ define <8 x i16> @test_masked_z_i16_to_8_mask3(i16 %s, <8 x i16> %mask) {
}
define <16 x i16> @test_i16_to_16(i16 %s) {
; CHECK-LABEL: test_i16_to_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw %edi, %ymm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i16> undef, i16 %s, i32 0
@@ -480,7 +480,7 @@ define <16 x i16> @test_i16_to_16(i16 %s) {
}
define <16 x i16> @test_masked_i16_to_16_mask0(i16 %s, <16 x i16> %default, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_16_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1}
@@ -494,7 +494,7 @@ define <16 x i16> @test_masked_i16_to_16_mask0(i16 %s, <16 x i16> %default, <16
define <16 x i16> @test_masked_z_i16_to_16_mask0(i16 %s, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_16_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} {z}
@@ -507,7 +507,7 @@ define <16 x i16> @test_masked_z_i16_to_16_mask0(i16 %s, <16 x i16> %mask) {
}
define <16 x i16> @test_masked_i16_to_16_mask1(i16 %s, <16 x i16> %default, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_16_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1}
@@ -521,7 +521,7 @@ define <16 x i16> @test_masked_i16_to_16_mask1(i16 %s, <16 x i16> %default, <16
define <16 x i16> @test_masked_z_i16_to_16_mask1(i16 %s, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_16_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} {z}
@@ -534,7 +534,7 @@ define <16 x i16> @test_masked_z_i16_to_16_mask1(i16 %s, <16 x i16> %mask) {
}
define <16 x i16> @test_masked_i16_to_16_mask2(i16 %s, <16 x i16> %default, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_16_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1}
@@ -548,7 +548,7 @@ define <16 x i16> @test_masked_i16_to_16_mask2(i16 %s, <16 x i16> %default, <16
define <16 x i16> @test_masked_z_i16_to_16_mask2(i16 %s, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_16_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} {z}
@@ -561,7 +561,7 @@ define <16 x i16> @test_masked_z_i16_to_16_mask2(i16 %s, <16 x i16> %mask) {
}
define <16 x i16> @test_masked_i16_to_16_mask3(i16 %s, <16 x i16> %default, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_16_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1}
@@ -575,7 +575,7 @@ define <16 x i16> @test_masked_i16_to_16_mask3(i16 %s, <16 x i16> %default, <16
define <16 x i16> @test_masked_z_i16_to_16_mask3(i16 %s, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_16_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} {z}
@@ -588,7 +588,7 @@ define <16 x i16> @test_masked_z_i16_to_16_mask3(i16 %s, <16 x i16> %mask) {
}
define <32 x i16> @test_i16_to_32(i16 %s) {
; CHECK-LABEL: test_i16_to_32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw %edi, %zmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i16> undef, i16 %s, i32 0
@@ -597,7 +597,7 @@ define <32 x i16> @test_i16_to_32(i16 %s) {
}
define <32 x i16> @test_masked_i16_to_32_mask0(i16 %s, <32 x i16> %default, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1}
@@ -611,7 +611,7 @@ define <32 x i16> @test_masked_i16_to_32_mask0(i16 %s, <32 x i16> %default, <32
define <32 x i16> @test_masked_z_i16_to_32_mask0(i16 %s, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} {z}
@@ -624,7 +624,7 @@ define <32 x i16> @test_masked_z_i16_to_32_mask0(i16 %s, <32 x i16> %mask) {
}
define <32 x i16> @test_masked_i16_to_32_mask1(i16 %s, <32 x i16> %default, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1}
@@ -638,7 +638,7 @@ define <32 x i16> @test_masked_i16_to_32_mask1(i16 %s, <32 x i16> %default, <32
define <32 x i16> @test_masked_z_i16_to_32_mask1(i16 %s, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} {z}
@@ -651,7 +651,7 @@ define <32 x i16> @test_masked_z_i16_to_32_mask1(i16 %s, <32 x i16> %mask) {
}
define <32 x i16> @test_masked_i16_to_32_mask2(i16 %s, <32 x i16> %default, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1}
@@ -665,7 +665,7 @@ define <32 x i16> @test_masked_i16_to_32_mask2(i16 %s, <32 x i16> %default, <32
define <32 x i16> @test_masked_z_i16_to_32_mask2(i16 %s, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} {z}
@@ -678,7 +678,7 @@ define <32 x i16> @test_masked_z_i16_to_32_mask2(i16 %s, <32 x i16> %mask) {
}
define <32 x i16> @test_masked_i16_to_32_mask3(i16 %s, <32 x i16> %default, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1}
@@ -692,7 +692,7 @@ define <32 x i16> @test_masked_i16_to_32_mask3(i16 %s, <32 x i16> %default, <32
define <32 x i16> @test_masked_z_i16_to_32_mask3(i16 %s, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} {z}
@@ -705,7 +705,7 @@ define <32 x i16> @test_masked_z_i16_to_32_mask3(i16 %s, <32 x i16> %mask) {
}
define <4 x i32> @test_i32_to_4(i32 %s) {
; CHECK-LABEL: test_i32_to_4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd %edi, %xmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i32> undef, i32 %s, i32 0
@@ -714,7 +714,7 @@ define <4 x i32> @test_i32_to_4(i32 %s) {
}
define <4 x i32> @test_masked_i32_to_4_mask0(i32 %s, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_4_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1}
@@ -728,7 +728,7 @@ define <4 x i32> @test_masked_i32_to_4_mask0(i32 %s, <4 x i32> %default, <4 x i3
define <4 x i32> @test_masked_z_i32_to_4_mask0(i32 %s, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_4_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} {z}
@@ -741,7 +741,7 @@ define <4 x i32> @test_masked_z_i32_to_4_mask0(i32 %s, <4 x i32> %mask) {
}
define <4 x i32> @test_masked_i32_to_4_mask1(i32 %s, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_4_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1}
@@ -755,7 +755,7 @@ define <4 x i32> @test_masked_i32_to_4_mask1(i32 %s, <4 x i32> %default, <4 x i3
define <4 x i32> @test_masked_z_i32_to_4_mask1(i32 %s, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_4_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} {z}
@@ -768,7 +768,7 @@ define <4 x i32> @test_masked_z_i32_to_4_mask1(i32 %s, <4 x i32> %mask) {
}
define <4 x i32> @test_masked_i32_to_4_mask2(i32 %s, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_4_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1}
@@ -782,7 +782,7 @@ define <4 x i32> @test_masked_i32_to_4_mask2(i32 %s, <4 x i32> %default, <4 x i3
define <4 x i32> @test_masked_z_i32_to_4_mask2(i32 %s, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_4_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} {z}
@@ -795,7 +795,7 @@ define <4 x i32> @test_masked_z_i32_to_4_mask2(i32 %s, <4 x i32> %mask) {
}
define <4 x i32> @test_masked_i32_to_4_mask3(i32 %s, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_4_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1}
@@ -809,7 +809,7 @@ define <4 x i32> @test_masked_i32_to_4_mask3(i32 %s, <4 x i32> %default, <4 x i3
define <4 x i32> @test_masked_z_i32_to_4_mask3(i32 %s, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_4_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} {z}
@@ -822,7 +822,7 @@ define <4 x i32> @test_masked_z_i32_to_4_mask3(i32 %s, <4 x i32> %mask) {
}
define <8 x i32> @test_i32_to_8(i32 %s) {
; CHECK-LABEL: test_i32_to_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd %edi, %ymm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i32> undef, i32 %s, i32 0
@@ -831,7 +831,7 @@ define <8 x i32> @test_i32_to_8(i32 %s) {
}
define <8 x i32> @test_masked_i32_to_8_mask0(i32 %s, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1}
@@ -845,7 +845,7 @@ define <8 x i32> @test_masked_i32_to_8_mask0(i32 %s, <8 x i32> %default, <8 x i3
define <8 x i32> @test_masked_z_i32_to_8_mask0(i32 %s, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} {z}
@@ -858,7 +858,7 @@ define <8 x i32> @test_masked_z_i32_to_8_mask0(i32 %s, <8 x i32> %mask) {
}
define <8 x i32> @test_masked_i32_to_8_mask1(i32 %s, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1}
@@ -872,7 +872,7 @@ define <8 x i32> @test_masked_i32_to_8_mask1(i32 %s, <8 x i32> %default, <8 x i3
define <8 x i32> @test_masked_z_i32_to_8_mask1(i32 %s, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} {z}
@@ -885,7 +885,7 @@ define <8 x i32> @test_masked_z_i32_to_8_mask1(i32 %s, <8 x i32> %mask) {
}
define <8 x i32> @test_masked_i32_to_8_mask2(i32 %s, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1}
@@ -899,7 +899,7 @@ define <8 x i32> @test_masked_i32_to_8_mask2(i32 %s, <8 x i32> %default, <8 x i3
define <8 x i32> @test_masked_z_i32_to_8_mask2(i32 %s, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} {z}
@@ -912,7 +912,7 @@ define <8 x i32> @test_masked_z_i32_to_8_mask2(i32 %s, <8 x i32> %mask) {
}
define <8 x i32> @test_masked_i32_to_8_mask3(i32 %s, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1}
@@ -926,7 +926,7 @@ define <8 x i32> @test_masked_i32_to_8_mask3(i32 %s, <8 x i32> %default, <8 x i3
define <8 x i32> @test_masked_z_i32_to_8_mask3(i32 %s, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} {z}
@@ -939,7 +939,7 @@ define <8 x i32> @test_masked_z_i32_to_8_mask3(i32 %s, <8 x i32> %mask) {
}
define <16 x i32> @test_i32_to_16(i32 %s) {
; CHECK-LABEL: test_i32_to_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd %edi, %zmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i32> undef, i32 %s, i32 0
@@ -948,7 +948,7 @@ define <16 x i32> @test_i32_to_16(i32 %s) {
}
define <16 x i32> @test_masked_i32_to_16_mask0(i32 %s, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_16_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1}
@@ -962,7 +962,7 @@ define <16 x i32> @test_masked_i32_to_16_mask0(i32 %s, <16 x i32> %default, <16
define <16 x i32> @test_masked_z_i32_to_16_mask0(i32 %s, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_16_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} {z}
@@ -975,7 +975,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mask0(i32 %s, <16 x i32> %mask) {
}
define <16 x i32> @test_masked_i32_to_16_mask1(i32 %s, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_16_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1}
@@ -989,7 +989,7 @@ define <16 x i32> @test_masked_i32_to_16_mask1(i32 %s, <16 x i32> %default, <16
define <16 x i32> @test_masked_z_i32_to_16_mask1(i32 %s, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_16_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} {z}
@@ -1002,7 +1002,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mask1(i32 %s, <16 x i32> %mask) {
}
define <16 x i32> @test_masked_i32_to_16_mask2(i32 %s, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_16_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1}
@@ -1016,7 +1016,7 @@ define <16 x i32> @test_masked_i32_to_16_mask2(i32 %s, <16 x i32> %default, <16
define <16 x i32> @test_masked_z_i32_to_16_mask2(i32 %s, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_16_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} {z}
@@ -1029,7 +1029,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mask2(i32 %s, <16 x i32> %mask) {
}
define <16 x i32> @test_masked_i32_to_16_mask3(i32 %s, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_16_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1}
@@ -1043,7 +1043,7 @@ define <16 x i32> @test_masked_i32_to_16_mask3(i32 %s, <16 x i32> %default, <16
define <16 x i32> @test_masked_z_i32_to_16_mask3(i32 %s, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_16_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} {z}
@@ -1056,7 +1056,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mask3(i32 %s, <16 x i32> %mask) {
}
define <2 x i64> @test_i64_to_2(i64 %s) {
; CHECK-LABEL: test_i64_to_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq %rdi, %xmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i64> undef, i64 %s, i32 0
@@ -1065,7 +1065,7 @@ define <2 x i64> @test_i64_to_2(i64 %s) {
}
define <2 x i64> @test_masked_i64_to_2_mask0(i64 %s, <2 x i64> %default, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_2_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1}
@@ -1079,7 +1079,7 @@ define <2 x i64> @test_masked_i64_to_2_mask0(i64 %s, <2 x i64> %default, <2 x i6
define <2 x i64> @test_masked_z_i64_to_2_mask0(i64 %s, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_2_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} {z}
@@ -1092,7 +1092,7 @@ define <2 x i64> @test_masked_z_i64_to_2_mask0(i64 %s, <2 x i64> %mask) {
}
define <2 x i64> @test_masked_i64_to_2_mask1(i64 %s, <2 x i64> %default, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_2_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1}
@@ -1106,7 +1106,7 @@ define <2 x i64> @test_masked_i64_to_2_mask1(i64 %s, <2 x i64> %default, <2 x i6
define <2 x i64> @test_masked_z_i64_to_2_mask1(i64 %s, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_2_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} {z}
@@ -1119,7 +1119,7 @@ define <2 x i64> @test_masked_z_i64_to_2_mask1(i64 %s, <2 x i64> %mask) {
}
define <4 x i64> @test_i64_to_4(i64 %s) {
; CHECK-LABEL: test_i64_to_4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i64> undef, i64 %s, i32 0
@@ -1128,7 +1128,7 @@ define <4 x i64> @test_i64_to_4(i64 %s) {
}
define <4 x i64> @test_masked_i64_to_4_mask0(i64 %s, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_4_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1}
@@ -1142,7 +1142,7 @@ define <4 x i64> @test_masked_i64_to_4_mask0(i64 %s, <4 x i64> %default, <4 x i6
define <4 x i64> @test_masked_z_i64_to_4_mask0(i64 %s, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_4_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} {z}
@@ -1155,7 +1155,7 @@ define <4 x i64> @test_masked_z_i64_to_4_mask0(i64 %s, <4 x i64> %mask) {
}
define <4 x i64> @test_masked_i64_to_4_mask1(i64 %s, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_4_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1}
@@ -1169,7 +1169,7 @@ define <4 x i64> @test_masked_i64_to_4_mask1(i64 %s, <4 x i64> %default, <4 x i6
define <4 x i64> @test_masked_z_i64_to_4_mask1(i64 %s, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_4_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} {z}
@@ -1182,7 +1182,7 @@ define <4 x i64> @test_masked_z_i64_to_4_mask1(i64 %s, <4 x i64> %mask) {
}
define <4 x i64> @test_masked_i64_to_4_mask2(i64 %s, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_4_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1}
@@ -1196,7 +1196,7 @@ define <4 x i64> @test_masked_i64_to_4_mask2(i64 %s, <4 x i64> %default, <4 x i6
define <4 x i64> @test_masked_z_i64_to_4_mask2(i64 %s, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_4_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} {z}
@@ -1209,7 +1209,7 @@ define <4 x i64> @test_masked_z_i64_to_4_mask2(i64 %s, <4 x i64> %mask) {
}
define <4 x i64> @test_masked_i64_to_4_mask3(i64 %s, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_4_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1}
@@ -1223,7 +1223,7 @@ define <4 x i64> @test_masked_i64_to_4_mask3(i64 %s, <4 x i64> %default, <4 x i6
define <4 x i64> @test_masked_z_i64_to_4_mask3(i64 %s, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_4_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} {z}
@@ -1236,7 +1236,7 @@ define <4 x i64> @test_masked_z_i64_to_4_mask3(i64 %s, <4 x i64> %mask) {
}
define <8 x i64> @test_i64_to_8(i64 %s) {
; CHECK-LABEL: test_i64_to_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0
; CHECK-NEXT: retq
%vec = insertelement <2 x i64> undef, i64 %s, i32 0
@@ -1245,7 +1245,7 @@ define <8 x i64> @test_i64_to_8(i64 %s) {
}
define <8 x i64> @test_masked_i64_to_8_mask0(i64 %s, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1}
@@ -1259,7 +1259,7 @@ define <8 x i64> @test_masked_i64_to_8_mask0(i64 %s, <8 x i64> %default, <8 x i6
define <8 x i64> @test_masked_z_i64_to_8_mask0(i64 %s, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_8_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} {z}
@@ -1272,7 +1272,7 @@ define <8 x i64> @test_masked_z_i64_to_8_mask0(i64 %s, <8 x i64> %mask) {
}
define <8 x i64> @test_masked_i64_to_8_mask1(i64 %s, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1}
@@ -1286,7 +1286,7 @@ define <8 x i64> @test_masked_i64_to_8_mask1(i64 %s, <8 x i64> %default, <8 x i6
define <8 x i64> @test_masked_z_i64_to_8_mask1(i64 %s, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_8_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} {z}
@@ -1299,7 +1299,7 @@ define <8 x i64> @test_masked_z_i64_to_8_mask1(i64 %s, <8 x i64> %mask) {
}
define <8 x i64> @test_masked_i64_to_8_mask2(i64 %s, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1}
@@ -1313,7 +1313,7 @@ define <8 x i64> @test_masked_i64_to_8_mask2(i64 %s, <8 x i64> %default, <8 x i6
define <8 x i64> @test_masked_z_i64_to_8_mask2(i64 %s, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_8_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} {z}
@@ -1326,7 +1326,7 @@ define <8 x i64> @test_masked_z_i64_to_8_mask2(i64 %s, <8 x i64> %mask) {
}
define <8 x i64> @test_masked_i64_to_8_mask3(i64 %s, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1}
@@ -1340,7 +1340,7 @@ define <8 x i64> @test_masked_i64_to_8_mask3(i64 %s, <8 x i64> %default, <8 x i6
define <8 x i64> @test_masked_z_i64_to_8_mask3(i64 %s, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_8_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} {z}
@@ -1353,7 +1353,7 @@ define <8 x i64> @test_masked_z_i64_to_8_mask3(i64 %s, <8 x i64> %mask) {
}
define <16 x i8> @test_i8_to_16_mem(i8* %p) {
; CHECK-LABEL: test_i8_to_16_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0
; CHECK-NEXT: retq
%s = load i8, i8* %p
@@ -1363,7 +1363,7 @@ define <16 x i8> @test_i8_to_16_mem(i8* %p) {
}
define <16 x i8> @test_masked_i8_to_16_mem_mask0(i8* %p, <16 x i8> %default, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_16_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1}
@@ -1378,7 +1378,7 @@ define <16 x i8> @test_masked_i8_to_16_mem_mask0(i8* %p, <16 x i8> %default, <16
define <16 x i8> @test_masked_z_i8_to_16_mem_mask0(i8* %p, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_16_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} {z}
@@ -1392,7 +1392,7 @@ define <16 x i8> @test_masked_z_i8_to_16_mem_mask0(i8* %p, <16 x i8> %mask) {
}
define <16 x i8> @test_masked_i8_to_16_mem_mask1(i8* %p, <16 x i8> %default, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_16_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1}
@@ -1407,7 +1407,7 @@ define <16 x i8> @test_masked_i8_to_16_mem_mask1(i8* %p, <16 x i8> %default, <16
define <16 x i8> @test_masked_z_i8_to_16_mem_mask1(i8* %p, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_16_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} {z}
@@ -1421,7 +1421,7 @@ define <16 x i8> @test_masked_z_i8_to_16_mem_mask1(i8* %p, <16 x i8> %mask) {
}
define <16 x i8> @test_masked_i8_to_16_mem_mask2(i8* %p, <16 x i8> %default, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_16_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1}
@@ -1436,7 +1436,7 @@ define <16 x i8> @test_masked_i8_to_16_mem_mask2(i8* %p, <16 x i8> %default, <16
define <16 x i8> @test_masked_z_i8_to_16_mem_mask2(i8* %p, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_16_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} {z}
@@ -1450,7 +1450,7 @@ define <16 x i8> @test_masked_z_i8_to_16_mem_mask2(i8* %p, <16 x i8> %mask) {
}
define <16 x i8> @test_masked_i8_to_16_mem_mask3(i8* %p, <16 x i8> %default, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_16_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1}
@@ -1465,7 +1465,7 @@ define <16 x i8> @test_masked_i8_to_16_mem_mask3(i8* %p, <16 x i8> %default, <16
define <16 x i8> @test_masked_z_i8_to_16_mem_mask3(i8* %p, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_16_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} {z}
@@ -1479,7 +1479,7 @@ define <16 x i8> @test_masked_z_i8_to_16_mem_mask3(i8* %p, <16 x i8> %mask) {
}
define <32 x i8> @test_i8_to_32_mem(i8* %p) {
; CHECK-LABEL: test_i8_to_32_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0
; CHECK-NEXT: retq
%s = load i8, i8* %p
@@ -1489,7 +1489,7 @@ define <32 x i8> @test_i8_to_32_mem(i8* %p) {
}
define <32 x i8> @test_masked_i8_to_32_mem_mask0(i8* %p, <32 x i8> %default, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1}
@@ -1504,7 +1504,7 @@ define <32 x i8> @test_masked_i8_to_32_mem_mask0(i8* %p, <32 x i8> %default, <32
define <32 x i8> @test_masked_z_i8_to_32_mem_mask0(i8* %p, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} {z}
@@ -1518,7 +1518,7 @@ define <32 x i8> @test_masked_z_i8_to_32_mem_mask0(i8* %p, <32 x i8> %mask) {
}
define <32 x i8> @test_masked_i8_to_32_mem_mask1(i8* %p, <32 x i8> %default, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1}
@@ -1533,7 +1533,7 @@ define <32 x i8> @test_masked_i8_to_32_mem_mask1(i8* %p, <32 x i8> %default, <32
define <32 x i8> @test_masked_z_i8_to_32_mem_mask1(i8* %p, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} {z}
@@ -1547,7 +1547,7 @@ define <32 x i8> @test_masked_z_i8_to_32_mem_mask1(i8* %p, <32 x i8> %mask) {
}
define <32 x i8> @test_masked_i8_to_32_mem_mask2(i8* %p, <32 x i8> %default, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1}
@@ -1562,7 +1562,7 @@ define <32 x i8> @test_masked_i8_to_32_mem_mask2(i8* %p, <32 x i8> %default, <32
define <32 x i8> @test_masked_z_i8_to_32_mem_mask2(i8* %p, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} {z}
@@ -1576,7 +1576,7 @@ define <32 x i8> @test_masked_z_i8_to_32_mem_mask2(i8* %p, <32 x i8> %mask) {
}
define <32 x i8> @test_masked_i8_to_32_mem_mask3(i8* %p, <32 x i8> %default, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1}
@@ -1591,7 +1591,7 @@ define <32 x i8> @test_masked_i8_to_32_mem_mask3(i8* %p, <32 x i8> %default, <32
define <32 x i8> @test_masked_z_i8_to_32_mem_mask3(i8* %p, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} {z}
@@ -1605,7 +1605,7 @@ define <32 x i8> @test_masked_z_i8_to_32_mem_mask3(i8* %p, <32 x i8> %mask) {
}
define <64 x i8> @test_i8_to_64_mem(i8* %p) {
; CHECK-LABEL: test_i8_to_64_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0
; CHECK-NEXT: retq
%s = load i8, i8* %p
@@ -1615,7 +1615,7 @@ define <64 x i8> @test_i8_to_64_mem(i8* %p) {
}
define <64 x i8> @test_masked_i8_to_64_mem_mask0(i8* %p, <64 x i8> %default, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_64_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1}
@@ -1630,7 +1630,7 @@ define <64 x i8> @test_masked_i8_to_64_mem_mask0(i8* %p, <64 x i8> %default, <64
define <64 x i8> @test_masked_z_i8_to_64_mem_mask0(i8* %p, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_64_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} {z}
@@ -1644,7 +1644,7 @@ define <64 x i8> @test_masked_z_i8_to_64_mem_mask0(i8* %p, <64 x i8> %mask) {
}
define <64 x i8> @test_masked_i8_to_64_mem_mask1(i8* %p, <64 x i8> %default, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_64_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1}
@@ -1659,7 +1659,7 @@ define <64 x i8> @test_masked_i8_to_64_mem_mask1(i8* %p, <64 x i8> %default, <64
define <64 x i8> @test_masked_z_i8_to_64_mem_mask1(i8* %p, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_64_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} {z}
@@ -1673,7 +1673,7 @@ define <64 x i8> @test_masked_z_i8_to_64_mem_mask1(i8* %p, <64 x i8> %mask) {
}
define <64 x i8> @test_masked_i8_to_64_mem_mask2(i8* %p, <64 x i8> %default, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_64_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1}
@@ -1688,7 +1688,7 @@ define <64 x i8> @test_masked_i8_to_64_mem_mask2(i8* %p, <64 x i8> %default, <64
define <64 x i8> @test_masked_z_i8_to_64_mem_mask2(i8* %p, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_64_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} {z}
@@ -1702,7 +1702,7 @@ define <64 x i8> @test_masked_z_i8_to_64_mem_mask2(i8* %p, <64 x i8> %mask) {
}
define <64 x i8> @test_masked_i8_to_64_mem_mask3(i8* %p, <64 x i8> %default, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_i8_to_64_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1}
@@ -1717,7 +1717,7 @@ define <64 x i8> @test_masked_i8_to_64_mem_mask3(i8* %p, <64 x i8> %default, <64
define <64 x i8> @test_masked_z_i8_to_64_mem_mask3(i8* %p, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_i8_to_64_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} {z}
@@ -1731,7 +1731,7 @@ define <64 x i8> @test_masked_z_i8_to_64_mem_mask3(i8* %p, <64 x i8> %mask) {
}
define <8 x i16> @test_i16_to_8_mem(i16* %p) {
; CHECK-LABEL: test_i16_to_8_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0
; CHECK-NEXT: retq
%s = load i16, i16* %p
@@ -1741,7 +1741,7 @@ define <8 x i16> @test_i16_to_8_mem(i16* %p) {
}
define <8 x i16> @test_masked_i16_to_8_mem_mask0(i16* %p, <8 x i16> %default, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1}
@@ -1756,7 +1756,7 @@ define <8 x i16> @test_masked_i16_to_8_mem_mask0(i16* %p, <8 x i16> %default, <8
define <8 x i16> @test_masked_z_i16_to_8_mem_mask0(i16* %p, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} {z}
@@ -1770,7 +1770,7 @@ define <8 x i16> @test_masked_z_i16_to_8_mem_mask0(i16* %p, <8 x i16> %mask) {
}
define <8 x i16> @test_masked_i16_to_8_mem_mask1(i16* %p, <8 x i16> %default, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1}
@@ -1785,7 +1785,7 @@ define <8 x i16> @test_masked_i16_to_8_mem_mask1(i16* %p, <8 x i16> %default, <8
define <8 x i16> @test_masked_z_i16_to_8_mem_mask1(i16* %p, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} {z}
@@ -1799,7 +1799,7 @@ define <8 x i16> @test_masked_z_i16_to_8_mem_mask1(i16* %p, <8 x i16> %mask) {
}
define <8 x i16> @test_masked_i16_to_8_mem_mask2(i16* %p, <8 x i16> %default, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1}
@@ -1814,7 +1814,7 @@ define <8 x i16> @test_masked_i16_to_8_mem_mask2(i16* %p, <8 x i16> %default, <8
define <8 x i16> @test_masked_z_i16_to_8_mem_mask2(i16* %p, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} {z}
@@ -1828,7 +1828,7 @@ define <8 x i16> @test_masked_z_i16_to_8_mem_mask2(i16* %p, <8 x i16> %mask) {
}
define <8 x i16> @test_masked_i16_to_8_mem_mask3(i16* %p, <8 x i16> %default, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1}
@@ -1843,7 +1843,7 @@ define <8 x i16> @test_masked_i16_to_8_mem_mask3(i16* %p, <8 x i16> %default, <8
define <8 x i16> @test_masked_z_i16_to_8_mem_mask3(i16* %p, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} {z}
@@ -1857,7 +1857,7 @@ define <8 x i16> @test_masked_z_i16_to_8_mem_mask3(i16* %p, <8 x i16> %mask) {
}
define <16 x i16> @test_i16_to_16_mem(i16* %p) {
; CHECK-LABEL: test_i16_to_16_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0
; CHECK-NEXT: retq
%s = load i16, i16* %p
@@ -1867,7 +1867,7 @@ define <16 x i16> @test_i16_to_16_mem(i16* %p) {
}
define <16 x i16> @test_masked_i16_to_16_mem_mask0(i16* %p, <16 x i16> %default, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_16_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1}
@@ -1882,7 +1882,7 @@ define <16 x i16> @test_masked_i16_to_16_mem_mask0(i16* %p, <16 x i16> %default,
define <16 x i16> @test_masked_z_i16_to_16_mem_mask0(i16* %p, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_16_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} {z}
@@ -1896,7 +1896,7 @@ define <16 x i16> @test_masked_z_i16_to_16_mem_mask0(i16* %p, <16 x i16> %mask)
}
define <16 x i16> @test_masked_i16_to_16_mem_mask1(i16* %p, <16 x i16> %default, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_16_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1}
@@ -1911,7 +1911,7 @@ define <16 x i16> @test_masked_i16_to_16_mem_mask1(i16* %p, <16 x i16> %default,
define <16 x i16> @test_masked_z_i16_to_16_mem_mask1(i16* %p, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_16_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} {z}
@@ -1925,7 +1925,7 @@ define <16 x i16> @test_masked_z_i16_to_16_mem_mask1(i16* %p, <16 x i16> %mask)
}
define <16 x i16> @test_masked_i16_to_16_mem_mask2(i16* %p, <16 x i16> %default, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_16_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1}
@@ -1940,7 +1940,7 @@ define <16 x i16> @test_masked_i16_to_16_mem_mask2(i16* %p, <16 x i16> %default,
define <16 x i16> @test_masked_z_i16_to_16_mem_mask2(i16* %p, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_16_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} {z}
@@ -1954,7 +1954,7 @@ define <16 x i16> @test_masked_z_i16_to_16_mem_mask2(i16* %p, <16 x i16> %mask)
}
define <16 x i16> @test_masked_i16_to_16_mem_mask3(i16* %p, <16 x i16> %default, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_16_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1}
@@ -1969,7 +1969,7 @@ define <16 x i16> @test_masked_i16_to_16_mem_mask3(i16* %p, <16 x i16> %default,
define <16 x i16> @test_masked_z_i16_to_16_mem_mask3(i16* %p, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_16_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} {z}
@@ -1983,7 +1983,7 @@ define <16 x i16> @test_masked_z_i16_to_16_mem_mask3(i16* %p, <16 x i16> %mask)
}
define <32 x i16> @test_i16_to_32_mem(i16* %p) {
; CHECK-LABEL: test_i16_to_32_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0
; CHECK-NEXT: retq
%s = load i16, i16* %p
@@ -1993,7 +1993,7 @@ define <32 x i16> @test_i16_to_32_mem(i16* %p) {
}
define <32 x i16> @test_masked_i16_to_32_mem_mask0(i16* %p, <32 x i16> %default, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1}
@@ -2008,7 +2008,7 @@ define <32 x i16> @test_masked_i16_to_32_mem_mask0(i16* %p, <32 x i16> %default,
define <32 x i16> @test_masked_z_i16_to_32_mem_mask0(i16* %p, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} {z}
@@ -2022,7 +2022,7 @@ define <32 x i16> @test_masked_z_i16_to_32_mem_mask0(i16* %p, <32 x i16> %mask)
}
define <32 x i16> @test_masked_i16_to_32_mem_mask1(i16* %p, <32 x i16> %default, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1}
@@ -2037,7 +2037,7 @@ define <32 x i16> @test_masked_i16_to_32_mem_mask1(i16* %p, <32 x i16> %default,
define <32 x i16> @test_masked_z_i16_to_32_mem_mask1(i16* %p, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} {z}
@@ -2051,7 +2051,7 @@ define <32 x i16> @test_masked_z_i16_to_32_mem_mask1(i16* %p, <32 x i16> %mask)
}
define <32 x i16> @test_masked_i16_to_32_mem_mask2(i16* %p, <32 x i16> %default, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1}
@@ -2066,7 +2066,7 @@ define <32 x i16> @test_masked_i16_to_32_mem_mask2(i16* %p, <32 x i16> %default,
define <32 x i16> @test_masked_z_i16_to_32_mem_mask2(i16* %p, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} {z}
@@ -2080,7 +2080,7 @@ define <32 x i16> @test_masked_z_i16_to_32_mem_mask2(i16* %p, <32 x i16> %mask)
}
define <32 x i16> @test_masked_i16_to_32_mem_mask3(i16* %p, <32 x i16> %default, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_i16_to_32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1}
@@ -2095,7 +2095,7 @@ define <32 x i16> @test_masked_i16_to_32_mem_mask3(i16* %p, <32 x i16> %default,
define <32 x i16> @test_masked_z_i16_to_32_mem_mask3(i16* %p, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_i16_to_32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} {z}
@@ -2109,7 +2109,7 @@ define <32 x i16> @test_masked_z_i16_to_32_mem_mask3(i16* %p, <32 x i16> %mask)
}
define <4 x i32> @test_i32_to_4_mem(i32* %p) {
; CHECK-LABEL: test_i32_to_4_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
; CHECK-NEXT: retq
%s = load i32, i32* %p
@@ -2119,7 +2119,7 @@ define <4 x i32> @test_i32_to_4_mem(i32* %p) {
}
define <4 x i32> @test_masked_i32_to_4_mem_mask0(i32* %p, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_4_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1}
@@ -2134,7 +2134,7 @@ define <4 x i32> @test_masked_i32_to_4_mem_mask0(i32* %p, <4 x i32> %default, <4
define <4 x i32> @test_masked_z_i32_to_4_mem_mask0(i32* %p, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_4_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} {z}
@@ -2148,7 +2148,7 @@ define <4 x i32> @test_masked_z_i32_to_4_mem_mask0(i32* %p, <4 x i32> %mask) {
}
define <4 x i32> @test_masked_i32_to_4_mem_mask1(i32* %p, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_4_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1}
@@ -2163,7 +2163,7 @@ define <4 x i32> @test_masked_i32_to_4_mem_mask1(i32* %p, <4 x i32> %default, <4
define <4 x i32> @test_masked_z_i32_to_4_mem_mask1(i32* %p, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_4_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} {z}
@@ -2177,7 +2177,7 @@ define <4 x i32> @test_masked_z_i32_to_4_mem_mask1(i32* %p, <4 x i32> %mask) {
}
define <4 x i32> @test_masked_i32_to_4_mem_mask2(i32* %p, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_4_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1}
@@ -2192,7 +2192,7 @@ define <4 x i32> @test_masked_i32_to_4_mem_mask2(i32* %p, <4 x i32> %default, <4
define <4 x i32> @test_masked_z_i32_to_4_mem_mask2(i32* %p, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_4_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} {z}
@@ -2206,7 +2206,7 @@ define <4 x i32> @test_masked_z_i32_to_4_mem_mask2(i32* %p, <4 x i32> %mask) {
}
define <4 x i32> @test_masked_i32_to_4_mem_mask3(i32* %p, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_4_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1}
@@ -2221,7 +2221,7 @@ define <4 x i32> @test_masked_i32_to_4_mem_mask3(i32* %p, <4 x i32> %default, <4
define <4 x i32> @test_masked_z_i32_to_4_mem_mask3(i32* %p, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_4_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} {z}
@@ -2235,7 +2235,7 @@ define <4 x i32> @test_masked_z_i32_to_4_mem_mask3(i32* %p, <4 x i32> %mask) {
}
define <8 x i32> @test_i32_to_8_mem(i32* %p) {
; CHECK-LABEL: test_i32_to_8_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0
; CHECK-NEXT: retq
%s = load i32, i32* %p
@@ -2245,7 +2245,7 @@ define <8 x i32> @test_i32_to_8_mem(i32* %p) {
}
define <8 x i32> @test_masked_i32_to_8_mem_mask0(i32* %p, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1}
@@ -2260,7 +2260,7 @@ define <8 x i32> @test_masked_i32_to_8_mem_mask0(i32* %p, <8 x i32> %default, <8
define <8 x i32> @test_masked_z_i32_to_8_mem_mask0(i32* %p, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} {z}
@@ -2274,7 +2274,7 @@ define <8 x i32> @test_masked_z_i32_to_8_mem_mask0(i32* %p, <8 x i32> %mask) {
}
define <8 x i32> @test_masked_i32_to_8_mem_mask1(i32* %p, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1}
@@ -2289,7 +2289,7 @@ define <8 x i32> @test_masked_i32_to_8_mem_mask1(i32* %p, <8 x i32> %default, <8
define <8 x i32> @test_masked_z_i32_to_8_mem_mask1(i32* %p, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} {z}
@@ -2303,7 +2303,7 @@ define <8 x i32> @test_masked_z_i32_to_8_mem_mask1(i32* %p, <8 x i32> %mask) {
}
define <8 x i32> @test_masked_i32_to_8_mem_mask2(i32* %p, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1}
@@ -2318,7 +2318,7 @@ define <8 x i32> @test_masked_i32_to_8_mem_mask2(i32* %p, <8 x i32> %default, <8
define <8 x i32> @test_masked_z_i32_to_8_mem_mask2(i32* %p, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} {z}
@@ -2332,7 +2332,7 @@ define <8 x i32> @test_masked_z_i32_to_8_mem_mask2(i32* %p, <8 x i32> %mask) {
}
define <8 x i32> @test_masked_i32_to_8_mem_mask3(i32* %p, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1}
@@ -2347,7 +2347,7 @@ define <8 x i32> @test_masked_i32_to_8_mem_mask3(i32* %p, <8 x i32> %default, <8
define <8 x i32> @test_masked_z_i32_to_8_mem_mask3(i32* %p, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} {z}
@@ -2361,7 +2361,7 @@ define <8 x i32> @test_masked_z_i32_to_8_mem_mask3(i32* %p, <8 x i32> %mask) {
}
define <16 x i32> @test_i32_to_16_mem(i32* %p) {
; CHECK-LABEL: test_i32_to_16_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss (%rdi), %zmm0
; CHECK-NEXT: retq
%s = load i32, i32* %p
@@ -2371,7 +2371,7 @@ define <16 x i32> @test_i32_to_16_mem(i32* %p) {
}
define <16 x i32> @test_masked_i32_to_16_mem_mask0(i32* %p, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_16_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1}
@@ -2386,7 +2386,7 @@ define <16 x i32> @test_masked_i32_to_16_mem_mask0(i32* %p, <16 x i32> %default,
define <16 x i32> @test_masked_z_i32_to_16_mem_mask0(i32* %p, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_16_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} {z}
@@ -2400,7 +2400,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mem_mask0(i32* %p, <16 x i32> %mask)
}
define <16 x i32> @test_masked_i32_to_16_mem_mask1(i32* %p, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_16_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1}
@@ -2415,7 +2415,7 @@ define <16 x i32> @test_masked_i32_to_16_mem_mask1(i32* %p, <16 x i32> %default,
define <16 x i32> @test_masked_z_i32_to_16_mem_mask1(i32* %p, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_16_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} {z}
@@ -2429,7 +2429,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mem_mask1(i32* %p, <16 x i32> %mask)
}
define <16 x i32> @test_masked_i32_to_16_mem_mask2(i32* %p, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_16_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1}
@@ -2444,7 +2444,7 @@ define <16 x i32> @test_masked_i32_to_16_mem_mask2(i32* %p, <16 x i32> %default,
define <16 x i32> @test_masked_z_i32_to_16_mem_mask2(i32* %p, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_16_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} {z}
@@ -2458,7 +2458,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mem_mask2(i32* %p, <16 x i32> %mask)
}
define <16 x i32> @test_masked_i32_to_16_mem_mask3(i32* %p, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_i32_to_16_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1}
@@ -2473,7 +2473,7 @@ define <16 x i32> @test_masked_i32_to_16_mem_mask3(i32* %p, <16 x i32> %default,
define <16 x i32> @test_masked_z_i32_to_16_mem_mask3(i32* %p, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_i32_to_16_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} {z}
@@ -2487,7 +2487,7 @@ define <16 x i32> @test_masked_z_i32_to_16_mem_mask3(i32* %p, <16 x i32> %mask)
}
define <2 x i64> @test_i64_to_2_mem(i64* %p) {
; CHECK-LABEL: test_i64_to_2_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0
; CHECK-NEXT: retq
%s = load i64, i64* %p
@@ -2497,7 +2497,7 @@ define <2 x i64> @test_i64_to_2_mem(i64* %p) {
}
define <2 x i64> @test_masked_i64_to_2_mem_mask0(i64* %p, <2 x i64> %default, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_2_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1}
@@ -2512,7 +2512,7 @@ define <2 x i64> @test_masked_i64_to_2_mem_mask0(i64* %p, <2 x i64> %default, <2
define <2 x i64> @test_masked_z_i64_to_2_mem_mask0(i64* %p, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_2_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1} {z}
@@ -2526,7 +2526,7 @@ define <2 x i64> @test_masked_z_i64_to_2_mem_mask0(i64* %p, <2 x i64> %mask) {
}
define <2 x i64> @test_masked_i64_to_2_mem_mask1(i64* %p, <2 x i64> %default, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_2_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1}
@@ -2541,7 +2541,7 @@ define <2 x i64> @test_masked_i64_to_2_mem_mask1(i64* %p, <2 x i64> %default, <2
define <2 x i64> @test_masked_z_i64_to_2_mem_mask1(i64* %p, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_2_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1} {z}
@@ -2555,7 +2555,7 @@ define <2 x i64> @test_masked_z_i64_to_2_mem_mask1(i64* %p, <2 x i64> %mask) {
}
define <4 x i64> @test_i64_to_4_mem(i64* %p) {
; CHECK-LABEL: test_i64_to_4_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0
; CHECK-NEXT: retq
%s = load i64, i64* %p
@@ -2565,7 +2565,7 @@ define <4 x i64> @test_i64_to_4_mem(i64* %p) {
}
define <4 x i64> @test_masked_i64_to_4_mem_mask0(i64* %p, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_4_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1}
@@ -2580,7 +2580,7 @@ define <4 x i64> @test_masked_i64_to_4_mem_mask0(i64* %p, <4 x i64> %default, <4
define <4 x i64> @test_masked_z_i64_to_4_mem_mask0(i64* %p, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_4_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} {z}
@@ -2594,7 +2594,7 @@ define <4 x i64> @test_masked_z_i64_to_4_mem_mask0(i64* %p, <4 x i64> %mask) {
}
define <4 x i64> @test_masked_i64_to_4_mem_mask1(i64* %p, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_4_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1}
@@ -2609,7 +2609,7 @@ define <4 x i64> @test_masked_i64_to_4_mem_mask1(i64* %p, <4 x i64> %default, <4
define <4 x i64> @test_masked_z_i64_to_4_mem_mask1(i64* %p, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_4_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} {z}
@@ -2623,7 +2623,7 @@ define <4 x i64> @test_masked_z_i64_to_4_mem_mask1(i64* %p, <4 x i64> %mask) {
}
define <4 x i64> @test_masked_i64_to_4_mem_mask2(i64* %p, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_4_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1}
@@ -2638,7 +2638,7 @@ define <4 x i64> @test_masked_i64_to_4_mem_mask2(i64* %p, <4 x i64> %default, <4
define <4 x i64> @test_masked_z_i64_to_4_mem_mask2(i64* %p, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_4_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} {z}
@@ -2652,7 +2652,7 @@ define <4 x i64> @test_masked_z_i64_to_4_mem_mask2(i64* %p, <4 x i64> %mask) {
}
define <4 x i64> @test_masked_i64_to_4_mem_mask3(i64* %p, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_4_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1}
@@ -2667,7 +2667,7 @@ define <4 x i64> @test_masked_i64_to_4_mem_mask3(i64* %p, <4 x i64> %default, <4
define <4 x i64> @test_masked_z_i64_to_4_mem_mask3(i64* %p, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_4_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} {z}
@@ -2681,7 +2681,7 @@ define <4 x i64> @test_masked_z_i64_to_4_mem_mask3(i64* %p, <4 x i64> %mask) {
}
define <8 x i64> @test_i64_to_8_mem(i64* %p) {
; CHECK-LABEL: test_i64_to_8_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0
; CHECK-NEXT: retq
%s = load i64, i64* %p
@@ -2691,7 +2691,7 @@ define <8 x i64> @test_i64_to_8_mem(i64* %p) {
}
define <8 x i64> @test_masked_i64_to_8_mem_mask0(i64* %p, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1}
@@ -2706,7 +2706,7 @@ define <8 x i64> @test_masked_i64_to_8_mem_mask0(i64* %p, <8 x i64> %default, <8
define <8 x i64> @test_masked_z_i64_to_8_mem_mask0(i64* %p, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_8_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} {z}
@@ -2720,7 +2720,7 @@ define <8 x i64> @test_masked_z_i64_to_8_mem_mask0(i64* %p, <8 x i64> %mask) {
}
define <8 x i64> @test_masked_i64_to_8_mem_mask1(i64* %p, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1}
@@ -2735,7 +2735,7 @@ define <8 x i64> @test_masked_i64_to_8_mem_mask1(i64* %p, <8 x i64> %default, <8
define <8 x i64> @test_masked_z_i64_to_8_mem_mask1(i64* %p, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_8_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} {z}
@@ -2749,7 +2749,7 @@ define <8 x i64> @test_masked_z_i64_to_8_mem_mask1(i64* %p, <8 x i64> %mask) {
}
define <8 x i64> @test_masked_i64_to_8_mem_mask2(i64* %p, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1}
@@ -2764,7 +2764,7 @@ define <8 x i64> @test_masked_i64_to_8_mem_mask2(i64* %p, <8 x i64> %default, <8
define <8 x i64> @test_masked_z_i64_to_8_mem_mask2(i64* %p, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_8_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} {z}
@@ -2778,7 +2778,7 @@ define <8 x i64> @test_masked_z_i64_to_8_mem_mask2(i64* %p, <8 x i64> %mask) {
}
define <8 x i64> @test_masked_i64_to_8_mem_mask3(i64* %p, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_i64_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1}
@@ -2793,7 +2793,7 @@ define <8 x i64> @test_masked_i64_to_8_mem_mask3(i64* %p, <8 x i64> %default, <8
define <8 x i64> @test_masked_z_i64_to_8_mem_mask3(i64* %p, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_i64_to_8_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} {z}
diff --git a/test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll b/test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll
index 3a664ba6c88..abc49d0ad88 100644
--- a/test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll
+++ b/test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll
@@ -3,7 +3,7 @@
define <8 x float> @test_2xfloat_to_8xfloat(<8 x float> %vec) {
; CHECK-LABEL: test_2xfloat_to_8xfloat:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -11,7 +11,7 @@ define <8 x float> @test_2xfloat_to_8xfloat(<8 x float> %vec) {
}
define <8 x float> @test_masked_2xfloat_to_8xfloat_mask0(<8 x float> %vec, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
@@ -25,7 +25,7 @@ define <8 x float> @test_masked_2xfloat_to_8xfloat_mask0(<8 x float> %vec, <8 x
define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask0(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
@@ -37,7 +37,7 @@ define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask0(<8 x float> %vec, <8
}
define <8 x float> @test_masked_2xfloat_to_8xfloat_mask1(<8 x float> %vec, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
@@ -51,7 +51,7 @@ define <8 x float> @test_masked_2xfloat_to_8xfloat_mask1(<8 x float> %vec, <8 x
define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask1(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
@@ -63,7 +63,7 @@ define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask1(<8 x float> %vec, <8
}
define <8 x float> @test_masked_2xfloat_to_8xfloat_mask2(<8 x float> %vec, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
@@ -77,7 +77,7 @@ define <8 x float> @test_masked_2xfloat_to_8xfloat_mask2(<8 x float> %vec, <8 x
define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask2(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
@@ -89,7 +89,7 @@ define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask2(<8 x float> %vec, <8
}
define <8 x float> @test_masked_2xfloat_to_8xfloat_mask3(<8 x float> %vec, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
@@ -103,7 +103,7 @@ define <8 x float> @test_masked_2xfloat_to_8xfloat_mask3(<8 x float> %vec, <8 x
define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask3(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
@@ -115,7 +115,7 @@ define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask3(<8 x float> %vec, <8
}
define <16 x float> @test_2xfloat_to_16xfloat(<16 x float> %vec) {
; CHECK-LABEL: test_2xfloat_to_16xfloat:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -123,7 +123,7 @@ define <16 x float> @test_2xfloat_to_16xfloat(<16 x float> %vec) {
}
define <16 x float> @test_masked_2xfloat_to_16xfloat_mask0(<16 x float> %vec, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -137,7 +137,7 @@ define <16 x float> @test_masked_2xfloat_to_16xfloat_mask0(<16 x float> %vec, <1
define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask0(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -149,7 +149,7 @@ define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask0(<16 x float> %vec,
}
define <16 x float> @test_masked_2xfloat_to_16xfloat_mask1(<16 x float> %vec, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -163,7 +163,7 @@ define <16 x float> @test_masked_2xfloat_to_16xfloat_mask1(<16 x float> %vec, <1
define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask1(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -175,7 +175,7 @@ define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask1(<16 x float> %vec,
}
define <16 x float> @test_masked_2xfloat_to_16xfloat_mask2(<16 x float> %vec, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -189,7 +189,7 @@ define <16 x float> @test_masked_2xfloat_to_16xfloat_mask2(<16 x float> %vec, <1
define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask2(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -201,7 +201,7 @@ define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask2(<16 x float> %vec,
}
define <16 x float> @test_masked_2xfloat_to_16xfloat_mask3(<16 x float> %vec, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -215,7 +215,7 @@ define <16 x float> @test_masked_2xfloat_to_16xfloat_mask3(<16 x float> %vec, <1
define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask3(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -227,7 +227,7 @@ define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask3(<16 x float> %vec,
}
define <4 x double> @test_2xdouble_to_4xdouble_mem(<2 x double>* %vp) {
; CHECK-LABEL: test_2xdouble_to_4xdouble_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; CHECK-NEXT: retq
%vec = load <2 x double>, <2 x double>* %vp
@@ -236,7 +236,7 @@ define <4 x double> @test_2xdouble_to_4xdouble_mem(<2 x double>* %vp) {
}
define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask0(<2 x double>* %vp, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_to_4xdouble_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1]
@@ -250,7 +250,7 @@ define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask0(<2 x double>* %v
define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask0(<2 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_to_4xdouble_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
@@ -263,7 +263,7 @@ define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask0(<2 x double>*
}
define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask1(<2 x double>* %vp, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_to_4xdouble_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1]
@@ -277,7 +277,7 @@ define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask1(<2 x double>* %v
define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask1(<2 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_to_4xdouble_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
@@ -290,7 +290,7 @@ define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask1(<2 x double>*
}
define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask2(<2 x double>* %vp, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_to_4xdouble_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1]
@@ -304,7 +304,7 @@ define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask2(<2 x double>* %v
define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask2(<2 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_to_4xdouble_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
@@ -317,7 +317,7 @@ define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask2(<2 x double>*
}
define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask3(<2 x double>* %vp, <4 x double> %default, <4 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_to_4xdouble_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1]
@@ -331,7 +331,7 @@ define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask3(<2 x double>* %v
define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask3(<2 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_to_4xdouble_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
@@ -344,7 +344,7 @@ define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask3(<2 x double>*
}
define <8 x double> @test_2xdouble_to_8xdouble_mem(<2 x double>* %vp) {
; CHECK-LABEL: test_2xdouble_to_8xdouble_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
%vec = load <2 x double>, <2 x double>* %vp
@@ -353,7 +353,7 @@ define <8 x double> @test_2xdouble_to_8xdouble_mem(<2 x double>* %vp) {
}
define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask0(<2 x double>* %vp, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_to_8xdouble_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
@@ -367,7 +367,7 @@ define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask0(<2 x double>* %v
define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask0(<2 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_to_8xdouble_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
@@ -380,7 +380,7 @@ define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask0(<2 x double>*
}
define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask1(<2 x double>* %vp, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_to_8xdouble_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
@@ -394,7 +394,7 @@ define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask1(<2 x double>* %v
define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask1(<2 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_to_8xdouble_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
@@ -407,7 +407,7 @@ define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask1(<2 x double>*
}
define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask2(<2 x double>* %vp, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_to_8xdouble_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
@@ -421,7 +421,7 @@ define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask2(<2 x double>* %v
define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask2(<2 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_to_8xdouble_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
@@ -434,7 +434,7 @@ define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask2(<2 x double>*
}
define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask3(<2 x double>* %vp, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_to_8xdouble_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
@@ -448,7 +448,7 @@ define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask3(<2 x double>* %v
define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask3(<2 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_to_8xdouble_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
@@ -461,7 +461,7 @@ define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask3(<2 x double>*
}
define <8 x double> @test_4xdouble_to_8xdouble_mem(<4 x double>* %vp) {
; CHECK-LABEL: test_4xdouble_to_8xdouble_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %vp
@@ -470,7 +470,7 @@ define <8 x double> @test_4xdouble_to_8xdouble_mem(<4 x double>* %vp) {
}
define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask0(<4 x double>* %vp, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_to_8xdouble_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -484,7 +484,7 @@ define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask0(<4 x double>* %v
define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask0(<4 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_8xdouble_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -497,7 +497,7 @@ define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask0(<4 x double>*
}
define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask1(<4 x double>* %vp, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_to_8xdouble_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -511,7 +511,7 @@ define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask1(<4 x double>* %v
define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask1(<4 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_8xdouble_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -524,7 +524,7 @@ define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask1(<4 x double>*
}
define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask2(<4 x double>* %vp, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_to_8xdouble_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -538,7 +538,7 @@ define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask2(<4 x double>* %v
define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask2(<4 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_8xdouble_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -551,7 +551,7 @@ define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask2(<4 x double>*
}
define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask3(<4 x double>* %vp, <8 x double> %default, <8 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_to_8xdouble_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -565,7 +565,7 @@ define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask3(<4 x double>* %v
define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask3(<4 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_8xdouble_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -578,7 +578,7 @@ define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask3(<4 x double>*
}
define <8 x float> @test_2xfloat_to_8xfloat_mem(<2 x float>* %vp) {
; CHECK-LABEL: test_2xfloat_to_8xfloat_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: retq
@@ -588,7 +588,7 @@ define <8 x float> @test_2xfloat_to_8xfloat_mem(<2 x float>* %vp) {
}
define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask0(<2 x float>* %vp, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -603,7 +603,7 @@ define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask0(<2 x float>* %vp, <
define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask0(<2 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -617,7 +617,7 @@ define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask0(<2 x float>* %vp,
}
define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask1(<2 x float>* %vp, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -632,7 +632,7 @@ define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask1(<2 x float>* %vp, <
define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask1(<2 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -646,7 +646,7 @@ define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask1(<2 x float>* %vp,
}
define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask2(<2 x float>* %vp, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -661,7 +661,7 @@ define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask2(<2 x float>* %vp, <
define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask2(<2 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -675,7 +675,7 @@ define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask2(<2 x float>* %vp,
}
define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask3(<2 x float>* %vp, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -690,7 +690,7 @@ define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask3(<2 x float>* %vp, <
define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask3(<2 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -704,7 +704,7 @@ define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask3(<2 x float>* %vp,
}
define <16 x float> @test_2xfloat_to_16xfloat_mem(<2 x float>* %vp) {
; CHECK-LABEL: test_2xfloat_to_16xfloat_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0
; CHECK-NEXT: retq
@@ -714,7 +714,7 @@ define <16 x float> @test_2xfloat_to_16xfloat_mem(<2 x float>* %vp) {
}
define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask0(<2 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -729,7 +729,7 @@ define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask0(<2 x float>* %vp,
define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask0(<2 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -743,7 +743,7 @@ define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask0(<2 x float>* %v
}
define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask1(<2 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -758,7 +758,7 @@ define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask1(<2 x float>* %vp,
define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask1(<2 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -772,7 +772,7 @@ define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask1(<2 x float>* %v
}
define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask2(<2 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -787,7 +787,7 @@ define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask2(<2 x float>* %vp,
define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask2(<2 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -801,7 +801,7 @@ define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask2(<2 x float>* %v
}
define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask3(<2 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -816,7 +816,7 @@ define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask3(<2 x float>* %vp,
define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask3(<2 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -830,7 +830,7 @@ define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask3(<2 x float>* %v
}
define <8 x float> @test_4xfloat_to_8xfloat_mem(<4 x float>* %vp) {
; CHECK-LABEL: test_4xfloat_to_8xfloat_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; CHECK-NEXT: retq
%vec = load <4 x float>, <4 x float>* %vp
@@ -839,7 +839,7 @@ define <8 x float> @test_4xfloat_to_8xfloat_mem(<4 x float>* %vp) {
}
define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask0(<4 x float>* %vp, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_to_8xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -853,7 +853,7 @@ define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask0(<4 x float>* %vp, <
define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask0(<4 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_to_8xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -866,7 +866,7 @@ define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask0(<4 x float>* %vp,
}
define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask1(<4 x float>* %vp, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_to_8xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -880,7 +880,7 @@ define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask1(<4 x float>* %vp, <
define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask1(<4 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_to_8xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -893,7 +893,7 @@ define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask1(<4 x float>* %vp,
}
define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask2(<4 x float>* %vp, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_to_8xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -907,7 +907,7 @@ define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask2(<4 x float>* %vp, <
define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask2(<4 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_to_8xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -920,7 +920,7 @@ define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask2(<4 x float>* %vp,
}
define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask3(<4 x float>* %vp, <8 x float> %default, <8 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_to_8xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -934,7 +934,7 @@ define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask3(<4 x float>* %vp, <
define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask3(<4 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_to_8xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -947,7 +947,7 @@ define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask3(<4 x float>* %vp,
}
define <16 x float> @test_4xfloat_to_16xfloat_mem(<4 x float>* %vp) {
; CHECK-LABEL: test_4xfloat_to_16xfloat_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
%vec = load <4 x float>, <4 x float>* %vp
@@ -956,7 +956,7 @@ define <16 x float> @test_4xfloat_to_16xfloat_mem(<4 x float>* %vp) {
}
define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask0(<4 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_to_16xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -970,7 +970,7 @@ define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask0(<4 x float>* %vp,
define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask0(<4 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_to_16xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -983,7 +983,7 @@ define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask0(<4 x float>* %v
}
define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask1(<4 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_to_16xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -997,7 +997,7 @@ define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask1(<4 x float>* %vp,
define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask1(<4 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_to_16xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -1010,7 +1010,7 @@ define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask1(<4 x float>* %v
}
define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask2(<4 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_to_16xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -1024,7 +1024,7 @@ define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask2(<4 x float>* %vp,
define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask2(<4 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_to_16xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -1037,7 +1037,7 @@ define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask2(<4 x float>* %v
}
define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask3(<4 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_to_16xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -1051,7 +1051,7 @@ define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask3(<4 x float>* %vp,
define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask3(<4 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_to_16xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -1064,7 +1064,7 @@ define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask3(<4 x float>* %v
}
define <16 x float> @test_8xfloat_to_16xfloat_mem(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_to_16xfloat_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
%vec = load <8 x float>, <8 x float>* %vp
@@ -1073,7 +1073,7 @@ define <16 x float> @test_8xfloat_to_16xfloat_mem(<8 x float>* %vp) {
}
define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask0(<8 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_16xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1087,7 +1087,7 @@ define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask0(<8 x float>* %vp,
define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask0(<8 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_16xfloat_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1100,7 +1100,7 @@ define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask0(<8 x float>* %v
}
define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask1(<8 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_16xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1114,7 +1114,7 @@ define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask1(<8 x float>* %vp,
define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask1(<8 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_16xfloat_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1127,7 +1127,7 @@ define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask1(<8 x float>* %v
}
define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask2(<8 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_16xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1141,7 +1141,7 @@ define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask2(<8 x float>* %vp,
define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask2(<8 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_16xfloat_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1154,7 +1154,7 @@ define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask2(<8 x float>* %v
}
define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask3(<8 x float>* %vp, <16 x float> %default, <16 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_16xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1168,7 +1168,7 @@ define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask3(<8 x float>* %vp,
define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask3(<8 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_16xfloat_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
diff --git a/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll b/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll
index 4cf35868647..a6abe24d253 100644
--- a/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll
+++ b/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll
@@ -5,7 +5,7 @@
define <4 x i32> @test_2xi32_to_4xi32(<4 x i32> %vec) {
; CHECK-LABEL: test_2xi32_to_4xi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
; CHECK-NEXT: retq
%res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -13,7 +13,7 @@ define <4 x i32> @test_2xi32_to_4xi32(<4 x i32> %vec) {
}
define <4 x i32> @test_masked_2xi32_to_4xi32_mask0(<4 x i32> %vec, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_4xi32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm1 {%k1} = xmm0[0,1,0,1]
@@ -27,7 +27,7 @@ define <4 x i32> @test_masked_2xi32_to_4xi32_mask0(<4 x i32> %vec, <4 x i32> %de
define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask0(<4 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,0,1]
@@ -39,7 +39,7 @@ define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask0(<4 x i32> %vec, <4 x i32> %
}
define <4 x i32> @test_masked_2xi32_to_4xi32_mask1(<4 x i32> %vec, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_4xi32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm1 {%k1} = xmm0[0,1,0,1]
@@ -53,7 +53,7 @@ define <4 x i32> @test_masked_2xi32_to_4xi32_mask1(<4 x i32> %vec, <4 x i32> %de
define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask1(<4 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,0,1]
@@ -65,7 +65,7 @@ define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask1(<4 x i32> %vec, <4 x i32> %
}
define <4 x i32> @test_masked_2xi32_to_4xi32_mask2(<4 x i32> %vec, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_4xi32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm1 {%k1} = xmm0[0,1,0,1]
@@ -79,7 +79,7 @@ define <4 x i32> @test_masked_2xi32_to_4xi32_mask2(<4 x i32> %vec, <4 x i32> %de
define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask2(<4 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,0,1]
@@ -91,7 +91,7 @@ define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask2(<4 x i32> %vec, <4 x i32> %
}
define <4 x i32> @test_masked_2xi32_to_4xi32_mask3(<4 x i32> %vec, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_4xi32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm1 {%k1} = xmm0[0,1,0,1]
@@ -105,7 +105,7 @@ define <4 x i32> @test_masked_2xi32_to_4xi32_mask3(<4 x i32> %vec, <4 x i32> %de
define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask3(<4 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,0,1]
@@ -117,7 +117,7 @@ define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask3(<4 x i32> %vec, <4 x i32> %
}
define <8 x i32> @test_2xi32_to_8xi32(<8 x i32> %vec) {
; CHECK-LABEL: test_2xi32_to_8xi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: retq
%res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -125,7 +125,7 @@ define <8 x i32> @test_2xi32_to_8xi32(<8 x i32> %vec) {
}
define <8 x i32> @test_masked_2xi32_to_8xi32_mask0(<8 x i32> %vec, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_8xi32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
@@ -139,7 +139,7 @@ define <8 x i32> @test_masked_2xi32_to_8xi32_mask0(<8 x i32> %vec, <8 x i32> %de
define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask0(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
@@ -151,7 +151,7 @@ define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask0(<8 x i32> %vec, <8 x i32> %
}
define <8 x i32> @test_masked_2xi32_to_8xi32_mask1(<8 x i32> %vec, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_8xi32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
@@ -165,7 +165,7 @@ define <8 x i32> @test_masked_2xi32_to_8xi32_mask1(<8 x i32> %vec, <8 x i32> %de
define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask1(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
@@ -177,7 +177,7 @@ define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask1(<8 x i32> %vec, <8 x i32> %
}
define <8 x i32> @test_masked_2xi32_to_8xi32_mask2(<8 x i32> %vec, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_8xi32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
@@ -191,7 +191,7 @@ define <8 x i32> @test_masked_2xi32_to_8xi32_mask2(<8 x i32> %vec, <8 x i32> %de
define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask2(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
@@ -203,7 +203,7 @@ define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask2(<8 x i32> %vec, <8 x i32> %
}
define <8 x i32> @test_masked_2xi32_to_8xi32_mask3(<8 x i32> %vec, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_8xi32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
@@ -217,7 +217,7 @@ define <8 x i32> @test_masked_2xi32_to_8xi32_mask3(<8 x i32> %vec, <8 x i32> %de
define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask3(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
@@ -229,7 +229,7 @@ define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask3(<8 x i32> %vec, <8 x i32> %
}
define <16 x i32> @test_2xi32_to_16xi32(<16 x i32> %vec) {
; CHECK-LABEL: test_2xi32_to_16xi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0
; CHECK-NEXT: retq
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -237,7 +237,7 @@ define <16 x i32> @test_2xi32_to_16xi32(<16 x i32> %vec) {
}
define <16 x i32> @test_masked_2xi32_to_16xi32_mask0(<16 x i32> %vec, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_16xi32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -251,7 +251,7 @@ define <16 x i32> @test_masked_2xi32_to_16xi32_mask0(<16 x i32> %vec, <16 x i32>
define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask0(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -263,7 +263,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask0(<16 x i32> %vec, <16 x i3
}
define <16 x i32> @test_masked_2xi32_to_16xi32_mask1(<16 x i32> %vec, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_16xi32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -277,7 +277,7 @@ define <16 x i32> @test_masked_2xi32_to_16xi32_mask1(<16 x i32> %vec, <16 x i32>
define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask1(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -289,7 +289,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask1(<16 x i32> %vec, <16 x i3
}
define <16 x i32> @test_masked_2xi32_to_16xi32_mask2(<16 x i32> %vec, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_16xi32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -303,7 +303,7 @@ define <16 x i32> @test_masked_2xi32_to_16xi32_mask2(<16 x i32> %vec, <16 x i32>
define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask2(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -315,7 +315,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask2(<16 x i32> %vec, <16 x i3
}
define <16 x i32> @test_masked_2xi32_to_16xi32_mask3(<16 x i32> %vec, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_16xi32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -329,7 +329,7 @@ define <16 x i32> @test_masked_2xi32_to_16xi32_mask3(<16 x i32> %vec, <16 x i32>
define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask3(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -341,7 +341,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask3(<16 x i32> %vec, <16 x i3
}
define <4 x i32> @test_2xi32_to_4xi32_mem(<2 x i32>* %vp) {
; CHECK-LABEL: test_2xi32_to_4xi32_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0
; CHECK-NEXT: retq
%vec = load <2 x i32>, <2 x i32>* %vp
@@ -350,7 +350,7 @@ define <4 x i32> @test_2xi32_to_4xi32_mem(<2 x i32>* %vp) {
}
define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask0(<2 x i32>* %vp, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_4xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} = mem[0,1,0,1]
@@ -364,7 +364,7 @@ define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask0(<2 x i32>* %vp, <4 x i32>
define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask0(<2 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = mem[0,1,0,1]
@@ -377,7 +377,7 @@ define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask0(<2 x i32>* %vp, <4 x i3
}
define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask1(<2 x i32>* %vp, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_4xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} = mem[0,1,0,1]
@@ -391,7 +391,7 @@ define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask1(<2 x i32>* %vp, <4 x i32>
define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask1(<2 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = mem[0,1,0,1]
@@ -404,7 +404,7 @@ define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask1(<2 x i32>* %vp, <4 x i3
}
define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask2(<2 x i32>* %vp, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_4xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} = mem[0,1,0,1]
@@ -418,7 +418,7 @@ define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask2(<2 x i32>* %vp, <4 x i32>
define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask2(<2 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = mem[0,1,0,1]
@@ -431,7 +431,7 @@ define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask2(<2 x i32>* %vp, <4 x i3
}
define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask3(<2 x i32>* %vp, <4 x i32> %default, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_4xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} = mem[0,1,0,1]
@@ -445,7 +445,7 @@ define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask3(<2 x i32>* %vp, <4 x i32>
define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask3(<2 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = mem[0,1,0,1]
@@ -458,7 +458,7 @@ define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask3(<2 x i32>* %vp, <4 x i3
}
define <8 x i32> @test_2xi32_to_8xi32_mem(<2 x i32>* %vp) {
; CHECK-LABEL: test_2xi32_to_8xi32_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0
@@ -469,7 +469,7 @@ define <8 x i32> @test_2xi32_to_8xi32_mem(<2 x i32>* %vp) {
}
define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask0(<2 x i32>* %vp, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_8xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -485,7 +485,7 @@ define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask0(<2 x i32>* %vp, <8 x i32>
define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask0(<2 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -500,7 +500,7 @@ define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask0(<2 x i32>* %vp, <8 x i3
}
define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask1(<2 x i32>* %vp, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_8xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -516,7 +516,7 @@ define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask1(<2 x i32>* %vp, <8 x i32>
define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask1(<2 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -531,7 +531,7 @@ define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask1(<2 x i32>* %vp, <8 x i3
}
define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask2(<2 x i32>* %vp, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_8xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -547,7 +547,7 @@ define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask2(<2 x i32>* %vp, <8 x i32>
define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask2(<2 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -562,7 +562,7 @@ define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask2(<2 x i32>* %vp, <8 x i3
}
define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask3(<2 x i32>* %vp, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_8xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -578,7 +578,7 @@ define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask3(<2 x i32>* %vp, <8 x i32>
define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask3(<2 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -593,7 +593,7 @@ define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask3(<2 x i32>* %vp, <8 x i3
}
define <16 x i32> @test_2xi32_to_16xi32_mem(<2 x i32>* %vp) {
; CHECK-LABEL: test_2xi32_to_16xi32_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0
@@ -604,7 +604,7 @@ define <16 x i32> @test_2xi32_to_16xi32_mem(<2 x i32>* %vp) {
}
define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask0(<2 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_16xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -620,7 +620,7 @@ define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask0(<2 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask0(<2 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -635,7 +635,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask0(<2 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask1(<2 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_16xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -651,7 +651,7 @@ define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask1(<2 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask1(<2 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -666,7 +666,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask1(<2 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask2(<2 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_16xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -682,7 +682,7 @@ define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask2(<2 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask2(<2 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -697,7 +697,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask2(<2 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask3(<2 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_2xi32_to_16xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm3 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -713,7 +713,7 @@ define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask3(<2 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask3(<2 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -728,7 +728,7 @@ define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask3(<2 x i32>* %vp, <16 x
}
define <8 x i32> @test_4xi32_to_8xi32_mem(<4 x i32>* %vp) {
; CHECK-LABEL: test_4xi32_to_8xi32_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; CHECK-NEXT: retq
%vec = load <4 x i32>, <4 x i32>* %vp
@@ -737,7 +737,7 @@ define <8 x i32> @test_4xi32_to_8xi32_mem(<4 x i32>* %vp) {
}
define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask0(<4 x i32>* %vp, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_to_8xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -751,7 +751,7 @@ define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask0(<4 x i32>* %vp, <8 x i32>
define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask0(<4 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_to_8xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -764,7 +764,7 @@ define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask0(<4 x i32>* %vp, <8 x i3
}
define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask1(<4 x i32>* %vp, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_to_8xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -778,7 +778,7 @@ define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask1(<4 x i32>* %vp, <8 x i32>
define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask1(<4 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_to_8xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -791,7 +791,7 @@ define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask1(<4 x i32>* %vp, <8 x i3
}
define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask2(<4 x i32>* %vp, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_to_8xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -805,7 +805,7 @@ define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask2(<4 x i32>* %vp, <8 x i32>
define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask2(<4 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_to_8xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -818,7 +818,7 @@ define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask2(<4 x i32>* %vp, <8 x i3
}
define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask3(<4 x i32>* %vp, <8 x i32> %default, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_to_8xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -832,7 +832,7 @@ define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask3(<4 x i32>* %vp, <8 x i32>
define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask3(<4 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_to_8xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -845,7 +845,7 @@ define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask3(<4 x i32>* %vp, <8 x i3
}
define <16 x i32> @test_4xi32_to_16xi32_mem(<4 x i32>* %vp) {
; CHECK-LABEL: test_4xi32_to_16xi32_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
%vec = load <4 x i32>, <4 x i32>* %vp
@@ -854,7 +854,7 @@ define <16 x i32> @test_4xi32_to_16xi32_mem(<4 x i32>* %vp) {
}
define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask0(<4 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_to_16xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -868,7 +868,7 @@ define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask0(<4 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask0(<4 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_to_16xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -881,7 +881,7 @@ define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask0(<4 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask1(<4 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_to_16xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -895,7 +895,7 @@ define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask1(<4 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask1(<4 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_to_16xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -908,7 +908,7 @@ define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask1(<4 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask2(<4 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_to_16xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -922,7 +922,7 @@ define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask2(<4 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask2(<4 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_to_16xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -935,7 +935,7 @@ define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask2(<4 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask3(<4 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_to_16xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -949,7 +949,7 @@ define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask3(<4 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask3(<4 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_to_16xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -962,7 +962,7 @@ define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask3(<4 x i32>* %vp, <16 x
}
define <4 x i64> @test_2xi64_to_4xi64_mem(<2 x i64>* %vp) {
; CHECK-LABEL: test_2xi64_to_4xi64_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; CHECK-NEXT: retq
%vec = load <2 x i64>, <2 x i64>* %vp
@@ -971,7 +971,7 @@ define <4 x i64> @test_2xi64_to_4xi64_mem(<2 x i64>* %vp) {
}
define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask0(<2 x i64>* %vp, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_2xi64_to_4xi64_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1]
@@ -985,7 +985,7 @@ define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask0(<2 x i64>* %vp, <4 x i64>
define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask0(<2 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_2xi64_to_4xi64_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
@@ -998,7 +998,7 @@ define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask0(<2 x i64>* %vp, <4 x i6
}
define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask1(<2 x i64>* %vp, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_2xi64_to_4xi64_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1]
@@ -1012,7 +1012,7 @@ define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask1(<2 x i64>* %vp, <4 x i64>
define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask1(<2 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_2xi64_to_4xi64_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
@@ -1025,7 +1025,7 @@ define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask1(<2 x i64>* %vp, <4 x i6
}
define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask2(<2 x i64>* %vp, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_2xi64_to_4xi64_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1]
@@ -1039,7 +1039,7 @@ define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask2(<2 x i64>* %vp, <4 x i64>
define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask2(<2 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_2xi64_to_4xi64_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
@@ -1052,7 +1052,7 @@ define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask2(<2 x i64>* %vp, <4 x i6
}
define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask3(<2 x i64>* %vp, <4 x i64> %default, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_2xi64_to_4xi64_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1]
@@ -1066,7 +1066,7 @@ define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask3(<2 x i64>* %vp, <4 x i64>
define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask3(<2 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_2xi64_to_4xi64_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
@@ -1079,7 +1079,7 @@ define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask3(<2 x i64>* %vp, <4 x i6
}
define <8 x i64> @test_2xi64_to_8xi64_mem(<2 x i64>* %vp) {
; CHECK-LABEL: test_2xi64_to_8xi64_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
%vec = load <2 x i64>, <2 x i64>* %vp
@@ -1088,7 +1088,7 @@ define <8 x i64> @test_2xi64_to_8xi64_mem(<2 x i64>* %vp) {
}
define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask0(<2 x i64>* %vp, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_2xi64_to_8xi64_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
@@ -1102,7 +1102,7 @@ define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask0(<2 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask0(<2 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_2xi64_to_8xi64_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
@@ -1115,7 +1115,7 @@ define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask0(<2 x i64>* %vp, <8 x i6
}
define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask1(<2 x i64>* %vp, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_2xi64_to_8xi64_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
@@ -1129,7 +1129,7 @@ define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask1(<2 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask1(<2 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_2xi64_to_8xi64_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
@@ -1142,7 +1142,7 @@ define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask1(<2 x i64>* %vp, <8 x i6
}
define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask2(<2 x i64>* %vp, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_2xi64_to_8xi64_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
@@ -1156,7 +1156,7 @@ define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask2(<2 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask2(<2 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_2xi64_to_8xi64_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
@@ -1169,7 +1169,7 @@ define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask2(<2 x i64>* %vp, <8 x i6
}
define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask3(<2 x i64>* %vp, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_2xi64_to_8xi64_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
@@ -1183,7 +1183,7 @@ define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask3(<2 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask3(<2 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_2xi64_to_8xi64_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
@@ -1196,7 +1196,7 @@ define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask3(<2 x i64>* %vp, <8 x i6
}
define <16 x i32> @test_8xi32_to_16xi32_mem(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_to_16xi32_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
%vec = load <8 x i32>, <8 x i32>* %vp
@@ -1205,7 +1205,7 @@ define <16 x i32> @test_8xi32_to_16xi32_mem(<8 x i32>* %vp) {
}
define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask0(<8 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_16xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1219,7 +1219,7 @@ define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask0(<8 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask0(<8 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_16xi32_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1232,7 +1232,7 @@ define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask0(<8 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask1(<8 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_16xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1246,7 +1246,7 @@ define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask1(<8 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask1(<8 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_16xi32_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1259,7 +1259,7 @@ define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask1(<8 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask2(<8 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_16xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1273,7 +1273,7 @@ define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask2(<8 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask2(<8 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_16xi32_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1286,7 +1286,7 @@ define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask2(<8 x i32>* %vp, <16 x
}
define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask3(<8 x i32>* %vp, <16 x i32> %default, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_16xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1300,7 +1300,7 @@ define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask3(<8 x i32>* %vp, <16 x i
define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask3(<8 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_16xi32_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
@@ -1313,7 +1313,7 @@ define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask3(<8 x i32>* %vp, <16 x
}
define <8 x i64> @test_4xi64_to_8xi64_mem(<4 x i64>* %vp) {
; CHECK-LABEL: test_4xi64_to_8xi64_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %vp
@@ -1322,7 +1322,7 @@ define <8 x i64> @test_4xi64_to_8xi64_mem(<4 x i64>* %vp) {
}
define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask0(<4 x i64>* %vp, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_to_8xi64_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -1336,7 +1336,7 @@ define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask0(<4 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask0(<4 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_8xi64_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -1349,7 +1349,7 @@ define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask0(<4 x i64>* %vp, <8 x i6
}
define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask1(<4 x i64>* %vp, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_to_8xi64_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -1363,7 +1363,7 @@ define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask1(<4 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask1(<4 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_8xi64_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -1376,7 +1376,7 @@ define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask1(<4 x i64>* %vp, <8 x i6
}
define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask2(<4 x i64>* %vp, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_to_8xi64_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -1390,7 +1390,7 @@ define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask2(<4 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask2(<4 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_8xi64_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
@@ -1403,7 +1403,7 @@ define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask2(<4 x i64>* %vp, <8 x i6
}
define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask3(<4 x i64>* %vp, <8 x i64> %default, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_to_8xi64_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -1417,7 +1417,7 @@ define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask3(<4 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask3(<4 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_8xi64_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
diff --git a/test/CodeGen/X86/avx512-shuffles/duplicate-high.ll b/test/CodeGen/X86/avx512-shuffles/duplicate-high.ll
index c1aff0991e4..195c8567899 100644
--- a/test/CodeGen/X86/avx512-shuffles/duplicate-high.ll
+++ b/test/CodeGen/X86/avx512-shuffles/duplicate-high.ll
@@ -3,7 +3,7 @@
define <4 x float> @test_4xfloat_dup_high(<4 x float> %vec) {
; CHECK-LABEL: test_4xfloat_dup_high:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -11,7 +11,7 @@ define <4 x float> @test_4xfloat_dup_high(<4 x float> %vec) {
}
define <4 x float> @test_masked_4xfloat_dup_high_mask0(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3]
@@ -25,7 +25,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mask0(<4 x float> %vec, <4 x fl
define <4 x float> @test_masked_z_4xfloat_dup_high_mask0(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3]
@@ -37,7 +37,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mask0(<4 x float> %vec, <4 x
}
define <4 x float> @test_masked_4xfloat_dup_high_mask1(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3]
@@ -51,7 +51,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mask1(<4 x float> %vec, <4 x fl
define <4 x float> @test_masked_z_4xfloat_dup_high_mask1(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3]
@@ -63,7 +63,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mask1(<4 x float> %vec, <4 x
}
define <4 x float> @test_masked_4xfloat_dup_high_mask2(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3]
@@ -77,7 +77,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mask2(<4 x float> %vec, <4 x fl
define <4 x float> @test_masked_z_4xfloat_dup_high_mask2(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3]
@@ -89,7 +89,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mask2(<4 x float> %vec, <4 x
}
define <4 x float> @test_masked_4xfloat_dup_high_mask3(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3]
@@ -103,7 +103,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mask3(<4 x float> %vec, <4 x fl
define <4 x float> @test_masked_z_4xfloat_dup_high_mask3(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3]
@@ -115,7 +115,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mask3(<4 x float> %vec, <4 x
}
define <4 x float> @test_masked_4xfloat_dup_high_mask4(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3]
@@ -129,7 +129,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mask4(<4 x float> %vec, <4 x fl
define <4 x float> @test_masked_z_4xfloat_dup_high_mask4(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3]
@@ -141,7 +141,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mask4(<4 x float> %vec, <4 x
}
define <4 x float> @test_4xfloat_dup_high_mem(<4 x float>* %vp) {
; CHECK-LABEL: test_4xfloat_dup_high_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 = mem[1,1,3,3]
; CHECK-NEXT: retq
%vec = load <4 x float>, <4 x float>* %vp
@@ -150,7 +150,7 @@ define <4 x float> @test_4xfloat_dup_high_mem(<4 x float>* %vp) {
}
define <4 x float> @test_masked_4xfloat_dup_high_mem_mask0(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3]
@@ -164,7 +164,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mem_mask0(<4 x float>* %vp, <4
define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask0(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3]
@@ -177,7 +177,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask0(<4 x float>* %vp, <
}
define <4 x float> @test_masked_4xfloat_dup_high_mem_mask1(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3]
@@ -191,7 +191,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mem_mask1(<4 x float>* %vp, <4
define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask1(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3]
@@ -204,7 +204,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask1(<4 x float>* %vp, <
}
define <4 x float> @test_masked_4xfloat_dup_high_mem_mask2(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3]
@@ -218,7 +218,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mem_mask2(<4 x float>* %vp, <4
define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask2(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3]
@@ -231,7 +231,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask2(<4 x float>* %vp, <
}
define <4 x float> @test_masked_4xfloat_dup_high_mem_mask3(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3]
@@ -245,7 +245,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mem_mask3(<4 x float>* %vp, <4
define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask3(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3]
@@ -258,7 +258,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask3(<4 x float>* %vp, <
}
define <4 x float> @test_masked_4xfloat_dup_high_mem_mask4(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3]
@@ -272,7 +272,7 @@ define <4 x float> @test_masked_4xfloat_dup_high_mem_mask4(<4 x float>* %vp, <4
define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask4(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3]
@@ -285,7 +285,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask4(<4 x float>* %vp, <
}
define <8 x float> @test_8xfloat_dup_high(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_dup_high:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -293,7 +293,7 @@ define <8 x float> @test_8xfloat_dup_high(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_dup_high_mask0(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7]
@@ -307,7 +307,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mask0(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_dup_high_mask0(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
@@ -319,7 +319,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mask0(<8 x float> %vec, <8 x
}
define <8 x float> @test_masked_8xfloat_dup_high_mask1(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7]
@@ -333,7 +333,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mask1(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_dup_high_mask1(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
@@ -345,7 +345,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mask1(<8 x float> %vec, <8 x
}
define <8 x float> @test_masked_8xfloat_dup_high_mask2(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7]
@@ -359,7 +359,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mask2(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_dup_high_mask2(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
@@ -371,7 +371,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mask2(<8 x float> %vec, <8 x
}
define <8 x float> @test_masked_8xfloat_dup_high_mask3(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7]
@@ -385,7 +385,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mask3(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_dup_high_mask3(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
@@ -397,7 +397,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mask3(<8 x float> %vec, <8 x
}
define <8 x float> @test_masked_8xfloat_dup_high_mask4(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7]
@@ -411,7 +411,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mask4(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_dup_high_mask4(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
@@ -423,7 +423,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mask4(<8 x float> %vec, <8 x
}
define <8 x float> @test_8xfloat_dup_high_mem(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_dup_high_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 = mem[1,1,3,3,5,5,7,7]
; CHECK-NEXT: retq
%vec = load <8 x float>, <8 x float>* %vp
@@ -432,7 +432,7 @@ define <8 x float> @test_8xfloat_dup_high_mem(<8 x float>* %vp) {
}
define <8 x float> @test_masked_8xfloat_dup_high_mem_mask0(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7]
@@ -446,7 +446,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mem_mask0(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask0(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7]
@@ -459,7 +459,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask0(<8 x float>* %vp, <
}
define <8 x float> @test_masked_8xfloat_dup_high_mem_mask1(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7]
@@ -473,7 +473,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mem_mask1(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask1(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7]
@@ -486,7 +486,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask1(<8 x float>* %vp, <
}
define <8 x float> @test_masked_8xfloat_dup_high_mem_mask2(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7]
@@ -500,7 +500,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mem_mask2(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask2(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7]
@@ -513,7 +513,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask2(<8 x float>* %vp, <
}
define <8 x float> @test_masked_8xfloat_dup_high_mem_mask3(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7]
@@ -527,7 +527,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mem_mask3(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask3(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7]
@@ -540,7 +540,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask3(<8 x float>* %vp, <
}
define <8 x float> @test_masked_8xfloat_dup_high_mem_mask4(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7]
@@ -554,7 +554,7 @@ define <8 x float> @test_masked_8xfloat_dup_high_mem_mask4(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask4(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7]
@@ -567,7 +567,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask4(<8 x float>* %vp, <
}
define <16 x float> @test_16xfloat_dup_high(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_dup_high:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
@@ -575,7 +575,7 @@ define <16 x float> @test_16xfloat_dup_high(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_dup_high_mask0(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -589,7 +589,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mask0(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_dup_high_mask0(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -601,7 +601,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mask0(<16 x float> %vec, <1
}
define <16 x float> @test_masked_16xfloat_dup_high_mask1(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -615,7 +615,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mask1(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_dup_high_mask1(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -627,7 +627,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mask1(<16 x float> %vec, <1
}
define <16 x float> @test_masked_16xfloat_dup_high_mask2(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -641,7 +641,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mask2(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_dup_high_mask2(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -653,7 +653,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mask2(<16 x float> %vec, <1
}
define <16 x float> @test_masked_16xfloat_dup_high_mask3(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -667,7 +667,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mask3(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_dup_high_mask3(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -679,7 +679,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mask3(<16 x float> %vec, <1
}
define <16 x float> @test_masked_16xfloat_dup_high_mask4(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -693,7 +693,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mask4(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_dup_high_mask4(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -705,7 +705,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mask4(<16 x float> %vec, <1
}
define <16 x float> @test_16xfloat_dup_high_mem(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_dup_high_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -714,7 +714,7 @@ define <16 x float> @test_16xfloat_dup_high_mem(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_dup_high_mem_mask0(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -728,7 +728,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mem_mask0(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask0(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -741,7 +741,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask0(<16 x float>* %vp
}
define <16 x float> @test_masked_16xfloat_dup_high_mem_mask1(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -755,7 +755,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mem_mask1(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask1(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -768,7 +768,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask1(<16 x float>* %vp
}
define <16 x float> @test_masked_16xfloat_dup_high_mem_mask2(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -782,7 +782,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mem_mask2(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask2(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -795,7 +795,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask2(<16 x float>* %vp
}
define <16 x float> @test_masked_16xfloat_dup_high_mem_mask3(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -809,7 +809,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mem_mask3(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask3(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -822,7 +822,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask3(<16 x float>* %vp
}
define <16 x float> @test_masked_16xfloat_dup_high_mem_mask4(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -836,7 +836,7 @@ define <16 x float> @test_masked_16xfloat_dup_high_mem_mask4(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask4(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
diff --git a/test/CodeGen/X86/avx512-shuffles/duplicate-low.ll b/test/CodeGen/X86/avx512-shuffles/duplicate-low.ll
index 6690d3c509d..b32cb60c983 100644
--- a/test/CodeGen/X86/avx512-shuffles/duplicate-low.ll
+++ b/test/CodeGen/X86/avx512-shuffles/duplicate-low.ll
@@ -3,7 +3,7 @@
define <2 x double> @test_2xdouble_dup_low(<2 x double> %vec) {
; CHECK-LABEL: test_2xdouble_dup_low:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: retq
%res = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
@@ -11,7 +11,7 @@ define <2 x double> @test_2xdouble_dup_low(<2 x double> %vec) {
}
define <2 x double> @test_masked_2xdouble_dup_low_mask0(<2 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm1 {%k1} = xmm0[0,0]
@@ -25,7 +25,7 @@ define <2 x double> @test_masked_2xdouble_dup_low_mask0(<2 x double> %vec, <2 x
define <2 x double> @test_masked_z_2xdouble_dup_low_mask0(<2 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
@@ -37,7 +37,7 @@ define <2 x double> @test_masked_z_2xdouble_dup_low_mask0(<2 x double> %vec, <2
}
define <2 x double> @test_masked_2xdouble_dup_low_mask1(<2 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm1 {%k1} = xmm0[0,0]
@@ -51,7 +51,7 @@ define <2 x double> @test_masked_2xdouble_dup_low_mask1(<2 x double> %vec, <2 x
define <2 x double> @test_masked_z_2xdouble_dup_low_mask1(<2 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
@@ -63,7 +63,7 @@ define <2 x double> @test_masked_z_2xdouble_dup_low_mask1(<2 x double> %vec, <2
}
define <2 x double> @test_2xdouble_dup_low_mem(<2 x double>* %vp) {
; CHECK-LABEL: test_2xdouble_dup_low_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; CHECK-NEXT: retq
%vec = load <2 x double>, <2 x double>* %vp
@@ -72,7 +72,7 @@ define <2 x double> @test_2xdouble_dup_low_mem(<2 x double>* %vp) {
}
define <2 x double> @test_masked_2xdouble_dup_low_mem_mask0(<2 x double>* %vp, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
@@ -86,7 +86,7 @@ define <2 x double> @test_masked_2xdouble_dup_low_mem_mask0(<2 x double>* %vp, <
define <2 x double> @test_masked_z_2xdouble_dup_low_mem_mask0(<2 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0]
@@ -99,7 +99,7 @@ define <2 x double> @test_masked_z_2xdouble_dup_low_mem_mask0(<2 x double>* %vp,
}
define <2 x double> @test_masked_2xdouble_dup_low_mem_mask1(<2 x double>* %vp, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
@@ -113,7 +113,7 @@ define <2 x double> @test_masked_2xdouble_dup_low_mem_mask1(<2 x double>* %vp, <
define <2 x double> @test_masked_z_2xdouble_dup_low_mem_mask1(<2 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0]
@@ -126,7 +126,7 @@ define <2 x double> @test_masked_z_2xdouble_dup_low_mem_mask1(<2 x double>* %vp,
}
define <4 x double> @test_4xdouble_dup_low(<4 x double> %vec) {
; CHECK-LABEL: test_4xdouble_dup_low:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -134,7 +134,7 @@ define <4 x double> @test_4xdouble_dup_low(<4 x double> %vec) {
}
define <4 x double> @test_masked_4xdouble_dup_low_mask0(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2]
@@ -148,7 +148,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mask0(<4 x double> %vec, <4 x
define <4 x double> @test_masked_z_4xdouble_dup_low_mask0(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2]
@@ -160,7 +160,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mask0(<4 x double> %vec, <4
}
define <4 x double> @test_masked_4xdouble_dup_low_mask1(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2]
@@ -174,7 +174,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mask1(<4 x double> %vec, <4 x
define <4 x double> @test_masked_z_4xdouble_dup_low_mask1(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2]
@@ -186,7 +186,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mask1(<4 x double> %vec, <4
}
define <4 x double> @test_masked_4xdouble_dup_low_mask2(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2]
@@ -200,7 +200,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mask2(<4 x double> %vec, <4 x
define <4 x double> @test_masked_z_4xdouble_dup_low_mask2(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2]
@@ -212,7 +212,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mask2(<4 x double> %vec, <4
}
define <4 x double> @test_masked_4xdouble_dup_low_mask3(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2]
@@ -226,7 +226,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mask3(<4 x double> %vec, <4 x
define <4 x double> @test_masked_z_4xdouble_dup_low_mask3(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2]
@@ -238,7 +238,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mask3(<4 x double> %vec, <4
}
define <4 x double> @test_masked_4xdouble_dup_low_mask4(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2]
@@ -252,7 +252,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mask4(<4 x double> %vec, <4 x
define <4 x double> @test_masked_z_4xdouble_dup_low_mask4(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2]
@@ -264,7 +264,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mask4(<4 x double> %vec, <4
}
define <4 x double> @test_4xdouble_dup_low_mem(<4 x double>* %vp) {
; CHECK-LABEL: test_4xdouble_dup_low_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %vp
@@ -273,7 +273,7 @@ define <4 x double> @test_4xdouble_dup_low_mem(<4 x double>* %vp) {
}
define <4 x double> @test_masked_4xdouble_dup_low_mem_mask0(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2]
@@ -287,7 +287,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mem_mask0(<4 x double>* %vp, <
define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask0(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2]
@@ -300,7 +300,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask0(<4 x double>* %vp,
}
define <4 x double> @test_masked_4xdouble_dup_low_mem_mask1(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2]
@@ -314,7 +314,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mem_mask1(<4 x double>* %vp, <
define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask1(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2]
@@ -327,7 +327,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask1(<4 x double>* %vp,
}
define <4 x double> @test_masked_4xdouble_dup_low_mem_mask2(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2]
@@ -341,7 +341,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mem_mask2(<4 x double>* %vp, <
define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask2(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2]
@@ -354,7 +354,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask2(<4 x double>* %vp,
}
define <4 x double> @test_masked_4xdouble_dup_low_mem_mask3(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2]
@@ -368,7 +368,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mem_mask3(<4 x double>* %vp, <
define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask3(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2]
@@ -381,7 +381,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask3(<4 x double>* %vp,
}
define <4 x double> @test_masked_4xdouble_dup_low_mem_mask4(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2]
@@ -395,7 +395,7 @@ define <4 x double> @test_masked_4xdouble_dup_low_mem_mask4(<4 x double>* %vp, <
define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask4(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2]
@@ -408,7 +408,7 @@ define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask4(<4 x double>* %vp,
}
define <8 x double> @test_8xdouble_dup_low(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_dup_low:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -416,7 +416,7 @@ define <8 x double> @test_8xdouble_dup_low(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_dup_low_mask0(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
@@ -430,7 +430,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mask0(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_dup_low_mask0(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
@@ -442,7 +442,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mask0(<8 x double> %vec, <8
}
define <8 x double> @test_masked_8xdouble_dup_low_mask1(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
@@ -456,7 +456,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mask1(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_dup_low_mask1(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
@@ -468,7 +468,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mask1(<8 x double> %vec, <8
}
define <8 x double> @test_masked_8xdouble_dup_low_mask2(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
@@ -482,7 +482,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mask2(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_dup_low_mask2(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
@@ -494,7 +494,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mask2(<8 x double> %vec, <8
}
define <8 x double> @test_masked_8xdouble_dup_low_mask3(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
@@ -508,7 +508,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mask3(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_dup_low_mask3(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
@@ -520,7 +520,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mask3(<8 x double> %vec, <8
}
define <8 x double> @test_masked_8xdouble_dup_low_mask4(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
@@ -534,7 +534,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mask4(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_dup_low_mask4(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
@@ -546,7 +546,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mask4(<8 x double> %vec, <8
}
define <8 x double> @test_8xdouble_dup_low_mem(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_dup_low_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6]
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
@@ -555,7 +555,7 @@ define <8 x double> @test_8xdouble_dup_low_mem(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_dup_low_mem_mask0(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -569,7 +569,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mem_mask0(<8 x double>* %vp, <
define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask0(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -582,7 +582,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask0(<8 x double>* %vp,
}
define <8 x double> @test_masked_8xdouble_dup_low_mem_mask1(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -596,7 +596,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mem_mask1(<8 x double>* %vp, <
define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask1(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -609,7 +609,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask1(<8 x double>* %vp,
}
define <8 x double> @test_masked_8xdouble_dup_low_mem_mask2(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -623,7 +623,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mem_mask2(<8 x double>* %vp, <
define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask2(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -636,7 +636,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask2(<8 x double>* %vp,
}
define <8 x double> @test_masked_8xdouble_dup_low_mem_mask3(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -650,7 +650,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mem_mask3(<8 x double>* %vp, <
define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask3(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -663,7 +663,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask3(<8 x double>* %vp,
}
define <8 x double> @test_masked_8xdouble_dup_low_mem_mask4(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -677,7 +677,7 @@ define <8 x double> @test_masked_8xdouble_dup_low_mem_mask4(<8 x double>* %vp, <
define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask4(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -690,7 +690,7 @@ define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask4(<8 x double>* %vp,
}
define <4 x float> @test_4xfloat_dup_low(<4 x float> %vec) {
; CHECK-LABEL: test_4xfloat_dup_low:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -698,7 +698,7 @@ define <4 x float> @test_4xfloat_dup_low(<4 x float> %vec) {
}
define <4 x float> @test_masked_4xfloat_dup_low_mask0(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2]
@@ -712,7 +712,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mask0(<4 x float> %vec, <4 x flo
define <4 x float> @test_masked_z_4xfloat_dup_low_mask0(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2]
@@ -724,7 +724,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mask0(<4 x float> %vec, <4 x f
}
define <4 x float> @test_masked_4xfloat_dup_low_mask1(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2]
@@ -738,7 +738,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mask1(<4 x float> %vec, <4 x flo
define <4 x float> @test_masked_z_4xfloat_dup_low_mask1(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2]
@@ -750,7 +750,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mask1(<4 x float> %vec, <4 x f
}
define <4 x float> @test_masked_4xfloat_dup_low_mask2(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2]
@@ -764,7 +764,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mask2(<4 x float> %vec, <4 x flo
define <4 x float> @test_masked_z_4xfloat_dup_low_mask2(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2]
@@ -776,7 +776,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mask2(<4 x float> %vec, <4 x f
}
define <4 x float> @test_masked_4xfloat_dup_low_mask3(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2]
@@ -790,7 +790,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mask3(<4 x float> %vec, <4 x flo
define <4 x float> @test_masked_z_4xfloat_dup_low_mask3(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2]
@@ -802,7 +802,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mask3(<4 x float> %vec, <4 x f
}
define <4 x float> @test_masked_4xfloat_dup_low_mask4(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2]
@@ -816,7 +816,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mask4(<4 x float> %vec, <4 x flo
define <4 x float> @test_masked_z_4xfloat_dup_low_mask4(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2]
@@ -828,7 +828,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mask4(<4 x float> %vec, <4 x f
}
define <4 x float> @test_4xfloat_dup_low_mem(<4 x float>* %vp) {
; CHECK-LABEL: test_4xfloat_dup_low_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 = mem[0,0,2,2]
; CHECK-NEXT: retq
%vec = load <4 x float>, <4 x float>* %vp
@@ -837,7 +837,7 @@ define <4 x float> @test_4xfloat_dup_low_mem(<4 x float>* %vp) {
}
define <4 x float> @test_masked_4xfloat_dup_low_mem_mask0(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2]
@@ -851,7 +851,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mem_mask0(<4 x float>* %vp, <4 x
define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask0(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2]
@@ -864,7 +864,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask0(<4 x float>* %vp, <4
}
define <4 x float> @test_masked_4xfloat_dup_low_mem_mask1(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2]
@@ -878,7 +878,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mem_mask1(<4 x float>* %vp, <4 x
define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask1(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2]
@@ -891,7 +891,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask1(<4 x float>* %vp, <4
}
define <4 x float> @test_masked_4xfloat_dup_low_mem_mask2(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2]
@@ -905,7 +905,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mem_mask2(<4 x float>* %vp, <4 x
define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask2(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2]
@@ -918,7 +918,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask2(<4 x float>* %vp, <4
}
define <4 x float> @test_masked_4xfloat_dup_low_mem_mask3(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2]
@@ -932,7 +932,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mem_mask3(<4 x float>* %vp, <4 x
define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask3(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2]
@@ -945,7 +945,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask3(<4 x float>* %vp, <4
}
define <4 x float> @test_masked_4xfloat_dup_low_mem_mask4(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2]
@@ -959,7 +959,7 @@ define <4 x float> @test_masked_4xfloat_dup_low_mem_mask4(<4 x float>* %vp, <4 x
define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask4(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2]
@@ -972,7 +972,7 @@ define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask4(<4 x float>* %vp, <4
}
define <8 x float> @test_8xfloat_dup_low(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_dup_low:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -980,7 +980,7 @@ define <8 x float> @test_8xfloat_dup_low(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_dup_low_mask0(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6]
@@ -994,7 +994,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mask0(<8 x float> %vec, <8 x flo
define <8 x float> @test_masked_z_8xfloat_dup_low_mask0(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
@@ -1006,7 +1006,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mask0(<8 x float> %vec, <8 x f
}
define <8 x float> @test_masked_8xfloat_dup_low_mask1(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6]
@@ -1020,7 +1020,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mask1(<8 x float> %vec, <8 x flo
define <8 x float> @test_masked_z_8xfloat_dup_low_mask1(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
@@ -1032,7 +1032,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mask1(<8 x float> %vec, <8 x f
}
define <8 x float> @test_masked_8xfloat_dup_low_mask2(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6]
@@ -1046,7 +1046,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mask2(<8 x float> %vec, <8 x flo
define <8 x float> @test_masked_z_8xfloat_dup_low_mask2(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
@@ -1058,7 +1058,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mask2(<8 x float> %vec, <8 x f
}
define <8 x float> @test_masked_8xfloat_dup_low_mask3(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6]
@@ -1072,7 +1072,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mask3(<8 x float> %vec, <8 x flo
define <8 x float> @test_masked_z_8xfloat_dup_low_mask3(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
@@ -1084,7 +1084,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mask3(<8 x float> %vec, <8 x f
}
define <8 x float> @test_masked_8xfloat_dup_low_mask4(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6]
@@ -1098,7 +1098,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mask4(<8 x float> %vec, <8 x flo
define <8 x float> @test_masked_z_8xfloat_dup_low_mask4(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
@@ -1110,7 +1110,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mask4(<8 x float> %vec, <8 x f
}
define <8 x float> @test_8xfloat_dup_low_mem(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_dup_low_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 = mem[0,0,2,2,4,4,6,6]
; CHECK-NEXT: retq
%vec = load <8 x float>, <8 x float>* %vp
@@ -1119,7 +1119,7 @@ define <8 x float> @test_8xfloat_dup_low_mem(<8 x float>* %vp) {
}
define <8 x float> @test_masked_8xfloat_dup_low_mem_mask0(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -1133,7 +1133,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mem_mask0(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask0(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -1146,7 +1146,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask0(<8 x float>* %vp, <8
}
define <8 x float> @test_masked_8xfloat_dup_low_mem_mask1(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -1160,7 +1160,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mem_mask1(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask1(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -1173,7 +1173,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask1(<8 x float>* %vp, <8
}
define <8 x float> @test_masked_8xfloat_dup_low_mem_mask2(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -1187,7 +1187,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mem_mask2(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask2(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -1200,7 +1200,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask2(<8 x float>* %vp, <8
}
define <8 x float> @test_masked_8xfloat_dup_low_mem_mask3(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -1214,7 +1214,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mem_mask3(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask3(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -1227,7 +1227,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask3(<8 x float>* %vp, <8
}
define <8 x float> @test_masked_8xfloat_dup_low_mem_mask4(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6]
@@ -1241,7 +1241,7 @@ define <8 x float> @test_masked_8xfloat_dup_low_mem_mask4(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask4(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6]
@@ -1254,7 +1254,7 @@ define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask4(<8 x float>* %vp, <8
}
define <16 x float> @test_16xfloat_dup_low(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_dup_low:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
@@ -1262,7 +1262,7 @@ define <16 x float> @test_16xfloat_dup_low(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_dup_low_mask0(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1276,7 +1276,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mask0(<16 x float> %vec, <16 x
define <16 x float> @test_masked_z_16xfloat_dup_low_mask0(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1288,7 +1288,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mask0(<16 x float> %vec, <16
}
define <16 x float> @test_masked_16xfloat_dup_low_mask1(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1302,7 +1302,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mask1(<16 x float> %vec, <16 x
define <16 x float> @test_masked_z_16xfloat_dup_low_mask1(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1314,7 +1314,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mask1(<16 x float> %vec, <16
}
define <16 x float> @test_masked_16xfloat_dup_low_mask2(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1328,7 +1328,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mask2(<16 x float> %vec, <16 x
define <16 x float> @test_masked_z_16xfloat_dup_low_mask2(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1340,7 +1340,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mask2(<16 x float> %vec, <16
}
define <16 x float> @test_masked_16xfloat_dup_low_mask3(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1354,7 +1354,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mask3(<16 x float> %vec, <16 x
define <16 x float> @test_masked_z_16xfloat_dup_low_mask3(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1366,7 +1366,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mask3(<16 x float> %vec, <16
}
define <16 x float> @test_masked_16xfloat_dup_low_mask4(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1380,7 +1380,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mask4(<16 x float> %vec, <16 x
define <16 x float> @test_masked_z_16xfloat_dup_low_mask4(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1392,7 +1392,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mask4(<16 x float> %vec, <16
}
define <16 x float> @test_16xfloat_dup_low_mem(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_dup_low_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -1401,7 +1401,7 @@ define <16 x float> @test_16xfloat_dup_low_mem(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_dup_low_mem_mask0(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1415,7 +1415,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mem_mask0(<16 x float>* %vp, <
define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask0(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1428,7 +1428,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask0(<16 x float>* %vp,
}
define <16 x float> @test_masked_16xfloat_dup_low_mem_mask1(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1442,7 +1442,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mem_mask1(<16 x float>* %vp, <
define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask1(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1455,7 +1455,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask1(<16 x float>* %vp,
}
define <16 x float> @test_masked_16xfloat_dup_low_mem_mask2(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1469,7 +1469,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mem_mask2(<16 x float>* %vp, <
define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask2(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1482,7 +1482,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask2(<16 x float>* %vp,
}
define <16 x float> @test_masked_16xfloat_dup_low_mem_mask3(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1496,7 +1496,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mem_mask3(<16 x float>* %vp, <
define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask3(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1509,7 +1509,7 @@ define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask3(<16 x float>* %vp,
}
define <16 x float> @test_masked_16xfloat_dup_low_mem_mask4(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -1523,7 +1523,7 @@ define <16 x float> @test_masked_16xfloat_dup_low_mem_mask4(<16 x float>* %vp, <
define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask4(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
diff --git a/test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll b/test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll
index a8e3df75091..24b387d96df 100644
--- a/test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll
+++ b/test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll
@@ -5,7 +5,7 @@
define <4 x float> @test_4xfloat_perm_mask0(<4 x float> %vec) {
; CHECK-LABEL: test_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,1]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 1>
@@ -13,7 +13,7 @@ define <4 x float> @test_4xfloat_perm_mask0(<4 x float> %vec) {
}
define <4 x float> @test_masked_4xfloat_perm_mask0(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[2,1,3,1]
@@ -27,7 +27,7 @@ define <4 x float> @test_masked_4xfloat_perm_mask0(<4 x float> %vec, <4 x float>
define <4 x float> @test_masked_z_4xfloat_perm_mask0(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1,3,1]
@@ -39,7 +39,7 @@ define <4 x float> @test_masked_z_4xfloat_perm_mask0(<4 x float> %vec, <4 x floa
}
define <4 x float> @test_masked_4xfloat_perm_mask1(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[1,2,3,2]
@@ -53,7 +53,7 @@ define <4 x float> @test_masked_4xfloat_perm_mask1(<4 x float> %vec, <4 x float>
define <4 x float> @test_masked_z_4xfloat_perm_mask1(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3,2]
@@ -65,7 +65,7 @@ define <4 x float> @test_masked_z_4xfloat_perm_mask1(<4 x float> %vec, <4 x floa
}
define <4 x float> @test_masked_4xfloat_perm_mask2(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[1,3,2,1]
@@ -79,7 +79,7 @@ define <4 x float> @test_masked_4xfloat_perm_mask2(<4 x float> %vec, <4 x float>
define <4 x float> @test_masked_z_4xfloat_perm_mask2(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3,2,1]
@@ -91,7 +91,7 @@ define <4 x float> @test_masked_z_4xfloat_perm_mask2(<4 x float> %vec, <4 x floa
}
define <4 x float> @test_4xfloat_perm_mask3(<4 x float> %vec) {
; CHECK-LABEL: test_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,2,3,2]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 2>
@@ -99,7 +99,7 @@ define <4 x float> @test_4xfloat_perm_mask3(<4 x float> %vec) {
}
define <4 x float> @test_masked_4xfloat_perm_mask3(<4 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[1,2,3,2]
@@ -113,7 +113,7 @@ define <4 x float> @test_masked_4xfloat_perm_mask3(<4 x float> %vec, <4 x float>
define <4 x float> @test_masked_z_4xfloat_perm_mask3(<4 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3,2]
@@ -125,7 +125,7 @@ define <4 x float> @test_masked_z_4xfloat_perm_mask3(<4 x float> %vec, <4 x floa
}
define <4 x float> @test_4xfloat_perm_mem_mask0(<4 x float>* %vp) {
; CHECK-LABEL: test_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,3,1,3]
; CHECK-NEXT: retq
%vec = load <4 x float>, <4 x float>* %vp
@@ -134,7 +134,7 @@ define <4 x float> @test_4xfloat_perm_mem_mask0(<4 x float>* %vp) {
}
define <4 x float> @test_masked_4xfloat_perm_mem_mask0(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = mem[3,3,1,3]
@@ -148,7 +148,7 @@ define <4 x float> @test_masked_4xfloat_perm_mem_mask0(<4 x float>* %vp, <4 x fl
define <4 x float> @test_masked_z_4xfloat_perm_mem_mask0(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = mem[3,3,1,3]
@@ -162,7 +162,7 @@ define <4 x float> @test_masked_z_4xfloat_perm_mem_mask0(<4 x float>* %vp, <4 x
define <4 x float> @test_masked_4xfloat_perm_mem_mask1(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = mem[1,3,2,0]
@@ -176,7 +176,7 @@ define <4 x float> @test_masked_4xfloat_perm_mem_mask1(<4 x float>* %vp, <4 x fl
define <4 x float> @test_masked_z_4xfloat_perm_mem_mask1(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = mem[1,3,2,0]
@@ -190,7 +190,7 @@ define <4 x float> @test_masked_z_4xfloat_perm_mem_mask1(<4 x float>* %vp, <4 x
define <4 x float> @test_masked_4xfloat_perm_mem_mask2(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = mem[2,1,3,2]
@@ -204,7 +204,7 @@ define <4 x float> @test_masked_4xfloat_perm_mem_mask2(<4 x float>* %vp, <4 x fl
define <4 x float> @test_masked_z_4xfloat_perm_mem_mask2(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = mem[2,1,3,2]
@@ -218,7 +218,7 @@ define <4 x float> @test_masked_z_4xfloat_perm_mem_mask2(<4 x float>* %vp, <4 x
define <4 x float> @test_4xfloat_perm_mem_mask3(<4 x float>* %vp) {
; CHECK-LABEL: test_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,3,0]
; CHECK-NEXT: retq
%vec = load <4 x float>, <4 x float>* %vp
@@ -227,7 +227,7 @@ define <4 x float> @test_4xfloat_perm_mem_mask3(<4 x float>* %vp) {
}
define <4 x float> @test_masked_4xfloat_perm_mem_mask3(<4 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = mem[0,1,3,0]
@@ -241,7 +241,7 @@ define <4 x float> @test_masked_4xfloat_perm_mem_mask3(<4 x float>* %vp, <4 x fl
define <4 x float> @test_masked_z_4xfloat_perm_mem_mask3(<4 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = mem[0,1,3,0]
@@ -255,7 +255,7 @@ define <4 x float> @test_masked_z_4xfloat_perm_mem_mask3(<4 x float>* %vp, <4 x
define <8 x float> @test_8xfloat_perm_mask0(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,6]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 6, i32 6, i32 6>
@@ -263,7 +263,7 @@ define <8 x float> @test_8xfloat_perm_mask0(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,4,6,6,6]
@@ -277,7 +277,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,4,6,6,6]
@@ -289,7 +289,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec, <8 x floa
}
define <8 x float> @test_masked_8xfloat_perm_imm_mask1(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_imm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[3,2,3,2,7,6,7,6]
@@ -303,7 +303,7 @@ define <8 x float> @test_masked_8xfloat_perm_imm_mask1(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_imm_mask1(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,3,2,7,6,7,6]
@@ -315,7 +315,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_imm_mask1(<8 x float> %vec, <8 x
}
define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[2,1,2,1,6,5,4,4]
@@ -329,7 +329,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,2,1,6,5,4,4]
@@ -341,7 +341,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec, <8 x floa
}
define <8 x float> @test_8xfloat_perm_imm_mask3(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,2,1,0,6,6,5,4]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 2, i32 2, i32 1, i32 0, i32 6, i32 6, i32 5, i32 4>
@@ -349,7 +349,7 @@ define <8 x float> @test_8xfloat_perm_imm_mask3(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_perm_imm_mask3(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[2,2,1,0,6,6,5,4]
@@ -363,7 +363,7 @@ define <8 x float> @test_masked_8xfloat_perm_imm_mask3(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_imm_mask3(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,2,1,0,6,6,5,4]
@@ -375,7 +375,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_imm_mask3(<8 x float> %vec, <8 x
}
define <8 x float> @test_masked_8xfloat_perm_mask4(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,3,7,7,6,5]
@@ -389,7 +389,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask4(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask4(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,3,7,7,6,5]
@@ -401,7 +401,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask4(<8 x float> %vec, <8 x floa
}
define <8 x float> @test_masked_8xfloat_perm_imm_mask5(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_imm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,3,6,5,7,7]
@@ -415,7 +415,7 @@ define <8 x float> @test_masked_8xfloat_perm_imm_mask5(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_imm_mask5(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,3,6,5,7,7]
@@ -427,7 +427,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_imm_mask5(<8 x float> %vec, <8 x
}
define <8 x float> @test_8xfloat_perm_mask6(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,3,2,5,6,7,7]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 3, i32 2, i32 5, i32 6, i32 7, i32 7>
@@ -435,7 +435,7 @@ define <8 x float> @test_8xfloat_perm_mask6(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_perm_mask6(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[3,2,3,2,5,6,7,7]
@@ -449,7 +449,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask6(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask6(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,3,2,5,6,7,7]
@@ -461,7 +461,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask6(<8 x float> %vec, <8 x floa
}
define <8 x float> @test_masked_8xfloat_perm_imm_mask7(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_imm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[3,0,2,1,7,4,6,5]
@@ -475,7 +475,7 @@ define <8 x float> @test_masked_8xfloat_perm_imm_mask7(<8 x float> %vec, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_imm_mask7(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,2,1,7,4,6,5]
@@ -487,7 +487,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_imm_mask7(<8 x float> %vec, <8 x
}
define <8 x float> @test_8xfloat_perm_mem_mask0(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,0,0,2,4,6,7,6]
; CHECK-NEXT: retq
@@ -497,7 +497,7 @@ define <8 x float> @test_8xfloat_perm_mem_mask0(<8 x float>* %vp) {
}
define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -512,7 +512,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -527,7 +527,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask1(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_imm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = mem[2,0,2,2,6,4,6,6]
@@ -541,7 +541,7 @@ define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask1(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask1(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = mem[2,0,2,2,6,4,6,6]
@@ -555,7 +555,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask1(<8 x float>* %vp, <
define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -570,7 +570,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -585,7 +585,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x
define <8 x float> @test_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = mem[0,0,3,3,4,4,7,7]
; CHECK-NEXT: retq
%vec = load <8 x float>, <8 x float>* %vp
@@ -594,7 +594,7 @@ define <8 x float> @test_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp) {
}
define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = mem[0,0,3,3,4,4,7,7]
@@ -608,7 +608,7 @@ define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = mem[0,0,3,3,4,4,7,7]
@@ -622,7 +622,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp, <
define <8 x float> @test_masked_8xfloat_perm_mem_mask4(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -637,7 +637,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask4(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask4(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -652,7 +652,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask4(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask5(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_imm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = mem[2,0,0,3,6,4,4,7]
@@ -666,7 +666,7 @@ define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask5(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask5(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = mem[2,0,0,3,6,4,4,7]
@@ -680,7 +680,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask5(<8 x float>* %vp, <
define <8 x float> @test_8xfloat_perm_mem_mask6(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,7,4,6,7]
; CHECK-NEXT: retq
@@ -690,7 +690,7 @@ define <8 x float> @test_8xfloat_perm_mem_mask6(<8 x float>* %vp) {
}
define <8 x float> @test_masked_8xfloat_perm_mem_mask6(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -705,7 +705,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask6(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask6(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -720,7 +720,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask6(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask7(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_imm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = mem[0,2,3,1,4,6,7,5]
@@ -734,7 +734,7 @@ define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask7(<8 x float>* %vp, <8
define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask7(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = mem[0,2,3,1,4,6,7,5]
@@ -748,7 +748,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask7(<8 x float>* %vp, <
define <16 x float> @test_16xfloat_perm_mask0(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,1,3,1,6,4,6,5,8,9,8,11,13,13,13,15]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 1, i32 6, i32 4, i32 6, i32 5, i32 8, i32 9, i32 8, i32 11, i32 13, i32 13, i32 13, i32 15>
@@ -756,7 +756,7 @@ define <16 x float> @test_16xfloat_perm_mask0(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,1,6,4,6,5,8,9,8,11,13,13,13,15]
@@ -770,7 +770,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,1,6,4,6,5,8,9,8,11,13,13,13,15]
@@ -782,7 +782,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec, <16 x
}
define <16 x float> @test_masked_16xfloat_perm_imm_mask1(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_imm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[2,2,2,1,6,6,6,5,10,10,10,9,14,14,14,13]
@@ -796,7 +796,7 @@ define <16 x float> @test_masked_16xfloat_perm_imm_mask1(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_perm_imm_mask1(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,2,2,1,6,6,6,5,10,10,10,9,14,14,14,13]
@@ -808,7 +808,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_imm_mask1(<16 x float> %vec, <1
}
define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[1,2,0,0,5,4,6,5,11,10,9,9,14,13,14,12]
@@ -822,7 +822,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,0,0,5,4,6,5,11,10,9,9,14,13,14,12]
@@ -834,7 +834,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec, <16 x
}
define <16 x float> @test_16xfloat_perm_imm_mask3(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,1,0,2,5,5,4,6,9,9,8,10,13,13,12,14]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 0, i32 2, i32 5, i32 5, i32 4, i32 6, i32 9, i32 9, i32 8, i32 10, i32 13, i32 13, i32 12, i32 14>
@@ -842,7 +842,7 @@ define <16 x float> @test_16xfloat_perm_imm_mask3(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_perm_imm_mask3(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[1,1,0,2,5,5,4,6,9,9,8,10,13,13,12,14]
@@ -856,7 +856,7 @@ define <16 x float> @test_masked_16xfloat_perm_imm_mask3(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_perm_imm_mask3(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,0,2,5,5,4,6,9,9,8,10,13,13,12,14]
@@ -868,7 +868,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_imm_mask3(<16 x float> %vec, <1
}
define <16 x float> @test_masked_16xfloat_perm_mask4(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[1,2,3,3,5,5,5,7,11,11,8,11,14,12,14,15]
@@ -882,7 +882,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask4(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask4(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,3,5,5,5,7,11,11,8,11,14,12,14,15]
@@ -894,7 +894,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask4(<16 x float> %vec, <16 x
}
define <16 x float> @test_masked_16xfloat_perm_imm_mask5(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_imm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[1,2,1,0,5,6,5,4,9,10,9,8,13,14,13,12]
@@ -908,7 +908,7 @@ define <16 x float> @test_masked_16xfloat_perm_imm_mask5(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_perm_imm_mask5(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,1,0,5,6,5,4,9,10,9,8,13,14,13,12]
@@ -920,7 +920,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_imm_mask5(<16 x float> %vec, <1
}
define <16 x float> @test_16xfloat_perm_mask6(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,0,3,2,4,4,6,7,9,11,8,11,13,12,13,13]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 2, i32 0, i32 3, i32 2, i32 4, i32 4, i32 6, i32 7, i32 9, i32 11, i32 8, i32 11, i32 13, i32 12, i32 13, i32 13>
@@ -928,7 +928,7 @@ define <16 x float> @test_16xfloat_perm_mask6(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_perm_mask6(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[2,0,3,2,4,4,6,7,9,11,8,11,13,12,13,13]
@@ -942,7 +942,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask6(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask6(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,3,2,4,4,6,7,9,11,8,11,13,12,13,13]
@@ -954,7 +954,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask6(<16 x float> %vec, <16 x
}
define <16 x float> @test_masked_16xfloat_perm_imm_mask7(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_imm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[3,3,0,2,7,7,4,6,11,11,8,10,15,15,12,14]
@@ -968,7 +968,7 @@ define <16 x float> @test_masked_16xfloat_perm_imm_mask7(<16 x float> %vec, <16
define <16 x float> @test_masked_z_16xfloat_perm_imm_mask7(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,0,2,7,7,4,6,11,11,8,10,15,15,12,14]
@@ -980,7 +980,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_imm_mask7(<16 x float> %vec, <1
}
define <16 x float> @test_16xfloat_perm_mem_mask0(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,3,3,0,6,6,6,6,11,10,9,10,12,14,12,12]
; CHECK-NEXT: retq
@@ -990,7 +990,7 @@ define <16 x float> @test_16xfloat_perm_mem_mask0(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -1005,7 +1005,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -1020,7 +1020,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp, <1
define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask1(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_imm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = mem[1,3,2,1,5,7,6,5,9,11,10,9,13,15,14,13]
@@ -1034,7 +1034,7 @@ define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask1(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask1(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[1,3,2,1,5,7,6,5,9,11,10,9,13,15,14,13]
@@ -1048,7 +1048,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask1(<16 x float>* %vp
define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -1063,7 +1063,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -1078,7 +1078,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp, <1
define <16 x float> @test_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = mem[1,0,3,1,5,4,7,5,9,8,11,9,13,12,15,13]
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -1087,7 +1087,7 @@ define <16 x float> @test_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = mem[1,0,3,1,5,4,7,5,9,8,11,9,13,12,15,13]
@@ -1101,7 +1101,7 @@ define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[1,0,3,1,5,4,7,5,9,8,11,9,13,12,15,13]
@@ -1115,7 +1115,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp
define <16 x float> @test_masked_16xfloat_perm_mem_mask4(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -1130,7 +1130,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask4(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask4(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -1145,7 +1145,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask4(<16 x float>* %vp, <1
define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask5(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_imm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = mem[2,0,0,1,6,4,4,5,10,8,8,9,14,12,12,13]
@@ -1159,7 +1159,7 @@ define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask5(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask5(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[2,0,0,1,6,4,4,5,10,8,8,9,14,12,12,13]
@@ -1173,7 +1173,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask5(<16 x float>* %vp
define <16 x float> @test_16xfloat_perm_mem_mask6(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,1,1,2,6,5,5,7,9,11,9,9,12,15,14,15]
; CHECK-NEXT: retq
@@ -1183,7 +1183,7 @@ define <16 x float> @test_16xfloat_perm_mem_mask6(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_perm_mem_mask6(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -1198,7 +1198,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask6(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask6(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -1213,7 +1213,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask6(<16 x float>* %vp, <1
define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask7(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_imm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = mem[1,2,0,1,5,6,4,5,9,10,8,9,13,14,12,13]
@@ -1227,7 +1227,7 @@ define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask7(<16 x float>* %vp,
define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask7(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[1,2,0,1,5,6,4,5,9,10,8,9,13,14,12,13]
@@ -1241,7 +1241,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask7(<16 x float>* %vp
define <2 x double> @test_2xdouble_perm_mask0(<2 x double> %vec) {
; CHECK-LABEL: test_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: retq
%res = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -1249,7 +1249,7 @@ define <2 x double> @test_2xdouble_perm_mask0(<2 x double> %vec) {
}
define <2 x double> @test_masked_2xdouble_perm_mask0(<2 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 {%k1} = xmm0[1,0]
@@ -1263,7 +1263,7 @@ define <2 x double> @test_masked_2xdouble_perm_mask0(<2 x double> %vec, <2 x dou
define <2 x double> @test_masked_z_2xdouble_perm_mask0(<2 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0]
@@ -1275,7 +1275,7 @@ define <2 x double> @test_masked_z_2xdouble_perm_mask0(<2 x double> %vec, <2 x d
}
define <2 x double> @test_masked_2xdouble_perm_mask1(<2 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 {%k1} = xmm0[1,0]
@@ -1289,7 +1289,7 @@ define <2 x double> @test_masked_2xdouble_perm_mask1(<2 x double> %vec, <2 x dou
define <2 x double> @test_masked_z_2xdouble_perm_mask1(<2 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0]
@@ -1301,7 +1301,7 @@ define <2 x double> @test_masked_z_2xdouble_perm_mask1(<2 x double> %vec, <2 x d
}
define <2 x double> @test_2xdouble_perm_mem_mask0(<2 x double>* %vp) {
; CHECK-LABEL: test_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = mem[1,0]
; CHECK-NEXT: retq
%vec = load <2 x double>, <2 x double>* %vp
@@ -1310,7 +1310,7 @@ define <2 x double> @test_2xdouble_perm_mem_mask0(<2 x double>* %vp) {
}
define <2 x double> @test_masked_2xdouble_perm_mem_mask0(<2 x double>* %vp, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} = mem[1,0]
@@ -1324,7 +1324,7 @@ define <2 x double> @test_masked_2xdouble_perm_mem_mask0(<2 x double>* %vp, <2 x
define <2 x double> @test_masked_z_2xdouble_perm_mem_mask0(<2 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} {z} = mem[1,0]
@@ -1338,7 +1338,7 @@ define <2 x double> @test_masked_z_2xdouble_perm_mem_mask0(<2 x double>* %vp, <2
define <2 x double> @test_masked_2xdouble_perm_mem_mask1(<2 x double>* %vp, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_2xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} = mem[1,0]
@@ -1352,7 +1352,7 @@ define <2 x double> @test_masked_2xdouble_perm_mem_mask1(<2 x double>* %vp, <2 x
define <2 x double> @test_masked_z_2xdouble_perm_mem_mask1(<2 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_2xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} {z} = mem[1,0]
@@ -1366,7 +1366,7 @@ define <2 x double> @test_masked_z_2xdouble_perm_mem_mask1(<2 x double>* %vp, <2
define <4 x double> @test_4xdouble_perm_mask0(<4 x double> %vec) {
; CHECK-LABEL: test_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
@@ -1374,7 +1374,7 @@ define <4 x double> @test_4xdouble_perm_mask0(<4 x double> %vec) {
}
define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm1 {%k1} = ymm0[1,0,2,3]
@@ -1388,7 +1388,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask0(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,2,3]
@@ -1400,7 +1400,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask0(<4 x double> %vec, <4 x d
}
define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm1 {%k1} = ymm0[1,1,2,2]
@@ -1414,7 +1414,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask1(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,2,2]
@@ -1426,7 +1426,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask1(<4 x double> %vec, <4 x d
}
define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm1 {%k1} = ymm0[0,1,3,3]
@@ -1440,7 +1440,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask2(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,3,3]
@@ -1452,7 +1452,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask2(<4 x double> %vec, <4 x d
}
define <4 x double> @test_4xdouble_perm_mask3(<4 x double> %vec) {
; CHECK-LABEL: test_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 2>
@@ -1460,7 +1460,7 @@ define <4 x double> @test_4xdouble_perm_mask3(<4 x double> %vec) {
}
define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm1 {%k1} = ymm0[1,1,2,2]
@@ -1474,7 +1474,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask3(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,2,2]
@@ -1486,7 +1486,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask3(<4 x double> %vec, <4 x d
}
define <4 x double> @test_4xdouble_perm_mem_mask0(<4 x double>* %vp) {
; CHECK-LABEL: test_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = mem[0,1,2,2]
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %vp
@@ -1495,7 +1495,7 @@ define <4 x double> @test_4xdouble_perm_mem_mask0(<4 x double>* %vp) {
}
define <4 x double> @test_masked_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} = mem[0,1,2,2]
@@ -1509,7 +1509,7 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,2]
@@ -1523,7 +1523,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4
define <4 x double> @test_masked_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} = mem[0,1,3,3]
@@ -1537,7 +1537,7 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = mem[0,1,3,3]
@@ -1551,7 +1551,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4
define <4 x double> @test_masked_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} = mem[1,0,3,3]
@@ -1565,7 +1565,7 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = mem[1,0,3,3]
@@ -1579,7 +1579,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4
define <4 x double> @test_4xdouble_perm_mem_mask3(<4 x double>* %vp) {
; CHECK-LABEL: test_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = mem[1,0,3,2]
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %vp
@@ -1588,7 +1588,7 @@ define <4 x double> @test_4xdouble_perm_mem_mask3(<4 x double>* %vp) {
}
define <4 x double> @test_masked_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} = mem[1,0,3,2]
@@ -1602,7 +1602,7 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = mem[1,0,3,2]
@@ -1616,7 +1616,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4
define <8 x double> @test_8xdouble_perm_mask0(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,3,2,4,5,7,6]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 3, i32 2, i32 4, i32 5, i32 7, i32 6>
@@ -1624,7 +1624,7 @@ define <8 x double> @test_8xdouble_perm_mask0(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,0,3,2,4,5,7,6]
@@ -1638,7 +1638,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,3,2,4,5,7,6]
@@ -1650,7 +1650,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec, <8 x d
}
define <8 x double> @test_masked_8xdouble_perm_mask1(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,4,7,6]
@@ -1664,7 +1664,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask1(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask1(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,4,7,6]
@@ -1676,7 +1676,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask1(<8 x double> %vec, <8 x d
}
define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,3,5,5,6,7]
@@ -1690,7 +1690,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,3,5,5,6,7]
@@ -1702,7 +1702,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec, <8 x d
}
define <8 x double> @test_8xdouble_perm_mask3(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,2,4,4,6,7]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 7>
@@ -1710,7 +1710,7 @@ define <8 x double> @test_8xdouble_perm_mask3(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_perm_mask3(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,2,4,4,6,7]
@@ -1724,7 +1724,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask3(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask3(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,2,4,4,6,7]
@@ -1736,7 +1736,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask3(<8 x double> %vec, <8 x d
}
define <8 x double> @test_8xdouble_perm_mem_mask0(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 = mem[0,1,2,3,5,4,7,6]
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
@@ -1745,7 +1745,7 @@ define <8 x double> @test_8xdouble_perm_mem_mask0(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,5,4,7,6]
@@ -1759,7 +1759,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,5,4,7,6]
@@ -1773,7 +1773,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8
define <8 x double> @test_masked_8xdouble_perm_mem_mask1(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = mem[0,1,3,3,4,5,7,7]
@@ -1787,7 +1787,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask1(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask1(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = mem[0,1,3,3,4,5,7,7]
@@ -1801,7 +1801,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask1(<8 x double>* %vp, <8
define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,4,7,6]
@@ -1815,7 +1815,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,4,7,6]
@@ -1829,7 +1829,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8
define <8 x double> @test_8xdouble_perm_mem_mask3(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 = mem[1,0,3,2,4,5,6,7]
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
@@ -1838,7 +1838,7 @@ define <8 x double> @test_8xdouble_perm_mem_mask3(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_perm_mem_mask3(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = mem[1,0,3,2,4,5,6,7]
@@ -1852,7 +1852,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask3(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask3(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,3,2,4,5,6,7]
diff --git a/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index 947013b0555..0f078194023 100644
--- a/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -5,7 +5,7 @@
define <8 x i16> @test_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec) {
; CHECK-LABEL: test_16xi16_to_8xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[8,9,12,13,12,13,8,9,14,15,10,11,12,13,14,15]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
@@ -18,7 +18,7 @@ define <8 x i16> @test_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec) {
}
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[8,9,12,13,12,13,8,9,14,15,10,11,12,13,14,15]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
@@ -37,7 +37,7 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec, <8 x i
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[8,9,12,13,12,13,8,9,14,15,10,11,12,13,14,15]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
@@ -55,7 +55,7 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec, <8 x
}
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask1(<16 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,8,9,2,3,10,11,12,13,14,15,8,9,12,13]
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
@@ -74,7 +74,7 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask1(<16 x i16> %vec, <8 x i
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask1(<16 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,8,9,2,3,10,11,12,13,14,15,8,9,12,13]
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
@@ -92,7 +92,7 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask1(<16 x i16> %vec, <8 x
}
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask2(<16 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[12,13,6,7,12,13,4,5,0,1,2,3,12,13,2,3]
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
@@ -111,7 +111,7 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask2(<16 x i16> %vec, <8 x i
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask2(<16 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[12,13,6,7,12,13,4,5,0,1,2,3,12,13,2,3]
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
@@ -129,7 +129,7 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask2(<16 x i16> %vec, <8 x
}
define <8 x i16> @test_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec) {
; CHECK-LABEL: test_16xi16_to_8xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,2,3,14,15,14,15,8,9,10,11,0,1,0,1]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,14,15,12,13,10,11,8,9,8,9,0,1,2,3]
@@ -141,7 +141,7 @@ define <8 x i16> @test_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec) {
}
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,1,2,3,14,15,14,15,8,9,10,11,0,1,0,1]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,14,15,12,13,10,11,8,9,8,9,0,1,2,3]
@@ -159,7 +159,7 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec, <8 x i
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,1,2,3,14,15,14,15,8,9,10,11,0,1,0,1]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,14,15,12,13,10,11,8,9,8,9,0,1,2,3]
@@ -176,7 +176,7 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec, <8 x
}
define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp) {
; CHECK-LABEL: test_16xi16_to_8xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm0
; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,14,15,12,13,6,7,10,11,10,11,6,7,6,7]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -191,7 +191,7 @@ define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp) {
}
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,1,14,15,12,13,6,7,10,11,10,11,6,7,6,7]
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm2
@@ -212,7 +212,7 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp, <8
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[0,1,14,15,12,13,6,7,10,11,10,11,6,7,6,7]
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1
@@ -233,7 +233,7 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp,
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask1(<16 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,14,15,8,9,14,15,0,1,2,3,0,1,12,13]
@@ -253,7 +253,7 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask1(<16 x i16>* %vp, <8
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask1(<16 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,14,15,8,9,14,15,0,1,2,3,0,1,12,13]
@@ -273,7 +273,7 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask1(<16 x i16>* %vp,
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask2(<16 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpsrld $16, %xmm2, %xmm3
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm2
@@ -293,7 +293,7 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask2(<16 x i16>* %vp, <8
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask2(<16 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpsrld $16, %xmm1, %xmm2
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1
@@ -313,7 +313,7 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask2(<16 x i16>* %vp,
define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp) {
; CHECK-LABEL: test_16xi16_to_8xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm0
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
@@ -326,7 +326,7 @@ define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp) {
}
define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
@@ -345,7 +345,7 @@ define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp, <8
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
@@ -364,7 +364,7 @@ define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp,
define <16 x i16> @test_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_to_16xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [8,12,13,10,12,13,1,28,6,24,9,11,12,2,14,2]
; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm1
@@ -375,7 +375,7 @@ define <16 x i16> @test_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec) {
}
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [8,12,13,10,12,13,1,28,6,24,9,11,12,2,14,2]
; CHECK-NEXT: vpermi2w %ymm0, %ymm3, %ymm4
@@ -391,7 +391,7 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec, <16
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [8,12,13,10,12,13,1,28,6,24,9,11,12,2,14,2]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -406,7 +406,7 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec, <1
}
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask1(<32 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [30,5,15,13,9,18,3,31,4,11,23,7,19,23,9,26]
; CHECK-NEXT: vpermi2w %ymm0, %ymm3, %ymm4
@@ -422,7 +422,7 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask1(<32 x i16> %vec, <16
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask1(<32 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [30,5,15,13,9,18,3,31,4,11,23,7,19,23,9,26]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -437,7 +437,7 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask1(<32 x i16> %vec, <1
}
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask2(<32 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [10,19,20,6,17,2,13,1,5,16,4,3,2,28,27,15]
; CHECK-NEXT: vpermi2w %ymm0, %ymm3, %ymm4
@@ -453,7 +453,7 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask2(<32 x i16> %vec, <16
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask2(<32 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [10,19,20,6,17,2,13,1,5,16,4,3,2,28,27,15]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -468,7 +468,7 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask2(<32 x i16> %vec, <1
}
define <16 x i16> @test_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_to_16xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,30,5,3,6,25,29,0,13,3,8,7,20,11,5]
; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm1
@@ -479,7 +479,7 @@ define <16 x i16> @test_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec) {
}
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [1,0,30,5,3,6,25,29,0,13,3,8,7,20,11,5]
; CHECK-NEXT: vpermi2w %ymm3, %ymm0, %ymm4
@@ -495,7 +495,7 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec, <16
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [1,0,30,5,3,6,25,29,0,13,3,8,7,20,11,5]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -510,7 +510,7 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec, <1
}
define <8 x i16> @test_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_to_8xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = <22,27,7,10,13,21,5,14,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm1
@@ -522,7 +522,7 @@ define <8 x i16> @test_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec) {
}
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <22,27,7,10,13,21,5,14,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm0, %ymm3, %ymm4
@@ -539,7 +539,7 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec, <8 x i
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <22,27,7,10,13,21,5,14,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm3
@@ -555,7 +555,7 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec, <8 x
}
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask1(<32 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <1,21,27,10,8,19,14,5,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm3, %ymm0, %ymm4
@@ -572,7 +572,7 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask1(<32 x i16> %vec, <8 x i
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask1(<32 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <1,21,27,10,8,19,14,5,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3
@@ -588,7 +588,7 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask1(<32 x i16> %vec, <8 x
}
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask2(<32 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <15,13,18,16,9,11,26,8,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm3, %ymm0, %ymm4
@@ -605,7 +605,7 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask2(<32 x i16> %vec, <8 x i
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask2(<32 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <15,13,18,16,9,11,26,8,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3
@@ -621,7 +621,7 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask2(<32 x i16> %vec, <8 x
}
define <8 x i16> @test_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_to_8xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = <17,0,23,10,1,8,7,30,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm1
@@ -633,7 +633,7 @@ define <8 x i16> @test_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec) {
}
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <17,0,23,10,1,8,7,30,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm3, %ymm0, %ymm4
@@ -650,7 +650,7 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec, <8 x i
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <17,0,23,10,1,8,7,30,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3
@@ -666,7 +666,7 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec, <8 x
}
define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_to_16xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12]
@@ -678,7 +678,7 @@ define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp) {
}
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12]
@@ -696,7 +696,7 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp,
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12]
@@ -714,7 +714,7 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask1(<32 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [22,13,21,1,14,8,5,16,15,17,24,28,15,9,14,25]
@@ -732,7 +732,7 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask1(<32 x i16>* %vp,
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask1(<32 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [22,13,21,1,14,8,5,16,15,17,24,28,15,9,14,25]
@@ -750,7 +750,7 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask1(<32 x i16>* %vp
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask2(<32 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [12,9,22,15,4,18,7,15,28,5,26,22,6,16,10,0]
@@ -768,7 +768,7 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask2(<32 x i16>* %vp,
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask2(<32 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [12,9,22,15,4,18,7,15,28,5,26,22,6,16,10,0]
@@ -786,7 +786,7 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask2(<32 x i16>* %vp
define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask3(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_to_16xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16]
@@ -798,7 +798,7 @@ define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask3(<32 x i16>* %vp) {
}
define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask3(<32 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16]
@@ -816,7 +816,7 @@ define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask3(<32 x i16>* %vp,
define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask3(<32 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16]
@@ -834,7 +834,7 @@ define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask3(<32 x i16>* %vp
define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_to_8xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u>
@@ -848,7 +848,7 @@ define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp) {
}
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u>
@@ -867,7 +867,7 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp, <8
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u>
@@ -886,7 +886,7 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp,
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask1(<32 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <7,6,4,6,12,4,27,1,u,u,u,u,u,u,u,u>
@@ -905,7 +905,7 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask1(<32 x i16>* %vp, <8
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask1(<32 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <7,6,4,6,12,4,27,1,u,u,u,u,u,u,u,u>
@@ -924,7 +924,7 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask1(<32 x i16>* %vp,
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask2(<32 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <6,18,0,4,10,25,22,10,u,u,u,u,u,u,u,u>
@@ -943,7 +943,7 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask2(<32 x i16>* %vp, <8
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask2(<32 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <6,18,0,4,10,25,22,10,u,u,u,u,u,u,u,u>
@@ -962,7 +962,7 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask2(<32 x i16>* %vp,
define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_to_8xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u>
@@ -976,7 +976,7 @@ define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp) {
}
define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u>
@@ -995,7 +995,7 @@ define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp, <8
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u>
@@ -1014,7 +1014,7 @@ define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp,
define <4 x i32> @test_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec) {
; CHECK-LABEL: test_8xi32_to_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,3,2]
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
@@ -1025,7 +1025,7 @@ define <4 x i32> @test_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec) {
}
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,2]
; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm3[0],xmm0[1,2,3]
@@ -1042,7 +1042,7 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec, <4 x i32
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,2]
; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
@@ -1058,7 +1058,7 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec, <4 x i
}
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask1(<8 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,0,2,3]
@@ -1076,7 +1076,7 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask1(<8 x i32> %vec, <4 x i32
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask1(<8 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,0,2,3]
@@ -1093,7 +1093,7 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask1(<8 x i32> %vec, <4 x i
}
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask2(<8 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm3[1],xmm0[1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -1109,7 +1109,7 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask2(<8 x i32> %vec, <4 x i32
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask2(<8 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm2[1],xmm0[1]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -1124,7 +1124,7 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask2(<8 x i32> %vec, <4 x i
}
define <4 x i32> @test_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec) {
; CHECK-LABEL: test_8xi32_to_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,1]
@@ -1135,7 +1135,7 @@ define <4 x i32> @test_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec) {
}
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm3[0,1],xmm0[2,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -1152,7 +1152,7 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec, <4 x i32
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -1167,7 +1167,7 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec, <4 x i
}
define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_to_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm1[3,1],xmm0[0,0]
@@ -1179,7 +1179,7 @@ define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp) {
}
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm3[3,1],xmm2[0,0]
@@ -1197,7 +1197,7 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp, <4 x
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,1],xmm1[0,0]
@@ -1215,7 +1215,7 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp, <4
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask1(<8 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
@@ -1233,7 +1233,7 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask1(<8 x i32>* %vp, <4 x
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask1(<8 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
@@ -1251,7 +1251,7 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask1(<8 x i32>* %vp, <4
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask2(<8 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
@@ -1269,7 +1269,7 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask2(<8 x i32>* %vp, <4 x
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask2(<8 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
@@ -1287,7 +1287,7 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask2(<8 x i32>* %vp, <4
define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_to_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[1,1,2,3]
@@ -1301,7 +1301,7 @@ define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp) {
}
define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
@@ -1321,7 +1321,7 @@ define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp, <4 x
define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
@@ -1341,7 +1341,7 @@ define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp, <4
define <8 x i32> @test_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec) {
; CHECK-LABEL: test_16xi32_to_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [9,5,3,6,15,2,9,14]
; CHECK-NEXT: vpermi2d %ymm0, %ymm2, %ymm1
@@ -1352,7 +1352,7 @@ define <8 x i32> @test_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec) {
}
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [9,5,3,6,15,2,9,14]
; CHECK-NEXT: vpermi2d %ymm0, %ymm3, %ymm4
@@ -1368,7 +1368,7 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec, <8 x i
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [9,5,3,6,15,2,9,14]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -1383,7 +1383,7 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec, <8 x
}
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask1(<16 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [3,0,15,3,2,3,6,8]
; CHECK-NEXT: vpermi2d %ymm3, %ymm0, %ymm4
@@ -1399,7 +1399,7 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask1(<16 x i32> %vec, <8 x i
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask1(<16 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [3,0,15,3,2,3,6,8]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -1414,7 +1414,7 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask1(<16 x i32> %vec, <8 x
}
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask2(<16 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [2,15,15,2,6,10,14,7]
; CHECK-NEXT: vpermi2d %ymm3, %ymm0, %ymm4
@@ -1430,7 +1430,7 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask2(<16 x i32> %vec, <8 x i
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask2(<16 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [2,15,15,2,6,10,14,7]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -1445,7 +1445,7 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask2(<16 x i32> %vec, <8 x
}
define <8 x i32> @test_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec) {
; CHECK-LABEL: test_16xi32_to_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [14,5,7,7,10,3,9,3]
; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm1
@@ -1456,7 +1456,7 @@ define <8 x i32> @test_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec) {
}
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [14,5,7,7,10,3,9,3]
; CHECK-NEXT: vpermi2d %ymm3, %ymm0, %ymm4
@@ -1472,7 +1472,7 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec, <8 x i
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [14,5,7,7,10,3,9,3]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -1487,7 +1487,7 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec, <8 x
}
define <4 x i32> @test_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec) {
; CHECK-LABEL: test_16xi32_to_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,0,3,4,6,4,7]
@@ -1501,7 +1501,7 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec) {
}
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,3,4,6,4,7]
@@ -1520,7 +1520,7 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec, <4 x i
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,3,4,6,4,7]
@@ -1538,7 +1538,7 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec, <4 x
}
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask1(<16 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <5,1,3,4,u,u,u,u>
; CHECK-NEXT: vpermd %ymm0, %ymm3, %ymm0
@@ -1555,7 +1555,7 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask1(<16 x i32> %vec, <4 x i
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask1(<16 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <5,1,3,4,u,u,u,u>
; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0
@@ -1571,7 +1571,7 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask1(<16 x i32> %vec, <4 x
}
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask2(<16 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <1,1,13,0,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm3, %ymm0, %ymm4
@@ -1588,7 +1588,7 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask2(<16 x i32> %vec, <4 x i
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask2(<16 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <1,1,13,0,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm3
@@ -1604,7 +1604,7 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask2(<16 x i32> %vec, <4 x
}
define <4 x i32> @test_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec) {
; CHECK-LABEL: test_16xi32_to_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = <3,0,0,13,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm1
@@ -1616,7 +1616,7 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec) {
}
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <3,0,0,13,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm3, %ymm0, %ymm4
@@ -1633,7 +1633,7 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec, <4 x i
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <3,0,0,13,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm3
@@ -1649,7 +1649,7 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec, <4 x
}
define <8 x i32> @test_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_to_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,6,0,1,2,4,4]
; CHECK-NEXT: vpermps 32(%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -1659,7 +1659,7 @@ define <8 x i32> @test_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp) {
}
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [7,0,6,0,1,2,4,4]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -1674,7 +1674,7 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp, <8
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [7,0,6,0,1,2,4,4]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -1689,7 +1689,7 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp,
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask1(<16 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [7,3,6,11,0,1,5,15]
@@ -1707,7 +1707,7 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask1(<16 x i32>* %vp, <8
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask1(<16 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [7,3,6,11,0,1,5,15]
@@ -1725,7 +1725,7 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask1(<16 x i32>* %vp,
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask2(<16 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [4,14,1,5,4,2,8,10]
@@ -1743,7 +1743,7 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask2(<16 x i32>* %vp, <8
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask2(<16 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [4,14,1,5,4,2,8,10]
@@ -1761,7 +1761,7 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask2(<16 x i32>* %vp,
define <8 x i32> @test_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_to_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [8,4,1,13,15,4,6,12]
@@ -1773,7 +1773,7 @@ define <8 x i32> @test_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp) {
}
define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [8,4,1,13,15,4,6,12]
@@ -1791,7 +1791,7 @@ define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp, <8
define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [8,4,1,13,15,4,6,12]
@@ -1809,7 +1809,7 @@ define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp,
define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_to_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <13,0,0,6,u,u,u,u>
@@ -1823,7 +1823,7 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp) {
}
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <13,0,0,6,u,u,u,u>
@@ -1842,7 +1842,7 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp, <4
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <13,0,0,6,u,u,u,u>
@@ -1861,7 +1861,7 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp,
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask1(<16 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpshufd {{.*#+}} ymm3 = ymm2[3,1,2,3,7,5,6,7]
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm2
@@ -1882,7 +1882,7 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask1(<16 x i32>* %vp, <4
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask1(<16 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1
; CHECK-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[3,1,2,3,7,5,6,7]
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1903,7 +1903,7 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask1(<16 x i32>* %vp,
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = <2,15,6,9,u,u,u,u>
@@ -1922,7 +1922,7 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp, <4
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <2,15,6,9,u,u,u,u>
@@ -1941,7 +1941,7 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp,
define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_to_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0
; CHECK-NEXT: vmovd %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1959,7 +1959,7 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp) {
}
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vmovd %xmm2, %eax
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
@@ -1983,7 +1983,7 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1
; CHECK-NEXT: vmovd %xmm1, %eax
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -2007,7 +2007,7 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp,
define <2 x i64> @test_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec) {
; CHECK-LABEL: test_4xi64_to_2xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT: vzeroupper
@@ -2017,7 +2017,7 @@ define <2 x i64> @test_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec) {
}
define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec, <2 x i64> %vec2, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %xmm4, %xmm2, %k1
@@ -2033,7 +2033,7 @@ define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec, <2 x i64
define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %xmm3, %xmm1, %k1
@@ -2047,7 +2047,7 @@ define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec, <2 x i
}
define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mask1(<4 x i64> %vec, <2 x i64> %vec2, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm3[0,1],xmm0[2,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -2063,7 +2063,7 @@ define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mask1(<4 x i64> %vec, <2 x i64
define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mask1(<4 x i64> %vec, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -2078,7 +2078,7 @@ define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mask1(<4 x i64> %vec, <2 x i
}
define <2 x i64> @test_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp) {
; CHECK-LABEL: test_4xi64_to_2xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -2090,7 +2090,7 @@ define <2 x i64> @test_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp) {
}
define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp, <2 x i64> %vec2, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -2107,7 +2107,7 @@ define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp, <2 x
define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -2124,7 +2124,7 @@ define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp, <2
define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mem_mask1(<4 x i64>* %vp, <2 x i64> %vec2, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
@@ -2142,7 +2142,7 @@ define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mem_mask1(<4 x i64>* %vp, <2 x
define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mem_mask1(<4 x i64>* %vp, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
@@ -2160,7 +2160,7 @@ define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mem_mask1(<4 x i64>* %vp, <2
define <4 x i64> @test_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec) {
; CHECK-LABEL: test_8xi64_to_4xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,1]
; CHECK-NEXT: retq
@@ -2169,7 +2169,7 @@ define <4 x i64> @test_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec) {
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
@@ -2184,7 +2184,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec, <4 x i64
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
@@ -2197,7 +2197,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec, <4 x i
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask1(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -2213,7 +2213,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask1(<8 x i64> %vec, <4 x i64
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask1(<8 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -2227,7 +2227,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask1(<8 x i64> %vec, <4 x i
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask2(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -2243,7 +2243,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask2(<8 x i64> %vec, <4 x i64
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask2(<8 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -2257,7 +2257,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask2(<8 x i64> %vec, <4 x i
}
define <4 x i64> @test_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec) {
; CHECK-LABEL: test_8xi64_to_4xi64_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,0,0,3]
@@ -2267,7 +2267,7 @@ define <4 x i64> @test_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec) {
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -2283,7 +2283,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec, <4 x i64
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -2297,7 +2297,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec, <4 x i
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask4(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm0[3,1,2,3]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,3,1]
@@ -2314,7 +2314,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask4(<8 x i64> %vec, <4 x i64
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask4(<8 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm0[3,1,2,3]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,3,1]
@@ -2330,7 +2330,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask4(<8 x i64> %vec, <4 x i
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask5(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,1,0,1,4,5,4,5]
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
@@ -2347,7 +2347,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask5(<8 x i64> %vec, <4 x i64
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask5(<8 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5]
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
@@ -2363,7 +2363,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask5(<8 x i64> %vec, <4 x i
}
define <4 x i64> @test_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec) {
; CHECK-LABEL: test_8xi64_to_4xi64_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,2,1,3]
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
@@ -2373,7 +2373,7 @@ define <4 x i64> @test_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec) {
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm3[3,2,1,3]
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
@@ -2389,7 +2389,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec, <4 x i64
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[3,2,1,3]
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
@@ -2404,7 +2404,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec, <4 x i
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,0,3,3]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0
@@ -2421,7 +2421,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,0,3,3]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0
@@ -2437,7 +2437,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i
}
define <2 x i64> @test_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec) {
; CHECK-LABEL: test_8xi64_to_2xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
; CHECK-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -2448,7 +2448,7 @@ define <2 x i64> @test_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec) {
}
define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec, <2 x i64> %vec2, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -2465,7 +2465,7 @@ define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec, <2 x i64
define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -2480,7 +2480,7 @@ define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec, <2 x i
}
define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mask1(<8 x i64> %vec, <2 x i64> %vec2, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -2496,7 +2496,7 @@ define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mask1(<8 x i64> %vec, <2 x i64
define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mask1(<8 x i64> %vec, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -2511,7 +2511,7 @@ define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mask1(<8 x i64> %vec, <2 x i
}
define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp) {
; CHECK-LABEL: test_8xi64_to_4xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = mem[0,2,0,2]
; CHECK-NEXT: retq
%vec = load <8 x i64>, <8 x i64>* %vp
@@ -2520,7 +2520,7 @@ define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp) {
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[0,2,0,2]
@@ -2534,7 +2534,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp, <4 x
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[0,2,0,2]
@@ -2548,7 +2548,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp, <4
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask1(<8 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
@@ -2565,7 +2565,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask1(<8 x i64>* %vp, <4 x
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask1(<8 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
@@ -2582,7 +2582,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask1(<8 x i64>* %vp, <4
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask2(<8 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm3[3,1,2,1]
@@ -2601,7 +2601,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask2(<8 x i64>* %vp, <4 x
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask2(<8 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[3,1,2,1]
@@ -2620,7 +2620,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask2(<8 x i64>* %vp, <4
define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask3(<8 x i64>* %vp) {
; CHECK-LABEL: test_8xi64_to_4xi64_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
@@ -2632,7 +2632,7 @@ define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask3(<8 x i64>* %vp) {
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask3(<8 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
@@ -2649,7 +2649,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask3(<8 x i64>* %vp, <4 x
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask3(<8 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
@@ -2666,7 +2666,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask3(<8 x i64>* %vp, <4
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask4(<8 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,1,0,1,4,5,4,5]
@@ -2685,7 +2685,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask4(<8 x i64>* %vp, <4 x
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask4(<8 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5]
@@ -2704,7 +2704,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask4(<8 x i64>* %vp, <4
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask5(<8 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
@@ -2721,7 +2721,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask5(<8 x i64>* %vp, <4 x
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask5(<8 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
@@ -2738,7 +2738,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask5(<8 x i64>* %vp, <4
define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp) {
; CHECK-LABEL: test_8xi64_to_4xi64_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,1,2,3]
@@ -2751,7 +2751,7 @@ define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp) {
}
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm3[3,1,2,3]
@@ -2770,7 +2770,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp, <4 x
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[3,1,2,3]
@@ -2789,7 +2789,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp, <4
define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask7(<8 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm3
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm2
@@ -2808,7 +2808,7 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask7(<8 x i64>* %vp, <4 x
define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask7(<8 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -2827,7 +2827,7 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask7(<8 x i64>* %vp, <4
define <2 x i64> @test_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp) {
; CHECK-LABEL: test_8xi64_to_2xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0
; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -2840,7 +2840,7 @@ define <2 x i64> @test_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp) {
}
define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp, <2 x i64> %vec2, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti32x4 $2, %zmm2, %xmm3
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
@@ -2858,7 +2858,7 @@ define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp, <2 x
define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti32x4 $2, %zmm1, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -2876,7 +2876,7 @@ define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp, <2
define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mem_mask1(<8 x i64>* %vp, <2 x i64> %vec2, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vextracti128 $1, %ymm3, %xmm3
@@ -2896,7 +2896,7 @@ define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mem_mask1(<8 x i64>* %vp, <2 x
define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mem_mask1(<8 x i64>* %vp, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm2
@@ -2916,7 +2916,7 @@ define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mem_mask1(<8 x i64>* %vp, <2
define <4 x float> @test_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[0,1]
; CHECK-NEXT: vzeroupper
@@ -2926,7 +2926,7 @@ define <4 x float> @test_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec) {
}
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm2, %k1
@@ -2942,7 +2942,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec,
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm1, %k1
@@ -2956,7 +2956,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec
}
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask1(<8 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -2973,7 +2973,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask1(<8 x float> %vec,
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask1(<8 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -2988,7 +2988,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask1(<8 x float> %vec
}
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask2(<8 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,0],xmm0[0,0]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -3005,7 +3005,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask2(<8 x float> %vec,
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask2(<8 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,0],xmm0[0,0]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -3020,7 +3020,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask2(<8 x float> %vec
}
define <4 x float> @test_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,1,2]
@@ -3031,7 +3031,7 @@ define <4 x float> @test_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec) {
}
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -3048,7 +3048,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec,
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -3063,7 +3063,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec
}
define <4 x float> @test_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,0]
@@ -3076,7 +3076,7 @@ define <4 x float> @test_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>* %vp) {
}
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[2,0]
@@ -3094,7 +3094,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>* %
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,0]
@@ -3112,7 +3112,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>*
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask1(<8 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3]
@@ -3130,7 +3130,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask1(<8 x float>* %
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask1(<8 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
@@ -3148,7 +3148,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask1(<8 x float>*
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask2(<8 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vshufps {{.*#+}} xmm3 = xmm3[3,0],xmm2[3,0]
@@ -3166,7 +3166,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask2(<8 x float>* %
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask2(<8 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,0],xmm1[3,0]
@@ -3184,7 +3184,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask2(<8 x float>*
define <4 x float> @test_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[3,0]
@@ -3197,7 +3197,7 @@ define <4 x float> @test_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>* %vp) {
}
define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,0],xmm2[3,0]
@@ -3215,7 +3215,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>* %
define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[3,0]
@@ -3233,7 +3233,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>*
define <8 x float> @test_16xfloat_to_8xfloat_perm_mask0(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,4,12,10,8,2,11,7]
; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm1
@@ -3244,7 +3244,7 @@ define <8 x float> @test_16xfloat_to_8xfloat_perm_mask0(<16 x float> %vec) {
}
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask0(<16 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm4 = [0,4,12,10,8,2,11,7]
; CHECK-NEXT: vpermi2ps %ymm3, %ymm0, %ymm4
@@ -3260,7 +3260,7 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask0(<16 x float> %vec
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask0(<16 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [0,4,12,10,8,2,11,7]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -3275,7 +3275,7 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask0(<16 x float> %v
}
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask1(<16 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm4 = [2,4,11,4,12,7,9,6]
; CHECK-NEXT: vpermi2ps %ymm0, %ymm3, %ymm4
@@ -3291,7 +3291,7 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask1(<16 x float> %vec
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask1(<16 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [2,4,11,4,12,7,9,6]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -3306,7 +3306,7 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask1(<16 x float> %v
}
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask2(<16 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} ymm3 = <0,4,u,u,6,1,4,4>
; CHECK-NEXT: vpermps %ymm0, %ymm3, %ymm3
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0
@@ -3324,7 +3324,7 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask2(<16 x float> %vec
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask2(<16 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} ymm2 = <0,4,u,u,6,1,4,4>
; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm2
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0
@@ -3341,7 +3341,7 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask2(<16 x float> %v
}
define <8 x float> @test_16xfloat_to_8xfloat_perm_mask3(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [4,6,1,8,4,12,13,0]
; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm1
@@ -3352,7 +3352,7 @@ define <8 x float> @test_16xfloat_to_8xfloat_perm_mask3(<16 x float> %vec) {
}
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask3(<16 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm4 = [4,6,1,8,4,12,13,0]
; CHECK-NEXT: vpermi2ps %ymm0, %ymm3, %ymm4
@@ -3368,7 +3368,7 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask3(<16 x float> %vec
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask3(<16 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [4,6,1,8,4,12,13,0]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -3383,7 +3383,7 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask3(<16 x float> %v
}
define <4 x float> @test_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = <12,0,1,2,u,u,u,u>
; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm1
@@ -3395,7 +3395,7 @@ define <4 x float> @test_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec) {
}
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm4 = <12,0,1,2,u,u,u,u>
; CHECK-NEXT: vpermi2ps %ymm0, %ymm3, %ymm4
@@ -3412,7 +3412,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = <12,0,1,2,u,u,u,u>
; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm3
@@ -3428,7 +3428,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask0(<16 x float> %v
}
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask1(<16 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,2]
@@ -3446,7 +3446,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask1(<16 x float> %vec
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask1(<16 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,2]
@@ -3463,7 +3463,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask1(<16 x float> %v
}
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask2(<16 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm3[0,0],ymm0[0,1],ymm3[4,4],ymm0[4,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -3480,7 +3480,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask2(<16 x float> %vec
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask2(<16 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,0],ymm0[0,1],ymm2[4,4],ymm0[4,5]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -3495,7 +3495,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask2(<16 x float> %v
}
define <4 x float> @test_16xfloat_to_4xfloat_perm_mask3(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,1,3,3]
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -3508,7 +3508,7 @@ define <4 x float> @test_16xfloat_to_4xfloat_perm_mask3(<16 x float> %vec) {
}
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask3(<16 x float> %vec, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -3527,7 +3527,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask3(<16 x float> %vec
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask3(<16 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -3545,7 +3545,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask3(<16 x float> %v
}
define <8 x float> @test_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [7,6,7,11,5,10,0,4]
@@ -3557,7 +3557,7 @@ define <8 x float> @test_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float>* %vp) {
}
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm4 = [7,6,7,11,5,10,0,4]
@@ -3575,7 +3575,7 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float>*
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [7,6,7,11,5,10,0,4]
@@ -3593,7 +3593,7 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask1(<16 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm4 = [11,0,9,0,7,14,0,8]
@@ -3611,7 +3611,7 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask1(<16 x float>*
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask1(<16 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [11,0,9,0,7,14,0,8]
@@ -3629,7 +3629,7 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask1(<16 x float
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask2(<16 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[1,0,0,3]
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm2
@@ -3648,7 +3648,7 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask2(<16 x float>*
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask2(<16 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[1,0,0,3]
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm3
@@ -3667,7 +3667,7 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask2(<16 x float
define <8 x float> @test_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [7,5,3,3,11,4,12,9]
@@ -3679,7 +3679,7 @@ define <8 x float> @test_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float>* %vp) {
}
define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm4 = [7,5,3,3,11,4,12,9]
@@ -3697,7 +3697,7 @@ define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float>*
define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [7,5,3,3,11,4,12,9]
@@ -3715,7 +3715,7 @@ define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float
define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,3,3]
@@ -3730,7 +3730,7 @@ define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float>* %vp) {
}
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,2,3,3]
@@ -3751,7 +3751,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float>*
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,3,3]
@@ -3772,7 +3772,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask1(<16 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,2],ymm2[2,3],ymm3[4,6],ymm2[6,7]
@@ -3792,7 +3792,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask1(<16 x float>*
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask1(<16 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[2,3],ymm2[4,6],ymm1[6,7]
@@ -3812,7 +3812,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask1(<16 x float
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask2(<16 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm3[0,0],ymm2[6,4],ymm3[4,4]
@@ -3832,7 +3832,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask2(<16 x float>*
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2(<16 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[0,0],ymm1[6,4],ymm2[4,4]
@@ -3852,7 +3852,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2(<16 x float
define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = <3,3,15,9,u,u,u,u>
@@ -3866,7 +3866,7 @@ define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp) {
}
define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovaps {{.*#+}} ymm4 = <3,3,15,9,u,u,u,u>
@@ -3885,7 +3885,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>*
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = <3,3,15,9,u,u,u,u>
@@ -3904,7 +3904,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float
define <2 x double> @test_4xdouble_to_2xdouble_perm_mask0(<4 x double> %vec) {
; CHECK-LABEL: test_4xdouble_to_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT: vzeroupper
@@ -3914,7 +3914,7 @@ define <2 x double> @test_4xdouble_to_2xdouble_perm_mask0(<4 x double> %vec) {
}
define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mask0(<4 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_to_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %xmm4, %xmm2, %k1
@@ -3930,7 +3930,7 @@ define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mask0(<4 x double> %v
define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mask0(<4 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm1, %k1
@@ -3944,7 +3944,7 @@ define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mask0(<4 x double>
}
define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mask1(<4 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_to_2xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %xmm4, %xmm2, %k1
@@ -3960,7 +3960,7 @@ define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mask1(<4 x double> %v
define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mask1(<4 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm1, %k1
@@ -3974,7 +3974,7 @@ define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mask1(<4 x double>
}
define <2 x double> @test_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double>* %vp) {
; CHECK-LABEL: test_4xdouble_to_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
@@ -3986,7 +3986,7 @@ define <2 x double> @test_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double>* %vp)
}
define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double>* %vp, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_to_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
@@ -4004,7 +4004,7 @@ define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double
define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
@@ -4022,7 +4022,7 @@ define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mem_mask0(<4 x doub
define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mem_mask1(<4 x double>* %vp, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_to_2xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -4039,7 +4039,7 @@ define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mem_mask1(<4 x double
define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mem_mask1(<4 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -4056,7 +4056,7 @@ define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mem_mask1(<4 x doub
define <4 x double> @test_8xdouble_to_4xdouble_perm_mask0(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
@@ -4066,7 +4066,7 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mask0(<8 x double> %vec) {
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask0(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm3[1],ymm0[1],ymm3[3],ymm0[3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -4082,7 +4082,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask0(<8 x double> %v
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask0(<8 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm2[1],ymm0[1],ymm2[3],ymm0[3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -4096,7 +4096,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask0(<8 x double>
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask1(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovapd {{.*#+}} ymm4 = [2,0,7,6]
; CHECK-NEXT: vpermi2pd %ymm3, %ymm0, %ymm4
@@ -4112,7 +4112,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask1(<8 x double> %v
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask1(<8 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovapd {{.*#+}} ymm2 = [2,0,7,6]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -4127,7 +4127,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask1(<8 x double>
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask2(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[2,3,2,0]
@@ -4141,7 +4141,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask2(<8 x double> %v
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask2(<8 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,2,0]
@@ -4153,7 +4153,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask2(<8 x double>
}
define <4 x double> @test_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,1,4]
; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1
@@ -4164,7 +4164,7 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec) {
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovapd {{.*#+}} ymm4 = [0,2,1,4]
; CHECK-NEXT: vpermi2pd %ymm3, %ymm0, %ymm4
@@ -4180,7 +4180,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask3(<8 x double> %v
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vmovapd {{.*#+}} ymm2 = [0,2,1,4]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -4195,7 +4195,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask3(<8 x double>
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask4(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm3[1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -4211,7 +4211,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask4(<8 x double> %v
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask4(<8 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -4225,7 +4225,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask4(<8 x double>
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask5(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -4241,7 +4241,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask5(<8 x double> %v
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask5(<8 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -4255,7 +4255,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask5(<8 x double>
}
define <4 x double> @test_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[1],ymm0[0],ymm1[3],ymm0[2]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
@@ -4265,7 +4265,7 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec) {
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm3[1],ymm0[0],ymm3[3],ymm0[2]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -4281,7 +4281,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask6(<8 x double> %v
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm2[1],ymm0[0],ymm2[3],ymm0[2]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -4295,7 +4295,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask6(<8 x double>
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask7(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -4311,7 +4311,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask7(<8 x double> %v
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask7(<8 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2],ymm0[3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -4325,7 +4325,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask7(<8 x double>
}
define <2 x double> @test_8xdouble_to_2xdouble_perm_mask0(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_to_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -4337,7 +4337,7 @@ define <2 x double> @test_8xdouble_to_2xdouble_perm_mask0(<8 x double> %vec) {
}
define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mask0(<8 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -4354,7 +4354,7 @@ define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mask0(<8 x double> %v
define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask0(<8 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -4370,7 +4370,7 @@ define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask0(<8 x double>
}
define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mask1(<8 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; CHECK-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm3[1],ymm0[3],ymm3[3]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -4387,7 +4387,7 @@ define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mask1(<8 x double> %v
define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask1(<8 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -4403,7 +4403,7 @@ define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask1(<8 x double>
}
define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask0(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovapd {{.*#+}} ymm0 = [1,6,7,2]
@@ -4415,7 +4415,7 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask0(<8 x double>* %vp)
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask0(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovapd {{.*#+}} ymm4 = [1,6,7,2]
@@ -4433,7 +4433,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask0(<8 x double
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask0(<8 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [1,6,7,2]
@@ -4451,7 +4451,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask0(<8 x doub
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask1(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3]
@@ -4468,7 +4468,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask1(<8 x double
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask1(<8 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3]
@@ -4485,7 +4485,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask1(<8 x doub
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask2(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3]
@@ -4502,7 +4502,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask2(<8 x double
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2(<8 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3]
@@ -4519,7 +4519,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2(<8 x doub
define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask3(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm0
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,0]
@@ -4531,7 +4531,7 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask3(<8 x double>* %vp)
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask3(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,0]
@@ -4549,7 +4549,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask3(<8 x double
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask3(<8 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,0]
@@ -4567,7 +4567,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask3(<8 x doub
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask4(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovapd {{.*#+}} ymm4 = [2,4,1,5]
@@ -4585,7 +4585,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask4(<8 x double
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask4(<8 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [2,4,1,5]
@@ -4603,7 +4603,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask4(<8 x doub
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask5(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3]
@@ -4620,7 +4620,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask5(<8 x double
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask5(<8 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3]
@@ -4637,7 +4637,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask5(<8 x doub
define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0
@@ -4650,7 +4650,7 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp)
}
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm3
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm2
@@ -4669,7 +4669,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm1
@@ -4688,7 +4688,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask6(<8 x doub
define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask7(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3]
@@ -4705,7 +4705,7 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask7(<8 x double
define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask7(<8 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3]
@@ -4722,7 +4722,7 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask7(<8 x doub
define <2 x double> @test_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_to_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm0
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[3],ymm1[2]
@@ -4736,7 +4736,7 @@ define <2 x double> @test_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double>* %vp)
}
define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double>* %vp, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf64x4 $1, %zmm2, %ymm3
; CHECK-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[1],ymm3[0],ymm2[3],ymm3[2]
@@ -4755,7 +4755,7 @@ define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double
define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[3],ymm2[2]
@@ -4774,7 +4774,7 @@ define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mem_mask0(<8 x doub
define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mem_mask1(<8 x double>* %vp, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm2
; CHECK-NEXT: vextractf32x4 $2, %zmm2, %xmm3
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
@@ -4791,7 +4791,7 @@ define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mem_mask1(<8 x double
define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mem_mask1(<8 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
diff --git a/test/CodeGen/X86/avx512-shuffles/permute.ll b/test/CodeGen/X86/avx512-shuffles/permute.ll
index 88c09356aec..ff392cca8dd 100644
--- a/test/CodeGen/X86/avx512-shuffles/permute.ll
+++ b/test/CodeGen/X86/avx512-shuffles/permute.ll
@@ -3,7 +3,7 @@
define <16 x i16> @test_16xi16_perm_mask0(<16 x i16> %vec) {
; CHECK-LABEL: test_16xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14]
; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -12,7 +12,7 @@ define <16 x i16> @test_16xi16_perm_mask0(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqw %ymm4, %ymm2, %k1
@@ -27,7 +27,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %ve
define <16 x i16> @test_masked_z_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -40,7 +40,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %
}
define <16 x i16> @test_masked_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqw %ymm4, %ymm2, %k1
@@ -55,7 +55,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %ve
define <16 x i16> @test_masked_z_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -68,7 +68,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %
}
define <16 x i16> @test_masked_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqw %ymm4, %ymm2, %k1
@@ -83,7 +83,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %ve
define <16 x i16> @test_masked_z_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -96,7 +96,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %
}
define <16 x i16> @test_16xi16_perm_mask3(<16 x i16> %vec) {
; CHECK-LABEL: test_16xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6]
; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -105,7 +105,7 @@ define <16 x i16> @test_16xi16_perm_mask3(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqw %ymm4, %ymm2, %k1
@@ -120,7 +120,7 @@ define <16 x i16> @test_masked_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %ve
define <16 x i16> @test_masked_z_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -133,7 +133,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %
}
define <16 x i16> @test_16xi16_perm_mem_mask0(<16 x i16>* %vp) {
; CHECK-LABEL: test_16xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13]
; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -143,7 +143,7 @@ define <16 x i16> @test_16xi16_perm_mem_mask0(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -158,7 +158,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm0, %k1
@@ -173,7 +173,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i1
define <16 x i16> @test_masked_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -188,7 +188,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm0, %k1
@@ -203,7 +203,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i1
define <16 x i16> @test_masked_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -218,7 +218,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm0, %k1
@@ -233,7 +233,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i1
define <16 x i16> @test_16xi16_perm_mem_mask3(<16 x i16>* %vp) {
; CHECK-LABEL: test_16xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4]
; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -243,7 +243,7 @@ define <16 x i16> @test_16xi16_perm_mem_mask3(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -258,7 +258,7 @@ define <16 x i16> @test_masked_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm0, %k1
@@ -273,7 +273,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i1
define <32 x i16> @test_32xi16_perm_mask0(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10]
; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -282,7 +282,7 @@ define <32 x i16> @test_32xi16_perm_mask0(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm3 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqw %zmm4, %zmm2, %k1
@@ -297,7 +297,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %ve
define <32 x i16> @test_masked_z_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -310,7 +310,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %
}
define <32 x i16> @test_masked_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,8,7,30,11,9,11,30,20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12,22,13,21,1,14,8,5,16]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqw %zmm4, %zmm2, %k1
@@ -325,7 +325,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %ve
define <32 x i16> @test_masked_z_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,8,7,30,11,9,11,30,20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12,22,13,21,1,14,8,5,16]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -338,7 +338,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %
}
define <32 x i16> @test_masked_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm3 = [15,17,24,28,15,9,14,25,28,25,6,31,20,2,23,31,12,21,10,6,22,0,26,16,3,3,20,27,8,31,3,27]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqw %zmm4, %zmm2, %k1
@@ -353,7 +353,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %ve
define <32 x i16> @test_masked_z_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,17,24,28,15,9,14,25,28,25,6,31,20,2,23,31,12,21,10,6,22,0,26,16,3,3,20,27,8,31,3,27]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -366,7 +366,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %
}
define <32 x i16> @test_32xi16_perm_mask3(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4]
; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -375,7 +375,7 @@ define <32 x i16> @test_32xi16_perm_mask3(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm3 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqw %zmm4, %zmm2, %k1
@@ -390,7 +390,7 @@ define <32 x i16> @test_masked_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %ve
define <32 x i16> @test_masked_z_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -403,7 +403,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %
}
define <32 x i16> @test_32xi16_perm_mem_mask0(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12]
; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -413,7 +413,7 @@ define <32 x i16> @test_32xi16_perm_mem_mask0(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -428,7 +428,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -443,7 +443,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i1
define <32 x i16> @test_masked_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,20,2,2,23,1,0,12,16,14,15,18,21,13,11,31,8,24,13,11,2,27,22,28,14,21,3,12,6,1,30,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -458,7 +458,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [31,20,2,2,23,1,0,12,16,14,15,18,21,13,11,31,8,24,13,11,2,27,22,28,14,21,3,12,6,1,30,6]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -473,7 +473,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i1
define <32 x i16> @test_masked_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [4,6,12,17,4,31,31,4,12,21,28,15,29,10,15,15,21,6,19,7,10,30,28,26,1,4,8,25,26,18,22,25]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -488,7 +488,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,6,12,17,4,31,31,4,12,21,28,15,29,10,15,15,21,6,19,7,10,30,28,26,1,4,8,25,26,18,22,25]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -503,7 +503,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i1
define <32 x i16> @test_32xi16_perm_mem_mask3(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27]
; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -513,7 +513,7 @@ define <32 x i16> @test_32xi16_perm_mem_mask3(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -528,7 +528,7 @@ define <32 x i16> @test_masked_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -543,7 +543,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i1
define <8 x i32> @test_8xi32_perm_mask0(<8 x i32> %vec) {
; CHECK-LABEL: test_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [4,2,0,6,7,2,3,6]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -552,7 +552,7 @@ define <8 x i32> @test_8xi32_perm_mask0(<8 x i32> %vec) {
}
define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [4,2,0,6,7,2,3,6]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -567,7 +567,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [4,2,0,6,7,2,3,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -580,7 +580,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [0,5,1,2,6,0,0,3]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -595,7 +595,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,1,2,6,0,0,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -608,7 +608,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [3,6,5,5,1,7,3,4]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -623,7 +623,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [3,6,5,5,1,7,3,4]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -636,7 +636,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_8xi32_perm_mask3(<8 x i32> %vec) {
; CHECK-LABEL: test_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3,0,3,1,0,4,5,0]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -645,7 +645,7 @@ define <8 x i32> @test_8xi32_perm_mask3(<8 x i32> %vec) {
}
define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [3,0,3,1,0,4,5,0]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %ymm4, %ymm2, %k1
@@ -660,7 +660,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [3,0,3,1,0,4,5,0]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -673,7 +673,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_8xi32_perm_mem_mask0(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [3,7,4,3,5,2,0,5]
; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -683,7 +683,7 @@ define <8 x i32> @test_8xi32_perm_mem_mask0(<8 x i32>* %vp) {
}
define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [3,7,4,3,5,2,0,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -698,7 +698,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,7,4,3,5,2,0,5]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -713,7 +713,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [4,6,1,7,6,7,6,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -728,7 +728,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [4,6,1,7,6,7,6,5]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -743,7 +743,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [6,4,6,1,6,3,6,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -758,7 +758,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [6,4,6,1,6,3,6,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -773,7 +773,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_8xi32_perm_mem_mask3(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [6,0,0,7,3,7,7,5]
; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -783,7 +783,7 @@ define <8 x i32> @test_8xi32_perm_mem_mask3(<8 x i32>* %vp) {
}
define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [6,0,0,7,3,7,7,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm1, %k1
@@ -798,7 +798,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [6,0,0,7,3,7,7,5]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
@@ -813,7 +813,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %
define <16 x i32> @test_16xi32_perm_mask0(<16 x i32> %vec) {
; CHECK-LABEL: test_16xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7]
; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -822,7 +822,7 @@ define <16 x i32> @test_16xi32_perm_mask0(<16 x i32> %vec) {
}
define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm3 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -837,7 +837,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -850,7 +850,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm3 = [10,0,14,15,11,1,1,5,0,5,0,15,13,1,14,3]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -865,7 +865,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [10,0,14,15,11,1,1,5,0,5,0,15,13,1,14,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -878,7 +878,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm3 = [3,10,15,1,0,5,0,9,13,2,1,5,15,2,15,5]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -893,7 +893,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,10,15,1,0,5,0,9,13,2,1,5,15,2,15,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -906,7 +906,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_16xi32_perm_mask3(<16 x i32> %vec) {
; CHECK-LABEL: test_16xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12]
; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -915,7 +915,7 @@ define <16 x i32> @test_16xi32_perm_mask3(<16 x i32> %vec) {
}
define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm3 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %zmm4, %zmm2, %k1
@@ -930,7 +930,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -943,7 +943,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_16xi32_perm_mem_mask0(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6]
; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -953,7 +953,7 @@ define <16 x i32> @test_16xi32_perm_mem_mask0(<16 x i32>* %vp) {
}
define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -968,7 +968,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -983,7 +983,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [11,5,3,4,7,15,12,4,8,11,12,7,6,12,6,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -998,7 +998,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [11,5,3,4,7,15,12,4,8,11,12,7,6,12,6,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -1013,7 +1013,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [7,14,2,7,10,7,3,0,11,9,0,4,12,10,8,2]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1028,7 +1028,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [7,14,2,7,10,7,3,0,11,9,0,4,12,10,8,2]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -1043,7 +1043,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_16xi32_perm_mem_mask3(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1]
; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -1053,7 +1053,7 @@ define <16 x i32> @test_16xi32_perm_mem_mask3(<16 x i32>* %vp) {
}
define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
@@ -1068,7 +1068,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
@@ -1083,7 +1083,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i3
define <4 x i64> @test_4xi64_perm_mask0(<4 x i64> %vec) {
; CHECK-LABEL: test_4xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,0,3,1]
; CHECK-NEXT: retq
%res = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
@@ -1091,7 +1091,7 @@ define <4 x i64> @test_4xi64_perm_mask0(<4 x i64> %vec) {
}
define <4 x i64> @test_masked_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,0,3,1]
@@ -1105,7 +1105,7 @@ define <4 x i64> @test_masked_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %vec2,
define <4 x i64> @test_masked_z_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,0,3,1]
@@ -1117,7 +1117,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %mask
}
define <4 x i64> @test_masked_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[1,2,0,3]
@@ -1131,7 +1131,7 @@ define <4 x i64> @test_masked_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %vec2,
define <4 x i64> @test_masked_z_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,0,3]
@@ -1143,7 +1143,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mask1(<4 x i64> %vec, <4 x i64> %mask
}
define <4 x i64> @test_masked_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,2,2,1]
@@ -1157,7 +1157,7 @@ define <4 x i64> @test_masked_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %vec2,
define <4 x i64> @test_masked_z_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,2,2,1]
@@ -1169,7 +1169,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %mask
}
define <4 x i64> @test_4xi64_perm_mask3(<4 x i64> %vec) {
; CHECK-LABEL: test_4xi64_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; CHECK-NEXT: retq
%res = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 3>
@@ -1177,7 +1177,7 @@ define <4 x i64> @test_4xi64_perm_mask3(<4 x i64> %vec) {
}
define <4 x i64> @test_masked_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,3]
@@ -1191,7 +1191,7 @@ define <4 x i64> @test_masked_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %vec2,
define <4 x i64> @test_masked_z_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,3]
@@ -1203,7 +1203,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %mask
}
define <4 x i64> @test_4xi64_perm_mem_mask0(<4 x i64>* %vp) {
; CHECK-LABEL: test_4xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = mem[2,1,2,0]
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %vp
@@ -1212,7 +1212,7 @@ define <4 x i64> @test_4xi64_perm_mem_mask0(<4 x i64>* %vp) {
}
define <4 x i64> @test_masked_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,1,2,0]
@@ -1226,7 +1226,7 @@ define <4 x i64> @test_masked_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %ve
define <4 x i64> @test_masked_z_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,1,2,0]
@@ -1240,7 +1240,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %
define <4 x i64> @test_masked_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,1,1,1]
@@ -1254,7 +1254,7 @@ define <4 x i64> @test_masked_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %ve
define <4 x i64> @test_masked_z_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,1,1,1]
@@ -1268,7 +1268,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %
define <4 x i64> @test_masked_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[0,1,2,0]
@@ -1282,7 +1282,7 @@ define <4 x i64> @test_masked_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %ve
define <4 x i64> @test_masked_z_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,0]
@@ -1296,7 +1296,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %
define <4 x i64> @test_4xi64_perm_mem_mask3(<4 x i64>* %vp) {
; CHECK-LABEL: test_4xi64_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = mem[2,0,1,3]
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %vp
@@ -1305,7 +1305,7 @@ define <4 x i64> @test_4xi64_perm_mem_mask3(<4 x i64>* %vp) {
}
define <4 x i64> @test_masked_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_4xi64_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,0,1,3]
@@ -1319,7 +1319,7 @@ define <4 x i64> @test_masked_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %ve
define <4 x i64> @test_masked_z_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,0,1,3]
@@ -1333,7 +1333,7 @@ define <4 x i64> @test_masked_z_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %
define <8 x i64> @test_8xi64_perm_mask0(<8 x i64> %vec) {
; CHECK-LABEL: test_8xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,7,6,5,5,1,6]
; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -1342,7 +1342,7 @@ define <8 x i64> @test_8xi64_perm_mask0(<8 x i64> %vec) {
}
define <8 x i64> @test_masked_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,4,7,6,5,5,1,6]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -1357,7 +1357,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %vec2,
define <8 x i64> @test_masked_z_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,4,7,6,5,5,1,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -1370,7 +1370,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %mask
}
define <8 x i64> @test_masked_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_imm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[1,0,1,1,5,4,5,5]
@@ -1384,7 +1384,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,1,1,5,4,5,5]
@@ -1396,7 +1396,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %
}
define <8 x i64> @test_masked_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,3,7,3,3,5,4,1]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -1411,7 +1411,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %vec2,
define <8 x i64> @test_masked_z_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,3,7,3,3,5,4,1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -1424,7 +1424,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %mask
}
define <8 x i64> @test_8xi64_perm_imm_mask3(<8 x i64> %vec) {
; CHECK-LABEL: test_8xi64_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,1,3,1,7,5,7,5]
; CHECK-NEXT: retq
%res = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 3, i32 1, i32 3, i32 1, i32 7, i32 5, i32 7, i32 5>
@@ -1432,7 +1432,7 @@ define <8 x i64> @test_8xi64_perm_imm_mask3(<8 x i64> %vec) {
}
define <8 x i64> @test_masked_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,1,7,5,7,5]
@@ -1446,7 +1446,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,1,7,5,7,5]
@@ -1458,7 +1458,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %
}
define <8 x i64> @test_masked_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm3 = [6,3,1,1,7,4,0,3]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -1473,7 +1473,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %vec2,
define <8 x i64> @test_masked_z_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [6,3,1,1,7,4,0,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -1486,7 +1486,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %mask
}
define <8 x i64> @test_masked_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_imm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[0,0,0,0,4,4,4,4]
@@ -1500,7 +1500,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
@@ -1512,7 +1512,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %
}
define <8 x i64> @test_8xi64_perm_mask6(<8 x i64> %vec) {
; CHECK-LABEL: test_8xi64_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [5,1,4,4,5,4,2,7]
; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -1521,7 +1521,7 @@ define <8 x i64> @test_8xi64_perm_mask6(<8 x i64> %vec) {
}
define <8 x i64> @test_masked_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm3 = [5,1,4,4,5,4,2,7]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %zmm4, %zmm2, %k1
@@ -1536,7 +1536,7 @@ define <8 x i64> @test_masked_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %vec2,
define <8 x i64> @test_masked_z_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,1,4,4,5,4,2,7]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -1549,7 +1549,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %mask
}
define <8 x i64> @test_masked_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_imm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,3,3,3,7,7,7,7]
@@ -1563,7 +1563,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,3,3,7,7,7,7]
@@ -1575,7 +1575,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %
}
define <8 x i64> @test_8xi64_perm_mem_mask0(<8 x i64>* %vp) {
; CHECK-LABEL: test_8xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [5,1,6,5,7,3,7,3]
; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -1585,7 +1585,7 @@ define <8 x i64> @test_8xi64_perm_mem_mask0(<8 x i64>* %vp) {
}
define <8 x i64> @test_masked_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,1,6,5,7,3,7,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -1600,7 +1600,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,1,6,5,7,3,7,3]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -1615,7 +1615,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %
define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_imm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[1,1,1,0,5,5,5,4]
@@ -1629,7 +1629,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[1,1,1,0,5,5,5,4]
@@ -1643,7 +1643,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i6
define <8 x i64> @test_masked_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,2,1,4,1,1,5,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -1658,7 +1658,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,2,1,4,1,1,5,5]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -1673,7 +1673,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %
define <8 x i64> @test_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp) {
; CHECK-LABEL: test_8xi64_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 = mem[1,3,1,1,5,7,5,5]
; CHECK-NEXT: retq
%vec = load <8 x i64>, <8 x i64>* %vp
@@ -1682,7 +1682,7 @@ define <8 x i64> @test_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp) {
}
define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[1,3,1,1,5,7,5,5]
@@ -1696,7 +1696,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[1,3,1,1,5,7,5,5]
@@ -1710,7 +1710,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i6
define <8 x i64> @test_masked_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,0,7,0,3,5,0,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -1725,7 +1725,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,0,7,0,3,5,0,6]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -1740,7 +1740,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %
define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_imm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[3,1,0,0,7,5,4,4]
@@ -1754,7 +1754,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[3,1,0,0,7,5,4,4]
@@ -1768,7 +1768,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i6
define <8 x i64> @test_8xi64_perm_mem_mask6(<8 x i64>* %vp) {
; CHECK-LABEL: test_8xi64_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [0,6,3,7,3,0,3,6]
; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -1778,7 +1778,7 @@ define <8 x i64> @test_8xi64_perm_mem_mask6(<8 x i64>* %vp) {
}
define <8 x i64> @test_masked_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,6,3,7,3,0,3,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
@@ -1793,7 +1793,7 @@ define <8 x i64> @test_masked_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %ve
define <8 x i64> @test_masked_z_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,6,3,7,3,0,3,6]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
@@ -1808,7 +1808,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %
define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_8xi64_perm_imm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[3,0,0,1,7,4,4,5]
@@ -1822,7 +1822,7 @@ define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i64>
define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[3,0,0,1,7,4,4,5]
@@ -1836,7 +1836,7 @@ define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i6
define <8 x float> @test_8xfloat_perm_mask0(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3,4,2,4,1,2,3,4]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -1845,7 +1845,7 @@ define <8 x float> @test_8xfloat_perm_mask0(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [3,4,2,4,1,2,3,4]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm2, %k1
@@ -1860,7 +1860,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [3,4,2,4,1,2,3,4]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -1873,7 +1873,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec, <8 x floa
}
define <8 x float> @test_masked_8xfloat_perm_mask1(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [4,2,1,0,6,0,5,1]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm2, %k1
@@ -1888,7 +1888,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask1(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask1(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [4,2,1,0,6,0,5,1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -1901,7 +1901,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask1(<8 x float> %vec, <8 x floa
}
define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [2,5,5,5,4,6,0,5]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm2, %k1
@@ -1916,7 +1916,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [2,5,5,5,4,6,0,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -1929,7 +1929,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec, <8 x floa
}
define <8 x float> @test_8xfloat_perm_mask3(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,5,2,5,5,5,1,6]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -1938,7 +1938,7 @@ define <8 x float> @test_8xfloat_perm_mask3(<8 x float> %vec) {
}
define <8 x float> @test_masked_8xfloat_perm_mask3(<8 x float> %vec, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [0,5,2,5,5,5,1,6]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm2, %k1
@@ -1953,7 +1953,7 @@ define <8 x float> @test_masked_8xfloat_perm_mask3(<8 x float> %vec, <8 x float>
define <8 x float> @test_masked_z_8xfloat_perm_mask3(<8 x float> %vec, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [0,5,2,5,5,5,1,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -1966,7 +1966,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mask3(<8 x float> %vec, <8 x floa
}
define <8 x float> @test_8xfloat_perm_mem_mask0(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [5,2,1,6,4,2,4,0]
; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -1976,7 +1976,7 @@ define <8 x float> @test_8xfloat_perm_mem_mask0(<8 x float>* %vp) {
}
define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [5,2,1,6,4,2,4,0]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -1991,7 +1991,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [5,2,1,6,4,2,4,0]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -2006,7 +2006,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [1,3,7,4,0,6,6,6]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -2021,7 +2021,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [1,3,7,4,0,6,6,6]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -2036,7 +2036,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x
define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [4,5,1,5,6,6,2,4]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -2051,7 +2051,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [4,5,1,5,6,6,2,4]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -2066,7 +2066,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x
define <8 x float> @test_8xfloat_perm_mem_mask3(<8 x float>* %vp) {
; CHECK-LABEL: test_8xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [5,7,0,6,4,2,3,0]
; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -2076,7 +2076,7 @@ define <8 x float> @test_8xfloat_perm_mem_mask3(<8 x float>* %vp) {
}
define <8 x float> @test_masked_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [5,7,0,6,4,2,3,0]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm1, %k1
@@ -2091,7 +2091,7 @@ define <8 x float> @test_masked_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x fl
define <8 x float> @test_masked_z_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x float> %mask) {
; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [5,7,0,6,4,2,3,0]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm0, %k1
@@ -2106,7 +2106,7 @@ define <8 x float> @test_masked_z_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x
define <16 x float> @test_16xfloat_perm_mask0(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7]
; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -2115,7 +2115,7 @@ define <16 x float> @test_16xfloat_perm_mask0(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm3 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm2, %k1
@@ -2130,7 +2130,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -2143,7 +2143,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec, <16 x
}
define <16 x float> @test_masked_16xfloat_perm_mask1(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm3 = [11,10,4,10,4,5,8,11,2,0,10,0,0,3,10,1]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm2, %k1
@@ -2158,7 +2158,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask1(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask1(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [11,10,4,10,4,5,8,11,2,0,10,0,0,3,10,1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -2171,7 +2171,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask1(<16 x float> %vec, <16 x
}
define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm3 = [0,15,6,14,3,6,5,2,5,15,11,6,6,4,8,11]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm2, %k1
@@ -2186,7 +2186,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,15,6,14,3,6,5,2,5,15,11,6,6,4,8,11]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -2199,7 +2199,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec, <16 x
}
define <16 x float> @test_16xfloat_perm_mask3(<16 x float> %vec) {
; CHECK-LABEL: test_16xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3]
; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -2208,7 +2208,7 @@ define <16 x float> @test_16xfloat_perm_mask3(<16 x float> %vec) {
}
define <16 x float> @test_masked_16xfloat_perm_mask3(<16 x float> %vec, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm3 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm2, %k1
@@ -2223,7 +2223,7 @@ define <16 x float> @test_masked_16xfloat_perm_mask3(<16 x float> %vec, <16 x fl
define <16 x float> @test_masked_z_16xfloat_perm_mask3(<16 x float> %vec, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -2236,7 +2236,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mask3(<16 x float> %vec, <16 x
}
define <16 x float> @test_16xfloat_perm_mem_mask0(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1]
; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -2246,7 +2246,7 @@ define <16 x float> @test_16xfloat_perm_mem_mask0(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -2261,7 +2261,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -2276,7 +2276,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp, <1
define <16 x float> @test_masked_16xfloat_perm_mem_mask1(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [4,2,3,5,11,6,4,7,6,4,14,8,15,12,9,4]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -2291,7 +2291,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask1(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask1(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [4,2,3,5,11,6,4,7,6,4,14,8,15,12,9,4]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -2306,7 +2306,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask1(<16 x float>* %vp, <1
define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [10,7,11,6,7,0,11,0,10,9,12,4,10,3,8,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -2321,7 +2321,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,11,6,7,0,11,0,10,9,12,4,10,3,8,5]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -2336,7 +2336,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp, <1
define <16 x float> @test_16xfloat_perm_mem_mask3(<16 x float>* %vp) {
; CHECK-LABEL: test_16xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0]
; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -2346,7 +2346,7 @@ define <16 x float> @test_16xfloat_perm_mem_mask3(<16 x float>* %vp) {
}
define <16 x float> @test_masked_16xfloat_perm_mem_mask3(<16 x float>* %vp, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm1, %k1
@@ -2361,7 +2361,7 @@ define <16 x float> @test_masked_16xfloat_perm_mem_mask3(<16 x float>* %vp, <16
define <16 x float> @test_masked_z_16xfloat_perm_mem_mask3(<16 x float>* %vp, <16 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm0, %k1
@@ -2376,7 +2376,7 @@ define <16 x float> @test_masked_z_16xfloat_perm_mem_mask3(<16 x float>* %vp, <1
define <4 x double> @test_4xdouble_perm_mask0(<4 x double> %vec) {
; CHECK-LABEL: test_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,2]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 2>
@@ -2384,7 +2384,7 @@ define <4 x double> @test_4xdouble_perm_mask0(<4 x double> %vec) {
}
define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,2]
@@ -2398,7 +2398,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask0(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,2]
@@ -2410,7 +2410,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask0(<4 x double> %vec, <4 x d
}
define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,0,0,0]
@@ -2424,7 +2424,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask1(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,0,0]
@@ -2436,7 +2436,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask1(<4 x double> %vec, <4 x d
}
define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[0,3,3,1]
@@ -2450,7 +2450,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask2(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,3,1]
@@ -2462,7 +2462,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask2(<4 x double> %vec, <4 x d
}
define <4 x double> @test_4xdouble_perm_mask3(<4 x double> %vec) {
; CHECK-LABEL: test_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,2]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 2>
@@ -2470,7 +2470,7 @@ define <4 x double> @test_4xdouble_perm_mask3(<4 x double> %vec) {
}
define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,2]
@@ -2484,7 +2484,7 @@ define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x dou
define <4 x double> @test_masked_z_4xdouble_perm_mask3(<4 x double> %vec, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,2]
@@ -2496,7 +2496,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mask3(<4 x double> %vec, <4 x d
}
define <4 x double> @test_4xdouble_perm_mem_mask0(<4 x double>* %vp) {
; CHECK-LABEL: test_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = mem[0,0,2,0]
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %vp
@@ -2505,7 +2505,7 @@ define <4 x double> @test_4xdouble_perm_mem_mask0(<4 x double>* %vp) {
}
define <4 x double> @test_masked_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[0,0,2,0]
@@ -2519,7 +2519,7 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,0]
@@ -2533,7 +2533,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4
define <4 x double> @test_masked_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[0,2,3,2]
@@ -2547,7 +2547,7 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[0,2,3,2]
@@ -2561,7 +2561,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4
define <4 x double> @test_masked_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[3,1,1,1]
@@ -2575,7 +2575,7 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[3,1,1,1]
@@ -2589,7 +2589,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4
define <4 x double> @test_4xdouble_perm_mem_mask3(<4 x double>* %vp) {
; CHECK-LABEL: test_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = mem[3,2,3,2]
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %vp
@@ -2598,7 +2598,7 @@ define <4 x double> @test_4xdouble_perm_mem_mask3(<4 x double>* %vp) {
}
define <4 x double> @test_masked_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[3,2,3,2]
@@ -2612,7 +2612,7 @@ define <4 x double> @test_masked_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x
define <4 x double> @test_masked_z_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,2]
@@ -2626,7 +2626,7 @@ define <4 x double> @test_masked_z_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4
define <8 x double> @test_8xdouble_perm_mask0(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [5,7,4,2,7,4,3,4]
; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -2635,7 +2635,7 @@ define <8 x double> @test_8xdouble_perm_mask0(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm3 = [5,7,4,2,7,4,3,4]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm2, %k1
@@ -2650,7 +2650,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [5,7,4,2,7,4,3,4]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm1, %k1
@@ -2663,7 +2663,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec, <8 x d
}
define <8 x double> @test_masked_8xdouble_perm_imm_mask1(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_imm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,2,7,4,4,6]
@@ -2677,7 +2677,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask1(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_imm_mask1(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,0,2,7,4,4,6]
@@ -2689,7 +2689,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mask1(<8 x double> %vec, <8
}
define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm3 = [7,5,5,5,3,5,1,7]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm2, %k1
@@ -2704,7 +2704,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [7,5,5,5,3,5,1,7]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm1, %k1
@@ -2717,7 +2717,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec, <8 x d
}
define <8 x double> @test_8xdouble_perm_imm_mask3(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,3,3,0,5,7,7,4]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 1, i32 3, i32 3, i32 0, i32 5, i32 7, i32 7, i32 4>
@@ -2725,7 +2725,7 @@ define <8 x double> @test_8xdouble_perm_imm_mask3(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_perm_imm_mask3(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[1,3,3,0,5,7,7,4]
@@ -2739,7 +2739,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask3(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_imm_mask3(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,3,3,0,5,7,7,4]
@@ -2751,7 +2751,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mask3(<8 x double> %vec, <8
}
define <8 x double> @test_masked_8xdouble_perm_mask4(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm3 = [3,5,3,4,6,5,7,1]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm2, %k1
@@ -2766,7 +2766,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask4(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask4(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [3,5,3,4,6,5,7,1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm1, %k1
@@ -2779,7 +2779,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask4(<8 x double> %vec, <8 x d
}
define <8 x double> @test_masked_8xdouble_perm_imm_mask5(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_imm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,3,2,3,7,7,6,7]
@@ -2793,7 +2793,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask5(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_imm_mask5(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,2,3,7,7,6,7]
@@ -2805,7 +2805,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mask5(<8 x double> %vec, <8
}
define <8 x double> @test_8xdouble_perm_mask6(<8 x double> %vec) {
; CHECK-LABEL: test_8xdouble_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [2,7,6,4,0,0,0,2]
; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -2814,7 +2814,7 @@ define <8 x double> @test_8xdouble_perm_mask6(<8 x double> %vec) {
}
define <8 x double> @test_masked_8xdouble_perm_mask6(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm3 = [2,7,6,4,0,0,0,2]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm2, %k1
@@ -2829,7 +2829,7 @@ define <8 x double> @test_masked_8xdouble_perm_mask6(<8 x double> %vec, <8 x dou
define <8 x double> @test_masked_z_8xdouble_perm_mask6(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [2,7,6,4,0,0,0,2]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm1, %k1
@@ -2842,7 +2842,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mask6(<8 x double> %vec, <8 x d
}
define <8 x double> @test_masked_8xdouble_perm_imm_mask7(<8 x double> %vec, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_imm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,2,7,5,7,6]
@@ -2856,7 +2856,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mask7(<8 x double> %vec, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_imm_mask7(<8 x double> %vec, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,2,7,5,7,6]
@@ -2868,7 +2868,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mask7(<8 x double> %vec, <8
}
define <8 x double> @test_8xdouble_perm_mem_mask0(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [0,3,4,0,4,2,0,1]
; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -2878,7 +2878,7 @@ define <8 x double> @test_8xdouble_perm_mem_mask0(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [0,3,4,0,4,2,0,1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm1, %k1
@@ -2893,7 +2893,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [0,3,4,0,4,2,0,1]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm0, %k1
@@ -2908,7 +2908,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8
define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_imm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[0,2,0,3,4,6,4,7]
@@ -2922,7 +2922,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp,
define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[0,2,0,3,4,6,4,7]
@@ -2936,7 +2936,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp
define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [6,7,2,7,7,6,2,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm1, %k1
@@ -2951,7 +2951,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [6,7,2,7,7,6,2,5]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm0, %k1
@@ -2966,7 +2966,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8
define <8 x double> @test_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 = mem[2,1,1,0,6,5,5,4]
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
@@ -2975,7 +2975,7 @@ define <8 x double> @test_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[2,1,1,0,6,5,5,4]
@@ -2989,7 +2989,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp,
define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[2,1,1,0,6,5,5,4]
@@ -3003,7 +3003,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp
define <8 x double> @test_masked_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [1,1,3,5,6,0,6,0]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm1, %k1
@@ -3018,7 +3018,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [1,1,3,5,6,0,6,0]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm0, %k1
@@ -3033,7 +3033,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8
define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_imm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[2,2,2,3,6,6,6,7]
@@ -3047,7 +3047,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp,
define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[2,2,2,3,6,6,6,7]
@@ -3061,7 +3061,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp
define <8 x double> @test_8xdouble_perm_mem_mask6(<8 x double>* %vp) {
; CHECK-LABEL: test_8xdouble_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [2,4,0,4,6,1,2,5]
; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -3071,7 +3071,7 @@ define <8 x double> @test_8xdouble_perm_mem_mask6(<8 x double>* %vp) {
}
define <8 x double> @test_masked_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [2,4,0,4,6,1,2,5]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm1, %k1
@@ -3086,7 +3086,7 @@ define <8 x double> @test_masked_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8 x
define <8 x double> @test_masked_z_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [2,4,0,4,6,1,2,5]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm0, %k1
@@ -3101,7 +3101,7 @@ define <8 x double> @test_masked_z_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8
define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_masked_8xdouble_perm_imm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[0,3,2,0,4,7,6,4]
@@ -3115,7 +3115,7 @@ define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp,
define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp, <8 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[0,3,2,0,4,7,6,4]
diff --git a/test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll b/test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll
index 3be71d26f68..ff840e6411c 100644
--- a/test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll
+++ b/test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll
@@ -3,7 +3,7 @@
define <4 x float> @test_4xfloat_shuff_mask0(<4 x float> %vec1, <4 x float> %vec2) {
; CHECK-LABEL: test_4xfloat_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,1],xmm1[3,1]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 2, i32 1, i32 7, i32 5>
@@ -11,7 +11,7 @@ define <4 x float> @test_4xfloat_shuff_mask0(<4 x float> %vec1, <4 x float> %vec
}
define <4 x float> @test_4xfloat_masked_shuff_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm2 {%k1} = xmm0[2,1],xmm1[3,1]
@@ -25,7 +25,7 @@ define <4 x float> @test_4xfloat_masked_shuff_mask0(<4 x float> %vec1, <4 x floa
define <4 x float> @test_4xfloat_zero_masked_shuff_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1],xmm1[3,1]
@@ -37,7 +37,7 @@ define <4 x float> @test_4xfloat_zero_masked_shuff_mask0(<4 x float> %vec1, <4 x
}
define <4 x float> @test_4xfloat_masked_shuff_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm2 {%k1} = xmm0[1,2],xmm1[3,2]
@@ -51,7 +51,7 @@ define <4 x float> @test_4xfloat_masked_shuff_mask1(<4 x float> %vec1, <4 x floa
define <4 x float> @test_4xfloat_zero_masked_shuff_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2],xmm1[3,2]
@@ -63,7 +63,7 @@ define <4 x float> @test_4xfloat_zero_masked_shuff_mask1(<4 x float> %vec1, <4 x
}
define <4 x float> @test_4xfloat_masked_shuff_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm2 {%k1} = xmm0[1,3],xmm1[2,1]
@@ -77,7 +77,7 @@ define <4 x float> @test_4xfloat_masked_shuff_mask2(<4 x float> %vec1, <4 x floa
define <4 x float> @test_4xfloat_zero_masked_shuff_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3],xmm1[2,1]
@@ -89,7 +89,7 @@ define <4 x float> @test_4xfloat_zero_masked_shuff_mask2(<4 x float> %vec1, <4 x
}
define <4 x float> @test_4xfloat_shuff_mask3(<4 x float> %vec1, <4 x float> %vec2) {
; CHECK-LABEL: test_4xfloat_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 3, i32 3, i32 7, i32 7>
@@ -97,7 +97,7 @@ define <4 x float> @test_4xfloat_shuff_mask3(<4 x float> %vec1, <4 x float> %vec
}
define <4 x float> @test_4xfloat_masked_shuff_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm2 {%k1} = xmm0[3,3],xmm1[3,3]
@@ -111,7 +111,7 @@ define <4 x float> @test_4xfloat_masked_shuff_mask3(<4 x float> %vec1, <4 x floa
define <4 x float> @test_4xfloat_zero_masked_shuff_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3],xmm1[3,3]
@@ -123,7 +123,7 @@ define <4 x float> @test_4xfloat_zero_masked_shuff_mask3(<4 x float> %vec1, <4 x
}
define <4 x float> @test_4xfloat_shuff_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) {
; CHECK-LABEL: test_4xfloat_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,0],mem[1,2]
; CHECK-NEXT: retq
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -132,7 +132,7 @@ define <4 x float> @test_4xfloat_shuff_mem_mask0(<4 x float> %vec1, <4 x float>*
}
define <4 x float> @test_4xfloat_masked_shuff_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[1,0],mem[1,2]
@@ -147,7 +147,7 @@ define <4 x float> @test_4xfloat_masked_shuff_mem_mask0(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0],mem[1,2]
@@ -161,7 +161,7 @@ define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask0(<4 x float> %vec1,
define <4 x float> @test_4xfloat_masked_shuff_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[3,3],mem[1,3]
@@ -176,7 +176,7 @@ define <4 x float> @test_4xfloat_masked_shuff_mem_mask1(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3],mem[1,3]
@@ -190,7 +190,7 @@ define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask1(<4 x float> %vec1,
define <4 x float> @test_4xfloat_masked_shuff_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[1,3],mem[2,0]
@@ -205,7 +205,7 @@ define <4 x float> @test_4xfloat_masked_shuff_mem_mask2(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3],mem[2,0]
@@ -219,7 +219,7 @@ define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask2(<4 x float> %vec1,
define <4 x float> @test_4xfloat_shuff_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) {
; CHECK-LABEL: test_4xfloat_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,1],mem[3,2]
; CHECK-NEXT: retq
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -228,7 +228,7 @@ define <4 x float> @test_4xfloat_shuff_mem_mask3(<4 x float> %vec1, <4 x float>*
}
define <4 x float> @test_4xfloat_masked_shuff_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[2,1],mem[3,2]
@@ -243,7 +243,7 @@ define <4 x float> @test_4xfloat_masked_shuff_mem_mask3(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1],mem[3,2]
@@ -257,7 +257,7 @@ define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask3(<4 x float> %vec1,
define <8 x float> @test_8xfloat_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2) {
; CHECK-LABEL: test_8xfloat_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[0,2],ymm0[5,7],ymm1[4,6]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 1, i32 3, i32 8, i32 10, i32 5, i32 7, i32 12, i32 14>
@@ -265,7 +265,7 @@ define <8 x float> @test_8xfloat_shuff_mask0(<8 x float> %vec1, <8 x float> %vec
}
define <8 x float> @test_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm2 {%k1} = ymm0[1,3],ymm1[0,2],ymm0[5,7],ymm1[4,6]
@@ -279,7 +279,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x floa
define <8 x float> @test_8xfloat_zero_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[1,3],ymm1[0,2],ymm0[5,7],ymm1[4,6]
@@ -291,7 +291,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask0(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm2 {%k1} = ymm0[0,3],ymm1[3,1],ymm0[4,7],ymm1[7,5]
@@ -305,7 +305,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x floa
define <8 x float> @test_8xfloat_zero_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3],ymm1[3,1],ymm0[4,7],ymm1[7,5]
@@ -317,7 +317,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask1(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm2 {%k1} = ymm0[0,2],ymm1[2,2],ymm0[4,6],ymm1[6,6]
@@ -331,7 +331,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mask2(<8 x float> %vec1, <8 x floa
define <8 x float> @test_8xfloat_zero_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,2],ymm1[2,2],ymm0[4,6],ymm1[6,6]
@@ -343,7 +343,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask2(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2) {
; CHECK-LABEL: test_8xfloat_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,2],ymm1[3,2],ymm0[7,6],ymm1[7,6]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 3, i32 2, i32 11, i32 10, i32 7, i32 6, i32 15, i32 14>
@@ -351,7 +351,7 @@ define <8 x float> @test_8xfloat_shuff_mask3(<8 x float> %vec1, <8 x float> %vec
}
define <8 x float> @test_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm2 {%k1} = ymm0[3,2],ymm1[3,2],ymm0[7,6],ymm1[7,6]
@@ -365,7 +365,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x floa
define <8 x float> @test_8xfloat_zero_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2],ymm1[3,2],ymm0[7,6],ymm1[7,6]
@@ -377,7 +377,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask3(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) {
; CHECK-LABEL: test_8xfloat_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],mem[0,0],ymm0[6,5],mem[4,4]
; CHECK-NEXT: retq
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -386,7 +386,7 @@ define <8 x float> @test_8xfloat_shuff_mem_mask0(<8 x float> %vec1, <8 x float>*
}
define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm1 {%k1} = ymm0[2,1],mem[0,0],ymm0[6,5],mem[4,4]
@@ -401,7 +401,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1],mem[0,0],ymm0[6,5],mem[4,4]
@@ -415,7 +415,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask0(<8 x float> %vec1,
define <8 x float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm1 {%k1} = ymm0[2,2],mem[1,0],ymm0[6,6],mem[5,4]
@@ -430,7 +430,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,2],mem[1,0],ymm0[6,6],mem[5,4]
@@ -444,7 +444,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask1(<8 x float> %vec1,
define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm1 {%k1} = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
@@ -459,7 +459,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7]
@@ -473,7 +473,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask2(<8 x float> %vec1,
define <8 x float> @test_8xfloat_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) {
; CHECK-LABEL: test_8xfloat_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,3],mem[2,1],ymm0[7,7],mem[6,5]
; CHECK-NEXT: retq
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -482,7 +482,7 @@ define <8 x float> @test_8xfloat_shuff_mem_mask3(<8 x float> %vec1, <8 x float>*
}
define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm1 {%k1} = ymm0[3,3],mem[2,1],ymm0[7,7],mem[6,5]
@@ -497,7 +497,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3],mem[2,1],ymm0[7,7],mem[6,5]
@@ -511,7 +511,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask3(<8 x float> %vec1,
define <16 x float> @test_16xfloat_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2) {
; CHECK-LABEL: test_16xfloat_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm0[3,2],zmm1[3,2],zmm0[7,6],zmm1[7,6],zmm0[11,10],zmm1[11,10],zmm0[15,14],zmm1[15,14]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 3, i32 2, i32 19, i32 18, i32 7, i32 6, i32 23, i32 22, i32 11, i32 10, i32 27, i32 26, i32 15, i32 14, i32 31, i32 30>
@@ -519,7 +519,7 @@ define <16 x float> @test_16xfloat_shuff_mask0(<16 x float> %vec1, <16 x float>
}
define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[3,2],zmm1[3,2],zmm0[7,6],zmm1[7,6],zmm0[11,10],zmm1[11,10],zmm0[15,14],zmm1[15,14]
@@ -533,7 +533,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2],zmm1[3,2],zmm0[7,6],zmm1[7,6],zmm0[11,10],zmm1[11,10],zmm0[15,14],zmm1[15,14]
@@ -545,7 +545,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask0(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[1,2],zmm1[3,3],zmm0[5,6],zmm1[7,7],zmm0[9,10],zmm1[11,11],zmm0[13,14],zmm1[15,15]
@@ -559,7 +559,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2],zmm1[3,3],zmm0[5,6],zmm1[7,7],zmm0[9,10],zmm1[11,11],zmm0[13,14],zmm1[15,15]
@@ -571,7 +571,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask1(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[3,0],zmm1[2,1],zmm0[7,4],zmm1[6,5],zmm0[11,8],zmm1[10,9],zmm0[15,12],zmm1[14,13]
@@ -585,7 +585,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0],zmm1[2,1],zmm0[7,4],zmm1[6,5],zmm0[11,8],zmm1[10,9],zmm0[15,12],zmm1[14,13]
@@ -597,7 +597,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask2(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2) {
; CHECK-LABEL: test_16xfloat_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm0[2,3],zmm1[0,2],zmm0[6,7],zmm1[4,6],zmm0[10,11],zmm1[8,10],zmm0[14,15],zmm1[12,14]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 3, i32 16, i32 18, i32 6, i32 7, i32 20, i32 22, i32 10, i32 11, i32 24, i32 26, i32 14, i32 15, i32 28, i32 30>
@@ -605,7 +605,7 @@ define <16 x float> @test_16xfloat_shuff_mask3(<16 x float> %vec1, <16 x float>
}
define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[2,3],zmm1[0,2],zmm0[6,7],zmm1[4,6],zmm0[10,11],zmm1[8,10],zmm0[14,15],zmm1[12,14]
@@ -619,7 +619,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3],zmm1[0,2],zmm0[6,7],zmm1[4,6],zmm0[10,11],zmm1[8,10],zmm0[14,15],zmm1[12,14]
@@ -631,7 +631,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask3(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) {
; CHECK-LABEL: test_16xfloat_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm0[3,0],mem[0,2],zmm0[7,4],mem[4,6],zmm0[11,8],mem[8,10],zmm0[15,12],mem[12,14]
; CHECK-NEXT: retq
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -640,7 +640,7 @@ define <16 x float> @test_16xfloat_shuff_mem_mask0(<16 x float> %vec1, <16 x flo
}
define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm1 {%k1} = zmm0[3,0],mem[0,2],zmm0[7,4],mem[4,6],zmm0[11,8],mem[8,10],zmm0[15,12],mem[12,14]
@@ -655,7 +655,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0],mem[0,2],zmm0[7,4],mem[4,6],zmm0[11,8],mem[8,10],zmm0[15,12],mem[12,14]
@@ -669,7 +669,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask0(<16 x float> %vec
define <16 x float> @test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm1 {%k1} = zmm0[0,2],mem[3,2],zmm0[4,6],mem[7,6],zmm0[8,10],mem[11,10],zmm0[12,14],mem[15,14]
@@ -684,7 +684,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[0,2],mem[3,2],zmm0[4,6],mem[7,6],zmm0[8,10],mem[11,10],zmm0[12,14],mem[15,14]
@@ -698,7 +698,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask1(<16 x float> %vec
define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm1 {%k1} = zmm0[2,0],mem[2,2],zmm0[6,4],mem[6,6],zmm0[10,8],mem[10,10],zmm0[14,12],mem[14,14]
@@ -713,7 +713,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0],mem[2,2],zmm0[6,4],mem[6,6],zmm0[10,8],mem[10,10],zmm0[14,12],mem[14,14]
@@ -727,7 +727,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask2(<16 x float> %vec
define <16 x float> @test_16xfloat_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
; CHECK-LABEL: test_16xfloat_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm0[2,1],mem[1,3],zmm0[6,5],mem[5,7],zmm0[10,9],mem[9,11],zmm0[14,13],mem[13,15]
; CHECK-NEXT: retq
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -736,7 +736,7 @@ define <16 x float> @test_16xfloat_shuff_mem_mask3(<16 x float> %vec1, <16 x flo
}
define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm1 {%k1} = zmm0[2,1],mem[1,3],zmm0[6,5],mem[5,7],zmm0[10,9],mem[9,11],zmm0[14,13],mem[13,15]
@@ -751,7 +751,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1],mem[1,3],zmm0[6,5],mem[5,7],zmm0[10,9],mem[9,11],zmm0[14,13],mem[13,15]
@@ -765,7 +765,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask3(<16 x float> %vec
define <2 x double> @test_2xdouble_shuff_mask0(<2 x double> %vec1, <2 x double> %vec2) {
; CHECK-LABEL: test_2xdouble_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; CHECK-NEXT: retq
%res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 2>
@@ -773,7 +773,7 @@ define <2 x double> @test_2xdouble_shuff_mask0(<2 x double> %vec1, <2 x double>
}
define <2 x double> @test_2xdouble_masked_shuff_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %xmm4, %xmm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[0]
@@ -787,7 +787,7 @@ define <2 x double> @test_2xdouble_masked_shuff_mask0(<2 x double> %vec1, <2 x d
define <2 x double> @test_2xdouble_zero_masked_shuff_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[0]
@@ -799,7 +799,7 @@ define <2 x double> @test_2xdouble_zero_masked_shuff_mask0(<2 x double> %vec1, <
}
define <2 x double> @test_2xdouble_masked_shuff_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %xmm4, %xmm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[0]
@@ -813,7 +813,7 @@ define <2 x double> @test_2xdouble_masked_shuff_mask1(<2 x double> %vec1, <2 x d
define <2 x double> @test_2xdouble_zero_masked_shuff_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[0]
@@ -825,7 +825,7 @@ define <2 x double> @test_2xdouble_zero_masked_shuff_mask1(<2 x double> %vec1, <
}
define <2 x double> @test_2xdouble_shuff_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) {
; CHECK-LABEL: test_2xdouble_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],mem[0]
; CHECK-NEXT: retq
%vec2 = load <2 x double>, <2 x double>* %vec2p
@@ -834,7 +834,7 @@ define <2 x double> @test_2xdouble_shuff_mem_mask0(<2 x double> %vec1, <2 x doub
}
define <2 x double> @test_2xdouble_masked_shuff_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[0]
@@ -849,7 +849,7 @@ define <2 x double> @test_2xdouble_masked_shuff_mem_mask0(<2 x double> %vec1, <2
define <2 x double> @test_2xdouble_zero_masked_shuff_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[0]
@@ -863,7 +863,7 @@ define <2 x double> @test_2xdouble_zero_masked_shuff_mem_mask0(<2 x double> %vec
define <2 x double> @test_2xdouble_masked_shuff_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[0]
@@ -878,7 +878,7 @@ define <2 x double> @test_2xdouble_masked_shuff_mem_mask1(<2 x double> %vec1, <2
define <2 x double> @test_2xdouble_zero_masked_shuff_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[0]
@@ -892,7 +892,7 @@ define <2 x double> @test_2xdouble_zero_masked_shuff_mem_mask1(<2 x double> %vec
define <4 x double> @test_4xdouble_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2) {
; CHECK-LABEL: test_4xdouble_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 0, i32 4, i32 3, i32 7>
@@ -900,7 +900,7 @@ define <4 x double> @test_4xdouble_shuff_mask0(<4 x double> %vec1, <4 x double>
}
define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
@@ -914,7 +914,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
@@ -926,7 +926,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask0(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
@@ -940,7 +940,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
@@ -952,7 +952,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask1(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[0],ymm0[3],ymm1[2]
@@ -966,7 +966,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[0],ymm0[3],ymm1[2]
@@ -978,7 +978,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask2(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2) {
; CHECK-LABEL: test_4xdouble_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 0, i32 4, i32 2, i32 7>
@@ -986,7 +986,7 @@ define <4 x double> @test_4xdouble_shuff_mask3(<4 x double> %vec1, <4 x double>
}
define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
@@ -1000,7 +1000,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
@@ -1012,7 +1012,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask3(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
; CHECK-LABEL: test_4xdouble_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[2]
; CHECK-NEXT: retq
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -1021,7 +1021,7 @@ define <4 x double> @test_4xdouble_shuff_mem_mask0(<4 x double> %vec1, <4 x doub
}
define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[2]
@@ -1036,7 +1036,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[2]
@@ -1050,7 +1050,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask0(<4 x double> %vec
define <4 x double> @test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[1],ymm0[2],mem[2]
@@ -1065,7 +1065,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[1],ymm0[2],mem[2]
@@ -1079,7 +1079,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask1(<4 x double> %vec
define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[3],mem[2]
@@ -1094,7 +1094,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[3],mem[2]
@@ -1108,7 +1108,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask2(<4 x double> %vec
define <4 x double> @test_4xdouble_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
; CHECK-LABEL: test_4xdouble_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[2],mem[2]
; CHECK-NEXT: retq
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -1117,7 +1117,7 @@ define <4 x double> @test_4xdouble_shuff_mem_mask3(<4 x double> %vec1, <4 x doub
}
define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[2],mem[2]
@@ -1132,7 +1132,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[2],mem[2]
@@ -1146,7 +1146,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask3(<4 x double> %vec
define <8 x double> @test_8xdouble_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2) {
; CHECK-LABEL: test_8xdouble_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 0, i32 8, i32 2, i32 11, i32 4, i32 13, i32 7, i32 15>
@@ -1154,7 +1154,7 @@ define <8 x double> @test_8xdouble_shuff_mask0(<8 x double> %vec1, <8 x double>
}
define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[7],zmm1[7]
@@ -1168,7 +1168,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[7],zmm1[7]
@@ -1180,7 +1180,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask0(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[5],zmm1[5],zmm0[6],zmm1[7]
@@ -1194,7 +1194,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[5],zmm1[5],zmm0[6],zmm1[7]
@@ -1206,7 +1206,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask1(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[5],zmm0[6],zmm1[6]
@@ -1220,7 +1220,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[5],zmm0[6],zmm1[6]
@@ -1232,7 +1232,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask2(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2) {
; CHECK-LABEL: test_8xdouble_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[7],zmm1[7]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 8, i32 3, i32 11, i32 4, i32 12, i32 7, i32 15>
@@ -1240,7 +1240,7 @@ define <8 x double> @test_8xdouble_shuff_mask3(<8 x double> %vec1, <8 x double>
}
define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[7],zmm1[7]
@@ -1254,7 +1254,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[7],zmm1[7]
@@ -1266,7 +1266,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask3(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
; CHECK-LABEL: test_8xdouble_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[5],mem[5],zmm0[6],mem[7]
; CHECK-NEXT: retq
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -1275,7 +1275,7 @@ define <8 x double> @test_8xdouble_shuff_mem_mask0(<8 x double> %vec1, <8 x doub
}
define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[5],mem[5],zmm0[6],mem[7]
@@ -1290,7 +1290,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[5],mem[5],zmm0[6],mem[7]
@@ -1304,7 +1304,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask0(<8 x double> %vec
define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[0],zmm0[3],mem[2],zmm0[4],mem[4],zmm0[7],mem[7]
@@ -1319,7 +1319,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[0],zmm0[3],mem[2],zmm0[4],mem[4],zmm0[7],mem[7]
@@ -1333,7 +1333,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask1(<8 x double> %vec
define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[2],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -1348,7 +1348,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[2],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -1362,7 +1362,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask2(<8 x double> %vec
define <8 x double> @test_8xdouble_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
; CHECK-LABEL: test_8xdouble_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[2],mem[3],zmm0[4],mem[5],zmm0[6],mem[6]
; CHECK-NEXT: retq
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -1371,7 +1371,7 @@ define <8 x double> @test_8xdouble_shuff_mem_mask3(<8 x double> %vec1, <8 x doub
}
define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[2],mem[3],zmm0[4],mem[5],zmm0[6],mem[6]
@@ -1386,7 +1386,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[2],mem[3],zmm0[4],mem[5],zmm0[6],mem[6]
diff --git a/test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll b/test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll
index 799bbc11bee..1896356dafa 100644
--- a/test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll
+++ b/test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll
@@ -5,7 +5,7 @@
define <8 x float> @test_8xfloat_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2) {
; CHECK-LABEL: test_8xfloat_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
@@ -13,7 +13,7 @@ define <8 x float> @test_8xfloat_shuff_mask0(<8 x float> %vec1, <8 x float> %vec
}
define <8 x float> @test_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -27,7 +27,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x floa
define <8 x float> @test_8xfloat_zero_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -39,7 +39,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask0(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -53,7 +53,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x floa
define <8 x float> @test_8xfloat_zero_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -65,7 +65,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask1(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7]
@@ -79,7 +79,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mask2(<8 x float> %vec1, <8 x floa
define <8 x float> @test_8xfloat_zero_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7]
@@ -91,7 +91,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask2(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2) {
; CHECK-LABEL: test_8xfloat_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
@@ -99,7 +99,7 @@ define <8 x float> @test_8xfloat_shuff_mask3(<8 x float> %vec1, <8 x float> %vec
}
define <8 x float> @test_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -113,7 +113,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x floa
define <8 x float> @test_8xfloat_zero_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -125,7 +125,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mask3(<8 x float> %vec1, <8 x
}
define <8 x float> @test_8xfloat_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) {
; CHECK-LABEL: test_8xfloat_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; CHECK-NEXT: retq
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -134,7 +134,7 @@ define <8 x float> @test_8xfloat_shuff_mem_mask0(<8 x float> %vec1, <8 x float>*
}
define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7]
@@ -149,7 +149,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7]
@@ -163,7 +163,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask0(<8 x float> %vec1,
define <8 x float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7]
@@ -178,7 +178,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7]
@@ -192,7 +192,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask1(<8 x float> %vec1,
define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -207,7 +207,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -221,7 +221,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask2(<8 x float> %vec1,
define <8 x float> @test_8xfloat_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) {
; CHECK-LABEL: test_8xfloat_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1]
; CHECK-NEXT: retq
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -230,7 +230,7 @@ define <8 x float> @test_8xfloat_shuff_mem_mask3(<8 x float> %vec1, <8 x float>*
}
define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -245,7 +245,7 @@ define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -259,7 +259,7 @@ define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask3(<8 x float> %vec1,
define <16 x float> @test_16xfloat_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2) {
; CHECK-LABEL: test_16xfloat_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,0,1],zmm1[2,3,6,7]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31>
@@ -267,7 +267,7 @@ define <16 x float> @test_16xfloat_shuff_mask0(<16 x float> %vec1, <16 x float>
}
define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[12,13,14,15,0,1,2,3],zmm1[4,5,6,7,12,13,14,15]
@@ -281,7 +281,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,0,1,2,3],zmm1[4,5,6,7,12,13,14,15]
@@ -293,7 +293,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask0(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[0,1,2,3,8,9,10,11],zmm1[0,1,2,3,12,13,14,15]
@@ -307,7 +307,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,8,9,10,11],zmm1[0,1,2,3,12,13,14,15]
@@ -319,7 +319,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask1(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[12,13,14,15,4,5,6,7],zmm1[0,1,2,3,4,5,6,7]
@@ -333,7 +333,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,4,5,6,7],zmm1[0,1,2,3,4,5,6,7]
@@ -345,7 +345,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask2(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2) {
; CHECK-LABEL: test_16xfloat_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,6,7],zmm1[0,1,4,5]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27>
@@ -353,7 +353,7 @@ define <16 x float> @test_16xfloat_shuff_mask3(<16 x float> %vec1, <16 x float>
}
define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,12,13,14,15],zmm1[0,1,2,3,8,9,10,11]
@@ -367,7 +367,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x
define <16 x float> @test_16xfloat_zero_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,12,13,14,15],zmm1[0,1,2,3,8,9,10,11]
@@ -379,7 +379,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mask3(<16 x float> %vec1, <
}
define <16 x float> @test_16xfloat_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) {
; CHECK-LABEL: test_16xfloat_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,4,5],mem[4,5,2,3]
; CHECK-NEXT: retq
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -388,7 +388,7 @@ define <16 x float> @test_16xfloat_shuff_mem_mask0(<16 x float> %vec1, <16 x flo
}
define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[12,13,14,15,8,9,10,11],mem[8,9,10,11,4,5,6,7]
@@ -403,7 +403,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,8,9,10,11],mem[8,9,10,11,4,5,6,7]
@@ -417,7 +417,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask0(<16 x float> %vec
define <16 x float> @test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,4,5,6,7]
@@ -432,7 +432,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,4,5,6,7]
@@ -446,7 +446,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask1(<16 x float> %vec
define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,0,1,2,3],mem[8,9,10,11,8,9,10,11]
@@ -461,7 +461,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,0,1,2,3],mem[8,9,10,11,8,9,10,11]
@@ -475,7 +475,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask2(<16 x float> %vec
define <16 x float> @test_16xfloat_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
; CHECK-LABEL: test_16xfloat_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[6,7,6,7]
; CHECK-NEXT: retq
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -484,7 +484,7 @@ define <16 x float> @test_16xfloat_shuff_mem_mask3(<16 x float> %vec1, <16 x flo
}
define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,0,1,2,3],mem[12,13,14,15,12,13,14,15]
@@ -499,7 +499,7 @@ define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <1
define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,0,1,2,3],mem[12,13,14,15,12,13,14,15]
@@ -513,7 +513,7 @@ define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask3(<16 x float> %vec
define <4 x double> @test_4xdouble_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2) {
; CHECK-LABEL: test_4xdouble_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -521,7 +521,7 @@ define <4 x double> @test_4xdouble_shuff_mask0(<4 x double> %vec1, <4 x double>
}
define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1]
@@ -535,7 +535,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1]
@@ -547,7 +547,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask0(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1]
@@ -561,7 +561,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1]
@@ -573,7 +573,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask1(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3]
@@ -587,7 +587,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3]
@@ -599,7 +599,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask2(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2) {
; CHECK-LABEL: test_4xdouble_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
@@ -607,7 +607,7 @@ define <4 x double> @test_4xdouble_shuff_mask3(<4 x double> %vec1, <4 x double>
}
define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3]
@@ -621,7 +621,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x d
define <4 x double> @test_4xdouble_zero_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3]
@@ -633,7 +633,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mask3(<4 x double> %vec1, <
}
define <4 x double> @test_4xdouble_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
; CHECK-LABEL: test_4xdouble_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; CHECK-NEXT: retq
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -642,7 +642,7 @@ define <4 x double> @test_4xdouble_shuff_mem_mask0(<4 x double> %vec1, <4 x doub
}
define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3]
@@ -657,7 +657,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3]
@@ -671,7 +671,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask0(<4 x double> %vec
define <4 x double> @test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1]
@@ -686,7 +686,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1]
@@ -700,7 +700,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask1(<4 x double> %vec
define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1]
@@ -715,7 +715,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1]
@@ -729,7 +729,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask2(<4 x double> %vec
define <4 x double> @test_4xdouble_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
; CHECK-LABEL: test_4xdouble_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; CHECK-NEXT: retq
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -738,7 +738,7 @@ define <4 x double> @test_4xdouble_shuff_mem_mask3(<4 x double> %vec1, <4 x doub
}
define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3]
@@ -753,7 +753,7 @@ define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4
define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3]
@@ -767,7 +767,7 @@ define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask3(<4 x double> %vec
define <8 x double> @test_8xdouble_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2) {
; CHECK-LABEL: test_8xdouble_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,2,3],zmm1[6,7,0,1]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 6, i32 7, i32 2, i32 3, i32 14, i32 15, i32 8, i32 9>
@@ -775,7 +775,7 @@ define <8 x double> @test_8xdouble_shuff_mask0(<8 x double> %vec1, <8 x double>
}
define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,2,3],zmm1[6,7,0,1]
@@ -789,7 +789,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,2,3],zmm1[6,7,0,1]
@@ -801,7 +801,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask0(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[0,1,4,5],zmm1[0,1,4,5]
@@ -815,7 +815,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,4,5],zmm1[0,1,4,5]
@@ -827,7 +827,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask1(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,4,5],zmm1[4,5,0,1]
@@ -841,7 +841,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,4,5],zmm1[4,5,0,1]
@@ -853,7 +853,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask2(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2) {
; CHECK-LABEL: test_8xdouble_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,4,5],zmm1[4,5,2,3]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 4, i32 5, i32 4, i32 5, i32 12, i32 13, i32 10, i32 11>
@@ -861,7 +861,7 @@ define <8 x double> @test_8xdouble_shuff_mask3(<8 x double> %vec1, <8 x double>
}
define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,4,5],zmm1[4,5,2,3]
@@ -875,7 +875,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x d
define <8 x double> @test_8xdouble_zero_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,4,5],zmm1[4,5,2,3]
@@ -887,7 +887,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mask3(<8 x double> %vec1, <
}
define <8 x double> @test_8xdouble_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
; CHECK-LABEL: test_8xdouble_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,0,1],mem[0,1,0,1]
; CHECK-NEXT: retq
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -896,7 +896,7 @@ define <8 x double> @test_8xdouble_shuff_mem_mask0(<8 x double> %vec1, <8 x doub
}
define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[6,7,0,1],mem[0,1,0,1]
@@ -911,7 +911,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,0,1],mem[0,1,0,1]
@@ -925,7 +925,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask0(<8 x double> %vec
define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[6,7,6,7],mem[0,1,2,3]
@@ -940,7 +940,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,6,7],mem[0,1,2,3]
@@ -954,7 +954,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask1(<8 x double> %vec
define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3],mem[0,1,4,5]
@@ -969,7 +969,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],mem[0,1,4,5]
@@ -983,7 +983,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask2(<8 x double> %vec
define <8 x double> @test_8xdouble_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
; CHECK-LABEL: test_8xdouble_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[4,5,0,1]
; CHECK-NEXT: retq
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -992,7 +992,7 @@ define <8 x double> @test_8xdouble_shuff_mem_mask3(<8 x double> %vec1, <8 x doub
}
define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[4,5,0,1]
@@ -1007,7 +1007,7 @@ define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8
define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[4,5,0,1]
@@ -1021,7 +1021,7 @@ define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask3(<8 x double> %vec
define <8 x i32> @test_8xi32_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2) {
; CHECK-LABEL: test_8xi32_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: retq
%res = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
@@ -1029,7 +1029,7 @@ define <8 x i32> @test_8xi32_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2) {
}
define <8 x i32> @test_8xi32_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7]
@@ -1043,7 +1043,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2
define <8 x i32> @test_8xi32_zero_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7]
@@ -1055,7 +1055,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32>
}
define <8 x i32> @test_8xi32_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -1069,7 +1069,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2
define <8 x i32> @test_8xi32_zero_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -1081,7 +1081,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32>
}
define <8 x i32> @test_8xi32_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[4,5,6,7]
@@ -1095,7 +1095,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2
define <8 x i32> @test_8xi32_zero_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7]
@@ -1107,7 +1107,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32>
}
define <8 x i32> @test_8xi32_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2) {
; CHECK-LABEL: test_8xi32_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; CHECK-NEXT: retq
%res = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
@@ -1115,7 +1115,7 @@ define <8 x i32> @test_8xi32_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2) {
}
define <8 x i32> @test_8xi32_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm2 {%k1} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -1129,7 +1129,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2
define <8 x i32> @test_8xi32_zero_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[0,1,2,3]
@@ -1141,7 +1141,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32>
}
define <8 x i32> @test_8xi32_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p) {
; CHECK-LABEL: test_8xi32_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; CHECK-NEXT: retq
%vec2 = load <8 x i32>, <8 x i32>* %vec2p
@@ -1150,7 +1150,7 @@ define <8 x i32> @test_8xi32_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p)
}
define <8 x i32> @test_8xi32_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[4,5,6,7]
@@ -1165,7 +1165,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>*
define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[4,5,6,7]
@@ -1179,7 +1179,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i
define <8 x i32> @test_8xi32_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -1194,7 +1194,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>*
define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -1208,7 +1208,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i
define <8 x i32> @test_8xi32_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -1223,7 +1223,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>*
define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -1237,7 +1237,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i
define <8 x i32> @test_8xi32_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p) {
; CHECK-LABEL: test_8xi32_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1]
; CHECK-NEXT: retq
%vec2 = load <8 x i32>, <8 x i32>* %vec2p
@@ -1246,7 +1246,7 @@ define <8 x i32> @test_8xi32_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p)
}
define <8 x i32> @test_8xi32_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm1 {%k1} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -1261,7 +1261,7 @@ define <8 x i32> @test_8xi32_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>*
define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %mask) {
; CHECK-LABEL: test_8xi32_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3]
@@ -1275,7 +1275,7 @@ define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i
define <16 x i32> @test_16xi32_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2) {
; CHECK-LABEL: test_16xi32_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm1[2,3,6,7]
; CHECK-NEXT: retq
%res = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31>
@@ -1283,7 +1283,7 @@ define <16 x i32> @test_16xi32_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2) {
}
define <16 x i32> @test_16xi32_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,4,5,6,7],zmm1[4,5,6,7,12,13,14,15]
@@ -1297,7 +1297,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %
define <16 x i32> @test_16xi32_zero_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],zmm1[4,5,6,7,12,13,14,15]
@@ -1309,7 +1309,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mask0(<16 x i32> %vec1, <16 x i
}
define <16 x i32> @test_16xi32_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,8,9,10,11],zmm1[8,9,10,11,4,5,6,7]
@@ -1323,7 +1323,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %
define <16 x i32> @test_16xi32_zero_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,8,9,10,11],zmm1[8,9,10,11,4,5,6,7]
@@ -1335,7 +1335,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mask1(<16 x i32> %vec1, <16 x i
}
define <16 x i32> @test_16xi32_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,8,9,10,11],zmm1[0,1,2,3,0,1,2,3]
@@ -1349,7 +1349,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %
define <16 x i32> @test_16xi32_zero_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,8,9,10,11],zmm1[0,1,2,3,0,1,2,3]
@@ -1361,7 +1361,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mask2(<16 x i32> %vec1, <16 x i
}
define <16 x i32> @test_16xi32_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2) {
; CHECK-LABEL: test_16xi32_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],zmm1[4,5,2,3]
; CHECK-NEXT: retq
%res = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 24, i32 25, i32 26, i32 27, i32 20, i32 21, i32 22, i32 23>
@@ -1369,7 +1369,7 @@ define <16 x i32> @test_16xi32_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2) {
}
define <16 x i32> @test_16xi32_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,0,1,2,3],zmm1[8,9,10,11,4,5,6,7]
@@ -1383,7 +1383,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %
define <16 x i32> @test_16xi32_zero_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,0,1,2,3],zmm1[8,9,10,11,4,5,6,7]
@@ -1395,7 +1395,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mask3(<16 x i32> %vec1, <16 x i
}
define <16 x i32> @test_16xi32_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p) {
; CHECK-LABEL: test_16xi32_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,2,3],mem[4,5,0,1]
; CHECK-NEXT: retq
%vec2 = load <16 x i32>, <16 x i32>* %vec2p
@@ -1404,7 +1404,7 @@ define <16 x i32> @test_16xi32_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %ve
}
define <16 x i32> @test_16xi32_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,0,1,2,3]
@@ -1419,7 +1419,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i3
define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,0,1,2,3]
@@ -1433,7 +1433,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask0(<16 x i32> %vec1, <16
define <16 x i32> @test_16xi32_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,4,5,6,7],mem[0,1,2,3,8,9,10,11]
@@ -1448,7 +1448,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i3
define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],mem[0,1,2,3,8,9,10,11]
@@ -1462,7 +1462,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask1(<16 x i32> %vec1, <16
define <16 x i32> @test_16xi32_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,8,9,10,11],mem[12,13,14,15,12,13,14,15]
@@ -1477,7 +1477,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i3
define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,8,9,10,11],mem[12,13,14,15,12,13,14,15]
@@ -1491,7 +1491,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask2(<16 x i32> %vec1, <16
define <16 x i32> @test_16xi32_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p) {
; CHECK-LABEL: test_16xi32_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],mem[2,3,6,7]
; CHECK-NEXT: retq
%vec2 = load <16 x i32>, <16 x i32>* %vec2p
@@ -1500,7 +1500,7 @@ define <16 x i32> @test_16xi32_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %ve
}
define <16 x i32> @test_16xi32_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,4,5,6,7],mem[4,5,6,7,12,13,14,15]
@@ -1515,7 +1515,7 @@ define <16 x i32> @test_16xi32_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i3
define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %mask) {
; CHECK-LABEL: test_16xi32_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],mem[4,5,6,7,12,13,14,15]
@@ -1529,7 +1529,7 @@ define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask3(<16 x i32> %vec1, <16
define <4 x i64> @test_4xi64_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2) {
; CHECK-LABEL: test_4xi64_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; CHECK-NEXT: retq
%res = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -1537,7 +1537,7 @@ define <4 x i64> @test_4xi64_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2) {
}
define <4 x i64> @test_4xi64_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1]
@@ -1551,7 +1551,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2
define <4 x i64> @test_4xi64_zero_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1]
@@ -1563,7 +1563,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64>
}
define <4 x i64> @test_4xi64_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3]
@@ -1577,7 +1577,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2
define <4 x i64> @test_4xi64_zero_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3]
@@ -1589,7 +1589,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64>
}
define <4 x i64> @test_4xi64_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[0,1]
@@ -1603,7 +1603,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2
define <4 x i64> @test_4xi64_zero_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[0,1]
@@ -1615,7 +1615,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64>
}
define <4 x i64> @test_4xi64_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2) {
; CHECK-LABEL: test_4xi64_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: retq
%res = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
@@ -1623,7 +1623,7 @@ define <4 x i64> @test_4xi64_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2) {
}
define <4 x i64> @test_4xi64_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %ymm4, %ymm3, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm2 {%k1} = ymm0[2,3],ymm1[2,3]
@@ -1637,7 +1637,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2
define <4 x i64> @test_4xi64_zero_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3]
@@ -1649,7 +1649,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64>
}
define <4 x i64> @test_4xi64_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p) {
; CHECK-LABEL: test_4xi64_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; CHECK-NEXT: retq
%vec2 = load <4 x i64>, <4 x i64>* %vec2p
@@ -1658,7 +1658,7 @@ define <4 x i64> @test_4xi64_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p)
}
define <4 x i64> @test_4xi64_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3]
@@ -1673,7 +1673,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>*
define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3]
@@ -1687,7 +1687,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i
define <4 x i64> @test_4xi64_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1]
@@ -1702,7 +1702,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>*
define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1]
@@ -1716,7 +1716,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i
define <4 x i64> @test_4xi64_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[0,1]
@@ -1731,7 +1731,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>*
define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[0,1]
@@ -1745,7 +1745,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i
define <4 x i64> @test_4xi64_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p) {
; CHECK-LABEL: test_4xi64_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; CHECK-NEXT: retq
%vec2 = load <4 x i64>, <4 x i64>* %vec2p
@@ -1754,7 +1754,7 @@ define <4 x i64> @test_4xi64_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p)
}
define <4 x i64> @test_4xi64_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %ymm3, %ymm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm1 {%k1} = ymm0[2,3],mem[2,3]
@@ -1769,7 +1769,7 @@ define <4 x i64> @test_4xi64_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>*
define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %mask) {
; CHECK-LABEL: test_4xi64_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %ymm2, %ymm1, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],mem[2,3]
@@ -1783,7 +1783,7 @@ define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i
define <8 x i64> @test_8xi64_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2) {
; CHECK-LABEL: test_8xi64_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,4,5],zmm1[4,5,4,5]
; CHECK-NEXT: retq
%res = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> <i32 4, i32 5, i32 4, i32 5, i32 12, i32 13, i32 12, i32 13>
@@ -1791,7 +1791,7 @@ define <8 x i64> @test_8xi64_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2) {
}
define <8 x i64> @test_8xi64_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,4,5],zmm1[4,5,4,5]
@@ -1805,7 +1805,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2
define <8 x i64> @test_8xi64_zero_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_zero_masked_shuff_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,4,5],zmm1[4,5,4,5]
@@ -1817,7 +1817,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64>
}
define <8 x i64> @test_8xi64_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,4,5],zmm1[2,3,4,5]
@@ -1831,7 +1831,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2
define <8 x i64> @test_8xi64_zero_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_zero_masked_shuff_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,4,5],zmm1[2,3,4,5]
@@ -1843,7 +1843,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64>
}
define <8 x i64> @test_8xi64_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[0,1,4,5],zmm1[0,1,0,1]
@@ -1857,7 +1857,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2
define <8 x i64> @test_8xi64_zero_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_zero_masked_shuff_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,4,5],zmm1[0,1,0,1]
@@ -1869,7 +1869,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64>
}
define <8 x i64> @test_8xi64_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2) {
; CHECK-LABEL: test_8xi64_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,6,7],zmm1[4,5,2,3]
; CHECK-NEXT: retq
%res = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> <i32 2, i32 3, i32 6, i32 7, i32 12, i32 13, i32 10, i32 11>
@@ -1877,7 +1877,7 @@ define <8 x i64> @test_8xi64_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2) {
}
define <8 x i64> @test_8xi64_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpeqq %zmm4, %zmm3, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[2,3,6,7],zmm1[4,5,2,3]
@@ -1891,7 +1891,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2
define <8 x i64> @test_8xi64_zero_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_zero_masked_shuff_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,6,7],zmm1[4,5,2,3]
@@ -1903,7 +1903,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64>
}
define <8 x i64> @test_8xi64_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p) {
; CHECK-LABEL: test_8xi64_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],mem[4,5,2,3]
; CHECK-NEXT: retq
%vec2 = load <8 x i64>, <8 x i64>* %vec2p
@@ -1912,7 +1912,7 @@ define <8 x i64> @test_8xi64_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p)
}
define <8 x i64> @test_8xi64_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,2,3],mem[4,5,2,3]
@@ -1927,7 +1927,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>*
define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_zero_masked_shuff_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,2,3],mem[4,5,2,3]
@@ -1941,7 +1941,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i
define <8 x i64> @test_8xi64_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[0,1,0,1]
@@ -1956,7 +1956,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>*
define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_zero_masked_shuff_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[0,1,0,1]
@@ -1970,7 +1970,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i
define <8 x i64> @test_8xi64_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[4,5,0,1],mem[2,3,2,3]
@@ -1985,7 +1985,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>*
define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_zero_masked_shuff_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,0,1],mem[2,3,2,3]
@@ -1999,7 +1999,7 @@ define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i
define <8 x i64> @test_8xi64_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p) {
; CHECK-LABEL: test_8xi64_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[6,7,2,3]
; CHECK-NEXT: retq
%vec2 = load <8 x i64>, <8 x i64>* %vec2p
@@ -2008,7 +2008,7 @@ define <8 x i64> @test_8xi64_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p)
}
define <8 x i64> @test_8xi64_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[6,7,2,3]
@@ -2023,7 +2023,7 @@ define <8 x i64> @test_8xi64_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>*
define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %mask) {
; CHECK-LABEL: test_8xi64_zero_masked_shuff_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[6,7,2,3]
diff --git a/test/CodeGen/X86/avx512-shuffles/shuffle.ll b/test/CodeGen/X86/avx512-shuffles/shuffle.ll
index 8a074b76739..df46487d9ab 100644
--- a/test/CodeGen/X86/avx512-shuffles/shuffle.ll
+++ b/test/CodeGen/X86/avx512-shuffles/shuffle.ll
@@ -3,7 +3,7 @@
define <16 x i8> @test_16xi8_perm_mask0(<16 x i8> %vec) {
; CHECK-LABEL: test_16xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14]
; CHECK-NEXT: retq
%res = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> <i32 8, i32 6, i32 12, i32 4, i32 7, i32 9, i32 14, i32 8, i32 4, i32 12, i32 9, i32 4, i32 14, i32 15, i32 12, i32 14>
@@ -11,7 +11,7 @@ define <16 x i8> @test_16xi8_perm_mask0(<16 x i8> %vec) {
}
define <16 x i8> @test_masked_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %vec2, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_16xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14]
@@ -25,7 +25,7 @@ define <16 x i8> @test_masked_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %vec2,
define <16 x i8> @test_masked_z_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_16xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14]
@@ -37,7 +37,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %mask
}
define <16 x i8> @test_masked_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %vec2, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_16xi8_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0]
@@ -51,7 +51,7 @@ define <16 x i8> @test_masked_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %vec2,
define <16 x i8> @test_masked_z_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_16xi8_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0]
@@ -63,7 +63,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %mask
}
define <16 x i8> @test_masked_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %vec2, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_16xi8_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7]
@@ -77,7 +77,7 @@ define <16 x i8> @test_masked_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %vec2,
define <16 x i8> @test_masked_z_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_16xi8_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7]
@@ -89,7 +89,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %mask
}
define <16 x i8> @test_16xi8_perm_mask3(<16 x i8> %vec) {
; CHECK-LABEL: test_16xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6]
; CHECK-NEXT: retq
%res = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> <i32 1, i32 5, i32 8, i32 14, i32 1, i32 8, i32 11, i32 8, i32 13, i32 8, i32 15, i32 9, i32 9, i32 7, i32 9, i32 6>
@@ -97,7 +97,7 @@ define <16 x i8> @test_16xi8_perm_mask3(<16 x i8> %vec) {
}
define <16 x i8> @test_masked_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %vec2, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_16xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6]
@@ -111,7 +111,7 @@ define <16 x i8> @test_masked_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %vec2,
define <16 x i8> @test_masked_z_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_16xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6]
@@ -123,7 +123,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %mask
}
define <16 x i8> @test_16xi8_perm_mem_mask0(<16 x i8>* %vp) {
; CHECK-LABEL: test_16xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm0
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13]
; CHECK-NEXT: retq
@@ -133,7 +133,7 @@ define <16 x i8> @test_16xi8_perm_mem_mask0(<16 x i8>* %vp) {
}
define <16 x i8> @test_masked_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %vec2, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_16xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm3, %xmm1, %k1
@@ -148,7 +148,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %ve
define <16 x i8> @test_masked_z_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_16xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm0, %k1
@@ -163,7 +163,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %
define <16 x i8> @test_masked_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %vec2, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_16xi8_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm3, %xmm1, %k1
@@ -178,7 +178,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %ve
define <16 x i8> @test_masked_z_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_16xi8_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm0, %k1
@@ -193,7 +193,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %
define <16 x i8> @test_masked_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %vec2, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_16xi8_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm3, %xmm1, %k1
@@ -208,7 +208,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %ve
define <16 x i8> @test_masked_z_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_16xi8_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm0, %k1
@@ -223,7 +223,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %
define <16 x i8> @test_16xi8_perm_mem_mask3(<16 x i8>* %vp) {
; CHECK-LABEL: test_16xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm0
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4]
; CHECK-NEXT: retq
@@ -233,7 +233,7 @@ define <16 x i8> @test_16xi8_perm_mem_mask3(<16 x i8>* %vp) {
}
define <16 x i8> @test_masked_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %vec2, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_16xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %xmm3, %xmm1, %k1
@@ -248,7 +248,7 @@ define <16 x i8> @test_masked_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %ve
define <16 x i8> @test_masked_z_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %mask) {
; CHECK-LABEL: test_masked_z_16xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm0, %k1
@@ -263,7 +263,7 @@ define <16 x i8> @test_masked_z_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> %
define <32 x i8> @test_32xi8_perm_mask0(<32 x i8> %vec) {
; CHECK-LABEL: test_32xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21]
; CHECK-NEXT: retq
%res = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> <i32 8, i32 0, i32 1, i32 15, i32 3, i32 5, i32 11, i32 13, i32 14, i32 2, i32 10, i32 15, i32 0, i32 10, i32 13, i32 5, i32 20, i32 25, i32 23, i32 18, i32 23, i32 22, i32 25, i32 24, i32 20, i32 21, i32 29, i32 20, i32 24, i32 16, i32 27, i32 21>
@@ -271,7 +271,7 @@ define <32 x i8> @test_32xi8_perm_mask0(<32 x i8> %vec) {
}
define <32 x i8> @test_masked_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %vec2, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_32xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21]
@@ -285,7 +285,7 @@ define <32 x i8> @test_masked_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %vec2,
define <32 x i8> @test_masked_z_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_32xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21]
@@ -297,7 +297,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %mask
}
define <32 x i8> @test_masked_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %vec2, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_32xi8_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[0,4,3,15,5,4,5,15,10,9,11,6,6,10,0,3,21,19,26,22,30,25,22,22,27,22,26,16,23,20,18,24]
@@ -311,7 +311,7 @@ define <32 x i8> @test_masked_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %vec2,
define <32 x i8> @test_masked_z_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_32xi8_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[0,4,3,15,5,4,5,15,10,9,11,6,6,10,0,3,21,19,26,22,30,25,22,22,27,22,26,16,23,20,18,24]
@@ -323,7 +323,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %mask
}
define <32 x i8> @test_masked_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %vec2, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_32xi8_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[7,8,12,14,7,4,7,12,14,12,3,15,10,1,11,15,22,26,21,19,27,16,29,24,17,17,26,29,20,31,17,29]
@@ -337,7 +337,7 @@ define <32 x i8> @test_masked_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %vec2,
define <32 x i8> @test_masked_z_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_32xi8_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[7,8,12,14,7,4,7,12,14,12,3,15,10,1,11,15,22,26,21,19,27,16,29,24,17,17,26,29,20,31,17,29]
@@ -349,7 +349,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %mask
}
define <32 x i8> @test_32xi8_perm_mask3(<32 x i8> %vec) {
; CHECK-LABEL: test_32xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18]
; CHECK-NEXT: retq
%res = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> <i32 6, i32 1, i32 4, i32 7, i32 12, i32 13, i32 2, i32 8, i32 10, i32 5, i32 13, i32 4, i32 0, i32 0, i32 10, i32 8, i32 31, i32 31, i32 30, i32 16, i32 27, i32 27, i32 26, i32 27, i32 30, i32 26, i32 21, i32 24, i32 19, i32 25, i32 16, i32 18>
@@ -357,7 +357,7 @@ define <32 x i8> @test_32xi8_perm_mask3(<32 x i8> %vec) {
}
define <32 x i8> @test_masked_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %vec2, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_32xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18]
@@ -371,7 +371,7 @@ define <32 x i8> @test_masked_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %vec2,
define <32 x i8> @test_masked_z_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_32xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18]
@@ -383,7 +383,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %mask
}
define <32 x i8> @test_32xi8_perm_mem_mask0(<32 x i8>* %vp) {
; CHECK-LABEL: test_32xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm0
; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[9,0,2,15,4,6,8,4,7,3,0,2,8,1,6,5,22,17,30,23,29,31,21,23,27,22,20,27,30,30,26,22]
; CHECK-NEXT: retq
@@ -393,7 +393,7 @@ define <32 x i8> @test_32xi8_perm_mem_mask0(<32 x i8>* %vp) {
}
define <32 x i8> @test_masked_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %vec2, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_32xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %ymm3, %ymm1, %k1
@@ -408,7 +408,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %ve
define <32 x i8> @test_masked_z_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_32xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm0, %k1
@@ -423,7 +423,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %
define <32 x i8> @test_masked_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %vec2, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_32xi8_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %ymm3, %ymm1, %k1
@@ -438,7 +438,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %ve
define <32 x i8> @test_masked_z_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_32xi8_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm0, %k1
@@ -453,7 +453,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %
define <32 x i8> @test_masked_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %vec2, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_32xi8_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %ymm3, %ymm1, %k1
@@ -468,7 +468,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %ve
define <32 x i8> @test_masked_z_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_32xi8_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm0, %k1
@@ -483,7 +483,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %
define <32 x i8> @test_32xi8_perm_mem_mask3(<32 x i8>* %vp) {
; CHECK-LABEL: test_32xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm0
; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,1,13,0,3,0,0,13,5,2,2,10,15,8,14,8,25,26,28,28,31,27,30,19,24,25,29,23,28,22,25,29]
; CHECK-NEXT: retq
@@ -493,7 +493,7 @@ define <32 x i8> @test_32xi8_perm_mem_mask3(<32 x i8>* %vp) {
}
define <32 x i8> @test_masked_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %vec2, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_32xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %ymm3, %ymm1, %k1
@@ -508,7 +508,7 @@ define <32 x i8> @test_masked_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %ve
define <32 x i8> @test_masked_z_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %mask) {
; CHECK-LABEL: test_masked_z_32xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %ymm2, %ymm0, %k1
@@ -523,7 +523,7 @@ define <32 x i8> @test_masked_z_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %
define <64 x i8> @test_64xi8_perm_mask0(<64 x i8> %vec) {
; CHECK-LABEL: test_64xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62]
; CHECK-NEXT: retq
%res = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> <i32 8, i32 4, i32 1, i32 13, i32 15, i32 4, i32 6, i32 12, i32 0, i32 10, i32 2, i32 4, i32 13, i32 0, i32 0, i32 6, i32 23, i32 29, i32 27, i32 26, i32 18, i32 31, i32 22, i32 25, i32 22, i32 16, i32 23, i32 18, i32 16, i32 25, i32 26, i32 17, i32 40, i32 37, i32 38, i32 44, i32 39, i32 46, i32 41, i32 39, i32 42, i32 37, i32 33, i32 42, i32 41, i32 44, i32 34, i32 46, i32 60, i32 62, i32 61, i32 58, i32 60, i32 56, i32 60, i32 51, i32 60, i32 55, i32 60, i32 55, i32 60, i32 49, i32 48, i32 62>
@@ -531,7 +531,7 @@ define <64 x i8> @test_64xi8_perm_mask0(<64 x i8> %vec) {
}
define <64 x i8> @test_masked_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %vec2, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_64xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62]
@@ -545,7 +545,7 @@ define <64 x i8> @test_masked_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %vec2,
define <64 x i8> @test_masked_z_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_64xi8_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62]
@@ -557,7 +557,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %mask
}
define <64 x i8> @test_masked_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %vec2, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_64xi8_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[7,14,15,10,9,3,1,13,14,12,11,6,4,1,6,9,30,30,22,17,28,27,16,23,26,16,30,31,27,17,17,21,32,37,32,47,45,33,46,35,35,42,47,33,32,37,32,41,61,50,49,53,63,50,63,53,55,52,62,63,58,50,63,49]
@@ -571,7 +571,7 @@ define <64 x i8> @test_masked_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %vec2,
define <64 x i8> @test_masked_z_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_64xi8_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[7,14,15,10,9,3,1,13,14,12,11,6,4,1,6,9,30,30,22,17,28,27,16,23,26,16,30,31,27,17,17,21,32,37,32,47,45,33,46,35,35,42,47,33,32,37,32,41,61,50,49,53,63,50,63,53,55,52,62,63,58,50,63,49]
@@ -583,7 +583,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %mask
}
define <64 x i8> @test_masked_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %vec2, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_64xi8_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[9,2,14,15,12,5,3,12,4,6,0,2,0,1,1,6,24,27,18,22,26,17,23,21,31,16,22,22,27,21,19,20,39,47,44,36,40,43,44,39,38,44,38,35,39,46,34,39,58,55,51,48,59,57,48,52,60,58,56,50,59,55,58,60]
@@ -597,7 +597,7 @@ define <64 x i8> @test_masked_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %vec2,
define <64 x i8> @test_masked_z_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_64xi8_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[9,2,14,15,12,5,3,12,4,6,0,2,0,1,1,6,24,27,18,22,26,17,23,21,31,16,22,22,27,21,19,20,39,47,44,36,40,43,44,39,38,44,38,35,39,46,34,39,58,55,51,48,59,57,48,52,60,58,56,50,59,55,58,60]
@@ -609,7 +609,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %mask
}
define <64 x i8> @test_64xi8_perm_mask3(<64 x i8> %vec) {
; CHECK-LABEL: test_64xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61]
; CHECK-NEXT: retq
%res = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> <i32 3, i32 12, i32 4, i32 15, i32 1, i32 14, i32 0, i32 4, i32 8, i32 9, i32 6, i32 1, i32 4, i32 4, i32 12, i32 14, i32 25, i32 16, i32 28, i32 20, i32 21, i32 24, i32 19, i32 30, i32 18, i32 22, i32 20, i32 24, i32 25, i32 26, i32 24, i32 22, i32 42, i32 38, i32 44, i32 44, i32 36, i32 37, i32 42, i32 34, i32 43, i32 38, i32 41, i32 34, i32 42, i32 37, i32 39, i32 38, i32 55, i32 59, i32 53, i32 58, i32 48, i32 52, i32 59, i32 48, i32 57, i32 48, i32 55, i32 62, i32 48, i32 56, i32 49, i32 61>
@@ -617,7 +617,7 @@ define <64 x i8> @test_64xi8_perm_mask3(<64 x i8> %vec) {
}
define <64 x i8> @test_masked_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %vec2, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_64xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61]
@@ -631,7 +631,7 @@ define <64 x i8> @test_masked_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %vec2,
define <64 x i8> @test_masked_z_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_64xi8_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61]
@@ -643,7 +643,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %mask
}
define <64 x i8> @test_64xi8_perm_mem_mask0(<64 x i8>* %vp) {
; CHECK-LABEL: test_64xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0,9,15,13,11,11,3,12,4,1,7,5,2,6,14,6,23,27,24,18,30,23,28,22,28,22,19,19,31,25,16,22,35,33,34,32,42,34,41,41,43,40,36,46,37,39,42,40,63,63,62,62,57,55,59,51,52,48,50,48,58,50,60,58]
; CHECK-NEXT: retq
@@ -653,7 +653,7 @@ define <64 x i8> @test_64xi8_perm_mem_mask0(<64 x i8>* %vp) {
}
define <64 x i8> @test_masked_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %vec2, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_64xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %zmm3, %zmm1, %k1
@@ -668,7 +668,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %ve
define <64 x i8> @test_masked_z_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_64xi8_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm0, %k1
@@ -683,7 +683,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %
define <64 x i8> @test_masked_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %vec2, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_64xi8_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %zmm3, %zmm1, %k1
@@ -698,7 +698,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %ve
define <64 x i8> @test_masked_z_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_64xi8_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm0, %k1
@@ -713,7 +713,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %
define <64 x i8> @test_masked_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %vec2, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_64xi8_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %zmm3, %zmm1, %k1
@@ -728,7 +728,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %ve
define <64 x i8> @test_masked_z_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_64xi8_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm0, %k1
@@ -743,7 +743,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %
define <64 x i8> @test_64xi8_perm_mem_mask3(<64 x i8>* %vp) {
; CHECK-LABEL: test_64xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[4,9,11,13,12,6,0,0,11,15,5,7,11,10,4,10,20,21,24,27,18,16,26,16,16,19,26,17,16,31,22,30,35,38,37,34,37,47,43,38,38,36,40,43,42,39,32,46,54,54,48,50,61,56,59,50,53,61,61,51,48,60,50,60]
; CHECK-NEXT: retq
@@ -753,7 +753,7 @@ define <64 x i8> @test_64xi8_perm_mem_mask3(<64 x i8>* %vp) {
}
define <64 x i8> @test_masked_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %vec2, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_64xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm2
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqb %zmm3, %zmm1, %k1
@@ -768,7 +768,7 @@ define <64 x i8> @test_masked_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %ve
define <64 x i8> @test_masked_z_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %mask) {
; CHECK-LABEL: test_masked_z_64xi8_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqb %zmm2, %zmm0, %k1
@@ -783,7 +783,7 @@ define <64 x i8> @test_masked_z_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %
define <8 x i16> @test_8xi16_perm_high_mask0(<8 x i16> %vec) {
; CHECK-LABEL: test_8xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,6]
; CHECK-NEXT: retq
%res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 7, i32 6>
@@ -791,7 +791,7 @@ define <8 x i16> @test_8xi16_perm_high_mask0(<8 x i16> %vec) {
}
define <8 x i16> @test_masked_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,6,5,7,6]
@@ -805,7 +805,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16> %v
define <8 x i16> @test_masked_z_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,6,5,7,6]
@@ -817,7 +817,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16>
}
define <8 x i16> @test_masked_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[0,3,0,0,4,5,6,7]
@@ -831,7 +831,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %ve
define <8 x i16> @test_masked_z_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,3,0,0,4,5,6,7]
@@ -843,7 +843,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %
}
define <8 x i16> @test_masked_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,5,4,4,5]
@@ -857,7 +857,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16> %v
define <8 x i16> @test_masked_z_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,5,4,4,5]
@@ -869,7 +869,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16>
}
define <8 x i16> @test_8xi16_perm_low_mask3(<8 x i16> %vec) {
; CHECK-LABEL: test_8xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,1,1,4,5,6,7]
; CHECK-NEXT: retq
%res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> <i32 2, i32 1, i32 1, i32 1, i32 4, i32 5, i32 6, i32 7>
@@ -877,7 +877,7 @@ define <8 x i16> @test_8xi16_perm_low_mask3(<8 x i16> %vec) {
}
define <8 x i16> @test_masked_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[2,1,1,1,4,5,6,7]
@@ -891,7 +891,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %ve
define <8 x i16> @test_masked_z_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1,1,1,4,5,6,7]
@@ -903,7 +903,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %
}
define <8 x i16> @test_masked_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,5,5,7,6]
@@ -917,7 +917,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16> %v
define <8 x i16> @test_masked_z_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,5,5,7,6]
@@ -929,7 +929,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16>
}
define <8 x i16> @test_masked_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_low_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[3,3,2,1,4,5,6,7]
@@ -943,7 +943,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %ve
define <8 x i16> @test_masked_z_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_low_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3,2,1,4,5,6,7]
@@ -955,7 +955,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %
}
define <8 x i16> @test_8xi16_perm_high_mask6(<8 x i16> %vec) {
; CHECK-LABEL: test_8xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,5]
; CHECK-NEXT: retq
%res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 6, i32 5>
@@ -963,7 +963,7 @@ define <8 x i16> @test_8xi16_perm_high_mask6(<8 x i16> %vec) {
}
define <8 x i16> @test_masked_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,6,5,6,5]
@@ -977,7 +977,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16> %v
define <8 x i16> @test_masked_z_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,6,5,6,5]
@@ -989,7 +989,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16>
}
define <8 x i16> @test_masked_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_low_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[1,0,2,0,4,5,6,7]
@@ -1003,7 +1003,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %ve
define <8 x i16> @test_masked_z_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_low_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,2,0,4,5,6,7]
@@ -1015,7 +1015,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %
}
define <8 x i16> @test_8xi16_perm_high_mem_mask0(<8 x i16>* %vp) {
; CHECK-LABEL: test_8xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,7,4,6]
; CHECK-NEXT: retq
%vec = load <8 x i16>, <8 x i16>* %vp
@@ -1024,7 +1024,7 @@ define <8 x i16> @test_8xi16_perm_high_mem_mask0(<8 x i16>* %vp) {
}
define <8 x i16> @test_masked_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,7,4,6]
@@ -1038,7 +1038,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i16
define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,7,4,6]
@@ -1052,7 +1052,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i
define <8 x i16> @test_masked_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[1,3,3,2,4,5,6,7]
@@ -1066,7 +1066,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i16>
define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[1,3,3,2,4,5,6,7]
@@ -1080,7 +1080,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i1
define <8 x i16> @test_masked_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,6,6,5,7]
@@ -1094,7 +1094,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i16
define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,6,6,5,7]
@@ -1108,7 +1108,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i
define <8 x i16> @test_8xi16_perm_low_mem_mask3(<8 x i16>* %vp) {
; CHECK-LABEL: test_8xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = mem[3,1,2,0,4,5,6,7]
; CHECK-NEXT: retq
%vec = load <8 x i16>, <8 x i16>* %vp
@@ -1117,7 +1117,7 @@ define <8 x i16> @test_8xi16_perm_low_mem_mask3(<8 x i16>* %vp) {
}
define <8 x i16> @test_masked_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[3,1,2,0,4,5,6,7]
@@ -1131,7 +1131,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i16>
define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[3,1,2,0,4,5,6,7]
@@ -1145,7 +1145,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i1
define <8 x i16> @test_masked_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,6,7,5]
@@ -1159,7 +1159,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i16
define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,6,7,5]
@@ -1173,7 +1173,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i
define <8 x i16> @test_masked_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_low_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[2,1,3,2,4,5,6,7]
@@ -1187,7 +1187,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i16>
define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_low_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[2,1,3,2,4,5,6,7]
@@ -1201,7 +1201,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i1
define <8 x i16> @test_8xi16_perm_high_mem_mask6(<8 x i16>* %vp) {
; CHECK-LABEL: test_8xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,4,4,4]
; CHECK-NEXT: retq
%vec = load <8 x i16>, <8 x i16>* %vp
@@ -1210,7 +1210,7 @@ define <8 x i16> @test_8xi16_perm_high_mem_mask6(<8 x i16>* %vp) {
}
define <8 x i16> @test_masked_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,4,4,4]
@@ -1224,7 +1224,7 @@ define <8 x i16> @test_masked_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i16
define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,4,4,4]
@@ -1238,7 +1238,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i
define <8 x i16> @test_masked_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i16> %vec2, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_8xi16_perm_low_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[0,3,3,1,4,5,6,7]
@@ -1252,7 +1252,7 @@ define <8 x i16> @test_masked_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i16>
define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_8xi16_perm_low_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[0,3,3,1,4,5,6,7]
@@ -1266,7 +1266,7 @@ define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i1
define <16 x i16> @test_16xi16_perm_high_mask0(<16 x i16> %vec) {
; CHECK-LABEL: test_16xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12]
; CHECK-NEXT: retq
%res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 6, i32 4, i32 8, i32 9, i32 10, i32 11, i32 12, i32 12, i32 14, i32 12>
@@ -1274,7 +1274,7 @@ define <16 x i16> @test_16xi16_perm_high_mask0(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12]
@@ -1288,7 +1288,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i16
define <16 x i16> @test_masked_z_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12]
@@ -1300,7 +1300,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i
}
define <16 x i16> @test_masked_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[0,2,3,2,4,5,6,7,8,10,11,10,12,13,14,15]
@@ -1314,7 +1314,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,2,3,2,4,5,6,7,8,10,11,10,12,13,14,15]
@@ -1326,7 +1326,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i1
}
define <16 x i16> @test_masked_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,7,5,5,5,8,9,10,11,15,13,13,13]
@@ -1340,7 +1340,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i16
define <16 x i16> @test_masked_z_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,7,5,5,5,8,9,10,11,15,13,13,13]
@@ -1352,7 +1352,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i
}
define <16 x i16> @test_16xi16_perm_low_mask3(<16 x i16> %vec) {
; CHECK-LABEL: test_16xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15]
; CHECK-NEXT: retq
%res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 11, i32 10, i32 12, i32 13, i32 14, i32 15>
@@ -1360,7 +1360,7 @@ define <16 x i16> @test_16xi16_perm_low_mask3(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15]
@@ -1374,7 +1374,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15]
@@ -1386,7 +1386,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i1
}
define <16 x i16> @test_masked_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15]
@@ -1400,7 +1400,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i16
define <16 x i16> @test_masked_z_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15]
@@ -1412,7 +1412,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i
}
define <16 x i16> @test_masked_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_low_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,0,4,5,6,7,11,11,11,8,12,13,14,15]
@@ -1426,7 +1426,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_low_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,0,4,5,6,7,11,11,11,8,12,13,14,15]
@@ -1438,7 +1438,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i1
}
define <16 x i16> @test_16xi16_perm_high_mask6(<16 x i16> %vec) {
; CHECK-LABEL: test_16xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13]
; CHECK-NEXT: retq
%res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 7, i32 6, i32 5, i32 8, i32 9, i32 10, i32 11, i32 14, i32 15, i32 14, i32 13>
@@ -1446,7 +1446,7 @@ define <16 x i16> @test_16xi16_perm_high_mask6(<16 x i16> %vec) {
}
define <16 x i16> @test_masked_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13]
@@ -1460,7 +1460,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i16
define <16 x i16> @test_masked_z_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13]
@@ -1472,7 +1472,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i
}
define <16 x i16> @test_masked_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_low_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,2,1,2,4,5,6,7,11,10,9,10,12,13,14,15]
@@ -1486,7 +1486,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i16>
define <16 x i16> @test_masked_z_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_low_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,1,2,4,5,6,7,11,10,9,10,12,13,14,15]
@@ -1498,7 +1498,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i1
}
define <16 x i16> @test_16xi16_perm_high_mem_mask0(<16 x i16>* %vp) {
; CHECK-LABEL: test_16xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15]
; CHECK-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %vp
@@ -1507,7 +1507,7 @@ define <16 x i16> @test_16xi16_perm_high_mem_mask0(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15]
@@ -1521,7 +1521,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15]
@@ -1535,7 +1535,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[1,3,3,0,4,5,6,7,9,11,11,8,12,13,14,15]
@@ -1549,7 +1549,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[1,3,3,0,4,5,6,7,9,11,11,8,12,13,14,15]
@@ -1563,7 +1563,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,5,6,5,6,8,9,10,11,13,14,13,14]
@@ -1577,7 +1577,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,5,6,5,6,8,9,10,11,13,14,13,14]
@@ -1591,7 +1591,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16
define <16 x i16> @test_16xi16_perm_low_mem_mask3(<16 x i16>* %vp) {
; CHECK-LABEL: test_16xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15]
; CHECK-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %vp
@@ -1600,7 +1600,7 @@ define <16 x i16> @test_16xi16_perm_low_mem_mask3(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15]
@@ -1614,7 +1614,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15]
@@ -1628,7 +1628,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,7,7,6,7,8,9,10,11,15,15,14,15]
@@ -1642,7 +1642,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,7,7,6,7,8,9,10,11,15,15,14,15]
@@ -1656,7 +1656,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_low_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[1,3,3,2,4,5,6,7,9,11,11,10,12,13,14,15]
@@ -1670,7 +1670,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_low_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[1,3,3,2,4,5,6,7,9,11,11,10,12,13,14,15]
@@ -1684,7 +1684,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16
define <16 x i16> @test_16xi16_perm_high_mem_mask6(<16 x i16>* %vp) {
; CHECK-LABEL: test_16xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13]
; CHECK-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %vp
@@ -1693,7 +1693,7 @@ define <16 x i16> @test_16xi16_perm_high_mem_mask6(<16 x i16>* %vp) {
}
define <16 x i16> @test_masked_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13]
@@ -1707,7 +1707,7 @@ define <16 x i16> @test_masked_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13]
@@ -1721,7 +1721,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16
define <16 x i16> @test_masked_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16 x i16> %vec2, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_16xi16_perm_low_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[3,1,3,2,4,5,6,7,11,9,11,10,12,13,14,15]
@@ -1735,7 +1735,7 @@ define <16 x i16> @test_masked_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16 x
define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_perm_low_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[3,1,3,2,4,5,6,7,11,9,11,10,12,13,14,15]
@@ -1749,7 +1749,7 @@ define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16
define <32 x i16> @test_32xi16_perm_high_mask0(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28]
; CHECK-NEXT: retq
%res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 4, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 12, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 20, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 28>
@@ -1757,7 +1757,7 @@ define <32 x i16> @test_32xi16_perm_high_mask0(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28]
@@ -1771,7 +1771,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i16
define <32 x i16> @test_masked_z_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28]
@@ -1783,7 +1783,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i
}
define <32 x i16> @test_masked_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[2,1,0,0,4,5,6,7,10,9,8,8,12,13,14,15,18,17,16,16,20,21,22,23,26,25,24,24,28,29,30,31]
@@ -1797,7 +1797,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,0,0,4,5,6,7,10,9,8,8,12,13,14,15,18,17,16,16,20,21,22,23,26,25,24,24,28,29,30,31]
@@ -1809,7 +1809,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i1
}
define <32 x i16> @test_masked_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,6,4,7,8,9,10,11,12,14,12,15,16,17,18,19,20,22,20,23,24,25,26,27,28,30,28,31]
@@ -1823,7 +1823,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i16
define <32 x i16> @test_masked_z_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,6,4,7,8,9,10,11,12,14,12,15,16,17,18,19,20,22,20,23,24,25,26,27,28,30,28,31]
@@ -1835,7 +1835,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i
}
define <32 x i16> @test_32xi16_perm_low_mask3(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31]
; CHECK-NEXT: retq
%res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> <i32 3, i32 3, i32 1, i32 3, i32 4, i32 5, i32 6, i32 7, i32 11, i32 11, i32 9, i32 11, i32 12, i32 13, i32 14, i32 15, i32 19, i32 19, i32 17, i32 19, i32 20, i32 21, i32 22, i32 23, i32 27, i32 27, i32 25, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -1843,7 +1843,7 @@ define <32 x i16> @test_32xi16_perm_low_mask3(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31]
@@ -1857,7 +1857,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31]
@@ -1869,7 +1869,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i1
}
define <32 x i16> @test_masked_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,7,7,5,6,8,9,10,11,15,15,13,14,16,17,18,19,23,23,21,22,24,25,26,27,31,31,29,30]
@@ -1883,7 +1883,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i16
define <32 x i16> @test_masked_z_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_high_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,7,7,5,6,8,9,10,11,15,15,13,14,16,17,18,19,23,23,21,22,24,25,26,27,31,31,29,30]
@@ -1895,7 +1895,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i
}
define <32 x i16> @test_masked_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_low_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[2,1,1,0,4,5,6,7,10,9,9,8,12,13,14,15,18,17,17,16,20,21,22,23,26,25,25,24,28,29,30,31]
@@ -1909,7 +1909,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_low_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,1,0,4,5,6,7,10,9,9,8,12,13,14,15,18,17,17,16,20,21,22,23,26,25,25,24,28,29,30,31]
@@ -1921,7 +1921,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i1
}
define <32 x i16> @test_32xi16_perm_high_mask6(<32 x i16> %vec) {
; CHECK-LABEL: test_32xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30]
; CHECK-NEXT: retq
%res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 5, i32 6, i32 8, i32 9, i32 10, i32 11, i32 12, i32 12, i32 13, i32 14, i32 16, i32 17, i32 18, i32 19, i32 20, i32 20, i32 21, i32 22, i32 24, i32 25, i32 26, i32 27, i32 28, i32 28, i32 29, i32 30>
@@ -1929,7 +1929,7 @@ define <32 x i16> @test_32xi16_perm_high_mask6(<32 x i16> %vec) {
}
define <32 x i16> @test_masked_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30]
@@ -1943,7 +1943,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i16
define <32 x i16> @test_masked_z_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_high_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30]
@@ -1955,7 +1955,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i
}
define <32 x i16> @test_masked_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_low_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,0,3,0,4,5,6,7,11,8,11,8,12,13,14,15,19,16,19,16,20,21,22,23,27,24,27,24,28,29,30,31]
@@ -1969,7 +1969,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i16>
define <32 x i16> @test_masked_z_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_low_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,3,0,4,5,6,7,11,8,11,8,12,13,14,15,19,16,19,16,20,21,22,23,27,24,27,24,28,29,30,31]
@@ -1981,7 +1981,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i1
}
define <32 x i16> @test_32xi16_perm_high_mem_mask0(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30]
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -1990,7 +1990,7 @@ define <32 x i16> @test_32xi16_perm_high_mem_mask0(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30]
@@ -2004,7 +2004,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30]
@@ -2018,7 +2018,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15,17,17,19,19,20,21,22,23,25,25,27,27,28,29,30,31]
@@ -2032,7 +2032,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15,17,17,19,19,20,21,22,23,25,25,27,27,28,29,30,31]
@@ -2046,7 +2046,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,7,6,4,8,9,10,11,12,15,14,12,16,17,18,19,20,23,22,20,24,25,26,27,28,31,30,28]
@@ -2060,7 +2060,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,7,6,4,8,9,10,11,12,15,14,12,16,17,18,19,20,23,22,20,24,25,26,27,28,31,30,28]
@@ -2074,7 +2074,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32
define <32 x i16> @test_32xi16_perm_low_mem_mask3(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31]
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -2083,7 +2083,7 @@ define <32 x i16> @test_32xi16_perm_low_mem_mask3(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31]
@@ -2097,7 +2097,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31]
@@ -2111,7 +2111,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,7,4,6,5,8,9,10,11,15,12,14,13,16,17,18,19,23,20,22,21,24,25,26,27,31,28,30,29]
@@ -2125,7 +2125,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_high_mem_mask4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,7,4,6,5,8,9,10,11,15,12,14,13,16,17,18,19,23,20,22,21,24,25,26,27,31,28,30,29]
@@ -2139,7 +2139,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_low_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufd {{.*#+}} zmm2 = mem[0,0,2,3,4,4,6,7,8,8,10,11,12,12,14,15]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
@@ -2154,7 +2154,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_low_mem_mask5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufd {{.*#+}} zmm1 = mem[0,0,2,3,4,4,6,7,8,8,10,11,12,12,14,15]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm0, %k1
@@ -2169,7 +2169,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32
define <32 x i16> @test_32xi16_perm_high_mem_mask6(<32 x i16>* %vp) {
; CHECK-LABEL: test_32xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30]
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -2178,7 +2178,7 @@ define <32 x i16> @test_32xi16_perm_high_mem_mask6(<32 x i16>* %vp) {
}
define <32 x i16> @test_masked_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30]
@@ -2192,7 +2192,7 @@ define <32 x i16> @test_masked_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_high_mem_mask6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30]
@@ -2206,7 +2206,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32
define <32 x i16> @test_masked_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32 x i16> %vec2, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_32xi16_perm_low_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[3,1,3,0,4,5,6,7,11,9,11,8,12,13,14,15,19,17,19,16,20,21,22,23,27,25,27,24,28,29,30,31]
@@ -2220,7 +2220,7 @@ define <32 x i16> @test_masked_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32 x
define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_perm_low_mem_mask7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[3,1,3,0,4,5,6,7,11,9,11,8,12,13,14,15,19,17,19,16,20,21,22,23,27,25,27,24,28,29,30,31]
@@ -2234,7 +2234,7 @@ define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32
define <4 x i32> @test_4xi32_perm_mask0(<4 x i32> %vec) {
; CHECK-LABEL: test_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,3,0]
; CHECK-NEXT: retq
%res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 3, i32 0>
@@ -2242,7 +2242,7 @@ define <4 x i32> @test_4xi32_perm_mask0(<4 x i32> %vec) {
}
define <4 x i32> @test_masked_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[2,3,3,0]
@@ -2256,7 +2256,7 @@ define <4 x i32> @test_masked_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %vec2,
define <4 x i32> @test_masked_z_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[2,3,3,0]
@@ -2268,7 +2268,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %mask
}
define <4 x i32> @test_masked_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,0,2,0]
@@ -2282,7 +2282,7 @@ define <4 x i32> @test_masked_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %vec2,
define <4 x i32> @test_masked_z_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,2,0]
@@ -2294,7 +2294,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %mask
}
define <4 x i32> @test_masked_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[3,0,1,0]
@@ -2308,7 +2308,7 @@ define <4 x i32> @test_masked_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %vec2,
define <4 x i32> @test_masked_z_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[3,0,1,0]
@@ -2320,7 +2320,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %mask
}
define <4 x i32> @test_4xi32_perm_mask3(<4 x i32> %vec) {
; CHECK-LABEL: test_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,0,3]
; CHECK-NEXT: retq
%res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 3>
@@ -2328,7 +2328,7 @@ define <4 x i32> @test_4xi32_perm_mask3(<4 x i32> %vec) {
}
define <4 x i32> @test_masked_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,1,0,3]
@@ -2342,7 +2342,7 @@ define <4 x i32> @test_masked_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %vec2,
define <4 x i32> @test_masked_z_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,0,3]
@@ -2354,7 +2354,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %mask
}
define <4 x i32> @test_4xi32_perm_mem_mask0(<4 x i32>* %vp) {
; CHECK-LABEL: test_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,3,3]
; CHECK-NEXT: retq
%vec = load <4 x i32>, <4 x i32>* %vp
@@ -2363,7 +2363,7 @@ define <4 x i32> @test_4xi32_perm_mem_mask0(<4 x i32>* %vp) {
}
define <4 x i32> @test_masked_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[0,1,3,3]
@@ -2377,7 +2377,7 @@ define <4 x i32> @test_masked_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %ve
define <4 x i32> @test_masked_z_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[0,1,3,3]
@@ -2391,7 +2391,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %
define <4 x i32> @test_masked_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[2,2,3,1]
@@ -2405,7 +2405,7 @@ define <4 x i32> @test_masked_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %ve
define <4 x i32> @test_masked_z_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[2,2,3,1]
@@ -2419,7 +2419,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %
define <4 x i32> @test_masked_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[0,3,0,1]
@@ -2433,7 +2433,7 @@ define <4 x i32> @test_masked_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %ve
define <4 x i32> @test_masked_z_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[0,3,0,1]
@@ -2447,7 +2447,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %
define <4 x i32> @test_4xi32_perm_mem_mask3(<4 x i32>* %vp) {
; CHECK-LABEL: test_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,0,1,0]
; CHECK-NEXT: retq
%vec = load <4 x i32>, <4 x i32>* %vp
@@ -2456,7 +2456,7 @@ define <4 x i32> @test_4xi32_perm_mem_mask3(<4 x i32>* %vp) {
}
define <4 x i32> @test_masked_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = mem[1,0,1,0]
@@ -2470,7 +2470,7 @@ define <4 x i32> @test_masked_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %ve
define <4 x i32> @test_masked_z_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_4xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[1,0,1,0]
@@ -2484,7 +2484,7 @@ define <4 x i32> @test_masked_z_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %
define <8 x i32> @test_8xi32_perm_mask0(<8 x i32> %vec) {
; CHECK-LABEL: test_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,1,0,6,7,5,4]
; CHECK-NEXT: retq
%res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 1, i32 0, i32 6, i32 7, i32 5, i32 4>
@@ -2492,7 +2492,7 @@ define <8 x i32> @test_8xi32_perm_mask0(<8 x i32> %vec) {
}
define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[2,3,1,0,6,7,5,4]
@@ -2506,7 +2506,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,1,0,6,7,5,4]
@@ -2518,7 +2518,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[0,3,3,3,4,7,7,7]
@@ -2532,7 +2532,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,3,3,4,7,7,7]
@@ -2544,7 +2544,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[1,2,0,3,5,6,4,7]
@@ -2558,7 +2558,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,0,3,5,6,4,7]
@@ -2570,7 +2570,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_8xi32_perm_mask3(<8 x i32> %vec) {
; CHECK-LABEL: test_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,3,1,0,5,7,5,4]
; CHECK-NEXT: retq
%res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 1, i32 0, i32 5, i32 7, i32 5, i32 4>
@@ -2578,7 +2578,7 @@ define <8 x i32> @test_8xi32_perm_mask3(<8 x i32> %vec) {
}
define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[1,3,1,0,5,7,5,4]
@@ -2592,7 +2592,7 @@ define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2,
define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,3,1,0,5,7,5,4]
@@ -2604,7 +2604,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %mask
}
define <8 x i32> @test_8xi32_perm_mem_mask0(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = mem[1,0,2,0,5,4,6,4]
; CHECK-NEXT: retq
%vec = load <8 x i32>, <8 x i32>* %vp
@@ -2613,7 +2613,7 @@ define <8 x i32> @test_8xi32_perm_mem_mask0(<8 x i32>* %vp) {
}
define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[1,0,2,0,5,4,6,4]
@@ -2627,7 +2627,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[1,0,2,0,5,4,6,4]
@@ -2641,7 +2641,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[0,3,2,0,4,7,6,4]
@@ -2655,7 +2655,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[0,3,2,0,4,7,6,4]
@@ -2669,7 +2669,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[3,2,3,1,7,6,7,5]
@@ -2683,7 +2683,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,1,7,6,7,5]
@@ -2697,7 +2697,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %
define <8 x i32> @test_8xi32_perm_mem_mask3(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = mem[3,2,0,0,7,6,4,4]
; CHECK-NEXT: retq
%vec = load <8 x i32>, <8 x i32>* %vp
@@ -2706,7 +2706,7 @@ define <8 x i32> @test_8xi32_perm_mem_mask3(<8 x i32>* %vp) {
}
define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %vec2, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[3,2,0,0,7,6,4,4]
@@ -2720,7 +2720,7 @@ define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %ve
define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %mask) {
; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,0,0,7,6,4,4]
@@ -2734,7 +2734,7 @@ define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %
define <16 x i32> @test_16xi32_perm_mask0(<16 x i32> %vec) {
; CHECK-LABEL: test_16xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12]
; CHECK-NEXT: retq
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 3, i32 1, i32 3, i32 0, i32 7, i32 5, i32 7, i32 4, i32 11, i32 9, i32 11, i32 8, i32 15, i32 13, i32 15, i32 12>
@@ -2742,7 +2742,7 @@ define <16 x i32> @test_16xi32_perm_mask0(<16 x i32> %vec) {
}
define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12]
@@ -2756,7 +2756,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12]
@@ -2768,7 +2768,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[2,0,3,0,6,4,7,4,10,8,11,8,14,12,15,12]
@@ -2782,7 +2782,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,3,0,6,4,7,4,10,8,11,8,14,12,15,12]
@@ -2794,7 +2794,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[1,3,3,0,5,7,7,4,9,11,11,8,13,15,15,12]
@@ -2808,7 +2808,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,3,3,0,5,7,7,4,9,11,11,8,13,15,15,12]
@@ -2820,7 +2820,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_16xi32_perm_mask3(<16 x i32> %vec) {
; CHECK-LABEL: test_16xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15]
; CHECK-NEXT: retq
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 3, i32 2, i32 0, i32 3, i32 7, i32 6, i32 4, i32 7, i32 11, i32 10, i32 8, i32 11, i32 15, i32 14, i32 12, i32 15>
@@ -2828,7 +2828,7 @@ define <16 x i32> @test_16xi32_perm_mask3(<16 x i32> %vec) {
}
define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15]
@@ -2842,7 +2842,7 @@ define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %ve
define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15]
@@ -2854,7 +2854,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %
}
define <16 x i32> @test_16xi32_perm_mem_mask0(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15]
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -2863,7 +2863,7 @@ define <16 x i32> @test_16xi32_perm_mem_mask0(<16 x i32>* %vp) {
}
define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15]
@@ -2877,7 +2877,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15]
@@ -2891,7 +2891,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[1,0,0,2,5,4,4,6,9,8,8,10,13,12,12,14]
@@ -2905,7 +2905,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,0,2,5,4,4,6,9,8,8,10,13,12,12,14]
@@ -2919,7 +2919,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[2,0,1,2,6,4,5,6,10,8,9,10,14,12,13,14]
@@ -2933,7 +2933,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[2,0,1,2,6,4,5,6,10,8,9,10,14,12,13,14]
@@ -2947,7 +2947,7 @@ define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i3
define <16 x i32> @test_16xi32_perm_mem_mask3(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13]
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -2956,7 +2956,7 @@ define <16 x i32> @test_16xi32_perm_mem_mask3(<16 x i32>* %vp) {
}
define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %vec2, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpeqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13]
@@ -2970,7 +2970,7 @@ define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32>
define <16 x i32> @test_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13]
diff --git a/test/CodeGen/X86/avx512-shuffles/unpack.ll b/test/CodeGen/X86/avx512-shuffles/unpack.ll
index 2a37cd064f3..5eca7f0ceba 100644
--- a/test/CodeGen/X86/avx512-shuffles/unpack.ll
+++ b/test/CodeGen/X86/avx512-shuffles/unpack.ll
@@ -3,7 +3,7 @@
define <4 x float> @test_4xfloat_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2) {
; CHECK-LABEL: test_4xfloat_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -11,7 +11,7 @@ define <4 x float> @test_4xfloat_unpack_low_mask0(<4 x float> %vec1, <4 x float>
}
define <4 x float> @test_4xfloat_masked_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -25,7 +25,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask0(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -37,7 +37,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask0(<4 x float> %vec1,
}
define <4 x float> @test_4xfloat_masked_unpack_low_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -51,7 +51,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask1(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -63,7 +63,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask1(<4 x float> %vec1,
}
define <4 x float> @test_4xfloat_masked_unpack_low_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -77,7 +77,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask2(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -89,7 +89,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask2(<4 x float> %vec1,
}
define <4 x float> @test_4xfloat_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2) {
; CHECK-LABEL: test_4xfloat_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -97,7 +97,7 @@ define <4 x float> @test_4xfloat_unpack_low_mask3(<4 x float> %vec1, <4 x float>
}
define <4 x float> @test_4xfloat_masked_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -111,7 +111,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mask3(<4 x float> %vec1, <4 x
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -123,7 +123,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask3(<4 x float> %vec1,
}
define <4 x float> @test_4xfloat_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) {
; CHECK-LABEL: test_4xfloat_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT: retq
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -132,7 +132,7 @@ define <4 x float> @test_4xfloat_unpack_low_mem_mask0(<4 x float> %vec1, <4 x fl
}
define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1]
@@ -147,7 +147,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask0(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1]
@@ -161,7 +161,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask0(<4 x float> %v
define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1]
@@ -176,7 +176,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask1(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1]
@@ -190,7 +190,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask1(<4 x float> %v
define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1]
@@ -205,7 +205,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask2(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1]
@@ -219,7 +219,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask2(<4 x float> %v
define <4 x float> @test_4xfloat_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) {
; CHECK-LABEL: test_4xfloat_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT: retq
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -228,7 +228,7 @@ define <4 x float> @test_4xfloat_unpack_low_mem_mask3(<4 x float> %vec1, <4 x fl
}
define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1]
@@ -243,7 +243,7 @@ define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask3(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1]
@@ -257,7 +257,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask3(<4 x float> %v
define <8 x float> @test_8xfloat_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2) {
; CHECK-LABEL: test_8xfloat_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -265,7 +265,7 @@ define <8 x float> @test_8xfloat_unpack_low_mask0(<8 x float> %vec1, <8 x float>
}
define <8 x float> @test_8xfloat_masked_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
@@ -279,7 +279,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask0(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
@@ -291,7 +291,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask0(<8 x float> %vec1,
}
define <8 x float> @test_8xfloat_masked_unpack_low_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
@@ -305,7 +305,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask1(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
@@ -317,7 +317,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask1(<8 x float> %vec1,
}
define <8 x float> @test_8xfloat_masked_unpack_low_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
@@ -331,7 +331,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask2(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
@@ -343,7 +343,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask2(<8 x float> %vec1,
}
define <8 x float> @test_8xfloat_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2) {
; CHECK-LABEL: test_8xfloat_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -351,7 +351,7 @@ define <8 x float> @test_8xfloat_unpack_low_mask3(<8 x float> %vec1, <8 x float>
}
define <8 x float> @test_8xfloat_masked_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
@@ -365,7 +365,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mask3(<8 x float> %vec1, <8 x
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
@@ -377,7 +377,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask3(<8 x float> %vec1,
}
define <8 x float> @test_8xfloat_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) {
; CHECK-LABEL: test_8xfloat_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; CHECK-NEXT: retq
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -386,7 +386,7 @@ define <8 x float> @test_8xfloat_unpack_low_mem_mask0(<8 x float> %vec1, <8 x fl
}
define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
@@ -401,7 +401,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask0(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
@@ -415,7 +415,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask0(<8 x float> %v
define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
@@ -430,7 +430,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask1(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
@@ -444,7 +444,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask1(<8 x float> %v
define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
@@ -459,7 +459,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask2(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
@@ -473,7 +473,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask2(<8 x float> %v
define <8 x float> @test_8xfloat_unpack_low_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) {
; CHECK-LABEL: test_8xfloat_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; CHECK-NEXT: retq
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -482,7 +482,7 @@ define <8 x float> @test_8xfloat_unpack_low_mem_mask3(<8 x float> %vec1, <8 x fl
}
define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
@@ -497,7 +497,7 @@ define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask3(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
@@ -511,7 +511,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask3(<8 x float> %v
define <16 x float> @test_16xfloat_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2) {
; CHECK-LABEL: test_16xfloat_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -519,7 +519,7 @@ define <16 x float> @test_16xfloat_unpack_low_mask0(<16 x float> %vec1, <16 x fl
}
define <16 x float> @test_16xfloat_masked_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -533,7 +533,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask0(<16 x float> %vec1, <
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -545,7 +545,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask0(<16 x float> %ve
}
define <16 x float> @test_16xfloat_masked_unpack_low_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -559,7 +559,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask1(<16 x float> %vec1, <
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -571,7 +571,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask1(<16 x float> %ve
}
define <16 x float> @test_16xfloat_masked_unpack_low_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -585,7 +585,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask2(<16 x float> %vec1, <
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -597,7 +597,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask2(<16 x float> %ve
}
define <16 x float> @test_16xfloat_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2) {
; CHECK-LABEL: test_16xfloat_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -605,7 +605,7 @@ define <16 x float> @test_16xfloat_unpack_low_mask3(<16 x float> %vec1, <16 x fl
}
define <16 x float> @test_16xfloat_masked_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -619,7 +619,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mask3(<16 x float> %vec1, <
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -631,7 +631,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask3(<16 x float> %ve
}
define <16 x float> @test_16xfloat_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) {
; CHECK-LABEL: test_16xfloat_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
; CHECK-NEXT: retq
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -640,7 +640,7 @@ define <16 x float> @test_16xfloat_unpack_low_mem_mask0(<16 x float> %vec1, <16
}
define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
@@ -655,7 +655,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask0(<16 x float> %vec
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
@@ -669,7 +669,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask0(<16 x float>
define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
@@ -684,7 +684,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask1(<16 x float> %vec
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
@@ -698,7 +698,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask1(<16 x float>
define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
@@ -713,7 +713,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask2(<16 x float> %vec
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
@@ -727,7 +727,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask2(<16 x float>
define <16 x float> @test_16xfloat_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
; CHECK-LABEL: test_16xfloat_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
; CHECK-NEXT: retq
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -736,7 +736,7 @@ define <16 x float> @test_16xfloat_unpack_low_mem_mask3(<16 x float> %vec1, <16
}
define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
@@ -751,7 +751,7 @@ define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask3(<16 x float> %vec
define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13]
@@ -765,7 +765,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask3(<16 x float>
define <2 x double> @test_2xdouble_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2) {
; CHECK-LABEL: test_2xdouble_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
%res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 0, i32 2>
@@ -773,7 +773,7 @@ define <2 x double> @test_2xdouble_unpack_low_mask0(<2 x double> %vec1, <2 x dou
}
define <2 x double> @test_2xdouble_masked_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0]
@@ -787,7 +787,7 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mask0(<2 x double> %vec1, <
define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0]
@@ -799,7 +799,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask0(<2 x double> %ve
}
define <2 x double> @test_2xdouble_masked_unpack_low_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0]
@@ -813,7 +813,7 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mask1(<2 x double> %vec1, <
define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0]
@@ -825,7 +825,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask1(<2 x double> %ve
}
define <2 x double> @test_2xdouble_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) {
; CHECK-LABEL: test_2xdouble_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: retq
%vec2 = load <2 x double>, <2 x double>* %vec2p
@@ -834,7 +834,7 @@ define <2 x double> @test_2xdouble_unpack_low_mem_mask0(<2 x double> %vec1, <2 x
}
define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0]
@@ -849,7 +849,7 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask0(<2 x double> %vec
define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0]
@@ -863,7 +863,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask0(<2 x double>
define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0]
@@ -878,7 +878,7 @@ define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask1(<2 x double> %vec
define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0]
@@ -892,7 +892,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask1(<2 x double>
define <4 x double> @test_4xdouble_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2) {
; CHECK-LABEL: test_4xdouble_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -900,7 +900,7 @@ define <4 x double> @test_4xdouble_unpack_low_mask0(<4 x double> %vec1, <4 x dou
}
define <4 x double> @test_4xdouble_masked_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
@@ -914,7 +914,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask0(<4 x double> %vec1, <
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
@@ -926,7 +926,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask0(<4 x double> %ve
}
define <4 x double> @test_4xdouble_masked_unpack_low_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
@@ -940,7 +940,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask1(<4 x double> %vec1, <
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
@@ -952,7 +952,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask1(<4 x double> %ve
}
define <4 x double> @test_4xdouble_masked_unpack_low_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
@@ -966,7 +966,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask2(<4 x double> %vec1, <
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
@@ -978,7 +978,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask2(<4 x double> %ve
}
define <4 x double> @test_4xdouble_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2) {
; CHECK-LABEL: test_4xdouble_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -986,7 +986,7 @@ define <4 x double> @test_4xdouble_unpack_low_mask3(<4 x double> %vec1, <4 x dou
}
define <4 x double> @test_4xdouble_masked_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
@@ -1000,7 +1000,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mask3(<4 x double> %vec1, <
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
@@ -1012,7 +1012,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask3(<4 x double> %ve
}
define <4 x double> @test_4xdouble_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
; CHECK-LABEL: test_4xdouble_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; CHECK-NEXT: retq
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -1021,7 +1021,7 @@ define <4 x double> @test_4xdouble_unpack_low_mem_mask0(<4 x double> %vec1, <4 x
}
define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2]
@@ -1036,7 +1036,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask0(<4 x double> %vec
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2]
@@ -1050,7 +1050,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask0(<4 x double>
define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2]
@@ -1065,7 +1065,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask1(<4 x double> %vec
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2]
@@ -1079,7 +1079,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask1(<4 x double>
define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2]
@@ -1094,7 +1094,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask2(<4 x double> %vec
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2]
@@ -1108,7 +1108,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask2(<4 x double>
define <4 x double> @test_4xdouble_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
; CHECK-LABEL: test_4xdouble_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; CHECK-NEXT: retq
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -1117,7 +1117,7 @@ define <4 x double> @test_4xdouble_unpack_low_mem_mask3(<4 x double> %vec1, <4 x
}
define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2]
@@ -1132,7 +1132,7 @@ define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask3(<4 x double> %vec
define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2]
@@ -1146,7 +1146,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask3(<4 x double>
define <8 x double> @test_8xdouble_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2) {
; CHECK-LABEL: test_8xdouble_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1154,7 +1154,7 @@ define <8 x double> @test_8xdouble_unpack_low_mask0(<8 x double> %vec1, <8 x dou
}
define <8 x double> @test_8xdouble_masked_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -1168,7 +1168,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask0(<8 x double> %vec1, <
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -1180,7 +1180,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask0(<8 x double> %ve
}
define <8 x double> @test_8xdouble_masked_unpack_low_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -1194,7 +1194,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask1(<8 x double> %vec1, <
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -1206,7 +1206,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask1(<8 x double> %ve
}
define <8 x double> @test_8xdouble_masked_unpack_low_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -1220,7 +1220,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask2(<8 x double> %vec1, <
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -1232,7 +1232,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask2(<8 x double> %ve
}
define <8 x double> @test_8xdouble_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2) {
; CHECK-LABEL: test_8xdouble_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1240,7 +1240,7 @@ define <8 x double> @test_8xdouble_unpack_low_mask3(<8 x double> %vec1, <8 x dou
}
define <8 x double> @test_8xdouble_masked_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -1254,7 +1254,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mask3(<8 x double> %vec1, <
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -1266,7 +1266,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask3(<8 x double> %ve
}
define <8 x double> @test_8xdouble_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
; CHECK-LABEL: test_8xdouble_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
; CHECK-NEXT: retq
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -1275,7 +1275,7 @@ define <8 x double> @test_8xdouble_unpack_low_mem_mask0(<8 x double> %vec1, <8 x
}
define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
@@ -1290,7 +1290,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask0(<8 x double> %vec
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
@@ -1304,7 +1304,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask0(<8 x double>
define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
@@ -1319,7 +1319,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask1(<8 x double> %vec
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
@@ -1333,7 +1333,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask1(<8 x double>
define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
@@ -1348,7 +1348,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask2(<8 x double> %vec
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
@@ -1362,7 +1362,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask2(<8 x double>
define <8 x double> @test_8xdouble_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
; CHECK-LABEL: test_8xdouble_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
; CHECK-NEXT: retq
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -1371,7 +1371,7 @@ define <8 x double> @test_8xdouble_unpack_low_mem_mask3(<8 x double> %vec1, <8 x
}
define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
@@ -1386,7 +1386,7 @@ define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask3(<8 x double> %vec
define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
@@ -1400,7 +1400,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask3(<8 x double>
define <4 x float> @test_4xfloat_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2) {
; CHECK-LABEL: test_4xfloat_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -1408,7 +1408,7 @@ define <4 x float> @test_4xfloat_unpack_high_mask0(<4 x float> %vec1, <4 x float
}
define <4 x float> @test_4xfloat_masked_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1422,7 +1422,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask0(<4 x float> %vec1, <4
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1434,7 +1434,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask0(<4 x float> %vec1
}
define <4 x float> @test_4xfloat_masked_unpack_high_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1448,7 +1448,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask1(<4 x float> %vec1, <4
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1460,7 +1460,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask1(<4 x float> %vec1
}
define <4 x float> @test_4xfloat_masked_unpack_high_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1474,7 +1474,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask2(<4 x float> %vec1, <4
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1486,7 +1486,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask2(<4 x float> %vec1
}
define <4 x float> @test_4xfloat_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2) {
; CHECK-LABEL: test_4xfloat_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: retq
%res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -1494,7 +1494,7 @@ define <4 x float> @test_4xfloat_unpack_high_mask3(<4 x float> %vec1, <4 x float
}
define <4 x float> @test_4xfloat_masked_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1508,7 +1508,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mask3(<4 x float> %vec1, <4
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1520,7 +1520,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask3(<4 x float> %vec1
}
define <4 x float> @test_4xfloat_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) {
; CHECK-LABEL: test_4xfloat_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT: retq
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -1529,7 +1529,7 @@ define <4 x float> @test_4xfloat_unpack_high_mem_mask0(<4 x float> %vec1, <4 x f
}
define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3]
@@ -1544,7 +1544,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask0(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3]
@@ -1558,7 +1558,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask0(<4 x float> %
define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3]
@@ -1573,7 +1573,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask1(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3]
@@ -1587,7 +1587,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask1(<4 x float> %
define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3]
@@ -1602,7 +1602,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask2(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3]
@@ -1616,7 +1616,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask2(<4 x float> %
define <4 x float> @test_4xfloat_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) {
; CHECK-LABEL: test_4xfloat_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
; CHECK-NEXT: retq
%vec2 = load <4 x float>, <4 x float>* %vec2p
@@ -1625,7 +1625,7 @@ define <4 x float> @test_4xfloat_unpack_high_mem_mask3(<4 x float> %vec1, <4 x f
}
define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3]
@@ -1640,7 +1640,7 @@ define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask3(<4 x float> %vec1,
define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %mask) {
; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3]
@@ -1654,7 +1654,7 @@ define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask3(<4 x float> %
define <8 x float> @test_8xfloat_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2) {
; CHECK-LABEL: test_8xfloat_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -1662,7 +1662,7 @@ define <8 x float> @test_8xfloat_unpack_high_mask0(<8 x float> %vec1, <8 x float
}
define <8 x float> @test_8xfloat_masked_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
@@ -1676,7 +1676,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask0(<8 x float> %vec1, <8
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
@@ -1688,7 +1688,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask0(<8 x float> %vec1
}
define <8 x float> @test_8xfloat_masked_unpack_high_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
@@ -1702,7 +1702,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask1(<8 x float> %vec1, <8
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
@@ -1714,7 +1714,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask1(<8 x float> %vec1
}
define <8 x float> @test_8xfloat_masked_unpack_high_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
@@ -1728,7 +1728,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask2(<8 x float> %vec1, <8
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
@@ -1740,7 +1740,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask2(<8 x float> %vec1
}
define <8 x float> @test_8xfloat_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2) {
; CHECK-LABEL: test_8xfloat_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; CHECK-NEXT: retq
%res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -1748,7 +1748,7 @@ define <8 x float> @test_8xfloat_unpack_high_mask3(<8 x float> %vec1, <8 x float
}
define <8 x float> @test_8xfloat_masked_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
@@ -1762,7 +1762,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mask3(<8 x float> %vec1, <8
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
@@ -1774,7 +1774,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask3(<8 x float> %vec1
}
define <8 x float> @test_8xfloat_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) {
; CHECK-LABEL: test_8xfloat_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
; CHECK-NEXT: retq
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -1783,7 +1783,7 @@ define <8 x float> @test_8xfloat_unpack_high_mem_mask0(<8 x float> %vec1, <8 x f
}
define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
@@ -1798,7 +1798,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask0(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
@@ -1812,7 +1812,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask0(<8 x float> %
define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
@@ -1827,7 +1827,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask1(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
@@ -1841,7 +1841,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask1(<8 x float> %
define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
@@ -1856,7 +1856,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask2(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
@@ -1870,7 +1870,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask2(<8 x float> %
define <8 x float> @test_8xfloat_unpack_high_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) {
; CHECK-LABEL: test_8xfloat_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
; CHECK-NEXT: retq
%vec2 = load <8 x float>, <8 x float>* %vec2p
@@ -1879,7 +1879,7 @@ define <8 x float> @test_8xfloat_unpack_high_mem_mask3(<8 x float> %vec1, <8 x f
}
define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
@@ -1894,7 +1894,7 @@ define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask3(<8 x float> %vec1,
define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %mask) {
; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
@@ -1908,7 +1908,7 @@ define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask3(<8 x float> %
define <16 x float> @test_16xfloat_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2) {
; CHECK-LABEL: test_16xfloat_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -1916,7 +1916,7 @@ define <16 x float> @test_16xfloat_unpack_high_mask0(<16 x float> %vec1, <16 x f
}
define <16 x float> @test_16xfloat_masked_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -1930,7 +1930,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask0(<16 x float> %vec1,
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -1942,7 +1942,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask0(<16 x float> %v
}
define <16 x float> @test_16xfloat_masked_unpack_high_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -1956,7 +1956,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask1(<16 x float> %vec1,
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -1968,7 +1968,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask1(<16 x float> %v
}
define <16 x float> @test_16xfloat_masked_unpack_high_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -1982,7 +1982,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask2(<16 x float> %vec1,
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -1994,7 +1994,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask2(<16 x float> %v
}
define <16 x float> @test_16xfloat_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2) {
; CHECK-LABEL: test_16xfloat_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; CHECK-NEXT: retq
%res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -2002,7 +2002,7 @@ define <16 x float> @test_16xfloat_unpack_high_mask3(<16 x float> %vec1, <16 x f
}
define <16 x float> @test_16xfloat_masked_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqps %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -2016,7 +2016,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mask3(<16 x float> %vec1,
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -2028,7 +2028,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask3(<16 x float> %v
}
define <16 x float> @test_16xfloat_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) {
; CHECK-LABEL: test_16xfloat_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
; CHECK-NEXT: retq
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -2037,7 +2037,7 @@ define <16 x float> @test_16xfloat_unpack_high_mem_mask0(<16 x float> %vec1, <16
}
define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
@@ -2052,7 +2052,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask0(<16 x float> %ve
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
@@ -2066,7 +2066,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask0(<16 x float
define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
@@ -2081,7 +2081,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask1(<16 x float> %ve
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
@@ -2095,7 +2095,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask1(<16 x float
define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
@@ -2110,7 +2110,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask2(<16 x float> %ve
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
@@ -2124,7 +2124,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask2(<16 x float
define <16 x float> @test_16xfloat_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
; CHECK-LABEL: test_16xfloat_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
; CHECK-NEXT: retq
%vec2 = load <16 x float>, <16 x float>* %vec2p
@@ -2133,7 +2133,7 @@ define <16 x float> @test_16xfloat_unpack_high_mem_mask3(<16 x float> %vec1, <16
}
define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
@@ -2148,7 +2148,7 @@ define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask3(<16 x float> %ve
define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %mask) {
; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
@@ -2162,7 +2162,7 @@ define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask3(<16 x float
define <2 x double> @test_2xdouble_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2) {
; CHECK-LABEL: test_2xdouble_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; CHECK-NEXT: retq
%res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
@@ -2170,7 +2170,7 @@ define <2 x double> @test_2xdouble_unpack_high_mask0(<2 x double> %vec1, <2 x do
}
define <2 x double> @test_2xdouble_masked_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[1]
@@ -2184,7 +2184,7 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mask0(<2 x double> %vec1,
define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1]
@@ -2196,7 +2196,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask0(<2 x double> %v
}
define <2 x double> @test_2xdouble_masked_unpack_high_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %xmm4, %xmm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[1]
@@ -2210,7 +2210,7 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mask1(<2 x double> %vec1,
define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1]
@@ -2222,7 +2222,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask1(<2 x double> %v
}
define <2 x double> @test_2xdouble_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) {
; CHECK-LABEL: test_2xdouble_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
; CHECK-NEXT: retq
%vec2 = load <2 x double>, <2 x double>* %vec2p
@@ -2231,7 +2231,7 @@ define <2 x double> @test_2xdouble_unpack_high_mem_mask0(<2 x double> %vec1, <2
}
define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[1]
@@ -2246,7 +2246,7 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask0(<2 x double> %ve
define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[1]
@@ -2260,7 +2260,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask0(<2 x double
define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[1]
@@ -2275,7 +2275,7 @@ define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask1(<2 x double> %ve
define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %mask) {
; CHECK-LABEL: test_2xdouble_zero_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[1]
@@ -2289,7 +2289,7 @@ define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask1(<2 x double
define <4 x double> @test_4xdouble_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2) {
; CHECK-LABEL: test_4xdouble_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -2297,7 +2297,7 @@ define <4 x double> @test_4xdouble_unpack_high_mask0(<4 x double> %vec1, <4 x do
}
define <4 x double> @test_4xdouble_masked_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
@@ -2311,7 +2311,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask0(<4 x double> %vec1,
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
@@ -2323,7 +2323,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask0(<4 x double> %v
}
define <4 x double> @test_4xdouble_masked_unpack_high_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
@@ -2337,7 +2337,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask1(<4 x double> %vec1,
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
@@ -2349,7 +2349,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask1(<4 x double> %v
}
define <4 x double> @test_4xdouble_masked_unpack_high_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
@@ -2363,7 +2363,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask2(<4 x double> %vec1,
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
@@ -2375,7 +2375,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask2(<4 x double> %v
}
define <4 x double> @test_4xdouble_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2) {
; CHECK-LABEL: test_4xdouble_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: retq
%res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -2383,7 +2383,7 @@ define <4 x double> @test_4xdouble_unpack_high_mask3(<4 x double> %vec1, <4 x do
}
define <4 x double> @test_4xdouble_masked_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %ymm4, %ymm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
@@ -2397,7 +2397,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mask3(<4 x double> %vec1,
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
@@ -2409,7 +2409,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask3(<4 x double> %v
}
define <4 x double> @test_4xdouble_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
; CHECK-LABEL: test_4xdouble_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
; CHECK-NEXT: retq
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -2418,7 +2418,7 @@ define <4 x double> @test_4xdouble_unpack_high_mem_mask0(<4 x double> %vec1, <4
}
define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3]
@@ -2433,7 +2433,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask0(<4 x double> %ve
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3]
@@ -2447,7 +2447,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask0(<4 x double
define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3]
@@ -2462,7 +2462,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask1(<4 x double> %ve
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3]
@@ -2476,7 +2476,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask1(<4 x double
define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3]
@@ -2491,7 +2491,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask2(<4 x double> %ve
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3]
@@ -2505,7 +2505,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask2(<4 x double
define <4 x double> @test_4xdouble_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
; CHECK-LABEL: test_4xdouble_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
; CHECK-NEXT: retq
%vec2 = load <4 x double>, <4 x double>* %vec2p
@@ -2514,7 +2514,7 @@ define <4 x double> @test_4xdouble_unpack_high_mem_mask3(<4 x double> %vec1, <4
}
define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3]
@@ -2529,7 +2529,7 @@ define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask3(<4 x double> %ve
define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %mask) {
; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3]
@@ -2543,7 +2543,7 @@ define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask3(<4 x double
define <8 x double> @test_8xdouble_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2) {
; CHECK-LABEL: test_8xdouble_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -2551,7 +2551,7 @@ define <8 x double> @test_8xdouble_unpack_high_mask0(<8 x double> %vec1, <8 x do
}
define <8 x double> @test_8xdouble_masked_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -2565,7 +2565,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask0(<8 x double> %vec1,
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -2577,7 +2577,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask0(<8 x double> %v
}
define <8 x double> @test_8xdouble_masked_unpack_high_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -2591,7 +2591,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask1(<8 x double> %vec1,
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -2603,7 +2603,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask1(<8 x double> %v
}
define <8 x double> @test_8xdouble_masked_unpack_high_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -2617,7 +2617,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask2(<8 x double> %vec1,
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -2629,7 +2629,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask2(<8 x double> %v
}
define <8 x double> @test_8xdouble_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2) {
; CHECK-LABEL: test_8xdouble_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -2637,7 +2637,7 @@ define <8 x double> @test_8xdouble_unpack_high_mask3(<8 x double> %vec1, <8 x do
}
define <8 x double> @test_8xdouble_masked_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vcmpeqpd %zmm4, %zmm3, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -2651,7 +2651,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mask3(<8 x double> %vec1,
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -2663,7 +2663,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask3(<8 x double> %v
}
define <8 x double> @test_8xdouble_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
; CHECK-LABEL: test_8xdouble_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
; CHECK-NEXT: retq
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -2672,7 +2672,7 @@ define <8 x double> @test_8xdouble_unpack_high_mem_mask0(<8 x double> %vec1, <8
}
define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -2687,7 +2687,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask0(<8 x double> %ve
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -2701,7 +2701,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask0(<8 x double
define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -2716,7 +2716,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask1(<8 x double> %ve
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -2730,7 +2730,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask1(<8 x double
define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -2745,7 +2745,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask2(<8 x double> %ve
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -2759,7 +2759,7 @@ define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask2(<8 x double
define <8 x double> @test_8xdouble_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
; CHECK-LABEL: test_8xdouble_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
; CHECK-NEXT: retq
%vec2 = load <8 x double>, <8 x double>* %vec2p
@@ -2768,7 +2768,7 @@ define <8 x double> @test_8xdouble_unpack_high_mem_mask3(<8 x double> %vec1, <8
}
define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %zmm3, %zmm2, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
@@ -2783,7 +2783,7 @@ define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask3(<8 x double> %ve
define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %mask) {
; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %zmm2, %zmm1, %k1
; CHECK-NEXT: vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
diff --git a/test/CodeGen/X86/avx512-skx-insert-subvec.ll b/test/CodeGen/X86/avx512-skx-insert-subvec.ll
index ff25c005e9c..91c3b73a959 100644
--- a/test/CodeGen/X86/avx512-skx-insert-subvec.ll
+++ b/test/CodeGen/X86/avx512-skx-insert-subvec.ll
@@ -3,7 +3,7 @@
define <8 x i1> @test(<2 x i1> %a) {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
; CHECK-NEXT: kshiftlb $2, %k0, %k0
@@ -15,7 +15,7 @@ define <8 x i1> @test(<2 x i1> %a) {
define <8 x i1> @test1(<2 x i1> %a) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
; CHECK-NEXT: kshiftlb $4, %k0, %k0
@@ -27,7 +27,7 @@ define <8 x i1> @test1(<2 x i1> %a) {
define <8 x i1> @test2(<2 x i1> %a) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
; CHECK-NEXT: vpmovm2q %k0, %zmm0
@@ -43,7 +43,7 @@ define <8 x i1> @test2(<2 x i1> %a) {
define <8 x i1> @test3(<4 x i1> %a) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
@@ -55,7 +55,7 @@ define <8 x i1> @test3(<4 x i1> %a) {
define <8 x i1> @test4(<4 x i1> %a, <4 x i1>%b) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
; CHECK-NEXT: vpslld $31, %xmm1, %xmm0
@@ -73,7 +73,7 @@ define <8 x i1> @test4(<4 x i1> %a, <4 x i1>%b) {
define <4 x i1> @test5(<2 x i1> %a, <2 x i1>%b) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
; CHECK-NEXT: vpsllq $63, %xmm1, %xmm0
@@ -91,7 +91,7 @@ define <4 x i1> @test5(<2 x i1> %a, <2 x i1>%b) {
define <16 x i1> @test6(<2 x i1> %a, <2 x i1>%b) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
; CHECK-NEXT: vpsllq $63, %xmm1, %xmm0
@@ -110,7 +110,7 @@ define <16 x i1> @test6(<2 x i1> %a, <2 x i1>%b) {
define <32 x i1> @test7(<4 x i1> %a, <4 x i1>%b) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
; CHECK-NEXT: vpslld $31, %xmm1, %xmm0
@@ -130,7 +130,7 @@ define <32 x i1> @test7(<4 x i1> %a, <4 x i1>%b) {
define <64 x i1> @test8(<8 x i1> %a, <8 x i1>%b) {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1
; CHECK-NEXT: vpmovw2m %xmm1, %k0
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
@@ -145,7 +145,7 @@ define <64 x i1> @test8(<8 x i1> %a, <8 x i1>%b) {
define <4 x i1> @test9(<8 x i1> %a, <8 x i1> %b) {
; CHECK-LABEL: test9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k0
; CHECK-NEXT: kshiftrw $4, %k0, %k0
@@ -157,7 +157,7 @@ define <4 x i1> @test9(<8 x i1> %a, <8 x i1> %b) {
define <2 x i1> @test10(<4 x i1> %a, <4 x i1> %b) {
; CHECK-LABEL: test10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
; CHECK-NEXT: kshiftrw $2, %k0, %k0
@@ -169,7 +169,7 @@ define <2 x i1> @test10(<4 x i1> %a, <4 x i1> %b) {
define <8 x i1> @test11(<4 x i1> %a, <4 x i1>%b) {
; CHECK-LABEL: test11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
; CHECK-NEXT: kshiftlb $4, %k0, %k0
diff --git a/test/CodeGen/X86/avx512-trunc.ll b/test/CodeGen/X86/avx512-trunc.ll
index cf9a12deaad..54970083844 100644
--- a/test/CodeGen/X86/avx512-trunc.ll
+++ b/test/CodeGen/X86/avx512-trunc.ll
@@ -6,7 +6,7 @@
define <16 x i8> @trunc_16x32_to_16x8(<16 x i32> %i) #0 {
; ALL-LABEL: trunc_16x32_to_16x8:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovdb %zmm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -16,7 +16,7 @@ define <16 x i8> @trunc_16x32_to_16x8(<16 x i32> %i) #0 {
define <8 x i16> @trunc_8x64_to_8x16(<8 x i64> %i) #0 {
; ALL-LABEL: trunc_8x64_to_8x16:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovqw %zmm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -26,7 +26,7 @@ define <8 x i16> @trunc_8x64_to_8x16(<8 x i64> %i) #0 {
define <16 x i16> @trunc_v16i32_to_v16i16(<16 x i32> %x) #0 {
; ALL-LABEL: trunc_v16i32_to_v16i16:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovdw %zmm0, %ymm0
; ALL-NEXT: retq
%1 = trunc <16 x i32> %x to <16 x i16>
@@ -35,7 +35,7 @@ define <16 x i16> @trunc_v16i32_to_v16i16(<16 x i32> %x) #0 {
define <8 x i8> @trunc_qb_512(<8 x i64> %i) #0 {
; ALL-LABEL: trunc_qb_512:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovqw %zmm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -45,7 +45,7 @@ define <8 x i8> @trunc_qb_512(<8 x i64> %i) #0 {
define void @trunc_qb_512_mem(<8 x i64> %i, <8 x i8>* %res) #0 {
; ALL-LABEL: trunc_qb_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovqb %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -56,7 +56,7 @@ define void @trunc_qb_512_mem(<8 x i64> %i, <8 x i8>* %res) #0 {
define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qb_256:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -64,7 +64,7 @@ define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qb_256:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqd %ymm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -74,7 +74,7 @@ define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
; KNL-LABEL: trunc_qb_256_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -83,7 +83,7 @@ define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qb_256_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqb %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -94,7 +94,7 @@ define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
define <2 x i8> @trunc_qb_128(<2 x i64> %i) #0 {
; ALL-LABEL: trunc_qb_128:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: retq
%x = trunc <2 x i64> %i to <2 x i8>
ret <2 x i8> %x
@@ -102,13 +102,13 @@ define <2 x i8> @trunc_qb_128(<2 x i64> %i) #0 {
define void @trunc_qb_128_mem(<2 x i64> %i, <2 x i8>* %res) #0 {
; KNL-LABEL: trunc_qb_128_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; KNL-NEXT: vpextrw $0, %xmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qb_128_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqb %xmm0, (%rdi)
; SKX-NEXT: retq
%x = trunc <2 x i64> %i to <2 x i8>
@@ -118,7 +118,7 @@ define void @trunc_qb_128_mem(<2 x i64> %i, <2 x i8>* %res) #0 {
define <8 x i16> @trunc_qw_512(<8 x i64> %i) #0 {
; ALL-LABEL: trunc_qw_512:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovqw %zmm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -128,7 +128,7 @@ define <8 x i16> @trunc_qw_512(<8 x i64> %i) #0 {
define void @trunc_qw_512_mem(<8 x i64> %i, <8 x i16>* %res) #0 {
; ALL-LABEL: trunc_qw_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovqw %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -139,7 +139,7 @@ define void @trunc_qw_512_mem(<8 x i64> %i, <8 x i16>* %res) #0 {
define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qw_256:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -147,7 +147,7 @@ define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qw_256:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqd %ymm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -157,7 +157,7 @@ define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 {
; KNL-LABEL: trunc_qw_256_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -166,7 +166,7 @@ define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qw_256_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqw %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -177,7 +177,7 @@ define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 {
define <2 x i16> @trunc_qw_128(<2 x i64> %i) #0 {
; ALL-LABEL: trunc_qw_128:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: retq
%x = trunc <2 x i64> %i to <2 x i16>
ret <2 x i16> %x
@@ -185,14 +185,14 @@ define <2 x i16> @trunc_qw_128(<2 x i64> %i) #0 {
define void @trunc_qw_128_mem(<2 x i64> %i, <2 x i16>* %res) #0 {
; KNL-LABEL: trunc_qw_128_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; KNL-NEXT: vmovd %xmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qw_128_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqw %xmm0, (%rdi)
; SKX-NEXT: retq
%x = trunc <2 x i64> %i to <2 x i16>
@@ -202,7 +202,7 @@ define void @trunc_qw_128_mem(<2 x i64> %i, <2 x i16>* %res) #0 {
define <8 x i32> @trunc_qd_512(<8 x i64> %i) #0 {
; ALL-LABEL: trunc_qd_512:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovqd %zmm0, %ymm0
; ALL-NEXT: retq
%x = trunc <8 x i64> %i to <8 x i32>
@@ -211,7 +211,7 @@ define <8 x i32> @trunc_qd_512(<8 x i64> %i) #0 {
define void @trunc_qd_512_mem(<8 x i64> %i, <8 x i32>* %res) #0 {
; ALL-LABEL: trunc_qd_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovqd %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -222,7 +222,7 @@ define void @trunc_qd_512_mem(<8 x i64> %i, <8 x i32>* %res) #0 {
define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qd_256:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -230,7 +230,7 @@ define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qd_256:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqd %ymm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -240,7 +240,7 @@ define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 {
; KNL-LABEL: trunc_qd_256_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
@@ -248,7 +248,7 @@ define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qd_256_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqd %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -259,7 +259,7 @@ define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 {
define <2 x i32> @trunc_qd_128(<2 x i64> %i) #0 {
; ALL-LABEL: trunc_qd_128:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: retq
%x = trunc <2 x i64> %i to <2 x i32>
ret <2 x i32> %x
@@ -267,13 +267,13 @@ define <2 x i32> @trunc_qd_128(<2 x i64> %i) #0 {
define void @trunc_qd_128_mem(<2 x i64> %i, <2 x i32>* %res) #0 {
; KNL-LABEL: trunc_qd_128_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL-NEXT: vmovlps %xmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qd_128_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovqd %xmm0, (%rdi)
; SKX-NEXT: retq
%x = trunc <2 x i64> %i to <2 x i32>
@@ -283,7 +283,7 @@ define void @trunc_qd_128_mem(<2 x i64> %i, <2 x i32>* %res) #0 {
define <16 x i8> @trunc_db_512(<16 x i32> %i) #0 {
; ALL-LABEL: trunc_db_512:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovdb %zmm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -293,7 +293,7 @@ define <16 x i8> @trunc_db_512(<16 x i32> %i) #0 {
define void @trunc_db_512_mem(<16 x i32> %i, <16 x i8>* %res) #0 {
; ALL-LABEL: trunc_db_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovdb %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -304,7 +304,7 @@ define void @trunc_db_512_mem(<16 x i32> %i, <16 x i8>* %res) #0 {
define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_db_256:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -312,7 +312,7 @@ define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_db_256:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovdw %ymm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -322,7 +322,7 @@ define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 {
; KNL-LABEL: trunc_db_256_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -331,7 +331,7 @@ define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_db_256_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovdb %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -342,7 +342,7 @@ define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 {
define <4 x i8> @trunc_db_128(<4 x i32> %i) #0 {
; ALL-LABEL: trunc_db_128:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: retq
%x = trunc <4 x i32> %i to <4 x i8>
ret <4 x i8> %x
@@ -350,13 +350,13 @@ define <4 x i8> @trunc_db_128(<4 x i32> %i) #0 {
define void @trunc_db_128_mem(<4 x i32> %i, <4 x i8>* %res) #0 {
; KNL-LABEL: trunc_db_128_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovd %xmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_db_128_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovdb %xmm0, (%rdi)
; SKX-NEXT: retq
%x = trunc <4 x i32> %i to <4 x i8>
@@ -366,7 +366,7 @@ define void @trunc_db_128_mem(<4 x i32> %i, <4 x i8>* %res) #0 {
define <16 x i16> @trunc_dw_512(<16 x i32> %i) #0 {
; ALL-LABEL: trunc_dw_512:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovdw %zmm0, %ymm0
; ALL-NEXT: retq
%x = trunc <16 x i32> %i to <16 x i16>
@@ -375,7 +375,7 @@ define <16 x i16> @trunc_dw_512(<16 x i32> %i) #0 {
define void @trunc_dw_512_mem(<16 x i32> %i, <16 x i16>* %res) #0 {
; ALL-LABEL: trunc_dw_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovdw %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -386,7 +386,7 @@ define void @trunc_dw_512_mem(<16 x i32> %i, <16 x i16>* %res) #0 {
define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_dw_256:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -394,7 +394,7 @@ define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_dw_256:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovdw %ymm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -404,7 +404,7 @@ define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 {
; KNL-LABEL: trunc_dw_256_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
@@ -412,7 +412,7 @@ define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_dw_256_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovdw %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -423,13 +423,13 @@ define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 {
define void @trunc_dw_128_mem(<4 x i32> %i, <4 x i16>* %res) #0 {
; KNL-LABEL: trunc_dw_128_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; KNL-NEXT: vmovq %xmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_dw_128_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovdw %xmm0, (%rdi)
; SKX-NEXT: retq
%x = trunc <4 x i32> %i to <4 x i16>
@@ -439,7 +439,7 @@ define void @trunc_dw_128_mem(<4 x i32> %i, <4 x i16>* %res) #0 {
define <32 x i8> @trunc_wb_512(<32 x i16> %i) #0 {
; KNL-LABEL: trunc_wb_512:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: vpmovsxwd %ymm1, %zmm1
@@ -448,7 +448,7 @@ define <32 x i8> @trunc_wb_512(<32 x i16> %i) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_wb_512:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovwb %zmm0, %ymm0
; SKX-NEXT: retq
%x = trunc <32 x i16> %i to <32 x i8>
@@ -457,7 +457,7 @@ define <32 x i8> @trunc_wb_512(<32 x i16> %i) #0 {
define void @trunc_wb_512_mem(<32 x i16> %i, <32 x i8>* %res) #0 {
; KNL-LABEL: trunc_wb_512_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: vpmovsxwd %ymm1, %zmm1
@@ -468,7 +468,7 @@ define void @trunc_wb_512_mem(<32 x i16> %i, <32 x i8>* %res) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_wb_512_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovwb %zmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -479,14 +479,14 @@ define void @trunc_wb_512_mem(<32 x i16> %i, <32 x i8>* %res) #0 {
define <16 x i8> @trunc_wb_256(<16 x i16> %i) #0 {
; KNL-LABEL: trunc_wb_256:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_wb_256:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovwb %ymm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -496,7 +496,7 @@ define <16 x i8> @trunc_wb_256(<16 x i16> %i) #0 {
define void @trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) #0 {
; KNL-LABEL: trunc_wb_256_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
@@ -504,7 +504,7 @@ define void @trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_wb_256_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovwb %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -515,7 +515,7 @@ define void @trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) #0 {
define <8 x i8> @trunc_wb_128(<8 x i16> %i) #0 {
; ALL-LABEL: trunc_wb_128:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: retq
%x = trunc <8 x i16> %i to <8 x i8>
ret <8 x i8> %x
@@ -523,13 +523,13 @@ define <8 x i8> @trunc_wb_128(<8 x i16> %i) #0 {
define void @trunc_wb_128_mem(<8 x i16> %i, <8 x i8>* %res) #0 {
; KNL-LABEL: trunc_wb_128_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovq %xmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_wb_128_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovwb %xmm0, (%rdi)
; SKX-NEXT: retq
%x = trunc <8 x i16> %i to <8 x i8>
@@ -540,7 +540,7 @@ define void @trunc_wb_128_mem(<8 x i16> %i, <8 x i8>* %res) #0 {
define void @usat_trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) {
; KNL-LABEL: usat_trunc_wb_256_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
@@ -549,7 +549,7 @@ define void @usat_trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) {
; KNL-NEXT: retq
;
; SKX-LABEL: usat_trunc_wb_256_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovuswb %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -562,7 +562,7 @@ define void @usat_trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) {
define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
; KNL-LABEL: usat_trunc_wb_256:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
@@ -570,7 +570,7 @@ define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
; KNL-NEXT: retq
;
; SKX-LABEL: usat_trunc_wb_256:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovuswb %ymm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -582,14 +582,14 @@ define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
define void @usat_trunc_wb_128_mem(<8 x i16> %i, <8 x i8>* %res) {
; KNL-LABEL: usat_trunc_wb_128_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpminuw {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovq %xmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: usat_trunc_wb_128_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovuswb %xmm0, (%rdi)
; SKX-NEXT: retq
%x3 = icmp ult <8 x i16> %i, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
@@ -601,7 +601,7 @@ define void @usat_trunc_wb_128_mem(<8 x i16> %i, <8 x i8>* %res) {
define void @usat_trunc_db_512_mem(<16 x i32> %i, <16 x i8>* %res) {
; ALL-LABEL: usat_trunc_db_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovusdb %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -614,7 +614,7 @@ define void @usat_trunc_db_512_mem(<16 x i32> %i, <16 x i8>* %res) {
define void @usat_trunc_qb_512_mem(<8 x i64> %i, <8 x i8>* %res) {
; ALL-LABEL: usat_trunc_qb_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovusqb %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -627,7 +627,7 @@ define void @usat_trunc_qb_512_mem(<8 x i64> %i, <8 x i8>* %res) {
define void @usat_trunc_qd_512_mem(<8 x i64> %i, <8 x i32>* %res) {
; ALL-LABEL: usat_trunc_qd_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovusqd %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -640,7 +640,7 @@ define void @usat_trunc_qd_512_mem(<8 x i64> %i, <8 x i32>* %res) {
define void @usat_trunc_qw_512_mem(<8 x i64> %i, <8 x i16>* %res) {
; ALL-LABEL: usat_trunc_qw_512_mem:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovusqw %zmm0, (%rdi)
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -653,14 +653,14 @@ define void @usat_trunc_qw_512_mem(<8 x i64> %i, <8 x i16>* %res) {
define <32 x i8> @usat_trunc_db_1024(<32 x i32> %i) {
; KNL-LABEL: usat_trunc_db_1024:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovusdb %zmm0, %xmm0
; KNL-NEXT: vpmovusdb %zmm1, %xmm1
; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: usat_trunc_db_1024:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; SKX-NEXT: vpminud %zmm2, %zmm1, %zmm1
; SKX-NEXT: vpminud %zmm2, %zmm0, %zmm0
@@ -677,7 +677,7 @@ define <32 x i8> @usat_trunc_db_1024(<32 x i32> %i) {
define void @usat_trunc_db_1024_mem(<32 x i32> %i, <32 x i8>* %p) {
; KNL-LABEL: usat_trunc_db_1024_mem:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovusdb %zmm0, %xmm0
; KNL-NEXT: vpmovusdb %zmm1, %xmm1
; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -686,7 +686,7 @@ define void @usat_trunc_db_1024_mem(<32 x i32> %i, <32 x i8>* %p) {
; KNL-NEXT: retq
;
; SKX-LABEL: usat_trunc_db_1024_mem:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; SKX-NEXT: vpminud %zmm2, %zmm1, %zmm1
; SKX-NEXT: vpminud %zmm2, %zmm0, %zmm0
@@ -705,7 +705,7 @@ define void @usat_trunc_db_1024_mem(<32 x i32> %i, <32 x i8>* %p) {
define <16 x i16> @usat_trunc_dw_512(<16 x i32> %i) {
; ALL-LABEL: usat_trunc_dw_512:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpmovusdw %zmm0, %ymm0
; ALL-NEXT: retq
%x3 = icmp ult <16 x i32> %i, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
@@ -716,7 +716,7 @@ define <16 x i16> @usat_trunc_dw_512(<16 x i32> %i) {
define <8 x i8> @usat_trunc_wb_128(<8 x i16> %i) {
; ALL-LABEL: usat_trunc_wb_128:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpminuw {{.*}}(%rip), %xmm0, %xmm0
; ALL-NEXT: retq
%x3 = icmp ult <8 x i16> %i, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
@@ -727,7 +727,7 @@ define <8 x i8> @usat_trunc_wb_128(<8 x i16> %i) {
define <16 x i16> @usat_trunc_qw_1024(<16 x i64> %i) {
; ALL-LABEL: usat_trunc_qw_1024:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vpbroadcastq {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
; ALL-NEXT: vpminuq %zmm2, %zmm1, %zmm1
; ALL-NEXT: vpminuq %zmm2, %zmm0, %zmm0
@@ -744,7 +744,7 @@ define <16 x i16> @usat_trunc_qw_1024(<16 x i64> %i) {
define <16 x i8> @usat_trunc_db_256(<8 x i32> %x) {
; KNL-LABEL: usat_trunc_db_256:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
; KNL-NEXT: vpminud %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
@@ -753,7 +753,7 @@ define <16 x i8> @usat_trunc_db_256(<8 x i32> %x) {
; KNL-NEXT: retq
;
; SKX-LABEL: usat_trunc_db_256:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpminud {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT: vpmovdw %ymm0, %xmm0
; SKX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
diff --git a/test/CodeGen/X86/avx512-unsafe-fp-math.ll b/test/CodeGen/X86/avx512-unsafe-fp-math.ll
index 36f2f0a5acf..00c9e4c957c 100644
--- a/test/CodeGen/X86/avx512-unsafe-fp-math.ll
+++ b/test/CodeGen/X86/avx512-unsafe-fp-math.ll
@@ -4,12 +4,12 @@
define <16 x float> @test_max_v16f32(<16 x float> * %a_ptr, <16 x float> %b) {
; CHECK_UNSAFE-LABEL: test_max_v16f32:
-; CHECK_UNSAFE: # BB#0:
+; CHECK_UNSAFE: # %bb.0:
; CHECK_UNSAFE-NEXT: vmaxps (%rdi), %zmm0, %zmm0
; CHECK_UNSAFE-NEXT: retq
;
; CHECK-LABEL: test_max_v16f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vmaxps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -21,12 +21,12 @@ define <16 x float> @test_max_v16f32(<16 x float> * %a_ptr, <16 x float> %b) {
define <16 x float> @test_min_v16f32(<16 x float>* %a_ptr, <16 x float> %b) {
; CHECK_UNSAFE-LABEL: test_min_v16f32:
-; CHECK_UNSAFE: # BB#0:
+; CHECK_UNSAFE: # %bb.0:
; CHECK_UNSAFE-NEXT: vminps (%rdi), %zmm0, %zmm0
; CHECK_UNSAFE-NEXT: retq
;
; CHECK-LABEL: test_min_v16f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vminps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -38,12 +38,12 @@ define <16 x float> @test_min_v16f32(<16 x float>* %a_ptr, <16 x float> %b) {
define <8 x double> @test_max_v8f64(<8 x double> * %a_ptr, <8 x double> %b) {
; CHECK_UNSAFE-LABEL: test_max_v8f64:
-; CHECK_UNSAFE: # BB#0:
+; CHECK_UNSAFE: # %bb.0:
; CHECK_UNSAFE-NEXT: vmaxpd (%rdi), %zmm0, %zmm0
; CHECK_UNSAFE-NEXT: retq
;
; CHECK-LABEL: test_max_v8f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vmaxpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -55,12 +55,12 @@ define <8 x double> @test_max_v8f64(<8 x double> * %a_ptr, <8 x double> %b) {
define <8 x double> @test_min_v8f64(<8 x double>* %a_ptr, <8 x double> %b) {
; CHECK_UNSAFE-LABEL: test_min_v8f64:
-; CHECK_UNSAFE: # BB#0:
+; CHECK_UNSAFE: # %bb.0:
; CHECK_UNSAFE-NEXT: vminpd (%rdi), %zmm0, %zmm0
; CHECK_UNSAFE-NEXT: retq
;
; CHECK-LABEL: test_min_v8f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vminpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -72,12 +72,12 @@ define <8 x double> @test_min_v8f64(<8 x double>* %a_ptr, <8 x double> %b) {
define float @test_min_f32(float %a, float* %ptr) {
; CHECK_UNSAFE-LABEL: test_min_f32:
-; CHECK_UNSAFE: # BB#0: # %entry
+; CHECK_UNSAFE: # %bb.0: # %entry
; CHECK_UNSAFE-NEXT: vminss (%rdi), %xmm0, %xmm0
; CHECK_UNSAFE-NEXT: retq
;
; CHECK-LABEL: test_min_f32:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: vminss %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -90,12 +90,12 @@ entry:
define double @test_max_f64(double %a, double* %ptr) {
; CHECK_UNSAFE-LABEL: test_max_f64:
-; CHECK_UNSAFE: # BB#0: # %entry
+; CHECK_UNSAFE: # %bb.0: # %entry
; CHECK_UNSAFE-NEXT: vmaxsd (%rdi), %xmm0, %xmm0
; CHECK_UNSAFE-NEXT: retq
;
; CHECK-LABEL: test_max_f64:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vmaxsd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-vbroadcast.ll b/test/CodeGen/X86/avx512-vbroadcast.ll
index 08956556683..27c6fb88e01 100644
--- a/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -4,7 +4,7 @@
define <16 x i32> @_inreg16xi32(i32 %a) {
; ALL-LABEL: _inreg16xi32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpbroadcastd %edi, %zmm0
; ALL-NEXT: retq
%b = insertelement <16 x i32> undef, i32 %a, i32 0
@@ -14,7 +14,7 @@ define <16 x i32> @_inreg16xi32(i32 %a) {
define <8 x i64> @_inreg8xi64(i64 %a) {
; ALL-LABEL: _inreg8xi64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpbroadcastq %rdi, %zmm0
; ALL-NEXT: retq
%b = insertelement <8 x i64> undef, i64 %a, i32 0
@@ -24,7 +24,7 @@ define <8 x i64> @_inreg8xi64(i64 %a) {
define <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
; ALL-LABEL: _ss16xfloat_v4:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
%b = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> zeroinitializer
@@ -33,7 +33,7 @@ define <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
define <16 x float> @_inreg16xfloat(float %a) {
; ALL-LABEL: _inreg16xfloat:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
%b = insertelement <16 x float> undef, float %a, i32 0
@@ -43,7 +43,7 @@ define <16 x float> @_inreg16xfloat(float %a) {
define <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
; ALL-LABEL: _ss16xfloat_mask:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; ALL-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; ALL-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
@@ -58,7 +58,7 @@ define <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %m
define <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
; ALL-LABEL: _ss16xfloat_maskz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
@@ -72,7 +72,7 @@ define <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
; ALL-LABEL: _ss16xfloat_load:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss (%rdi), %zmm0
; ALL-NEXT: retq
%a = load float, float* %a.ptr
@@ -83,7 +83,7 @@ define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
; ALL-LABEL: _ss16xfloat_mask_load:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT: vbroadcastss (%rdi), %zmm0 {%k1}
@@ -98,7 +98,7 @@ define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16
define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
; ALL-LABEL: _ss16xfloat_maskz_load:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; ALL-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
; ALL-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z}
@@ -113,7 +113,7 @@ define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1)
define <8 x double> @_inreg8xdouble(double %a) {
; ALL-LABEL: _inreg8xdouble:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
; ALL-NEXT: retq
%b = insertelement <8 x double> undef, double %a, i32 0
@@ -123,7 +123,7 @@ define <8 x double> @_inreg8xdouble(double %a) {
define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; ALL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; ALL-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
@@ -139,7 +139,7 @@ define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %m
define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
@@ -154,7 +154,7 @@ define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
; ALL-LABEL: _sd8xdouble_load:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0
; ALL-NEXT: retq
%a = load double, double* %a.ptr
@@ -165,7 +165,7 @@ define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask_load:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
@@ -181,7 +181,7 @@ define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8
define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz_load:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; ALL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; ALL-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
@@ -197,7 +197,7 @@ define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1)
define <16 x i32> @_xmm16xi32(<16 x i32> %a) {
; ALL-LABEL: _xmm16xi32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
%b = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> zeroinitializer
@@ -206,7 +206,7 @@ define <16 x i32> @_xmm16xi32(<16 x i32> %a) {
define <16 x float> @_xmm16xfloat(<16 x float> %a) {
; ALL-LABEL: _xmm16xfloat:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
%b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> zeroinitializer
@@ -215,7 +215,7 @@ define <16 x float> @_xmm16xfloat(<16 x float> %a) {
define <16 x i32> @test_vbroadcast() {
; ALL-LABEL: test_vbroadcast:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; ALL-NEXT: vcmpunordps %zmm0, %zmm0, %k1
; ALL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -234,7 +234,7 @@ entry:
; IR generated will produce broadcasts at the end.
define <8 x double> @test_set1_pd(double %d) #2 {
; ALL-LABEL: test_set1_pd:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
; ALL-NEXT: retq
entry:
@@ -251,7 +251,7 @@ entry:
define <8 x i64> @test_set1_epi64(i64 %d) #2 {
; ALL-LABEL: test_set1_epi64:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vpbroadcastq %rdi, %zmm0
; ALL-NEXT: retq
entry:
@@ -268,7 +268,7 @@ entry:
define <16 x float> @test_set1_ps(float %f) #2 {
; ALL-LABEL: test_set1_ps:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
entry:
@@ -293,7 +293,7 @@ entry:
define <16 x i32> @test_set1_epi32(i32 %f) #2 {
; ALL-LABEL: test_set1_epi32:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vpbroadcastd %edi, %zmm0
; ALL-NEXT: retq
entry:
@@ -320,7 +320,7 @@ entry:
; Verify that the IR generated will produce the broadcast at the end.
define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a) {
; ALL-LABEL: test_mm512_broadcastsd_pd:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
; ALL-NEXT: retq
entry:
@@ -338,7 +338,7 @@ entry:
define <16 x float> @test1(<8 x float>%a) {
; ALL-LABEL: test1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
%res = shufflevector <8 x float> %a, <8 x float> undef, <16 x i32> zeroinitializer
@@ -347,7 +347,7 @@ define <16 x float> @test1(<8 x float>%a) {
define <8 x double> @test2(<4 x double>%a) {
; ALL-LABEL: test2:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
; ALL-NEXT: retq
%res = shufflevector <4 x double> %a, <4 x double> undef, <8 x i32> zeroinitializer
@@ -356,13 +356,13 @@ define <8 x double> @test2(<4 x double>%a) {
define <64 x i8> @_invec32xi8(<32 x i8>%a) {
; AVX512F-LABEL: _invec32xi8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: _invec32xi8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm0, %zmm0
; AVX512BW-NEXT: retq
%res = shufflevector <32 x i8> %a, <32 x i8> undef, <64 x i32> zeroinitializer
@@ -371,13 +371,13 @@ define <64 x i8> @_invec32xi8(<32 x i8>%a) {
define <32 x i16> @_invec16xi16(<16 x i16>%a) {
; AVX512F-LABEL: _invec16xi16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: _invec16xi16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastw %xmm0, %zmm0
; AVX512BW-NEXT: retq
%res = shufflevector <16 x i16> %a, <16 x i16> undef, <32 x i32> zeroinitializer
@@ -386,7 +386,7 @@ define <32 x i16> @_invec16xi16(<16 x i16>%a) {
define <16 x i32> @_invec8xi32(<8 x i32>%a) {
; ALL-LABEL: _invec8xi32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
%res = shufflevector <8 x i32> %a, <8 x i32> undef, <16 x i32> zeroinitializer
@@ -395,7 +395,7 @@ define <16 x i32> @_invec8xi32(<8 x i32>%a) {
define <8 x i64> @_invec4xi64(<4 x i64>%a) {
; ALL-LABEL: _invec4xi64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
; ALL-NEXT: retq
%res = shufflevector <4 x i64> %a, <4 x i64> undef, <8 x i32> zeroinitializer
@@ -405,7 +405,7 @@ define <8 x i64> @_invec4xi64(<4 x i64>%a) {
declare void @func_f32(float)
define <16 x float> @broadcast_ss_spill(float %x) {
; ALL-LABEL: broadcast_ss_spill:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: subq $24, %rsp
; ALL-NEXT: .cfi_def_cfa_offset 32
; ALL-NEXT: vaddss %xmm0, %xmm0, %xmm0
@@ -424,7 +424,7 @@ define <16 x float> @broadcast_ss_spill(float %x) {
declare void @func_f64(double)
define <8 x double> @broadcast_sd_spill(double %x) {
; ALL-LABEL: broadcast_sd_spill:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: subq $24, %rsp
; ALL-NEXT: .cfi_def_cfa_offset 32
; ALL-NEXT: vaddsd %xmm0, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/avx512-vbroadcasti128.ll b/test/CodeGen/X86/avx512-vbroadcasti128.ll
index a88e25f6210..c5ecb1559b4 100644
--- a/test/CodeGen/X86/avx512-vbroadcasti128.ll
+++ b/test/CodeGen/X86/avx512-vbroadcasti128.ll
@@ -9,7 +9,7 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_2f64_4f64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
@@ -21,7 +21,7 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_2i64_4i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
@@ -33,7 +33,7 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
@@ -45,7 +45,7 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
@@ -57,7 +57,7 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
@@ -69,7 +69,7 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
@@ -85,7 +85,7 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_2f64_8f64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -97,7 +97,7 @@ define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_2i64_8i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -109,7 +109,7 @@ define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4f32_16f32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -121,7 +121,7 @@ define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4i32_16i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -133,20 +133,20 @@ define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_8i16_32i16:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_8i16_32i16:
-; X64-AVX512BWVL: ## BB#0:
+; X64-AVX512BWVL: ## %bb.0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_8i16_32i16:
-; X64-AVX512DQVL: ## BB#0:
+; X64-AVX512DQVL: ## %bb.0:
; X64-AVX512DQVL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
@@ -159,20 +159,20 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_16i8_64i8:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_16i8_64i8:
-; X64-AVX512BWVL: ## BB#0:
+; X64-AVX512BWVL: ## %bb.0:
; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_16i8_64i8:
-; X64-AVX512DQVL: ## BB#0:
+; X64-AVX512DQVL: ## %bb.0:
; X64-AVX512DQVL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
@@ -185,7 +185,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
; X64-AVX512VL-LABEL: PR29088:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX512VL-NEXT: vmovdqa %ymm1, (%rsi)
@@ -193,7 +193,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: PR29088:
-; X64-AVX512BWVL: ## BB#0:
+; X64-AVX512BWVL: ## %bb.0:
; X64-AVX512BWVL-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX512BWVL-NEXT: vmovdqa %ymm1, (%rsi)
@@ -201,7 +201,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: PR29088:
-; X64-AVX512DQVL: ## BB#0:
+; X64-AVX512DQVL: ## %bb.0:
; X64-AVX512DQVL-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512DQVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512DQVL-NEXT: vmovaps %ymm1, (%rsi)
diff --git a/test/CodeGen/X86/avx512-vbroadcasti256.ll b/test/CodeGen/X86/avx512-vbroadcasti256.ll
index 1896bc714c2..b7710f3237a 100644
--- a/test/CodeGen/X86/avx512-vbroadcasti256.ll
+++ b/test/CodeGen/X86/avx512-vbroadcasti256.ll
@@ -5,7 +5,7 @@
define <8 x double> @test_broadcast_4f64_8f64(<4 x double> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4f64_8f64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -17,7 +17,7 @@ define <8 x double> @test_broadcast_4f64_8f64(<4 x double> *%p) nounwind {
define <8 x i64> @test_broadcast_4i64_8i64(<4 x i64> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4i64_8i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -29,7 +29,7 @@ define <8 x i64> @test_broadcast_4i64_8i64(<4 x i64> *%p) nounwind {
define <16 x float> @test_broadcast_8f32_16f32(<8 x float> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_8f32_16f32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -41,7 +41,7 @@ define <16 x float> @test_broadcast_8f32_16f32(<8 x float> *%p) nounwind {
define <16 x i32> @test_broadcast_8i32_16i32(<8 x i32> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_8i32_16i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -53,20 +53,20 @@ define <16 x i32> @test_broadcast_8i32_16i32(<8 x i32> *%p) nounwind {
define <32 x i16> @test_broadcast_16i16_32i16(<16 x i16> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_16i16_32i16:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa (%rdi), %ymm1
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_16i16_32i16:
-; X64-AVX512BWVL: ## BB#0:
+; X64-AVX512BWVL: ## %bb.0:
; X64-AVX512BWVL-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_16i16_32i16:
-; X64-AVX512DQVL: ## BB#0:
+; X64-AVX512DQVL: ## %bb.0:
; X64-AVX512DQVL-NEXT: vmovdqa (%rdi), %ymm1
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
@@ -79,20 +79,20 @@ define <32 x i16> @test_broadcast_16i16_32i16(<16 x i16> *%p) nounwind {
define <64 x i8> @test_broadcast_32i8_64i8(<32 x i8> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_32i8_64i8:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa (%rdi), %ymm1
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_32i8_64i8:
-; X64-AVX512BWVL: ## BB#0:
+; X64-AVX512BWVL: ## %bb.0:
; X64-AVX512BWVL-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_32i8_64i8:
-; X64-AVX512DQVL: ## BB#0:
+; X64-AVX512DQVL: ## %bb.0:
; X64-AVX512DQVL-NEXT: vmovdqa (%rdi), %ymm1
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index 1af9ffebeb3..887ec006956 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -4,7 +4,7 @@
define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps %zmm1, %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -15,7 +15,7 @@ define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmplepd %zmm1, %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -26,7 +26,7 @@ define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwind {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -38,7 +38,7 @@ define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwin
define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test4_unsigned:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -49,7 +49,7 @@ define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1)
define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -60,7 +60,7 @@ define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1) nounwind {
; CHECK-LABEL: test6_unsigned:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -71,14 +71,14 @@ define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1) noun
define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
; KNL-LABEL: test7:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vxorps %xmm2, %xmm2, %xmm2
; KNL-NEXT: vcmpltps %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test7:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; SKX-NEXT: vcmpltps %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
@@ -91,14 +91,14 @@ define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
; KNL-LABEL: test8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; KNL-NEXT: vcmpltpd %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; SKX-NEXT: vcmpltpd %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
@@ -110,7 +110,7 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-LABEL: test9:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
@@ -119,7 +119,7 @@ define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-NEXT: retq
;
; SKX-LABEL: test9:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; SKX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -130,7 +130,7 @@ define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-LABEL: test10:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vcmpeqps %zmm1, %zmm0, %k1
@@ -139,7 +139,7 @@ define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-NEXT: retq
;
; SKX-LABEL: test10:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -151,7 +151,7 @@ define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
define <8 x i32> @test11_unsigned(<8 x i32> %x, <8 x i32> %y) nounwind {
; CHECK-LABEL: test11_unsigned:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%mask = icmp ugt <8 x i32> %x, %y
@@ -161,7 +161,7 @@ define <8 x i32> @test11_unsigned(<8 x i32> %x, <8 x i32> %y) nounwind {
define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; KNL-LABEL: test12:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; KNL-NEXT: kunpckbw %k0, %k1, %k0
@@ -171,7 +171,7 @@ define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; KNL-NEXT: retq
;
; SKX-LABEL: test12:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckbw %k0, %k1, %k0
@@ -186,7 +186,7 @@ define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
define i32 @test12_v32i32(<32 x i32> %a, <32 x i32> %b) nounwind {
; KNL-LABEL: test12_v32i32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: movq %rsp, %rbp
; KNL-NEXT: andq $-32, %rsp
@@ -334,7 +334,7 @@ define i32 @test12_v32i32(<32 x i32> %a, <32 x i32> %b) nounwind {
; KNL-NEXT: retq
;
; SKX-LABEL: test12_v32i32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckwd %k0, %k1, %k0
@@ -348,7 +348,7 @@ define i32 @test12_v32i32(<32 x i32> %a, <32 x i32> %b) nounwind {
define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
; KNL-LABEL: test12_v64i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: movq %rsp, %rbp
; KNL-NEXT: andq $-32, %rsp
@@ -647,7 +647,7 @@ define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
; KNL-NEXT: retq
;
; SKX-LABEL: test12_v64i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpeqw %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckdq %k0, %k1, %k0
@@ -661,7 +661,7 @@ define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
define <16 x i32> @test13(<16 x float>%a, <16 x float>%b)
; CHECK-LABEL: test13:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -673,7 +673,7 @@ define <16 x i32> @test13(<16 x float>%a, <16 x float>%b)
define <16 x i32> @test14(<16 x i32>%a, <16 x i32>%b) {
; CHECK-LABEL: test14:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm2
; CHECK-NEXT: vpcmpgtd %zmm0, %zmm2, %k1
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -688,7 +688,7 @@ define <16 x i32> @test14(<16 x i32>%a, <16 x i32>%b) {
define <8 x i64> @test15(<8 x i64>%a, <8 x i64>%b) {
; CHECK-LABEL: test15:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm2
; CHECK-NEXT: vpcmpgtq %zmm0, %zmm2, %k1
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -703,7 +703,7 @@ define <8 x i64> @test15(<8 x i64>%a, <8 x i64>%b) {
define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k1
; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -714,7 +714,7 @@ define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind
define <16 x i32> @test17(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test17:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -726,7 +726,7 @@ define <16 x i32> @test17(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
define <16 x i32> @test18(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test18:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpled (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -738,7 +738,7 @@ define <16 x i32> @test18(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test19:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -750,7 +750,7 @@ define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test20:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
@@ -764,7 +764,7 @@ define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i3
define <8 x i64> @test21(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test21:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
@@ -778,7 +778,7 @@ define <8 x i64> @test21(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y
define <8 x i64> @test22(<8 x i64> %x, <8 x i64>* %y.ptr, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test22:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
@@ -793,7 +793,7 @@ define <8 x i64> @test22(<8 x i64> %x, <8 x i64>* %y.ptr, <8 x i64> %x1, <8 x i6
define <16 x i32> @test23(<16 x i32> %x, <16 x i32>* %y.ptr, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test23:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
@@ -808,7 +808,7 @@ define <16 x i32> @test23(<16 x i32> %x, <16 x i32>* %y.ptr, <16 x i32> %x1, <16
define <8 x i64> @test24(<8 x i64> %x, <8 x i64> %x1, i64* %yb.ptr) nounwind {
; CHECK-LABEL: test24:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -822,7 +822,7 @@ define <8 x i64> @test24(<8 x i64> %x, <8 x i64> %x1, i64* %yb.ptr) nounwind {
define <16 x i32> @test25(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test25:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpled (%rdi){1to16}, %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -836,7 +836,7 @@ define <16 x i32> @test25(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1) nounwind
define <16 x i32> @test26(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test26:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
@@ -853,7 +853,7 @@ define <16 x i32> @test26(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1, <16 x i32
define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test27:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleq (%rdi){1to8}, %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
@@ -870,7 +870,7 @@ define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y
define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1) {
; KNL-LABEL: test28:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; KNL-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
; KNL-NEXT: kxnorw %k1, %k0, %k1
@@ -879,7 +879,7 @@ define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1
; KNL-NEXT: retq
;
; SKX-LABEL: test28:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; SKX-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
; SKX-NEXT: kxnorb %k1, %k0, %k0
@@ -894,7 +894,7 @@ define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1
define <16 x i8>@test29(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32> %y1) {
; KNL-LABEL: test29:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; KNL-NEXT: vpcmpgtd %zmm3, %zmm2, %k1
; KNL-NEXT: kxorw %k1, %k0, %k1
@@ -904,7 +904,7 @@ define <16 x i8>@test29(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32>
; KNL-NEXT: retq
;
; SKX-LABEL: test29:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; SKX-NEXT: vpcmpgtd %zmm3, %zmm2, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
@@ -920,13 +920,13 @@ define <16 x i8>@test29(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32>
define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
; KNL-LABEL: test30:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test30:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -938,13 +938,13 @@ define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp) nounwind {
; KNL-LABEL: test31:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vcmpltpd (%rdi), %xmm0, %xmm2
; KNL-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test31:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpltpd (%rdi), %xmm0, %k1
; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -957,13 +957,13 @@ define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp
define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp) nounwind {
; KNL-LABEL: test32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vcmpltpd (%rdi), %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpltpd (%rdi), %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -976,7 +976,7 @@ define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp
define <8 x double> @test33(<8 x double> %x, <8 x double> %x1, <8 x double>* %yp) nounwind {
; CHECK-LABEL: test33:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpltpd (%rdi), %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -988,13 +988,13 @@ define <8 x double> @test33(<8 x double> %x, <8 x double> %x1, <8 x double>* %yp
define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) nounwind {
; KNL-LABEL: test34:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vcmpltps (%rdi), %xmm0, %xmm2
; KNL-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test34:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpltps (%rdi), %xmm0, %k1
; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -1006,7 +1006,7 @@ define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) no
define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) nounwind {
; KNL-LABEL: test35:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vmovups (%rdi), %ymm2
@@ -1016,7 +1016,7 @@ define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) no
; KNL-NEXT: retq
;
; SKX-LABEL: test35:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpltps (%rdi), %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -1029,7 +1029,7 @@ define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) no
define <16 x float> @test36(<16 x float> %x, <16 x float> %x1, <16 x float>* %yp) nounwind {
; CHECK-LABEL: test36:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpltps (%rdi), %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -1041,7 +1041,7 @@ define <16 x float> @test36(<16 x float> %x, <16 x float> %x1, <16 x float>* %yp
define <8 x double> @test37(<8 x double> %x, <8 x double> %x1, double* %ptr) nounwind {
; CHECK-LABEL: test37:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -1057,14 +1057,14 @@ define <8 x double> @test37(<8 x double> %x, <8 x double> %x1, double* %ptr) nou
define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nounwind {
; KNL-LABEL: test38:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vbroadcastsd (%rdi), %ymm2
; KNL-NEXT: vcmpltpd %ymm2, %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test38:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpltpd (%rdi){1to4}, %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -1080,14 +1080,14 @@ define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nou
define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nounwind {
; KNL-LABEL: test39:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; KNL-NEXT: vcmpltpd %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test39:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpltpd (%rdi){1to2}, %xmm0, %k1
; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -1104,7 +1104,7 @@ define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nou
define <16 x float> @test40(<16 x float> %x, <16 x float> %x1, float* %ptr) nounwind {
; CHECK-LABEL: test40:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpltps (%rdi){1to16}, %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -1120,7 +1120,7 @@ define <16 x float> @test40(<16 x float> %x, <16 x float> %x1, float* %ptr) n
define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) nounwind {
; KNL-LABEL: test41:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vbroadcastss (%rdi), %ymm2
@@ -1130,7 +1130,7 @@ define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) noun
; KNL-NEXT: retq
;
; SKX-LABEL: test41:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpltps (%rdi){1to8}, %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -1146,14 +1146,14 @@ define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) noun
define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) nounwind {
; KNL-LABEL: test42:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vbroadcastss (%rdi), %xmm2
; KNL-NEXT: vcmpltps %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test42:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpltps (%rdi){1to4}, %xmm0, %k1
; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -1169,7 +1169,7 @@ define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) noun
define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x i1> %mask_in) nounwind {
; KNL-LABEL: test43:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwq %xmm2, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm2
; KNL-NEXT: vptestmq %zmm2, %zmm2, %k1
@@ -1178,7 +1178,7 @@ define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x
; KNL-NEXT: retq
;
; SKX-LABEL: test43:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm2, %xmm2
; SKX-NEXT: vpmovw2m %xmm2, %k1
; SKX-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
@@ -1197,7 +1197,7 @@ define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x
define <4 x i32> @test44(<4 x i16> %x, <4 x i16> %y) #0 {
; KNL-LABEL: test44:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; KNL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
@@ -1205,7 +1205,7 @@ define <4 x i32> @test44(<4 x i16> %x, <4 x i16> %y) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: test44:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SKX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
@@ -1219,7 +1219,7 @@ define <4 x i32> @test44(<4 x i16> %x, <4 x i16> %y) #0 {
define <2 x i64> @test45(<2 x i16> %x, <2 x i16> %y) #0 {
; KNL-LABEL: test45:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
; KNL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
@@ -1228,7 +1228,7 @@ define <2 x i64> @test45(<2 x i16> %x, <2 x i16> %y) #0 {
; KNL-NEXT: retq
;
; SKX-LABEL: test45:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
; SKX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
@@ -1242,14 +1242,14 @@ define <2 x i64> @test45(<2 x i16> %x, <2 x i16> %y) #0 {
define <2 x i64> @test46(<2 x float> %x, <2 x float> %y) #0 {
; KNL-LABEL: test46:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpmovsxdq %xmm0, %xmm0
; KNL-NEXT: vpsrlq $63, %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test46:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; SKX-NEXT: vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
; SKX-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-vec3-crash.ll b/test/CodeGen/X86/avx512-vec3-crash.ll
index 1da07c11ded..a0b296caf39 100644
--- a/test/CodeGen/X86/avx512-vec3-crash.ll
+++ b/test/CodeGen/X86/avx512-vec3-crash.ll
@@ -4,7 +4,7 @@
; This test crashed during type legalization of SETCC result type.
define <3 x i8 > @foo(<3 x i8>%x, <3 x i8>%a, <3 x i8>%b) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovd %edi, %xmm0
; CHECK-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; CHECK-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/avx512-vpclmulqdq.ll b/test/CodeGen/X86/avx512-vpclmulqdq.ll
index 186cdab05af..00dc6ff3cf0 100644
--- a/test/CodeGen/X86/avx512-vpclmulqdq.ll
+++ b/test/CodeGen/X86/avx512-vpclmulqdq.ll
@@ -2,7 +2,7 @@
define <8 x i64> @test_x86_pclmulqdq(<8 x i64> %a0, <8 x i64> %a1) {
; AVX512_VPCLMULQDQ-LABEL: test_x86_pclmulqdq:
-; AVX512_VPCLMULQDQ: # BB#0:
+; AVX512_VPCLMULQDQ: # %bb.0:
; AVX512_VPCLMULQDQ-NEXT: vpclmulqdq $1, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7d,0x48,0x44,0xc1,0x01]
; AVX512_VPCLMULQDQ-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.pclmulqdq.512(<8 x i64> %a0, <8 x i64> %a1, i8 1)
diff --git a/test/CodeGen/X86/avx512-vpermv3-commute.ll b/test/CodeGen/X86/avx512-vpermv3-commute.ll
index 2827f471762..9031a296bec 100644
--- a/test/CodeGen/X86/avx512-vpermv3-commute.ll
+++ b/test/CodeGen/X86/avx512-vpermv3-commute.ll
@@ -7,7 +7,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32>, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2d (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2p
@@ -19,7 +19,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 -1)
@@ -30,7 +30,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 -1)
@@ -41,7 +41,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64>, <8 x i64>, <
define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2q %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
@@ -52,7 +52,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2d (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -65,7 +65,7 @@ declare <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64>, <8 x do
define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, double* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2pd (%rdi){1to8}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -80,7 +80,7 @@ declare <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32>, <16 x
define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2ps %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -93,7 +93,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2q %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -105,7 +105,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32>, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermi2d %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
@@ -116,7 +116,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32>, <4 x i32>, <
define <4 x i32>@test_int_x86_avx512_mask_vpermt2var_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermi2d %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
@@ -127,7 +127,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.128(<4 x i32>, <4 x i32>,
define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2d %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -137,7 +137,7 @@ define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128(<4 x i32> %x0, <4 x
define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128_broadcast(<4 x i32> %x0, <4 x i32> %x1, i32* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_128_broadcast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2d (%rdi){1to4}, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -152,7 +152,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32>, <8 x i32>, <
define <8 x i32>@test_int_x86_avx512_mask_vpermt2var_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
@@ -163,7 +163,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.256(<8 x i32>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -175,7 +175,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vpermi2var.pd.128(<2 x double>, <2 x
define <2 x double>@test_int_x86_avx512_mask_vpermi2var_pd_128(<2 x double> %x0, <2 x i64> %x1, <2 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2pd %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.vpermi2var.pd.128(<2 x double> %x0, <2 x i64> %x1, <2 x double> %x2, i8 -1)
@@ -186,7 +186,7 @@ declare <4 x double> @llvm.x86.avx512.mask.vpermi2var.pd.256(<4 x double>, <4 x
define <4 x double>@test_int_x86_avx512_mask_vpermi2var_pd_256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2pd %ymm2, %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx512.mask.vpermi2var.pd.256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2, i8 -1)
@@ -197,7 +197,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vpermi2var.ps.128(<4 x float>, <4 x i3
define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128(<4 x float> %x0, <4 x i32> %x1, <4 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2ps %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vpermi2var.ps.128(<4 x float> %x0, <4 x i32> %x1, <4 x float> %x2, i8 -1)
@@ -208,7 +208,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vpermi2var.ps.256(<8 x float>, <8 x i3
define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2ps %ymm2, %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx512.mask.vpermi2var.ps.256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2, i8 -1)
@@ -217,7 +217,7 @@ define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256(<8 x float> %x0, <
define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256_load(<8 x float> %x0, <8 x i32> %x1, <8 x float>* %x2p) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_256_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2ps (%rdi), %ymm1, %ymm0
; CHECK-NEXT: retq
%x2 = load <8 x float>, <8 x float>* %x2p
@@ -227,7 +227,7 @@ define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256_load(<8 x float> %
define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256_broadcast(<8 x float> %x0, <8 x i32> %x1, float* %x2ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_256_broadcast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2ps (%rdi){1to8}, %ymm1, %ymm0
; CHECK-NEXT: retq
%x2s = load float, float* %x2ptr
@@ -241,7 +241,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2b %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1)
@@ -252,7 +252,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8>, <32 x i8>,
define <32 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermt2b %ymm2, %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1)
@@ -263,7 +263,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermi2b %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1)
@@ -272,7 +272,7 @@ define <16 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_128(<16 x i8> %x0, <16 x
define <16 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_128_load(<16 x i8> %x0, <16 x i8> %x1, <16 x i8>* %x2p) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_qi_128_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermi2b (%rdi), %xmm1, %xmm0
; CHECK-NEXT: retq
%x2 = load <16 x i8>, <16 x i8>* %x2p
@@ -284,7 +284,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.256(<32 x i8>, <32 x i8>,
define <32 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermi2b %ymm2, %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1)
@@ -295,7 +295,7 @@ declare <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2b %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -305,7 +305,7 @@ define <16 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_128(<16 x i8> %x0, <16
define <16 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_128_load(<16 x i8> %x0, <16 x i8> %x1, <16 x i8>* %x2p, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_128_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2b (%rdi), %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -318,7 +318,7 @@ declare <32 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.256(<32 x i8>, <32 x i8>,
define <32 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2b %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -328,7 +328,7 @@ define <32 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_256(<32 x i8> %x0, <32
define <32 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_256_load(<32 x i8> %x0, <32 x i8> %x1, <32 x i8>* %x2p, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_256_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2b (%rdi), %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-vpternlog-commute.ll b/test/CodeGen/X86/avx512-vpternlog-commute.ll
index 5e1b28c1983..a67994efa0e 100644
--- a/test/CodeGen/X86/avx512-vpternlog-commute.ll
+++ b/test/CodeGen/X86/avx512-vpternlog-commute.ll
@@ -8,7 +8,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32>, <16 x i32>,
define <16 x i32> @vpternlog_v16i32_012(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
@@ -17,7 +17,7 @@ define <16 x i32> @vpternlog_v16i32_012(<16 x i32> %x0, <16 x i32> %x1, <16 x i3
define <16 x i32> @vpternlog_v16i32_102(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
@@ -26,7 +26,7 @@ define <16 x i32> @vpternlog_v16i32_102(<16 x i32> %x0, <16 x i32> %x1, <16 x i3
define <16 x i32> @vpternlog_v16i32_210(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, %zmm0, %zmm2, %zmm1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -36,7 +36,7 @@ define <16 x i32> @vpternlog_v16i32_210(<16 x i32> %x0, <16 x i32> %x1, <16 x i3
define <16 x i32> @vpternlog_v16i32_012_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
@@ -46,7 +46,7 @@ define <16 x i32> @vpternlog_v16i32_012_load0(<16 x i32>* %x0ptr, <16 x i32> %x1
define <16 x i32> @vpternlog_v16i32_012_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
@@ -56,7 +56,7 @@ define <16 x i32> @vpternlog_v16i32_012_load1(<16 x i32> %x0, <16 x i32>* %x1ptr
define <16 x i32> @vpternlog_v16i32_012_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_012_load2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
@@ -66,7 +66,7 @@ define <16 x i32> @vpternlog_v16i32_012_load2(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_102_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
@@ -76,7 +76,7 @@ define <16 x i32> @vpternlog_v16i32_102_load0(<16 x i32>* %x0ptr, <16 x i32> %x1
define <16 x i32> @vpternlog_v16i32_102_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
@@ -86,7 +86,7 @@ define <16 x i32> @vpternlog_v16i32_102_load1(<16 x i32> %x0, <16 x i32>* %x1ptr
define <16 x i32> @vpternlog_v16i32_102_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_102_load2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
@@ -96,7 +96,7 @@ define <16 x i32> @vpternlog_v16i32_102_load2(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_210_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
@@ -106,7 +106,7 @@ define <16 x i32> @vpternlog_v16i32_210_load0(<16 x i32>* %x0ptr, <16 x i32> %x1
define <16 x i32> @vpternlog_v16i32_210_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $92, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
@@ -116,7 +116,7 @@ define <16 x i32> @vpternlog_v16i32_210_load1(<16 x i32> %x0, <16 x i32>* %x1ptr
define <16 x i32> @vpternlog_v16i32_210_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_210_load2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
@@ -126,7 +126,7 @@ define <16 x i32> @vpternlog_v16i32_210_load2(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_021_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_021_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
@@ -136,7 +136,7 @@ define <16 x i32> @vpternlog_v16i32_021_load0(<16 x i32>* %x0ptr, <16 x i32> %x1
define <16 x i32> @vpternlog_v16i32_021_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_021_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
@@ -146,7 +146,7 @@ define <16 x i32> @vpternlog_v16i32_021_load1(<16 x i32> %x0, <16 x i32>* %x1ptr
define <16 x i32> @vpternlog_v16i32_021_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_021_load2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
@@ -156,7 +156,7 @@ define <16 x i32> @vpternlog_v16i32_021_load2(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_012_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -166,7 +166,7 @@ define <16 x i32> @vpternlog_v16i32_012_mask(<16 x i32> %x0, <16 x i32> %x1, <16
define <16 x i32> @vpternlog_v16i32_102_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -177,7 +177,7 @@ define <16 x i32> @vpternlog_v16i32_102_mask(<16 x i32> %x0, <16 x i32> %x1, <16
define <16 x i32> @vpternlog_v16i32_210_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -188,7 +188,7 @@ define <16 x i32> @vpternlog_v16i32_210_mask(<16 x i32> %x0, <16 x i32> %x1, <16
define <16 x i32> @vpternlog_v16i32_012_mask1(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_mask1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -201,7 +201,7 @@ define <16 x i32> @vpternlog_v16i32_012_mask1(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_012_mask2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_mask2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $58, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -214,7 +214,7 @@ define <16 x i32> @vpternlog_v16i32_012_mask2(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_012_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
@@ -227,7 +227,7 @@ define <16 x i32> @vpternlog_v16i32_012_load0_mask(<16 x i32>* %x0ptr, <16 x i32
define <16 x i32> @vpternlog_v16i32_012_load0_mask1(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_mask1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -240,7 +240,7 @@ define <16 x i32> @vpternlog_v16i32_012_load0_mask1(<16 x i32>* %x0ptr, <16 x i3
define <16 x i32> @vpternlog_v16i32_012_load0_mask2(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_mask2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -254,7 +254,7 @@ define <16 x i32> @vpternlog_v16i32_012_load0_mask2(<16 x i32>* %x0ptr, <16 x i3
define <16 x i32> @vpternlog_v16i32_012_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load1_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -265,7 +265,7 @@ define <16 x i32> @vpternlog_v16i32_012_load1_mask(<16 x i32> %x0, <16 x i32>* %
define <16 x i32> @vpternlog_v16i32_012_load1_mask2(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load1_mask2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -279,7 +279,7 @@ define <16 x i32> @vpternlog_v16i32_012_load1_mask2(<16 x i32> %x0, <16 x i32>*
define <16 x i32> @vpternlog_v16i32_012_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -290,7 +290,7 @@ define <16 x i32> @vpternlog_v16i32_012_load2_mask(<16 x i32> %x0, <16 x i32> %x
define <16 x i32> @vpternlog_v16i32_012_load2_mask1(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load2_mask1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -304,7 +304,7 @@ define <16 x i32> @vpternlog_v16i32_012_load2_mask1(<16 x i32> %x0, <16 x i32> %
define <16 x i32> @vpternlog_v16i32_102_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -315,7 +315,7 @@ define <16 x i32> @vpternlog_v16i32_102_load0_mask(<16 x i32>* %x0ptr, <16 x i32
define <16 x i32> @vpternlog_v16i32_102_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load1_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
@@ -328,7 +328,7 @@ define <16 x i32> @vpternlog_v16i32_102_load1_mask(<16 x i32> %x0, <16 x i32>* %
define <16 x i32> @vpternlog_v16i32_102_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -340,7 +340,7 @@ define <16 x i32> @vpternlog_v16i32_102_load2_mask(<16 x i32> %x0, <16 x i32> %x
define <16 x i32> @vpternlog_v16i32_210_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -352,7 +352,7 @@ define <16 x i32> @vpternlog_v16i32_210_load0_mask(<16 x i32>* %x0ptr, <16 x i32
define <16 x i32> @vpternlog_v16i32_210_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load1_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -364,7 +364,7 @@ define <16 x i32> @vpternlog_v16i32_210_load1_mask(<16 x i32> %x0, <16 x i32>* %
define <16 x i32> @vpternlog_v16i32_210_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
@@ -377,7 +377,7 @@ define <16 x i32> @vpternlog_v16i32_210_load2_mask(<16 x i32> %x0, <16 x i32> %x
define <16 x i32> @vpternlog_v16i32_021_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
@@ -390,7 +390,7 @@ define <16 x i32> @vpternlog_v16i32_021_load0_mask(<16 x i32>* %x0ptr, <16 x i32
define <16 x i32> @vpternlog_v16i32_021_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load1_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -401,7 +401,7 @@ define <16 x i32> @vpternlog_v16i32_021_load1_mask(<16 x i32> %x0, <16 x i32>* %
define <16 x i32> @vpternlog_v16i32_021_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -412,7 +412,7 @@ define <16 x i32> @vpternlog_v16i32_021_load2_mask(<16 x i32> %x0, <16 x i32> %x
define <16 x i32> @vpternlog_v16i32_012_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -422,7 +422,7 @@ define <16 x i32> @vpternlog_v16i32_012_maskz(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_102_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -432,7 +432,7 @@ define <16 x i32> @vpternlog_v16i32_102_maskz(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_210_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $78, %zmm0, %zmm2, %zmm1 {%k1} {z}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -443,7 +443,7 @@ define <16 x i32> @vpternlog_v16i32_210_maskz(<16 x i32> %x0, <16 x i32> %x1, <1
define <16 x i32> @vpternlog_v16i32_012_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -454,7 +454,7 @@ define <16 x i32> @vpternlog_v16i32_012_load0_maskz(<16 x i32>* %x0ptr, <16 x i3
define <16 x i32> @vpternlog_v16i32_012_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load1_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -465,7 +465,7 @@ define <16 x i32> @vpternlog_v16i32_012_load1_maskz(<16 x i32> %x0, <16 x i32>*
define <16 x i32> @vpternlog_v16i32_012_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -476,7 +476,7 @@ define <16 x i32> @vpternlog_v16i32_012_load2_maskz(<16 x i32> %x0, <16 x i32> %
define <16 x i32> @vpternlog_v16i32_102_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -487,7 +487,7 @@ define <16 x i32> @vpternlog_v16i32_102_load0_maskz(<16 x i32>* %x0ptr, <16 x i3
define <16 x i32> @vpternlog_v16i32_102_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load1_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -498,7 +498,7 @@ define <16 x i32> @vpternlog_v16i32_102_load1_maskz(<16 x i32> %x0, <16 x i32>*
define <16 x i32> @vpternlog_v16i32_102_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -509,7 +509,7 @@ define <16 x i32> @vpternlog_v16i32_102_load2_maskz(<16 x i32> %x0, <16 x i32> %
define <16 x i32> @vpternlog_v16i32_210_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -520,7 +520,7 @@ define <16 x i32> @vpternlog_v16i32_210_load0_maskz(<16 x i32>* %x0ptr, <16 x i3
define <16 x i32> @vpternlog_v16i32_210_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load1_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $92, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -531,7 +531,7 @@ define <16 x i32> @vpternlog_v16i32_210_load1_maskz(<16 x i32> %x0, <16 x i32>*
define <16 x i32> @vpternlog_v16i32_210_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -542,7 +542,7 @@ define <16 x i32> @vpternlog_v16i32_210_load2_maskz(<16 x i32> %x0, <16 x i32> %
define <16 x i32> @vpternlog_v16i32_021_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -553,7 +553,7 @@ define <16 x i32> @vpternlog_v16i32_021_load0_maskz(<16 x i32>* %x0ptr, <16 x i3
define <16 x i32> @vpternlog_v16i32_021_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load1_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -564,7 +564,7 @@ define <16 x i32> @vpternlog_v16i32_021_load1_maskz(<16 x i32> %x0, <16 x i32>*
define <16 x i32> @vpternlog_v16i32_021_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -575,7 +575,7 @@ define <16 x i32> @vpternlog_v16i32_021_load2_maskz(<16 x i32> %x0, <16 x i32> %
define <16 x i32> @vpternlog_v16i32_012_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x0_scalar = load i32, i32* %ptr_x0
@@ -587,7 +587,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast0(i32* %ptr_x0, <16 x i32> %x1,
define <16 x i32> @vpternlog_v16i32_012_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x1_scalar = load i32, i32* %ptr_x1
@@ -599,7 +599,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast1(<16 x i32> %x0, i32* %ptr_x1,
define <16 x i32> @vpternlog_v16i32_012_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x2_scalar = load i32, i32* %ptr_x2
@@ -611,7 +611,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast2(<16 x i32> %x0, <16 x i32> %x
define <16 x i32> @vpternlog_v16i32_102_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x0_scalar = load i32, i32* %ptr_x0
@@ -623,7 +623,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast0(i32* %ptr_x0, <16 x i32> %x1,
define <16 x i32> @vpternlog_v16i32_102_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x1_scalar = load i32, i32* %ptr_x1
@@ -635,7 +635,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast1(<16 x i32> %x0, i32* %ptr_x1,
define <16 x i32> @vpternlog_v16i32_102_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x2_scalar = load i32, i32* %ptr_x2
@@ -647,7 +647,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast2(<16 x i32> %x0, <16 x i32> %x
define <16 x i32> @vpternlog_v16i32_210_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x0_scalar = load i32, i32* %ptr_x0
@@ -659,7 +659,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast0(i32* %ptr_x0, <16 x i32> %x1,
define <16 x i32> @vpternlog_v16i32_210_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $92, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x1_scalar = load i32, i32* %ptr_x1
@@ -671,7 +671,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast1(<16 x i32> %x0, i32* %ptr_x1,
define <16 x i32> @vpternlog_v16i32_210_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x2_scalar = load i32, i32* %ptr_x2
@@ -683,7 +683,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast2(<16 x i32> %x0, <16 x i32> %x
define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
@@ -698,7 +698,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_012_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -711,7 +711,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast1_mask(<16 x i32> %x0, i32* %x1
define <16 x i32> @vpternlog_v16i32_012_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -724,7 +724,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast2_mask(<16 x i32> %x0, <16 x i3
define <16 x i32> @vpternlog_v16i32_102_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -737,7 +737,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast0_mask(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_102_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast1_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
@@ -752,7 +752,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast1_mask(<16 x i32> %x0, i32* %x1
define <16 x i32> @vpternlog_v16i32_102_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -766,7 +766,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast2_mask(<16 x i32> %x0, <16 x i3
define <16 x i32> @vpternlog_v16i32_210_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -780,7 +780,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast0_mask(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_210_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast1_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -794,7 +794,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast1_mask(<16 x i32> %x0, i32* %x1
define <16 x i32> @vpternlog_v16i32_210_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
@@ -809,7 +809,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast2_mask(<16 x i32> %x0, <16 x i3
define <16 x i32> @vpternlog_v16i32_021_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast0_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
@@ -824,7 +824,7 @@ define <16 x i32> @vpternlog_v16i32_021_broadcast0_mask(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_021_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast1_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -837,7 +837,7 @@ define <16 x i32> @vpternlog_v16i32_021_broadcast1_mask(<16 x i32> %x0, i32* %x1
define <16 x i32> @vpternlog_v16i32_021_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast2_mask:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -850,7 +850,7 @@ define <16 x i32> @vpternlog_v16i32_021_broadcast2_mask(<16 x i32> %x0, <16 x i3
define <16 x i32> @vpternlog_v16i32_012_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -863,7 +863,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast0_maskz(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_012_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -876,7 +876,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast1_maskz(<16 x i32> %x0, i32* %x
define <16 x i32> @vpternlog_v16i32_012_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -889,7 +889,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast2_maskz(<16 x i32> %x0, <16 x i
define <16 x i32> @vpternlog_v16i32_102_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -902,7 +902,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast0_maskz(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_102_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast1_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -915,7 +915,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast1_maskz(<16 x i32> %x0, i32* %x
define <16 x i32> @vpternlog_v16i32_102_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -928,7 +928,7 @@ define <16 x i32> @vpternlog_v16i32_102_broadcast2_maskz(<16 x i32> %x0, <16 x i
define <16 x i32> @vpternlog_v16i32_210_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -941,7 +941,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast0_maskz(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_210_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast1_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $92, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -954,7 +954,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast1_maskz(<16 x i32> %x0, i32* %x
define <16 x i32> @vpternlog_v16i32_210_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -967,7 +967,7 @@ define <16 x i32> @vpternlog_v16i32_210_broadcast2_maskz(<16 x i32> %x0, <16 x i
define <16 x i32> @vpternlog_v16i32_021_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast0_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -980,7 +980,7 @@ define <16 x i32> @vpternlog_v16i32_021_broadcast0_maskz(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_021_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast1_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -993,7 +993,7 @@ define <16 x i32> @vpternlog_v16i32_021_broadcast1_maskz(<16 x i32> %x0, i32* %x
define <16 x i32> @vpternlog_v16i32_021_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast2_maskz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1006,7 +1006,7 @@ define <16 x i32> @vpternlog_v16i32_021_broadcast2_maskz(<16 x i32> %x0, <16 x i
define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask1(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $92, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -1021,7 +1021,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask1(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask2(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1037,7 +1037,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask2(i32* %x0ptr, <16 x i32>
define <16 x i32> @vpternlog_v16i32_012_broadcast1_mask2(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_mask2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1053,7 +1053,7 @@ define <16 x i32> @vpternlog_v16i32_012_broadcast1_mask2(<16 x i32> %x0, i32* %x
define <16 x i32> @vpternlog_v16i32_012_broadcast2_mask1(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_mask1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
diff --git a/test/CodeGen/X86/avx512-vselect-crash.ll b/test/CodeGen/X86/avx512-vselect-crash.ll
index 96af4779b05..31ccf867f7a 100644
--- a/test/CodeGen/X86/avx512-vselect-crash.ll
+++ b/test/CodeGen/X86/avx512-vselect-crash.ll
@@ -3,7 +3,7 @@
define <16 x i32> @test() {
; CHECK-LABEL: test:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512-vselect.ll b/test/CodeGen/X86/avx512-vselect.ll
index 5fc84a0aa81..0edd01e8aef 100644
--- a/test/CodeGen/X86/avx512-vselect.ll
+++ b/test/CodeGen/X86/avx512-vselect.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-unknown"
define <8 x i64> @test1(<8 x i64> %m, <8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpsllq $63, %zmm0, %zmm0
; CHECK-NEXT: vptestmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm1, %zmm2, %zmm0 {%k1}
@@ -24,7 +24,7 @@ entry:
; directly form an SDAG input to the lowering.
define <16 x double> @test2(<16 x float> %x, <16 x float> %y, <16 x double> %a, <16 x double> %b) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorps %xmm6, %xmm6, %xmm6
; CHECK-NEXT: vcmpltps %zmm0, %zmm6, %k0
; CHECK-NEXT: vcmpltps %zmm6, %zmm1, %k1
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll
index f219769531f..cd6f70b36ff 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll
@@ -6,7 +6,7 @@
define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext %__A) {
; X32-LABEL: test_mm512_mask_set1_epi8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebx
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebx, -8
@@ -720,7 +720,7 @@ define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_set1_epi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovq %rdi, %k1
; X64-NEXT: vpbroadcastb %esi, %zmm0 {%k1}
; X64-NEXT: retq
@@ -736,7 +736,7 @@ define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext
define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) {
; X32-LABEL: test_mm512_maskz_set1_epi8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebx
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebx, -8
@@ -1447,7 +1447,7 @@ define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_set1_epi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovq %rdi, %k1
; X64-NEXT: vpbroadcastb %esi, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -1462,14 +1462,14 @@ define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) {
define <8 x i64> @test_mm512_mask_set1_epi16(<8 x i64> %__O, i32 %__M, i16 signext %__A) {
; X32-LABEL: test_mm512_mask_set1_epi16:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastw %eax, %zmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_set1_epi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %esi, %zmm0 {%k1}
; X64-NEXT: retq
@@ -1485,14 +1485,14 @@ define <8 x i64> @test_mm512_mask_set1_epi16(<8 x i64> %__O, i32 %__M, i16 signe
define <8 x i64> @test_mm512_maskz_set1_epi16(i32 %__M, i16 signext %__A) {
; X32-LABEL: test_mm512_maskz_set1_epi16:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastw %eax, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_set1_epi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %esi, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -1507,12 +1507,12 @@ define <8 x i64> @test_mm512_maskz_set1_epi16(i32 %__M, i16 signext %__A) {
define <8 x i64> @test_mm512_broadcastb_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm512_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %zmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1523,14 +1523,14 @@ define <8 x i64> @test_mm512_broadcastb_epi8(<2 x i64> %a0) {
define <8 x i64> @test_mm512_mask_broadcastb_epi8(<8 x i64> %a0, i64* %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm512_mask_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovq (%eax), %k1
; X32-NEXT: vpbroadcastb %xmm1, %zmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovq (%rdi), %k1
; X64-NEXT: vpbroadcastb %xmm1, %zmm0 {%k1}
; X64-NEXT: retq
@@ -1546,14 +1546,14 @@ define <8 x i64> @test_mm512_mask_broadcastb_epi8(<8 x i64> %a0, i64* %a1, <2 x
define <8 x i64> @test_mm512_maskz_broadcastb_epi8(i64* %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm512_maskz_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovq (%eax), %k1
; X32-NEXT: vpbroadcastb %xmm0, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovq (%rdi), %k1
; X64-NEXT: vpbroadcastb %xmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -1568,12 +1568,12 @@ define <8 x i64> @test_mm512_maskz_broadcastb_epi8(i64* %a0, <2 x i64> %a1) {
define <8 x i64> @test_mm512_broadcastw_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm512_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %zmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1584,13 +1584,13 @@ define <8 x i64> @test_mm512_broadcastw_epi16(<2 x i64> %a0) {
define <8 x i64> @test_mm512_mask_broadcastw_epi16(<8 x i64> %a0, i32 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm512_mask_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastw %xmm1, %zmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm1, %zmm0 {%k1}
; X64-NEXT: retq
@@ -1605,13 +1605,13 @@ define <8 x i64> @test_mm512_mask_broadcastw_epi16(<8 x i64> %a0, i32 %a1, <2 x
define <8 x i64> @test_mm512_maskz_broadcastw_epi16(i32 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm512_maskz_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastw %xmm0, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -1625,12 +1625,12 @@ define <8 x i64> @test_mm512_maskz_broadcastw_epi16(i32 %a0, <2 x i64> %a1) {
define <8 x i64> @test_mm512_bslli_epi128(<8 x i64> %a0) {
; X32-LABEL: test_mm512_bslli_epi128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrldq {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[43,44,45,46,47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[59,60,61,62,63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_bslli_epi128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrldq {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[43,44,45,46,47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[59,60,61,62,63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <64 x i8>
@@ -1641,12 +1641,12 @@ define <8 x i64> @test_mm512_bslli_epi128(<8 x i64> %a0) {
define <8 x i64> @test_mm512_bsrli_epi128(<8 x i64> %a0) {
; X32-LABEL: test_mm512_bsrli_epi128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrldq {{.*#+}} zmm0 = zmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zmm0[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zmm0[37,38,39,40,41,42,43,44,45,46,47],zero,zero,zero,zero,zero,zmm0[53,54,55,56,57,58,59,60,61,62,63],zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_bsrli_epi128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrldq {{.*#+}} zmm0 = zmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zmm0[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zmm0[37,38,39,40,41,42,43,44,45,46,47],zero,zero,zero,zero,zero,zmm0[53,54,55,56,57,58,59,60,61,62,63],zero,zero,zero,zero,zero
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <64 x i8>
@@ -1657,12 +1657,12 @@ define <8 x i64> @test_mm512_bsrli_epi128(<8 x i64> %a0) {
define <8 x i64> @test_mm512_unpackhi_epi8(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpackhi_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpckhbw {{.*#+}} zmm0 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpackhi_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpckhbw {{.*#+}} zmm0 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <64 x i8>
@@ -1675,14 +1675,14 @@ define <8 x i64> @test_mm512_unpackhi_epi8(<8 x i64> %a0, <8 x i64> %a1) {
; TODO - improve support for i64 -> mmask64 on 32-bit targets
define <8 x i64> @test_mm512_mask_unpackhi_epi8(<8 x i64> %a0, i64* %a1, <8 x i64> %a2, <8 x i64> %a3) {
; X32-LABEL: test_mm512_mask_unpackhi_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovq (%eax), %k1
; X32-NEXT: vpunpckhbw {{.*#+}} zmm0 {%k1} = zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[12],zmm2[12],zmm1[13],zmm2[13],zmm1[14],zmm2[14],zmm1[15],zmm2[15],zmm1[24],zmm2[24],zmm1[25],zmm2[25],zmm1[26],zmm2[26],zmm1[27],zmm2[27],zmm1[28],zmm2[28],zmm1[29],zmm2[29],zmm1[30],zmm2[30],zmm1[31],zmm2[31],zmm1[40],zmm2[40],zmm1[41],zmm2[41],zmm1[42],zmm2[42],zmm1[43],zmm2[43],zmm1[44],zmm2[44],zmm1[45],zmm2[45],zmm1[46],zmm2[46],zmm1[47],zmm2[47],zmm1[56],zmm2[56],zmm1[57],zmm2[57],zmm1[58],zmm2[58],zmm1[59],zmm2[59],zmm1[60],zmm2[60],zmm1[61],zmm2[61],zmm1[62],zmm2[62],zmm1[63],zmm2[63]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpackhi_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovq (%rdi), %k1
; X64-NEXT: vpunpckhbw {{.*#+}} zmm0 {%k1} = zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[12],zmm2[12],zmm1[13],zmm2[13],zmm1[14],zmm2[14],zmm1[15],zmm2[15],zmm1[24],zmm2[24],zmm1[25],zmm2[25],zmm1[26],zmm2[26],zmm1[27],zmm2[27],zmm1[28],zmm2[28],zmm1[29],zmm2[29],zmm1[30],zmm2[30],zmm1[31],zmm2[31],zmm1[40],zmm2[40],zmm1[41],zmm2[41],zmm1[42],zmm2[42],zmm1[43],zmm2[43],zmm1[44],zmm2[44],zmm1[45],zmm2[45],zmm1[46],zmm2[46],zmm1[47],zmm2[47],zmm1[56],zmm2[56],zmm1[57],zmm2[57],zmm1[58],zmm2[58],zmm1[59],zmm2[59],zmm1[60],zmm2[60],zmm1[61],zmm2[61],zmm1[62],zmm2[62],zmm1[63],zmm2[63]
; X64-NEXT: retq
@@ -1699,14 +1699,14 @@ define <8 x i64> @test_mm512_mask_unpackhi_epi8(<8 x i64> %a0, i64* %a1, <8 x i6
define <8 x i64> @test_mm512_maskz_unpackhi_epi8(i64* %a0, <8 x i64> %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_maskz_unpackhi_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovq (%eax), %k1
; X32-NEXT: vpunpckhbw {{.*#+}} zmm0 {%k1} {z} = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpackhi_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovq (%rdi), %k1
; X64-NEXT: vpunpckhbw {{.*#+}} zmm0 {%k1} {z} = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; X64-NEXT: retq
@@ -1722,12 +1722,12 @@ define <8 x i64> @test_mm512_maskz_unpackhi_epi8(i64* %a0, <8 x i64> %a1, <8 x i
define <8 x i64> @test_mm512_unpackhi_epi16(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpackhi_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpckhwd {{.*#+}} zmm0 = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpackhi_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpckhwd {{.*#+}} zmm0 = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <32 x i16>
@@ -1739,13 +1739,13 @@ define <8 x i64> @test_mm512_unpackhi_epi16(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_mm512_mask_unpackhi_epi16(<8 x i64> %a0, i32 %a1, <8 x i64> %a2, <8 x i64> %a3) {
; X32-LABEL: test_mm512_mask_unpackhi_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpunpckhwd {{.*#+}} zmm0 {%k1} = zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[12],zmm2[12],zmm1[13],zmm2[13],zmm1[14],zmm2[14],zmm1[15],zmm2[15],zmm1[20],zmm2[20],zmm1[21],zmm2[21],zmm1[22],zmm2[22],zmm1[23],zmm2[23],zmm1[28],zmm2[28],zmm1[29],zmm2[29],zmm1[30],zmm2[30],zmm1[31],zmm2[31]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpackhi_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpunpckhwd {{.*#+}} zmm0 {%k1} = zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[12],zmm2[12],zmm1[13],zmm2[13],zmm1[14],zmm2[14],zmm1[15],zmm2[15],zmm1[20],zmm2[20],zmm1[21],zmm2[21],zmm1[22],zmm2[22],zmm1[23],zmm2[23],zmm1[28],zmm2[28],zmm1[29],zmm2[29],zmm1[30],zmm2[30],zmm1[31],zmm2[31]
; X64-NEXT: retq
@@ -1761,13 +1761,13 @@ define <8 x i64> @test_mm512_mask_unpackhi_epi16(<8 x i64> %a0, i32 %a1, <8 x i6
define <8 x i64> @test_mm512_maskz_unpackhi_epi16(i32 %a0, <8 x i64> %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_maskz_unpackhi_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpunpckhwd {{.*#+}} zmm0 {%k1} {z} = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpackhi_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpunpckhwd {{.*#+}} zmm0 {%k1} {z} = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
; X64-NEXT: retq
@@ -1782,12 +1782,12 @@ define <8 x i64> @test_mm512_maskz_unpackhi_epi16(i32 %a0, <8 x i64> %a1, <8 x i
define <8 x i64> @test_mm512_unpacklo_epi8(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpacklo_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpacklo_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <64 x i8>
@@ -1799,14 +1799,14 @@ define <8 x i64> @test_mm512_unpacklo_epi8(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_mm512_mask_unpacklo_epi8(<8 x i64> %a0, i64* %a1, <8 x i64> %a2, <8 x i64> %a3) {
; X32-LABEL: test_mm512_mask_unpacklo_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovq (%eax), %k1
; X32-NEXT: vpunpcklbw {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[16],zmm2[16],zmm1[17],zmm2[17],zmm1[18],zmm2[18],zmm1[19],zmm2[19],zmm1[20],zmm2[20],zmm1[21],zmm2[21],zmm1[22],zmm2[22],zmm1[23],zmm2[23],zmm1[32],zmm2[32],zmm1[33],zmm2[33],zmm1[34],zmm2[34],zmm1[35],zmm2[35],zmm1[36],zmm2[36],zmm1[37],zmm2[37],zmm1[38],zmm2[38],zmm1[39],zmm2[39],zmm1[48],zmm2[48],zmm1[49],zmm2[49],zmm1[50],zmm2[50],zmm1[51],zmm2[51],zmm1[52],zmm2[52],zmm1[53],zmm2[53],zmm1[54],zmm2[54],zmm1[55],zmm2[55]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpacklo_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovq (%rdi), %k1
; X64-NEXT: vpunpcklbw {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[16],zmm2[16],zmm1[17],zmm2[17],zmm1[18],zmm2[18],zmm1[19],zmm2[19],zmm1[20],zmm2[20],zmm1[21],zmm2[21],zmm1[22],zmm2[22],zmm1[23],zmm2[23],zmm1[32],zmm2[32],zmm1[33],zmm2[33],zmm1[34],zmm2[34],zmm1[35],zmm2[35],zmm1[36],zmm2[36],zmm1[37],zmm2[37],zmm1[38],zmm2[38],zmm1[39],zmm2[39],zmm1[48],zmm2[48],zmm1[49],zmm2[49],zmm1[50],zmm2[50],zmm1[51],zmm2[51],zmm1[52],zmm2[52],zmm1[53],zmm2[53],zmm1[54],zmm2[54],zmm1[55],zmm2[55]
; X64-NEXT: retq
@@ -1823,14 +1823,14 @@ define <8 x i64> @test_mm512_mask_unpacklo_epi8(<8 x i64> %a0, i64* %a1, <8 x i6
define <8 x i64> @test_mm512_maskz_unpacklo_epi8(i64* %a0, <8 x i64> %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_maskz_unpacklo_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovq (%eax), %k1
; X32-NEXT: vpunpcklbw {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpacklo_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovq (%rdi), %k1
; X64-NEXT: vpunpcklbw {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; X64-NEXT: retq
@@ -1846,12 +1846,12 @@ define <8 x i64> @test_mm512_maskz_unpacklo_epi8(i64* %a0, <8 x i64> %a1, <8 x i
define <8 x i64> @test_mm512_unpacklo_epi16(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpacklo_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpcklwd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_unpacklo_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpcklwd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
; X64-NEXT: retq
%arg0 = bitcast <8 x i64> %a0 to <32 x i16>
@@ -1863,13 +1863,13 @@ define <8 x i64> @test_mm512_unpacklo_epi16(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @test_mm512_mask_unpacklo_epi16(<8 x i64> %a0, i32 %a1, <8 x i64> %a2, <8 x i64> %a3) {
; X32-LABEL: test_mm512_mask_unpacklo_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpunpcklwd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[16],zmm2[16],zmm1[17],zmm2[17],zmm1[18],zmm2[18],zmm1[19],zmm2[19],zmm1[24],zmm2[24],zmm1[25],zmm2[25],zmm1[26],zmm2[26],zmm1[27],zmm2[27]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_unpacklo_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpunpcklwd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[16],zmm2[16],zmm1[17],zmm2[17],zmm1[18],zmm2[18],zmm1[19],zmm2[19],zmm1[24],zmm2[24],zmm1[25],zmm2[25],zmm1[26],zmm2[26],zmm1[27],zmm2[27]
; X64-NEXT: retq
@@ -1885,13 +1885,13 @@ define <8 x i64> @test_mm512_mask_unpacklo_epi16(<8 x i64> %a0, i32 %a1, <8 x i6
define <8 x i64> @test_mm512_maskz_unpacklo_epi16(i32 %a0, <8 x i64> %a1, <8 x i64> %a2) {
; X32-LABEL: test_mm512_maskz_unpacklo_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpunpcklwd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_unpacklo_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpunpcklwd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
; X64-NEXT: retq
@@ -1906,7 +1906,7 @@ define <8 x i64> @test_mm512_maskz_unpacklo_epi16(i32 %a0, <8 x i64> %a1, <8 x i
define i64 @test_mm512_test_epi8_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_test_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1924,7 +1924,7 @@ define i64 @test_mm512_test_epi8_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_test_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmb %zmm0, %zmm1, %k0
; X64-NEXT: kmovq %k0, %rax
; X64-NEXT: vzeroupper
@@ -1939,7 +1939,7 @@ entry:
define i64 @test_mm512_mask_test_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_mask_test_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -2665,7 +2665,7 @@ define i64 @test_mm512_mask_test_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_test_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovq %rdi, %k1
; X64-NEXT: vptestmb %zmm0, %zmm1, %k0 {%k1}
; X64-NEXT: kmovq %k0, %rax
@@ -2683,14 +2683,14 @@ entry:
define i32 @test_mm512_test_epi16_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_test_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmw %zmm0, %zmm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_test_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmw %zmm0, %zmm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: vzeroupper
@@ -2705,7 +2705,7 @@ entry:
define i32 @test_mm512_mask_test_epi16_mask(i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_mask_test_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestmw %zmm0, %zmm1, %k0 {%k1}
; X32-NEXT: kmovd %k0, %eax
@@ -2713,7 +2713,7 @@ define i32 @test_mm512_mask_test_epi16_mask(i32 %__U, <8 x i64> %__A, <8 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_test_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestmw %zmm0, %zmm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -2731,7 +2731,7 @@ entry:
define i64 @test_mm512_testn_epi8_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_testn_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -2749,7 +2749,7 @@ define i64 @test_mm512_testn_epi8_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_testn_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmb %zmm0, %zmm1, %k0
; X64-NEXT: kmovq %k0, %rax
; X64-NEXT: vzeroupper
@@ -2764,7 +2764,7 @@ entry:
define i64 @test_mm512_mask_testn_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_mask_testn_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -3490,7 +3490,7 @@ define i64 @test_mm512_mask_testn_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_testn_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovq %rdi, %k1
; X64-NEXT: vptestnmb %zmm0, %zmm1, %k0 {%k1}
; X64-NEXT: kmovq %k0, %rax
@@ -3508,14 +3508,14 @@ entry:
define i32 @test_mm512_testn_epi16_mask(<8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_testn_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmw %zmm0, %zmm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_testn_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmw %zmm0, %zmm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: vzeroupper
@@ -3530,7 +3530,7 @@ entry:
define i32 @test_mm512_mask_testn_epi16_mask(i32 %__U, <8 x i64> %__A, <8 x i64> %__B) {
; X32-LABEL: test_mm512_mask_testn_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestnmw %zmm0, %zmm1, %k0 {%k1}
; X32-NEXT: kmovd %k0, %eax
@@ -3538,7 +3538,7 @@ define i32 @test_mm512_mask_testn_epi16_mask(i32 %__U, <8 x i64> %__A, <8 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_testn_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestnmw %zmm0, %zmm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index c12dc8075bc..f420be32af0 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -6,7 +6,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pbroadcast.b.gpr.512(i8, <64 x i8>, i64)
define <64 x i8>@test_int_x86_avx512_mask_pbroadcast_b_gpr_512(i8 %x0, <64 x i8> %x1, i64 %mask) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pbroadcast_b_gpr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpbroadcastb %edi, %zmm1
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpbroadcastb %edi, %zmm0 {%k1}
@@ -16,7 +16,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pbroadcast.b.gpr.512(i8, <64 x i8>, i64)
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pbroadcast_b_gpr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movb {{[0-9]+}}(%esp), %al
; AVX512F-32-NEXT: vpbroadcastb %eax, %zmm1
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
@@ -36,7 +36,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pbroadcast.b.gpr.512(i8, <64 x i8>, i64)
declare <32 x i16> @llvm.x86.avx512.mask.pbroadcast.w.gpr.512(i16, <32 x i16>, i32)
define <32 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_512(i16 %x0, <32 x i16> %x1, i32 %mask) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpbroadcastw %edi, %zmm1
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpbroadcastw %edi, %zmm0 {%k1}
@@ -46,7 +46,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pbroadcast.w.gpr.512(i16, <32 x i16>, i
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpbroadcastw %eax, %zmm1
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
@@ -67,7 +67,7 @@ declare void @llvm.x86.avx512.mask.storeu.b.512(i8*, <64 x i8>, i64)
define void@test_int_x86_avx512_mask_storeu_b_512(i8* %ptr1, i8* %ptr2, <64 x i8> %x1, i64 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_storeu_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdx, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm0, (%rdi) {%k1}
; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rsi)
@@ -75,7 +75,7 @@ define void@test_int_x86_avx512_mask_storeu_b_512(i8* %ptr1, i8* %ptr2, <64 x i8
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_storeu_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
@@ -92,7 +92,7 @@ declare void @llvm.x86.avx512.mask.storeu.w.512(i8*, <32 x i16>, i32)
define void@test_int_x86_avx512_mask_storeu_w_512(i8* %ptr1, i8* %ptr2, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_storeu_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edx, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm0, (%rdi) {%k1}
; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rsi)
@@ -100,7 +100,7 @@ define void@test_int_x86_avx512_mask_storeu_w_512(i8* %ptr1, i8* %ptr2, <32 x i1
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_storeu_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
@@ -117,7 +117,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.loadu.w.512(i8*, <32 x i16>, i32)
define <32 x i16>@test_int_x86_avx512_mask_loadu_w_512(i8* %ptr, i8* %ptr2, <32 x i16> %x1, i32 %mask) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_loadu_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0
; AVX512BW-NEXT: kmovd %edx, %k1
; AVX512BW-NEXT: vmovdqu16 (%rsi), %zmm0 {%k1}
@@ -126,7 +126,7 @@ define <32 x i16>@test_int_x86_avx512_mask_loadu_w_512(i8* %ptr, i8* %ptr2, <32
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_loadu_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX512F-32-NEXT: vmovdqu64 (%ecx), %zmm0
@@ -146,7 +146,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.loadu.b.512(i8*, <64 x i8>, i64)
define <64 x i8>@test_int_x86_avx512_mask_loadu_b_512(i8* %ptr, i8* %ptr2, <64 x i8> %x1, i64 %mask) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_loadu_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0
; AVX512BW-NEXT: kmovq %rdx, %k1
; AVX512BW-NEXT: vmovdqu8 (%rsi), %zmm0 {%k1}
@@ -155,7 +155,7 @@ define <64 x i8>@test_int_x86_avx512_mask_loadu_b_512(i8* %ptr, i8* %ptr2, <64 x
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_loadu_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX512F-32-NEXT: vmovdqu64 (%ecx), %zmm0
@@ -175,14 +175,14 @@ declare <8 x i64> @llvm.x86.avx512.psll.dq.512(<8 x i64>, i32)
define <8 x i64>@test_int_x86_avx512_psll_dq_512(<8 x i64> %x0) {
; AVX512BW-LABEL: test_int_x86_avx512_psll_dq_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpslldq {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37,38,39],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53,54,55]
; AVX512BW-NEXT: vpslldq {{.*#+}} zmm0 = zero,zero,zero,zero,zmm0[0,1,2,3,4,5,6,7,8,9,10,11],zero,zero,zero,zero,zmm0[16,17,18,19,20,21,22,23,24,25,26,27],zero,zero,zero,zero,zmm0[32,33,34,35,36,37,38,39,40,41,42,43],zero,zero,zero,zero,zmm0[48,49,50,51,52,53,54,55,56,57,58,59]
; AVX512BW-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_psll_dq_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpslldq {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37,38,39],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53,54,55]
; AVX512F-32-NEXT: vpslldq {{.*#+}} zmm0 = zero,zero,zero,zero,zmm0[0,1,2,3,4,5,6,7,8,9,10,11],zero,zero,zero,zero,zmm0[16,17,18,19,20,21,22,23,24,25,26,27],zero,zero,zero,zero,zmm0[32,33,34,35,36,37,38,39,40,41,42,43],zero,zero,zero,zero,zmm0[48,49,50,51,52,53,54,55,56,57,58,59]
; AVX512F-32-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -195,12 +195,12 @@ define <8 x i64>@test_int_x86_avx512_psll_dq_512(<8 x i64> %x0) {
define <8 x i64>@test_int_x86_avx512_psll_load_dq_512(<8 x i64>* %p0) {
; AVX512BW-LABEL: test_int_x86_avx512_psll_load_dq_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpslldq {{.*#+}} zmm0 = zero,zero,zero,zero,mem[0,1,2,3,4,5,6,7,8,9,10,11],zero,zero,zero,zero,mem[16,17,18,19,20,21,22,23,24,25,26,27],zero,zero,zero,zero,mem[32,33,34,35,36,37,38,39,40,41,42,43],zero,zero,zero,zero,mem[48,49,50,51,52,53,54,55,56,57,58,59]
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_psll_load_dq_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpslldq {{.*#+}} zmm0 = zero,zero,zero,zero,mem[0,1,2,3,4,5,6,7,8,9,10,11],zero,zero,zero,zero,mem[16,17,18,19,20,21,22,23,24,25,26,27],zero,zero,zero,zero,mem[32,33,34,35,36,37,38,39,40,41,42,43],zero,zero,zero,zero,mem[48,49,50,51,52,53,54,55,56,57,58,59]
; AVX512F-32-NEXT: retl
@@ -213,14 +213,14 @@ declare <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64>, i32)
define <8 x i64>@test_int_x86_avx512_psrl_dq_512(<8 x i64> %x0) {
; AVX512BW-LABEL: test_int_x86_avx512_psrl_dq_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsrldq {{.*#+}} zmm1 = zmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[40,41,42,43,44,45,46,47],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[56,57,58,59,60,61,62,63],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsrldq {{.*#+}} zmm0 = zmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zmm0[20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zmm0[36,37,38,39,40,41,42,43,44,45,46,47],zero,zero,zero,zero,zmm0[52,53,54,55,56,57,58,59,60,61,62,63],zero,zero,zero,zero
; AVX512BW-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_psrl_dq_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsrldq {{.*#+}} zmm1 = zmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[40,41,42,43,44,45,46,47],zero,zero,zero,zero,zero,zero,zero,zero,zmm0[56,57,58,59,60,61,62,63],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512F-32-NEXT: vpsrldq {{.*#+}} zmm0 = zmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zmm0[20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zmm0[36,37,38,39,40,41,42,43,44,45,46,47],zero,zero,zero,zero,zmm0[52,53,54,55,56,57,58,59,60,61,62,63],zero,zero,zero,zero
; AVX512F-32-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -233,12 +233,12 @@ define <8 x i64>@test_int_x86_avx512_psrl_dq_512(<8 x i64> %x0) {
define <8 x i64>@test_int_x86_avx512_psrl_load_dq_512(<8 x i64>* %p0) {
; AVX512BW-LABEL: test_int_x86_avx512_psrl_load_dq_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsrldq {{.*#+}} zmm0 = mem[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,mem[20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,mem[36,37,38,39,40,41,42,43,44,45,46,47],zero,zero,zero,zero,mem[52,53,54,55,56,57,58,59,60,61,62,63],zero,zero,zero,zero
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_psrl_load_dq_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpsrldq {{.*#+}} zmm0 = mem[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,mem[20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,mem[36,37,38,39,40,41,42,43,44,45,46,47],zero,zero,zero,zero,mem[52,53,54,55,56,57,58,59,60,61,62,63],zero,zero,zero,zero
; AVX512F-32-NEXT: retl
@@ -251,7 +251,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8>, <64 x i8>, i32, <
define <64 x i8>@test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3, i64 %x4) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_palignr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpalignr {{.*#+}} zmm3 = zmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1],zmm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zmm0[16,17],zmm1[34,35,36,37,38,39,40,41,42,43,44,45,46,47],zmm0[32,33],zmm1[50,51,52,53,54,55,56,57,58,59,60,61,62,63],zmm0[48,49]
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpalignr {{.*#+}} zmm2 {%k1} = zmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1],zmm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zmm0[16,17],zmm1[34,35,36,37,38,39,40,41,42,43,44,45,46,47],zmm0[32,33],zmm1[50,51,52,53,54,55,56,57,58,59,60,61,62,63],zmm0[48,49]
@@ -261,7 +261,7 @@ define <64 x i8>@test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> %
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_palignr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpalignr {{.*#+}} zmm3 = zmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1],zmm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zmm0[16,17],zmm1[34,35,36,37,38,39,40,41,42,43,44,45,46,47],zmm0[32,33],zmm1[50,51,52,53,54,55,56,57,58,59,60,61,62,63],zmm0[48,49]
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpalignr {{.*#+}} zmm2 {%k1} = zmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1],zmm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zmm0[16,17],zmm1[34,35,36,37,38,39,40,41,42,43,44,45,46,47],zmm0[32,33],zmm1[50,51,52,53,54,55,56,57,58,59,60,61,62,63],zmm0[48,49]
@@ -281,7 +281,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pshufh.w.512(<32 x i16>, i32, <32 x i16
define <32 x i16>@test_int_x86_avx512_mask_pshufh_w_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pshufh_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpshufhw {{.*#+}} zmm2 = zmm0[0,1,2,3,7,4,4,4,8,9,10,11,15,12,12,12,16,17,18,19,23,20,20,20,24,25,26,27,31,28,28,28]
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,7,4,4,4,8,9,10,11,15,12,12,12,16,17,18,19,23,20,20,20,24,25,26,27,31,28,28,28]
@@ -291,7 +291,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pshufh_w_512(<32 x i16> %x0, i32 %x1,
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pshufh_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpshufhw {{.*#+}} zmm2 = zmm0[0,1,2,3,7,4,4,4,8,9,10,11,15,12,12,12,16,17,18,19,23,20,20,20,24,25,26,27,31,28,28,28]
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,7,4,4,4,8,9,10,11,15,12,12,12,16,17,18,19,23,20,20,20,24,25,26,27,31,28,28,28]
@@ -311,7 +311,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pshufl.w.512(<32 x i16>, i32, <32 x i16
define <32 x i16>@test_int_x86_avx512_mask_pshufl_w_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pshufl_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpshuflw {{.*#+}} zmm2 = zmm0[3,0,0,0,4,5,6,7,11,8,8,8,12,13,14,15,19,16,16,16,20,21,22,23,27,24,24,24,28,29,30,31]
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,4,5,6,7,11,8,8,8,12,13,14,15,19,16,16,16,20,21,22,23,27,24,24,24,28,29,30,31]
@@ -321,7 +321,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pshufl_w_512(<32 x i16> %x0, i32 %x1,
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pshufl_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpshuflw {{.*#+}} zmm2 = zmm0[3,0,0,0,4,5,6,7,11,8,8,8,12,13,14,15,19,16,16,16,20,21,22,23,27,24,24,24,28,29,30,31]
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,4,5,6,7,11,8,8,8,12,13,14,15,19,16,16,16,20,21,22,23,27,24,24,24,28,29,30,31]
@@ -339,14 +339,14 @@ define <32 x i16>@test_int_x86_avx512_mask_pshufl_w_512(<32 x i16> %x0, i32 %x1,
define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) {
; AVX512BW-LABEL: test_pcmpeq_b:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_pcmpeq_b:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $12, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
@@ -362,7 +362,7 @@ define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) {
define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; AVX512BW-LABEL: test_mask_pcmpeq_b:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovq %k0, %rax
@@ -370,7 +370,7 @@ define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_pcmpeq_b:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $12, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
@@ -389,14 +389,14 @@ declare i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8>, <64 x i8>, i64)
define i32 @test_pcmpeq_w(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_pcmpeq_w:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_pcmpeq_w:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovd %k0, %eax
; AVX512F-32-NEXT: vzeroupper
@@ -407,7 +407,7 @@ define i32 @test_pcmpeq_w(<32 x i16> %a, <32 x i16> %b) {
define i32 @test_mask_pcmpeq_w(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_pcmpeq_w:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -415,7 +415,7 @@ define i32 @test_mask_pcmpeq_w(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_pcmpeq_w:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
; AVX512F-32-NEXT: kmovd %k0, %eax
@@ -429,14 +429,14 @@ declare i32 @llvm.x86.avx512.mask.pcmpeq.w.512(<32 x i16>, <32 x i16>, i32)
define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) {
; AVX512BW-LABEL: test_pcmpgt_b:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_pcmpgt_b:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $12, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
@@ -452,7 +452,7 @@ define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) {
define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; AVX512BW-LABEL: test_mask_pcmpgt_b:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovq %k0, %rax
@@ -460,7 +460,7 @@ define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_pcmpgt_b:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $12, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
@@ -479,14 +479,14 @@ declare i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8>, <64 x i8>, i64)
define i32 @test_pcmpgt_w(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_pcmpgt_w:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_pcmpgt_w:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovd %k0, %eax
; AVX512F-32-NEXT: vzeroupper
@@ -497,7 +497,7 @@ define i32 @test_pcmpgt_w(<32 x i16> %a, <32 x i16> %b) {
define i32 @test_mask_pcmpgt_w(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_pcmpgt_w:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -505,7 +505,7 @@ define i32 @test_mask_pcmpgt_w(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_pcmpgt_w:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpcmpgtw %zmm1, %zmm0, %k0 {%k1}
; AVX512F-32-NEXT: kmovd %k0, %eax
@@ -521,7 +521,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.punpckhb.w.512(<64 x i8>, <64 x i8>, <64
define <64 x i8>@test_int_x86_avx512_mask_punpckhb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_punpckhb_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 {%k1} = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
@@ -529,7 +529,7 @@ define <64 x i8>@test_int_x86_avx512_mask_punpckhb_w_512(<64 x i8> %x0, <64 x i8
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_punpckhb_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpunpckhbw {{.*#+}} zmm2 {%k1} = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
@@ -545,7 +545,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.punpcklb.w.512(<64 x i8>, <64 x i8>, <64
define <64 x i8>@test_int_x86_avx512_mask_punpcklb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_punpcklb_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
@@ -553,7 +553,7 @@ define <64 x i8>@test_int_x86_avx512_mask_punpcklb_w_512(<64 x i8> %x0, <64 x i8
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_punpcklb_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpunpcklbw {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
@@ -569,7 +569,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.punpckhw.d.512(<32 x i16>, <32 x i16>,
define <32 x i16>@test_int_x86_avx512_mask_punpckhw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_punpckhw_d_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpunpckhwd {{.*#+}} zmm3 = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpunpckhwd {{.*#+}} zmm2 {%k1} = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
@@ -577,7 +577,7 @@ define <32 x i16>@test_int_x86_avx512_mask_punpckhw_d_512(<32 x i16> %x0, <32 x
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_punpckhw_d_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpunpckhwd {{.*#+}} zmm3 = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpunpckhwd {{.*#+}} zmm2 {%k1} = zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31]
@@ -593,7 +593,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.punpcklw.d.512(<32 x i16>, <32 x i16>,
define <32 x i16>@test_int_x86_avx512_mask_punpcklw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_punpcklw_d_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpunpcklwd {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpunpcklwd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
@@ -601,7 +601,7 @@ define <32 x i16>@test_int_x86_avx512_mask_punpcklw_d_512(<32 x i16> %x0, <32 x
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_punpcklw_d_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpunpcklwd {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpunpcklwd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27]
@@ -617,7 +617,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmaxs.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8>@test_int_x86_avx512_mask_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmaxs_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm2 {%k1}
@@ -625,7 +625,7 @@ define <64 x i8>@test_int_x86_avx512_mask_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmaxs_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmaxsb %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmaxsb %zmm1, %zmm0, %zmm2 {%k1}
@@ -641,7 +641,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaxs.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16>@test_int_x86_avx512_mask_pmaxs_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmaxs_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm2 {%k1}
@@ -649,7 +649,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmaxs_w_512(<32 x i16> %x0, <32 x i16
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmaxs_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmaxsw %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmaxsw %zmm1, %zmm0, %zmm2 {%k1}
@@ -665,7 +665,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmaxu.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8>@test_int_x86_avx512_mask_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmaxu_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm2 {%k1}
@@ -673,7 +673,7 @@ define <64 x i8>@test_int_x86_avx512_mask_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmaxu_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmaxub %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmaxub %zmm1, %zmm0, %zmm2 {%k1}
@@ -689,7 +689,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaxu.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16>@test_int_x86_avx512_mask_pmaxu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmaxu_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm2 {%k1}
@@ -697,7 +697,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmaxu_w_512(<32 x i16> %x0, <32 x i16
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmaxu_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmaxuw %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmaxuw %zmm1, %zmm0, %zmm2 {%k1}
@@ -713,7 +713,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmins.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8>@test_int_x86_avx512_mask_pmins_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmins_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm2 {%k1}
@@ -721,7 +721,7 @@ define <64 x i8>@test_int_x86_avx512_mask_pmins_b_512(<64 x i8> %x0, <64 x i8> %
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmins_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpminsb %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpminsb %zmm1, %zmm0, %zmm2 {%k1}
@@ -737,7 +737,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmins.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16>@test_int_x86_avx512_mask_pmins_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmins_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm2 {%k1}
@@ -745,7 +745,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmins_w_512(<32 x i16> %x0, <32 x i16
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmins_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpminsw %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpminsw %zmm1, %zmm0, %zmm2 {%k1}
@@ -761,7 +761,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pminu.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8>@test_int_x86_avx512_mask_pminu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pminu_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpminub %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpminub %zmm1, %zmm0, %zmm2 {%k1}
@@ -769,7 +769,7 @@ define <64 x i8>@test_int_x86_avx512_mask_pminu_b_512(<64 x i8> %x0, <64 x i8> %
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pminu_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpminub %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpminub %zmm1, %zmm0, %zmm2 {%k1}
@@ -785,7 +785,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pminu.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16>@test_int_x86_avx512_mask_pminu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pminu_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpminuw %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpminuw %zmm1, %zmm0, %zmm2 {%k1}
@@ -793,7 +793,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pminu_w_512(<32 x i16> %x0, <32 x i16
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pminu_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpminuw %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpminuw %zmm1, %zmm0, %zmm2 {%k1}
@@ -809,7 +809,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmovzxb.w.512(<32 x i8>, <32 x i16>, i3
define <32 x i16>@test_int_x86_avx512_mask_pmovzxb_w_512(<32 x i8> %x0, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmovzxb_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 {%k1} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
@@ -819,7 +819,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmovzxb_w_512(<32 x i8> %x0, <32 x i1
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmovzxb_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmovzxbw {{.*#+}} zmm1 {%k1} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
@@ -839,7 +839,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmovsxb.w.512(<32 x i8>, <32 x i16>, i3
define <32 x i16>@test_int_x86_avx512_mask_pmovsxb_w_512(<32 x i8> %x0, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmovsxb_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm2
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1 {%k1}
@@ -849,7 +849,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmovsxb_w_512(<32 x i8> %x0, <32 x i1
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmovsxb_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmovsxbw %ymm0, %zmm2
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmovsxbw %ymm0, %zmm1 {%k1}
@@ -869,7 +869,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16>, <8 x i16>, <32 x
define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psrl_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
@@ -879,7 +879,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
@@ -899,7 +899,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16>, i32, <32 x i16>
define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psrl_wi_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm2
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1}
@@ -909,7 +909,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_wi_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm2
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1}
@@ -929,7 +929,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16>, <8 x i16>, <32 x
define <32 x i16>@test_int_x86_avx512_mask_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psra_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1}
@@ -939,7 +939,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psra_w_512(<32 x i16> %x0, <8 x i16>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psra_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1}
@@ -959,7 +959,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16>, i32, <32 x i16>
define <32 x i16>@test_int_x86_avx512_mask_psra_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psra_wi_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm2
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm1 {%k1}
@@ -969,7 +969,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psra_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psra_wi_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsraw $3, %zmm0, %zmm2
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsraw $3, %zmm0, %zmm1 {%k1}
@@ -989,7 +989,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16>, <8 x i16>, <32 x
define <32 x i16>@test_int_x86_avx512_mask_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psll_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm2 {%k1}
@@ -999,7 +999,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psll_w_512(<32 x i16> %x0, <8 x i16>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psll_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm2 {%k1}
@@ -1019,7 +1019,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16>, i32, <32 x i16>
define <32 x i16>@test_int_x86_avx512_mask_psll_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psll_wi_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm2
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm1 {%k1}
@@ -1029,7 +1029,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psll_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psll_wi_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsllw $3, %zmm0, %zmm2
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsllw $3, %zmm0, %zmm1 {%k1}
@@ -1049,7 +1049,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8>@test_int_x86_avx512_mask_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pshuf_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpshufb %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpshufb %zmm1, %zmm0, %zmm2 {%k1}
@@ -1057,7 +1057,7 @@ define <64 x i8>@test_int_x86_avx512_mask_pshuf_b_512(<64 x i8> %x0, <64 x i8> %
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pshuf_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpshufb %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpshufb %zmm1, %zmm0, %zmm2 {%k1}
@@ -1074,13 +1074,13 @@ declare <64 x i8> @llvm.x86.avx512.cvtmask2b.512(i64)
define <64 x i8>@test_int_x86_avx512_cvtmask2b_512(i64 %x0) {
; AVX512BW-LABEL: test_int_x86_avx512_cvtmask2b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_cvtmask2b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k0
; AVX512F-32-NEXT: vpmovm2b %k0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1092,13 +1092,13 @@ declare <32 x i16> @llvm.x86.avx512.cvtmask2w.512(i32)
define <32 x i16>@test_int_x86_avx512_cvtmask2w_512(i32 %x0) {
; AVX512BW-LABEL: test_int_x86_avx512_cvtmask2w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_cvtmask2w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
; AVX512F-32-NEXT: vpmovm2w %k0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1107,12 +1107,12 @@ define <32 x i16>@test_int_x86_avx512_cvtmask2w_512(i32 %x0) {
}
define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-LABEL: test_mask_packs_epi32_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -1121,14 +1121,14 @@ define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1139,13 +1139,13 @@ define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <
define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1155,12 +1155,12 @@ define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
; AVX512BW-LABEL: test_mask_packs_epi32_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1171,14 +1171,14 @@ define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_
define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm1 {%k1}
@@ -1191,13 +1191,13 @@ define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr
define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -1209,12 +1209,12 @@ define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %pt
define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmb_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmb_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1227,14 +1227,14 @@ define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmbk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmbk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm1 {%k1}
@@ -1249,13 +1249,13 @@ define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <3
define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmbkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmbkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm0 {%k1} {z}
@@ -1271,12 +1271,12 @@ declare <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32>, <16 x i32>, <3
define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_packs_epi16_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
@@ -1285,14 +1285,14 @@ define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi16_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1303,13 +1303,13 @@ define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <6
define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi16_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1319,12 +1319,12 @@ define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; AVX512BW-LABEL: test_mask_packs_epi16_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1335,14 +1335,14 @@ define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b
define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi16_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm1 {%k1}
@@ -1355,13 +1355,13 @@ define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_
define <64 x i8> @test_mask_packs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i64 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi16_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -1376,12 +1376,12 @@ declare <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16>, <32 x i16>, <64
define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-LABEL: test_mask_packus_epi32_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -1390,14 +1390,14 @@ define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1408,13 +1408,13 @@ define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1424,12 +1424,12 @@ define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
; AVX512BW-LABEL: test_mask_packus_epi32_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1440,14 +1440,14 @@ define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr
define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm1 {%k1}
@@ -1460,13 +1460,13 @@ define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %pt
define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -1478,12 +1478,12 @@ define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %p
define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmb_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmb_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1496,14 +1496,14 @@ define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmbk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmbk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm1 {%k1}
@@ -1518,13 +1518,13 @@ define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <
define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmbkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmbkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm0 {%k1} {z}
@@ -1540,12 +1540,12 @@ declare <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32>, <16 x i32>, <3
define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_packus_epi16_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
@@ -1554,14 +1554,14 @@ define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi16_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1572,13 +1572,13 @@ define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <
define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi16_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1588,12 +1588,12 @@ define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b,
define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; AVX512BW-LABEL: test_mask_packus_epi16_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1604,14 +1604,14 @@ define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_
define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi16_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm1 {%k1}
@@ -1624,13 +1624,13 @@ define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr
define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i64 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi16_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -1644,7 +1644,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16>, <32 x i16>, <64
define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; AVX512BW-LABEL: test_cmp_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
; AVX512BW-NEXT: vpcmpgtb %zmm0, %zmm1, %k0
@@ -1669,7 +1669,7 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_cmp_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $60, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 64
; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
@@ -1723,7 +1723,7 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512BW-LABEL: test_mask_cmp_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovq %k0, %rax
@@ -1750,7 +1750,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_cmp_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: pushl %ebx
; AVX512F-32-NEXT: .cfi_def_cfa_offset 8
; AVX512F-32-NEXT: pushl %esi
@@ -2528,7 +2528,7 @@ declare i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8>, <64 x i8>, i32, i64) noun
define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; AVX512BW-LABEL: test_ucmp_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
; AVX512BW-NEXT: vpcmpltub %zmm1, %zmm0, %k0
@@ -2553,7 +2553,7 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_ucmp_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $60, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 64
; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
@@ -2607,7 +2607,7 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512BW-LABEL: test_mask_x86_avx512_ucmp_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovq %k0, %rax
@@ -2634,7 +2634,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_x86_avx512_ucmp_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: pushl %ebx
; AVX512F-32-NEXT: .cfi_def_cfa_offset 8
; AVX512F-32-NEXT: pushl %esi
@@ -3412,7 +3412,7 @@ declare i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8>, <64 x i8>, i32, i64) nou
define i32 @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
; AVX512BW-LABEL: test_cmp_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: vpcmpgtw %zmm0, %zmm1, %k0
@@ -3437,7 +3437,7 @@ define i32 @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_cmp_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovd %k0, %eax
; AVX512F-32-NEXT: vpcmpgtw %zmm0, %zmm1, %k0
@@ -3480,7 +3480,7 @@ define i32 @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
define i32 @test_mask_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
; AVX512BW-LABEL: test_mask_cmp_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -3507,7 +3507,7 @@ define i32 @test_mask_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_cmp_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX512F-32-NEXT: kmovd %ecx, %k1
; AVX512F-32-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
@@ -3555,7 +3555,7 @@ declare i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16>, <32 x i16>, i32, i32) no
define i32 @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
; AVX512BW-LABEL: test_ucmp_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: vpcmpltuw %zmm1, %zmm0, %k0
@@ -3580,7 +3580,7 @@ define i32 @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_ucmp_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovd %k0, %eax
; AVX512F-32-NEXT: vpcmpltuw %zmm1, %zmm0, %k0
@@ -3623,7 +3623,7 @@ define i32 @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
define i32 @test_mask_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
; AVX512BW-LABEL: test_mask_ucmp_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -3650,7 +3650,7 @@ define i32 @test_mask_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_ucmp_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX512F-32-NEXT: kmovd %ecx, %k1
; AVX512F-32-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
@@ -3701,7 +3701,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8>, <64 x i8>, <64 x i
define <64 x i8>@mm512_avg_epu8(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; AVX512BW-LABEL: mm512_avg_epu8:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpavgb %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpavgb %zmm1, %zmm0, %zmm2 {%k1}
@@ -3709,7 +3709,7 @@ define <64 x i8>@mm512_avg_epu8(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: mm512_avg_epu8:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpavgb %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpavgb %zmm1, %zmm0, %zmm2 {%k1}
@@ -3726,7 +3726,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16>@mm512_avg_epu16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: mm512_avg_epu16:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpavgw %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpavgw %zmm1, %zmm0, %zmm2 {%k1}
@@ -3734,7 +3734,7 @@ define <32 x i16>@mm512_avg_epu16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: mm512_avg_epu16:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpavgw %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpavgw %zmm1, %zmm0, %zmm2 {%k1}
@@ -3750,7 +3750,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16>, <32 x i16>, i32)
define <32 x i16>@test_int_x86_avx512_mask_pabs_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pabs_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpabsw %zmm0, %zmm2
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpabsw %zmm0, %zmm1 {%k1}
@@ -3758,7 +3758,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pabs_w_512(<32 x i16> %x0, <32 x i16>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pabs_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpabsw %zmm0, %zmm2
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpabsw %zmm0, %zmm1 {%k1}
@@ -3774,7 +3774,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8>, <64 x i8>, i64)
define <64 x i8>@test_int_x86_avx512_mask_pabs_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pabs_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpabsb %zmm0, %zmm2
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpabsb %zmm0, %zmm1 {%k1}
@@ -3782,7 +3782,7 @@ define <64 x i8>@test_int_x86_avx512_mask_pabs_b_512(<64 x i8> %x0, <64 x i8> %x
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pabs_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpabsb %zmm0, %zmm2
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpabsb %zmm0, %zmm1 {%k1}
@@ -3798,7 +3798,7 @@ declare i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8>, <64 x i8>, i64)
define i64@test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_ptestm_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vptestmb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vptestmb %zmm1, %zmm0, %k1 {%k1}
@@ -3809,7 +3809,7 @@ define i64@test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_ptestm_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $20, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 24
; AVX512F-32-NEXT: vptestmb %zmm1, %zmm0, %k0
@@ -3834,7 +3834,7 @@ declare i32 @llvm.x86.avx512.ptestm.w.512(<32 x i16>, <32 x i16>, i32)
define i32@test_int_x86_avx512_ptestm_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_ptestm_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vptestmw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vptestmw %zmm1, %zmm0, %k1 {%k1}
@@ -3845,7 +3845,7 @@ define i32@test_int_x86_avx512_ptestm_w_512(<32 x i16> %x0, <32 x i16> %x1, i32
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_ptestm_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vptestmw %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vptestmw %zmm1, %zmm0, %k1 {%k1}
@@ -3864,7 +3864,7 @@ declare i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8>, <64 x i8>, i64 %x2)
define i64@test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_ptestnm_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vptestnmb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vptestnmb %zmm1, %zmm0, %k1 {%k1}
@@ -3875,7 +3875,7 @@ define i64@test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_ptestnm_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $20, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 24
; AVX512F-32-NEXT: vptestnmb %zmm1, %zmm0, %k0
@@ -3900,7 +3900,7 @@ declare i32 @llvm.x86.avx512.ptestnm.w.512(<32 x i16>, <32 x i16>, i32 %x2)
define i32@test_int_x86_avx512_ptestnm_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_ptestnm_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vptestnmw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vptestnmw %zmm1, %zmm0, %k1 {%k1}
@@ -3911,7 +3911,7 @@ define i32@test_int_x86_avx512_ptestnm_w_512(<32 x i16> %x0, <32 x i16> %x1, i32
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_ptestnm_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vptestnmw %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vptestnmw %zmm1, %zmm0, %k1 {%k1}
diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll
index d6defb7af41..7b5cc5feff0 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -4,12 +4,12 @@
define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-LABEL: test_mask_packs_epi32_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
@@ -18,14 +18,14 @@ define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -38,13 +38,13 @@ define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <
define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -56,12 +56,12 @@ define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
; AVX512BW-LABEL: test_mask_packs_epi32_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -72,14 +72,14 @@ define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_
define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm1 {%k1}
@@ -94,13 +94,13 @@ define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr
define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -114,12 +114,12 @@ define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %pt
define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmb_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmb_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -132,14 +132,14 @@ define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmbk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmbk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm1 {%k1}
@@ -156,13 +156,13 @@ define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <3
define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi32_rmbkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi32_rmbkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm0 {%k1} {z}
@@ -180,12 +180,12 @@ declare <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32>, <16 x i32>)
define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_packs_epi16_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a, <32 x i16> %b)
@@ -194,14 +194,14 @@ define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi16_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -214,13 +214,13 @@ define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <6
define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi16_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -232,12 +232,12 @@ define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; AVX512BW-LABEL: test_mask_packs_epi16_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -248,14 +248,14 @@ define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b
define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi16_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm1 {%k1}
@@ -270,13 +270,13 @@ define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_
define <64 x i8> @test_mask_packs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i64 %mask) {
; AVX512BW-LABEL: test_mask_packs_epi16_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -293,12 +293,12 @@ declare <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-LABEL: test_mask_packus_epi32_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
@@ -307,14 +307,14 @@ define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -327,13 +327,13 @@ define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -345,12 +345,12 @@ define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
; AVX512BW-LABEL: test_mask_packus_epi32_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -361,14 +361,14 @@ define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr
define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm1 {%k1}
@@ -383,13 +383,13 @@ define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %pt
define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -403,12 +403,12 @@ define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %p
define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmb_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmb_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -421,14 +421,14 @@ define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmbk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmbk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm1 {%k1}
@@ -445,13 +445,13 @@ define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <
define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi32_rmbkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi32_rmbkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm0 {%k1} {z}
@@ -469,12 +469,12 @@ declare <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32>, <16 x i32>)
define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_packus_epi16_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> %a, <32 x i16> %b)
@@ -483,14 +483,14 @@ define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi16_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -503,13 +503,13 @@ define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <
define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi16_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -521,12 +521,12 @@ define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b,
define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; AVX512BW-LABEL: test_mask_packus_epi16_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -537,14 +537,14 @@ define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_
define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi16_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm1 {%k1}
@@ -559,13 +559,13 @@ define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr
define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i64 %mask) {
; AVX512BW-LABEL: test_mask_packus_epi16_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -581,12 +581,12 @@ declare <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_adds_epi16_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpaddsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epi16_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpaddsw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16> %a, <32 x i16> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -595,14 +595,14 @@ define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epi16_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpaddsw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epi16_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddsw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -613,13 +613,13 @@ define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3
define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epi16_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epi16_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -629,12 +629,12 @@ define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; AVX512BW-LABEL: test_mask_adds_epi16_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpaddsw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epi16_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpaddsw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -645,14 +645,14 @@ define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b
define <32 x i16> @test_mask_adds_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epi16_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpaddsw (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epi16_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddsw (%eax), %zmm0, %zmm1 {%k1}
@@ -665,13 +665,13 @@ define <32 x i16> @test_mask_adds_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_
define <32 x i16> @test_mask_adds_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epi16_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpaddsw (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epi16_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddsw (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -685,12 +685,12 @@ declare <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_subs_epi16_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsubsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epi16_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsubsw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16> %a, <32 x i16> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -699,14 +699,14 @@ define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epi16_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsubsw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epi16_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubsw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -717,13 +717,13 @@ define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3
define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epi16_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epi16_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -733,12 +733,12 @@ define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; AVX512BW-LABEL: test_mask_subs_epi16_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsubsw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epi16_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpsubsw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -749,14 +749,14 @@ define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b
define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epi16_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsubsw (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epi16_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubsw (%eax), %zmm0, %zmm1 {%k1}
@@ -769,13 +769,13 @@ define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_
define <32 x i16> @test_mask_subs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epi16_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsubsw (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epi16_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubsw (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -789,12 +789,12 @@ declare <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_mask_adds_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_adds_epu16_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpaddusw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epu16_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpaddusw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.paddus.w.512(<32 x i16> %a, <32 x i16> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -803,14 +803,14 @@ define <32 x i16> @test_mask_adds_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @test_mask_adds_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epu16_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpaddusw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epu16_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddusw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -821,13 +821,13 @@ define <32 x i16> @test_mask_adds_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3
define <32 x i16> @test_mask_adds_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epu16_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpaddusw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epu16_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddusw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -837,12 +837,12 @@ define <32 x i16> @test_mask_adds_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <32 x i16> @test_mask_adds_epu16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; AVX512BW-LABEL: test_mask_adds_epu16_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpaddusw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epu16_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpaddusw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -853,14 +853,14 @@ define <32 x i16> @test_mask_adds_epu16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b
define <32 x i16> @test_mask_adds_epu16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epu16_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpaddusw (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epu16_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddusw (%eax), %zmm0, %zmm1 {%k1}
@@ -873,13 +873,13 @@ define <32 x i16> @test_mask_adds_epu16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_
define <32 x i16> @test_mask_adds_epu16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_adds_epu16_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpaddusw (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_adds_epu16_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpaddusw (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -893,12 +893,12 @@ declare <32 x i16> @llvm.x86.avx512.mask.paddus.w.512(<32 x i16>, <32 x i16>, <3
define <32 x i16> @test_mask_subs_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_subs_epu16_rr_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsubusw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epu16_rr_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsubusw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.psubus.w.512(<32 x i16> %a, <32 x i16> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -907,14 +907,14 @@ define <32 x i16> @test_mask_subs_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @test_mask_subs_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epu16_rrk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsubusw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epu16_rrk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubusw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -925,13 +925,13 @@ define <32 x i16> @test_mask_subs_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3
define <32 x i16> @test_mask_subs_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epu16_rrkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsubusw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epu16_rrkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubusw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -941,12 +941,12 @@ define <32 x i16> @test_mask_subs_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <32 x i16> @test_mask_subs_epu16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; AVX512BW-LABEL: test_mask_subs_epu16_rm_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsubusw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epu16_rm_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpsubusw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -957,14 +957,14 @@ define <32 x i16> @test_mask_subs_epu16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b
define <32 x i16> @test_mask_subs_epu16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epu16_rmk_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsubusw (%rdi), %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epu16_rmk_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubusw (%eax), %zmm0, %zmm1 {%k1}
@@ -977,13 +977,13 @@ define <32 x i16> @test_mask_subs_epu16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_
define <32 x i16> @test_mask_subs_epu16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
; AVX512BW-LABEL: test_mask_subs_epu16_rmkz_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsubusw (%rdi), %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_mask_subs_epu16_rmkz_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsubusw (%eax), %zmm0, %zmm0 {%k1} {z}
@@ -999,7 +999,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.512(<32 x i16>, <32 x i16
define <32 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_vpermt2var_hi_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3
; AVX512BW-NEXT: vpermt2w %zmm2, %zmm0, %zmm3 {%k1}
@@ -1008,7 +1008,7 @@ define <32 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_vpermt2var_hi_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm3
; AVX512F-32-NEXT: vpermt2w %zmm2, %zmm0, %zmm3 {%k1}
@@ -1025,7 +1025,7 @@ declare <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16>, <32 x i1
define <32 x i16>@test_int_x86_avx512_maskz_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_maskz_vpermt2var_hi_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3
; AVX512BW-NEXT: vpermt2w %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -1034,7 +1034,7 @@ define <32 x i16>@test_int_x86_avx512_maskz_vpermt2var_hi_512(<32 x i16> %x0, <3
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_maskz_vpermt2var_hi_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm3
; AVX512F-32-NEXT: vpermt2w %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -1051,7 +1051,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16>, <32 x i16
define <32 x i16>@test_int_x86_avx512_mask_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_vpermi2var_hi_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3
; AVX512BW-NEXT: vpermi2w %zmm2, %zmm0, %zmm3 {%k1}
@@ -1060,7 +1060,7 @@ define <32 x i16>@test_int_x86_avx512_mask_vpermi2var_hi_512(<32 x i16> %x0, <32
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_vpermi2var_hi_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm3
; AVX512F-32-NEXT: vpermi2w %zmm2, %zmm0, %zmm3 {%k1}
@@ -1077,12 +1077,12 @@ declare <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8>, <64 x i8>)
define <64 x i8>@test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1) {
; AVX512BW-LABEL: test_int_x86_avx512_pshuf_b_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpshufb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_pshuf_b_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpshufb %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %x0, <64 x i8> %x1)
@@ -1091,14 +1091,14 @@ define <64 x i8>@test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1) {
define <64 x i8>@test_int_x86_avx512_pshuf_b_512_mask(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %mask) {
; AVX512BW-LABEL: test_int_x86_avx512_pshuf_b_512_mask:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpshufb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_pshuf_b_512_mask:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpshufb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1111,13 +1111,13 @@ define <64 x i8>@test_int_x86_avx512_pshuf_b_512_mask(<64 x i8> %x0, <64 x i8> %
define <64 x i8>@test_int_x86_avx512_pshuf_b_512_maskz(<64 x i8> %x0, <64 x i8> %x1, i64 %mask) {
; AVX512BW-LABEL: test_int_x86_avx512_pshuf_b_512_maskz:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpshufb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_pshuf_b_512_maskz:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpshufb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1131,7 +1131,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmulhu.w.512(<32 x i16>, <32 x i16>, <3
define <32 x i16>@test_int_x86_avx512_mask_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmulhu_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmulhuw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpmulhuw %zmm1, %zmm0, %zmm0
@@ -1139,7 +1139,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmulhu_w_512(<32 x i16> %x0, <32 x i1
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmulhu_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmulhuw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vpmulhuw %zmm1, %zmm0, %zmm0
@@ -1155,7 +1155,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmulh.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16>@test_int_x86_avx512_mask_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmulh_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmulhw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpmulhw %zmm1, %zmm0, %zmm0
@@ -1163,7 +1163,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmulh_w_512(<32 x i16> %x0, <32 x i16
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmulh_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmulhw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vpmulhw %zmm1, %zmm0, %zmm0
@@ -1179,7 +1179,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmul.hr.sw.512(<32 x i16>, <32 x i16>,
define <32 x i16>@test_int_x86_avx512_mask_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmulhr_sw_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmulhrsw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpmulhrsw %zmm1, %zmm0, %zmm0
@@ -1187,7 +1187,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmulhr_sw_512(<32 x i16> %x0, <32 x i
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmulhr_sw_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmulhrsw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vpmulhrsw %zmm1, %zmm0, %zmm0
@@ -1203,7 +1203,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmov.wb.512(<32 x i16>, <32 x i8>, i32)
define <32 x i8>@test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmov_wb_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm1 {%k1}
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm2 {%k1} {z}
@@ -1213,7 +1213,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmov_wb_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmovwb %zmm0, %ymm1 {%k1}
; AVX512F-32-NEXT: vpmovwb %zmm0, %ymm2 {%k1} {z}
@@ -1233,14 +1233,14 @@ declare void @llvm.x86.avx512.mask.pmov.wb.mem.512(i8* %ptr, <32 x i16>, i32)
define void @test_int_x86_avx512_mask_pmov_wb_mem_512(i8* %ptr, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmov_wb_mem_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpmovwb %zmm0, (%rdi)
; AVX512BW-NEXT: vpmovwb %zmm0, (%rdi) {%k1}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmov_wb_mem_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpmovwb %zmm0, (%eax)
@@ -1255,7 +1255,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmovs.wb.512(<32 x i16>, <32 x i8>, i32)
define <32 x i8>@test_int_x86_avx512_mask_pmovs_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmovs_wb_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmovswb %zmm0, %ymm1 {%k1}
; AVX512BW-NEXT: vpmovswb %zmm0, %ymm2 {%k1} {z}
@@ -1265,7 +1265,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmovs_wb_512(<32 x i16> %x0, <32 x i8>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmovs_wb_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmovswb %zmm0, %ymm1 {%k1}
; AVX512F-32-NEXT: vpmovswb %zmm0, %ymm2 {%k1} {z}
@@ -1285,14 +1285,14 @@ declare void @llvm.x86.avx512.mask.pmovs.wb.mem.512(i8* %ptr, <32 x i16>, i32)
define void @test_int_x86_avx512_mask_pmovs_wb_mem_512(i8* %ptr, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmovs_wb_mem_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpmovswb %zmm0, (%rdi)
; AVX512BW-NEXT: vpmovswb %zmm0, (%rdi) {%k1}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmovs_wb_mem_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpmovswb %zmm0, (%eax)
@@ -1307,7 +1307,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmovus.wb.512(<32 x i16>, <32 x i8>, i32
define <32 x i8>@test_int_x86_avx512_mask_pmovus_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmovus_wb_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmovuswb %zmm0, %ymm1 {%k1}
; AVX512BW-NEXT: vpmovuswb %zmm0, %ymm2 {%k1} {z}
@@ -1317,7 +1317,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmovus_wb_512(<32 x i16> %x0, <32 x i8
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmovus_wb_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmovuswb %zmm0, %ymm1 {%k1}
; AVX512F-32-NEXT: vpmovuswb %zmm0, %ymm2 {%k1} {z}
@@ -1337,14 +1337,14 @@ declare void @llvm.x86.avx512.mask.pmovus.wb.mem.512(i8* %ptr, <32 x i16>, i32)
define void @test_int_x86_avx512_mask_pmovus_wb_mem_512(i8* %ptr, <32 x i16> %x1, i32 %x2) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmovus_wb_mem_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpmovuswb %zmm0, (%rdi)
; AVX512BW-NEXT: vpmovuswb %zmm0, (%rdi) {%k1}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmovus_wb_mem_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpmovuswb %zmm0, (%eax)
@@ -1359,7 +1359,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8>, <64 x i8>, <3
define <32 x i16>@test_int_x86_avx512_mask_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmaddubs_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0
@@ -1367,7 +1367,7 @@ define <32 x i16>@test_int_x86_avx512_mask_pmaddubs_w_512(<64 x i8> %x0, <64 x i
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmaddubs_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0
@@ -1383,7 +1383,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16>, <32 x i16>, <1
define <16 x i32>@test_int_x86_avx512_mask_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1, <16 x i32> %x2, i16 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmaddw_d_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmaddwd %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpmaddwd %zmm1, %zmm0, %zmm0
@@ -1391,7 +1391,7 @@ define <16 x i32>@test_int_x86_avx512_mask_pmaddw_d_512(<32 x i16> %x0, <32 x i1
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_pmaddw_d_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpmaddwd %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vpmaddwd %zmm1, %zmm0, %zmm0
@@ -1407,7 +1407,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8>, <64 x i8>, i32,
define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_dbpsadbw_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
@@ -1417,7 +1417,7 @@ define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_dbpsadbw_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
@@ -1437,14 +1437,14 @@ declare <8 x i64> @llvm.x86.avx512.psad.bw.512(<64 x i8>, <64 x i8>)
define <8 x i64>@test_int_x86_avx512_mask_psadb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2){
; AVX512BW-LABEL: test_int_x86_avx512_mask_psadb_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsadbw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psadb_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsadbw %zmm1, %zmm0, %zmm1
; AVX512F-32-NEXT: vpsadbw %zmm2, %zmm0, %zmm0
; AVX512F-32-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -1459,7 +1459,7 @@ declare i32 @llvm.x86.avx512.kunpck.wd(i32, i32)
define i32@test_int_x86_avx512_kunpck_wd(i32 %x0, i32 %x1) {
; AVX512BW-LABEL: test_int_x86_avx512_kunpck_wd:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kunpckwd %k1, %k0, %k0
@@ -1467,7 +1467,7 @@ define i32@test_int_x86_avx512_kunpck_wd(i32 %x0, i32 %x1) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_kunpck_wd:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovw {{[0-9]+}}(%esp), %k0
; AVX512F-32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: kunpckwd %k0, %k1, %k0
@@ -1481,7 +1481,7 @@ declare i64 @llvm.x86.avx512.kunpck.dq(i64, i64)
define i64@test_int_x86_avx512_kunpck_qd(i64 %x0, i64 %x1) {
; AVX512BW-LABEL: test_int_x86_avx512_kunpck_qd:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovq %rdi, %k0
; AVX512BW-NEXT: kmovq %rsi, %k1
; AVX512BW-NEXT: kunpckdq %k1, %k0, %k0
@@ -1489,7 +1489,7 @@ define i64@test_int_x86_avx512_kunpck_qd(i64 %x0, i64 %x1) {
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_kunpck_qd:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $12, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
@@ -1508,13 +1508,13 @@ declare i64 @llvm.x86.avx512.cvtb2mask.512(<64 x i8>)
define i64@test_int_x86_avx512_cvtb2mask_512(<64 x i8> %x0) {
; AVX512BW-LABEL: test_int_x86_avx512_cvtb2mask_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_cvtb2mask_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: subl $12, %esp
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
@@ -1531,13 +1531,13 @@ declare i32 @llvm.x86.avx512.cvtw2mask.512(<32 x i16>)
define i32@test_int_x86_avx512_cvtw2mask_512(<32 x i16> %x0) {
; AVX512BW-LABEL: test_int_x86_avx512_cvtw2mask_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_cvtw2mask_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmovw2m %zmm0, %k0
; AVX512F-32-NEXT: kmovd %k0, %eax
; AVX512F-32-NEXT: retl
@@ -1549,7 +1549,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16>, <32 x i16>, <32 x
define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psrlv32hi:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm2 {%k1}
@@ -1559,7 +1559,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrlv32hi:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsrlvw %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlvw %zmm1, %zmm0, %zmm2 {%k1}
@@ -1579,7 +1579,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16>, <32 x i16>, <32
define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psrav32_hi:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm2 {%k1}
@@ -1589,7 +1589,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrav32_hi:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsravw %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsravw %zmm1, %zmm0, %zmm2 {%k1}
@@ -1607,13 +1607,13 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16>
define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi_const(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psrav32_hi_const:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51]
; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrav32_hi_const:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51]
; AVX512F-32-NEXT: vpsravw {{\.LCPI.*}}, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1627,7 +1627,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16>, <32 x i16>, <32 x
define <32 x i16>@test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_psllv32hi:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm2 {%k1}
@@ -1637,7 +1637,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16>
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psllv32hi:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsllvw %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsllvw %zmm1, %zmm0, %zmm2 {%k1}
@@ -1657,7 +1657,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16>, <32 x i16>,
define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_permvar_hi_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm3 {%k1} {z}
@@ -1667,7 +1667,7 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_permvar_hi_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm3 {%k1} {z}
@@ -1685,12 +1685,12 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
define <32 x i16> @test_x86_avx512_psll_w_512(<32 x i16> %a0, <8 x i16> %a1) {
; AVX512BW-LABEL: test_x86_avx512_psll_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_psll_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.psll.w.512(<32 x i16> %a0, <8 x i16> %a1) ; <<32 x i16>> [#uses=1]
@@ -1698,14 +1698,14 @@ define <32 x i16> @test_x86_avx512_psll_w_512(<32 x i16> %a0, <8 x i16> %a1) {
}
define <32 x i16> @test_x86_avx512_mask_psll_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_mask_psll_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_mask_psll_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1717,13 +1717,13 @@ define <32 x i16> @test_x86_avx512_mask_psll_w_512(<32 x i16> %a0, <8 x i16> %a1
}
define <32 x i16> @test_x86_avx512_maskz_psll_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_maskz_psll_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_maskz_psll_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1737,12 +1737,12 @@ declare <32 x i16> @llvm.x86.avx512.psll.w.512(<32 x i16>, <8 x i16>) nounwind r
define <32 x i16> @test_x86_avx512_pslli_w_512(<32 x i16> %a0) {
; AVX512BW-LABEL: test_x86_avx512_pslli_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_pslli_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> %a0, i32 7) ; <<32 x i16>> [#uses=1]
@@ -1750,14 +1750,14 @@ define <32 x i16> @test_x86_avx512_pslli_w_512(<32 x i16> %a0) {
}
define <32 x i16> @test_x86_avx512_mask_pslli_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_mask_pslli_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_mask_pslli_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsllw $7, %zmm0, %zmm1 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1769,13 +1769,13 @@ define <32 x i16> @test_x86_avx512_mask_pslli_w_512(<32 x i16> %a0, <32 x i16> %
}
define <32 x i16> @test_x86_avx512_maskz_pslli_w_512(<32 x i16> %a0, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_maskz_pslli_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_maskz_pslli_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsllw $7, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1789,12 +1789,12 @@ declare <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16>, i32) nounwind readno
define <32 x i16> @test_x86_avx512_psra_w_512(<32 x i16> %a0, <8 x i16> %a1) {
; AVX512BW-LABEL: test_x86_avx512_psra_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_psra_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.psra.w.512(<32 x i16> %a0, <8 x i16> %a1) ; <<32 x i16>> [#uses=1]
@@ -1802,14 +1802,14 @@ define <32 x i16> @test_x86_avx512_psra_w_512(<32 x i16> %a0, <8 x i16> %a1) {
}
define <32 x i16> @test_x86_avx512_mask_psra_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_mask_psra_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_mask_psra_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1821,13 +1821,13 @@ define <32 x i16> @test_x86_avx512_mask_psra_w_512(<32 x i16> %a0, <8 x i16> %a1
}
define <32 x i16> @test_x86_avx512_maskz_psra_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_maskz_psra_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_maskz_psra_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1841,12 +1841,12 @@ declare <32 x i16> @llvm.x86.avx512.psra.w.512(<32 x i16>, <8 x i16>) nounwind r
define <32 x i16> @test_x86_avx512_psrai_w_512(<32 x i16> %a0) {
; AVX512BW-LABEL: test_x86_avx512_psrai_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsraw $7, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_psrai_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsraw $7, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16> %a0, i32 7) ; <<32 x i16>> [#uses=1]
@@ -1854,14 +1854,14 @@ define <32 x i16> @test_x86_avx512_psrai_w_512(<32 x i16> %a0) {
}
define <32 x i16> @test_x86_avx512_mask_psrai_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_mask_psrai_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsraw $7, %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_mask_psrai_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsraw $7, %zmm0, %zmm1 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1873,13 +1873,13 @@ define <32 x i16> @test_x86_avx512_mask_psrai_w_512(<32 x i16> %a0, <32 x i16> %
}
define <32 x i16> @test_x86_avx512_maskz_psrai_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_maskz_psrai_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsraw $7, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_maskz_psrai_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsraw $7, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1893,12 +1893,12 @@ declare <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16>, i32) nounwind readno
define <32 x i16> @test_x86_avx512_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1) {
; AVX512BW-LABEL: test_x86_avx512_psrl_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_psrl_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16> %a0, <8 x i16> %a1) ; <<32 x i16>> [#uses=1]
@@ -1906,14 +1906,14 @@ define <32 x i16> @test_x86_avx512_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1) {
}
define <32 x i16> @test_x86_avx512_mask_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_mask_psrl_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_mask_psrl_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1925,13 +1925,13 @@ define <32 x i16> @test_x86_avx512_mask_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1
}
define <32 x i16> @test_x86_avx512_maskz_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_maskz_psrl_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_maskz_psrl_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
@@ -1945,12 +1945,12 @@ declare <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16>, <8 x i16>) nounwind r
define <32 x i16> @test_x86_avx512_psrli_w_512(<32 x i16> %a0) {
; AVX512BW-LABEL: test_x86_avx512_psrli_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsrlw $7, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_psrli_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsrlw $7, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> %a0, i32 7) ; <<32 x i16>> [#uses=1]
@@ -1958,14 +1958,14 @@ define <32 x i16> @test_x86_avx512_psrli_w_512(<32 x i16> %a0) {
}
define <32 x i16> @test_x86_avx512_mask_psrli_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_mask_psrli_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlw $7, %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_mask_psrli_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw $7, %zmm0, %zmm1 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1977,13 +1977,13 @@ define <32 x i16> @test_x86_avx512_mask_psrli_w_512(<32 x i16> %a0, <32 x i16> %
}
define <32 x i16> @test_x86_avx512_maskz_psrli_w_512(<32 x i16> %a0, i32 %mask) {
; AVX512BW-LABEL: test_x86_avx512_maskz_psrli_w_512:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlw $7, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_x86_avx512_maskz_psrli_w_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw $7, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
diff --git a/test/CodeGen/X86/avx512bw-mask-op.ll b/test/CodeGen/X86/avx512bw-mask-op.ll
index e000ef4068f..6d5ea0d8599 100644
--- a/test/CodeGen/X86/avx512bw-mask-op.ll
+++ b/test/CodeGen/X86/avx512bw-mask-op.ll
@@ -3,7 +3,7 @@
define i32 @mask32(i32 %x) {
; CHECK-LABEL: mask32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotd %k0, %k0
; CHECK-NEXT: kmovd %k0, %eax
@@ -19,7 +19,7 @@ define i32 @mask32(i32 %x) {
define i64 @mask64(i64 %x) {
; CHECK-LABEL: mask64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k0
; CHECK-NEXT: knotq %k0, %k0
; CHECK-NEXT: kmovq %k0, %rax
@@ -39,7 +39,7 @@ define i64 @mask64(i64 %x) {
define void @mask32_mem(i32* %ptr) {
; CHECK-LABEL: mask32_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd (%rdi), %k0
; CHECK-NEXT: knotd %k0, %k0
; CHECK-NEXT: kmovd %k0, (%rdi)
@@ -57,7 +57,7 @@ define void @mask32_mem(i32* %ptr) {
define void @mask64_mem(i64* %ptr) {
; CHECK-LABEL: mask64_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq (%rdi), %k0
; CHECK-NEXT: knotq %k0, %k0
; CHECK-NEXT: kmovq %k0, (%rdi)
@@ -79,7 +79,7 @@ define void @mask64_mem(i64* %ptr) {
define i32 @mand32(i32 %x, i32 %y) {
; CHECK-LABEL: mand32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: andl %esi, %eax
; CHECK-NEXT: xorl %esi, %edi
@@ -97,7 +97,7 @@ define i32 @mand32(i32 %x, i32 %y) {
define i32 @mand32_mem(<32 x i1>* %x, <32 x i1>* %y) {
; CHECK-LABEL: mand32_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd (%rdi), %k0
; CHECK-NEXT: kmovd (%rsi), %k1
; CHECK-NEXT: kandd %k1, %k0, %k2
@@ -116,7 +116,7 @@ define i32 @mand32_mem(<32 x i1>* %x, <32 x i1>* %y) {
define i64 @mand64(i64 %x, i64 %y) {
; CHECK-LABEL: mand64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: andq %rsi, %rax
; CHECK-NEXT: xorq %rsi, %rdi
@@ -134,7 +134,7 @@ define i64 @mand64(i64 %x, i64 %y) {
define i64 @mand64_mem(<64 x i1>* %x, <64 x i1>* %y) {
; CHECK-LABEL: mand64_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq (%rdi), %k0
; CHECK-NEXT: kmovq (%rsi), %k1
; CHECK-NEXT: kandq %k1, %k0, %k2
@@ -153,7 +153,7 @@ define i64 @mand64_mem(<64 x i1>* %x, <64 x i1>* %y) {
define i32 @test_v32i1_add(i32 %x, i32 %y) {
; CHECK-LABEL: test_v32i1_add:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxord %k1, %k0, %k0
@@ -168,7 +168,7 @@ define i32 @test_v32i1_add(i32 %x, i32 %y) {
define i32 @test_v32i1_sub(i32 %x, i32 %y) {
; CHECK-LABEL: test_v32i1_sub:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxord %k1, %k0, %k0
@@ -183,7 +183,7 @@ define i32 @test_v32i1_sub(i32 %x, i32 %y) {
define i32 @test_v32i1_mul(i32 %x, i32 %y) {
; CHECK-LABEL: test_v32i1_mul:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kandd %k1, %k0, %k0
@@ -198,7 +198,7 @@ define i32 @test_v32i1_mul(i32 %x, i32 %y) {
define i64 @test_v64i1_add(i64 %x, i64 %y) {
; CHECK-LABEL: test_v64i1_add:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k0
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: kxorq %k1, %k0, %k0
@@ -213,7 +213,7 @@ define i64 @test_v64i1_add(i64 %x, i64 %y) {
define i64 @test_v64i1_sub(i64 %x, i64 %y) {
; CHECK-LABEL: test_v64i1_sub:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k0
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: kxorq %k1, %k0, %k0
@@ -228,7 +228,7 @@ define i64 @test_v64i1_sub(i64 %x, i64 %y) {
define i64 @test_v64i1_mul(i64 %x, i64 %y) {
; CHECK-LABEL: test_v64i1_mul:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k0
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: kandq %k1, %k0, %k0
diff --git a/test/CodeGen/X86/avx512bw-mov.ll b/test/CodeGen/X86/avx512bw-mov.ll
index ab099d911d3..82a3f6310f5 100644
--- a/test/CodeGen/X86/avx512bw-mov.ll
+++ b/test/CodeGen/X86/avx512bw-mov.ll
@@ -3,7 +3,7 @@
define <64 x i8> @test1(i8 * %addr) {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0
; CHECK-NEXT: retq
%vaddr = bitcast i8* %addr to <64 x i8>*
@@ -13,7 +13,7 @@ define <64 x i8> @test1(i8 * %addr) {
define void @test2(i8 * %addr, <64 x i8> %data) {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %zmm0, (%rdi)
; CHECK-NEXT: retq
%vaddr = bitcast i8* %addr to <64 x i8>*
@@ -23,7 +23,7 @@ define void @test2(i8 * %addr, <64 x i8> %data) {
define <64 x i8> @test3(i8 * %addr, <64 x i8> %old, <64 x i8> %mask1) {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqb %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1}
@@ -37,7 +37,7 @@ define <64 x i8> @test3(i8 * %addr, <64 x i8> %old, <64 x i8> %mask1) {
define <64 x i8> @test4(i8 * %addr, <64 x i8> %mask1) {
; CHECK-LABEL: test4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpneqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
@@ -51,7 +51,7 @@ define <64 x i8> @test4(i8 * %addr, <64 x i8> %mask1) {
define <32 x i16> @test5(i8 * %addr) {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0
; CHECK-NEXT: retq
%vaddr = bitcast i8* %addr to <32 x i16>*
@@ -61,7 +61,7 @@ define <32 x i16> @test5(i8 * %addr) {
define void @test6(i8 * %addr, <32 x i16> %data) {
; CHECK-LABEL: test6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %zmm0, (%rdi)
; CHECK-NEXT: retq
%vaddr = bitcast i8* %addr to <32 x i16>*
@@ -71,7 +71,7 @@ define void @test6(i8 * %addr, <32 x i16> %data) {
define <32 x i16> @test7(i8 * %addr, <32 x i16> %old, <32 x i16> %mask1) {
; CHECK-LABEL: test7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqw %zmm2, %zmm1, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1}
@@ -85,7 +85,7 @@ define <32 x i16> @test7(i8 * %addr, <32 x i16> %old, <32 x i16> %mask1) {
define <32 x i16> @test8(i8 * %addr, <32 x i16> %mask1) {
; CHECK-LABEL: test8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpcmpneqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
@@ -99,7 +99,7 @@ define <32 x i16> @test8(i8 * %addr, <32 x i16> %mask1) {
define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_load_16xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftlq $48, %k0, %k0
@@ -114,7 +114,7 @@ declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_load_32xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftlq $32, %k0, %k0
@@ -129,7 +129,7 @@ declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)
define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_load_8xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %zmm0, %k0
; CHECK-NEXT: kshiftld $24, %k0, %k0
@@ -144,7 +144,7 @@ declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_load_16xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftld $16, %k0, %k0
@@ -159,7 +159,7 @@ declare <16 x i16> @llvm.masked.load.v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i
define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
@@ -174,7 +174,7 @@ declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
@@ -189,7 +189,7 @@ declare void @llvm.masked.store.v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)
define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %zmm0, %k0
@@ -204,7 +204,7 @@ declare void @llvm.masked.store.v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
diff --git a/test/CodeGen/X86/avx512bw-vec-cmp.ll b/test/CodeGen/X86/avx512bw-vec-cmp.ll
index 016837e6130..3d400e1b472 100644
--- a/test/CodeGen/X86/avx512bw-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512bw-vec-cmp.ll
@@ -3,7 +3,7 @@
define <64 x i8> @test1(<64 x i8> %x, <64 x i8> %y) nounwind {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -14,7 +14,7 @@ define <64 x i8> @test1(<64 x i8> %x, <64 x i8> %y) nounwind {
define <64 x i8> @test2(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmb %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -25,7 +25,7 @@ define <64 x i8> @test2(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
define <32 x i16> @test3(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1) nounwind {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k1
; CHECK-NEXT: vpblendmw %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -36,7 +36,7 @@ define <32 x i16> @test3(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1) nounwind
define <64 x i8> @test4(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
; CHECK-LABEL: test4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpnleub %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmb %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -47,7 +47,7 @@ define <64 x i8> @test4(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
define <32 x i16> @test5(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %yp) nounwind {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -59,7 +59,7 @@ define <32 x i16> @test5(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %yp) nounwin
define <32 x i16> @test6(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -71,7 +71,7 @@ define <32 x i16> @test6(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
define <32 x i16> @test7(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -83,7 +83,7 @@ define <32 x i16> @test7(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
define <32 x i16> @test8(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleuw (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -95,7 +95,7 @@ define <32 x i16> @test8(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
define <32 x i16> @test9(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1, <32 x i16> %y1) nounwind {
; CHECK-LABEL: test9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 {%k1}
; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
@@ -109,7 +109,7 @@ define <32 x i16> @test9(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1, <32 x i16
define <64 x i8> @test10(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1, <64 x i8> %y1) nounwind {
; CHECK-LABEL: test10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleb %zmm2, %zmm3, %k1 {%k1}
; CHECK-NEXT: vpblendmb %zmm0, %zmm2, %zmm0 {%k1}
@@ -123,7 +123,7 @@ define <64 x i8> @test10(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1, <64 x i8> %y
define <64 x i8> @test11(<64 x i8> %x, <64 x i8>* %y.ptr, <64 x i8> %x1, <64 x i8> %y1) nounwind {
; CHECK-LABEL: test11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
@@ -138,7 +138,7 @@ define <64 x i8> @test11(<64 x i8> %x, <64 x i8>* %y.ptr, <64 x i8> %x1, <64 x i
define <32 x i16> @test12(<32 x i16> %x, <32 x i16>* %y.ptr, <32 x i16> %x1, <32 x i16> %y1) nounwind {
; CHECK-LABEL: test12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
diff --git a/test/CodeGen/X86/avx512bw-vec-test-testn.ll b/test/CodeGen/X86/avx512bw-vec-test-testn.ll
index 82d0b8846de..6ae2f093a2b 100644
--- a/test/CodeGen/X86/avx512bw-vec-test-testn.ll
+++ b/test/CodeGen/X86/avx512bw-vec-test-testn.ll
@@ -4,7 +4,7 @@
; Function Attrs: norecurse nounwind readnone
define zeroext i32 @TEST_mm512_test_epi16_mask(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_test_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: vzeroupper
@@ -21,7 +21,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i64 @TEST_mm512_test_epi8_mask(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_test_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmb %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovq %k0, %rax
; CHECK-NEXT: vzeroupper
@@ -37,7 +37,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i32 @TEST_mm512_mask_test_epi16_mask(i32 %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_mask_test_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -56,7 +56,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i64 @TEST_mm512_mask_test_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_mask_test_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vptestmb %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovq %k0, %rax
@@ -75,7 +75,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i32 @TEST_mm512_testn_epi16_mask(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_testn_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: vzeroupper
@@ -92,7 +92,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i64 @TEST_mm512_testn_epi8_mask(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_testn_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmb %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovq %k0, %rax
; CHECK-NEXT: vzeroupper
@@ -108,7 +108,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i32 @TEST_mm512_mask_testn_epi16_mask(i32 %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_mask_testn_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -127,7 +127,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i64 @TEST_mm512_mask_testn_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_mask_testn_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vptestnmb %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovq %k0, %rax
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
index 92354be63df..aac83f47ae3 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
@@ -6,14 +6,14 @@
define zeroext i16 @test_mm_test_epi8_mask(<2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_test_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmb %xmm0, %xmm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: movzwl %ax, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_test_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmb %xmm0, %xmm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: movzwl %ax, %eax
@@ -28,7 +28,7 @@ entry:
define zeroext i16 @test_mm_mask_test_epi8_mask(i16 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask_test_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestmb %xmm0, %xmm1, %k0 {%k1}
; X32-NEXT: kmovd %k0, %eax
@@ -36,7 +36,7 @@ define zeroext i16 @test_mm_mask_test_epi8_mask(i16 zeroext %__U, <2 x i64> %__A
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_test_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestmb %xmm0, %xmm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -54,14 +54,14 @@ entry:
define i32 @test_mm256_test_epi8_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_test_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmb %ymm0, %ymm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_test_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmb %ymm0, %ymm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: vzeroupper
@@ -76,7 +76,7 @@ entry:
define i32 @test_mm256_mask_test_epi8_mask(i32 %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_test_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestmb %ymm0, %ymm1, %k0 {%k1}
; X32-NEXT: kmovd %k0, %eax
@@ -84,7 +84,7 @@ define i32 @test_mm256_mask_test_epi8_mask(i32 %__U, <4 x i64> %__A, <4 x i64> %
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_test_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestmb %ymm0, %ymm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -102,14 +102,14 @@ entry:
define zeroext i8 @test_mm_test_epi16_mask(<2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_test_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmw %xmm0, %xmm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_test_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmw %xmm0, %xmm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -124,7 +124,7 @@ entry:
define zeroext i8 @test_mm_mask_test_epi16_mask(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask_test_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vptestmw %xmm0, %xmm1, %k0 {%k1}
@@ -133,7 +133,7 @@ define zeroext i8 @test_mm_mask_test_epi16_mask(i8 zeroext %__U, <2 x i64> %__A,
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_test_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestmw %xmm0, %xmm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -151,7 +151,7 @@ entry:
define zeroext i16 @test_mm256_test_epi16_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_test_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmw %ymm0, %ymm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: movzwl %ax, %eax
@@ -159,7 +159,7 @@ define zeroext i16 @test_mm256_test_epi16_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_test_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmw %ymm0, %ymm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: movzwl %ax, %eax
@@ -175,7 +175,7 @@ entry:
define zeroext i16 @test_mm256_mask_test_epi16_mask(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_test_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestmw %ymm0, %ymm1, %k0 {%k1}
; X32-NEXT: kmovd %k0, %eax
@@ -184,7 +184,7 @@ define zeroext i16 @test_mm256_mask_test_epi16_mask(i16 zeroext %__U, <4 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_test_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestmw %ymm0, %ymm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -203,14 +203,14 @@ entry:
define zeroext i16 @test_mm_testn_epi8_mask(<2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_testn_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmb %xmm0, %xmm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: movzwl %ax, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testn_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmb %xmm0, %xmm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: movzwl %ax, %eax
@@ -225,7 +225,7 @@ entry:
define zeroext i16 @test_mm_mask_testn_epi8_mask(i16 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask_testn_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestnmb %xmm0, %xmm1, %k0 {%k1}
; X32-NEXT: kmovd %k0, %eax
@@ -233,7 +233,7 @@ define zeroext i16 @test_mm_mask_testn_epi8_mask(i16 zeroext %__U, <2 x i64> %__
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_testn_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestnmb %xmm0, %xmm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -251,14 +251,14 @@ entry:
define i32 @test_mm256_testn_epi8_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_testn_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmb %ymm0, %ymm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testn_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmb %ymm0, %ymm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: vzeroupper
@@ -273,7 +273,7 @@ entry:
define i32 @test_mm256_mask_testn_epi8_mask(i32 %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_testn_epi8_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestnmb %ymm0, %ymm1, %k0 {%k1}
; X32-NEXT: kmovd %k0, %eax
@@ -281,7 +281,7 @@ define i32 @test_mm256_mask_testn_epi8_mask(i32 %__U, <4 x i64> %__A, <4 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_testn_epi8_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestnmb %ymm0, %ymm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -299,14 +299,14 @@ entry:
define zeroext i8 @test_mm_testn_epi16_mask(<2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_testn_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmw %xmm0, %xmm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testn_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmw %xmm0, %xmm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -321,7 +321,7 @@ entry:
define zeroext i8 @test_mm_mask_testn_epi16_mask(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask_testn_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vptestnmw %xmm0, %xmm1, %k0 {%k1}
@@ -330,7 +330,7 @@ define zeroext i8 @test_mm_mask_testn_epi16_mask(i8 zeroext %__U, <2 x i64> %__A
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_testn_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestnmw %xmm0, %xmm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -348,7 +348,7 @@ entry:
define zeroext i16 @test_mm256_testn_epi16_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_testn_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmw %ymm0, %ymm1, %k0
; X32-NEXT: kmovd %k0, %eax
; X32-NEXT: movzwl %ax, %eax
@@ -356,7 +356,7 @@ define zeroext i16 @test_mm256_testn_epi16_mask(<4 x i64> %__A, <4 x i64> %__B)
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testn_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmw %ymm0, %ymm1, %k0
; X64-NEXT: kmovd %k0, %eax
; X64-NEXT: movzwl %ax, %eax
@@ -372,7 +372,7 @@ entry:
define zeroext i16 @test_mm256_mask_testn_epi16_mask(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_testn_epi16_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vptestnmw %ymm0, %ymm1, %k0 {%k1}
; X32-NEXT: kmovd %k0, %eax
@@ -381,7 +381,7 @@ define zeroext i16 @test_mm256_mask_testn_epi16_mask(i16 zeroext %__U, <4 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_testn_epi16_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vptestnmw %ymm0, %ymm1, %k0 {%k1}
; X64-NEXT: kmovd %k0, %eax
@@ -400,14 +400,14 @@ entry:
define <2 x i64> @test_mm_mask_set1_epi8(<2 x i64> %__O, i16 zeroext %__M, i8 signext %__A) local_unnamed_addr #0 {
; X32-LABEL: test_mm_mask_set1_epi8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastb %eax, %xmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_set1_epi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %esi, %xmm0 {%k1}
; X64-NEXT: retq
@@ -423,14 +423,14 @@ entry:
define <2 x i64> @test_mm_maskz_set1_epi8(i16 zeroext %__M, i8 signext %__A) {
; X32-LABEL: test_mm_maskz_set1_epi8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastb %eax, %xmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_set1_epi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %esi, %xmm0 {%k1} {z}
; X64-NEXT: retq
@@ -445,14 +445,14 @@ entry:
define <4 x i64> @test_mm256_mask_set1_epi8(<4 x i64> %__O, i32 %__M, i8 signext %__A){
; X32-LABEL: test_mm256_mask_set1_epi8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastb %eax, %ymm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_set1_epi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %esi, %ymm0 {%k1}
; X64-NEXT: retq
@@ -468,14 +468,14 @@ entry:
define <4 x i64> @test_mm256_maskz_set1_epi8(i32 %__M, i8 signext %__A) {
; X32-LABEL: test_mm256_maskz_set1_epi8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastb %eax, %ymm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_set1_epi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %esi, %ymm0 {%k1} {z}
; X64-NEXT: retq
@@ -490,14 +490,14 @@ entry:
define <4 x i64> @test_mm256_mask_set1_epi16(<4 x i64> %__O, i16 zeroext %__M, i16 signext %__A) {
; X32-LABEL: test_mm256_mask_set1_epi16:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastw %eax, %ymm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_set1_epi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %esi, %ymm0 {%k1}
; X64-NEXT: retq
@@ -513,14 +513,14 @@ entry:
define <4 x i64> @test_mm256_maskz_set1_epi16(i16 zeroext %__M, i16 signext %__A) {
; X32-LABEL: test_mm256_maskz_set1_epi16:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastw %eax, %ymm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_set1_epi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %esi, %ymm0 {%k1} {z}
; X64-NEXT: retq
@@ -535,7 +535,7 @@ entry:
define <2 x i64> @test_mm_mask_set1_epi16(<2 x i64> %__O, i8 zeroext %__M, i16 signext %__A) {
; X32-LABEL: test_mm_mask_set1_epi16:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: kmovd %ecx, %k1
@@ -543,7 +543,7 @@ define <2 x i64> @test_mm_mask_set1_epi16(<2 x i64> %__O, i8 zeroext %__M, i16 s
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_set1_epi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %esi, %xmm0 {%k1}
; X64-NEXT: retq
@@ -559,7 +559,7 @@ entry:
define <2 x i64> @test_mm_maskz_set1_epi16(i8 zeroext %__M, i16 signext %__A) {
; X32-LABEL: test_mm_maskz_set1_epi16:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: kmovd %ecx, %k1
@@ -567,7 +567,7 @@ define <2 x i64> @test_mm_maskz_set1_epi16(i8 zeroext %__M, i16 signext %__A) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_set1_epi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %esi, %xmm0 {%k1} {z}
; X64-NEXT: retq
@@ -583,12 +583,12 @@ entry:
define <2 x i64> @test_mm_broadcastb_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -599,13 +599,13 @@ define <2 x i64> @test_mm_broadcastb_epi8(<2 x i64> %a0) {
define <2 x i64> @test_mm_mask_broadcastb_epi8(<2 x i64> %a0, i16 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_mask_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastb %xmm1, %xmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
@@ -620,13 +620,13 @@ define <2 x i64> @test_mm_mask_broadcastb_epi8(<2 x i64> %a0, i16 %a1, <2 x i64>
define <2 x i64> @test_mm_maskz_broadcastb_epi8(i16 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maskz_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
@@ -640,12 +640,12 @@ define <2 x i64> @test_mm_maskz_broadcastb_epi8(i16 %a0, <2 x i64> %a1) {
define <4 x i64> @test_mm256_broadcastb_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -656,13 +656,13 @@ define <4 x i64> @test_mm256_broadcastb_epi8(<2 x i64> %a0) {
define <4 x i64> @test_mm256_mask_broadcastb_epi8(<4 x i64> %a0, i32 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm256_mask_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastb %xmm1, %ymm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %xmm1, %ymm0 {%k1}
; X64-NEXT: retq
@@ -677,13 +677,13 @@ define <4 x i64> @test_mm256_mask_broadcastb_epi8(<4 x i64> %a0, i32 %a1, <2 x i
define <4 x i64> @test_mm256_maskz_broadcastb_epi8(i32 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastb_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastb %xmm0, %ymm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastb_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %xmm0, %ymm0 {%k1} {z}
; X64-NEXT: retq
@@ -697,12 +697,12 @@ define <4 x i64> @test_mm256_maskz_broadcastb_epi8(i32 %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_broadcastw_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -713,14 +713,14 @@ define <2 x i64> @test_mm_broadcastw_epi16(<2 x i64> %a0) {
define <2 x i64> @test_mm_mask_broadcastw_epi16(<2 x i64> %a0, i8 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_mask_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpbroadcastw %xmm1, %xmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
@@ -735,14 +735,14 @@ define <2 x i64> @test_mm_mask_broadcastw_epi16(<2 x i64> %a0, i8 %a1, <2 x i64>
define <2 x i64> @test_mm_maskz_broadcastw_epi16(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maskz_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
@@ -756,12 +756,12 @@ define <2 x i64> @test_mm_maskz_broadcastw_epi16(i8 %a0, <2 x i64> %a1) {
define <4 x i64> @test_mm256_broadcastw_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -772,13 +772,13 @@ define <4 x i64> @test_mm256_broadcastw_epi16(<2 x i64> %a0) {
define <4 x i64> @test_mm256_mask_broadcastw_epi16(<4 x i64> %a0, i16 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm256_mask_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastw %xmm1, %ymm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm1, %ymm0 {%k1}
; X64-NEXT: retq
@@ -793,13 +793,13 @@ define <4 x i64> @test_mm256_mask_broadcastw_epi16(<4 x i64> %a0, i16 %a1, <2 x
define <4 x i64> @test_mm256_maskz_broadcastw_epi16(i16 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastw_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastw_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z}
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
index 4f4f700a4c2..06b97cf6a41 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -5,7 +5,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pbroadcast.b.gpr.128(i8, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pbroadcast_b_gpr_128(i8 %x0, <16 x i8> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_b_gpr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %edi, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xcf]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7a,0xc7]
@@ -26,7 +26,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pbroadcast.w.gpr.128(i16, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_128(i16 %x0, <8 x i16> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %edi, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xcf]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7b,0xc7]
@@ -47,7 +47,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_128(i16 %x0, <8 x i16
define <32 x i8>@test_int_x86_avx512_mask_pbroadcast_b_gpr_256(i8 %x0, <32 x i8> %x1, i32 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_b_gpr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %edi, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7a,0xcf]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7a,0xc7]
@@ -69,7 +69,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pbroadcast.w.gpr.256(i16, <16 x i16>, i
define <16 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_256(i16 %x0, <16 x i16> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %edi, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7b,0xcf]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7b,0xc7]
@@ -89,7 +89,7 @@ declare <32 x i8> @llvm.x86.avx512.pbroadcastb.256(<16 x i8>, <32 x i8>, i32)
define <32 x i8>@test_int_x86_avx512_pbroadcastb_256(<16 x i8> %x0, <32 x i8> %x1, i32 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x78,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x78,0xc8]
@@ -109,7 +109,7 @@ declare <16 x i8> @llvm.x86.avx512.pbroadcastb.128(<16 x i8>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_pbroadcastb_128(<16 x i8> %x0, <16 x i8> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x78,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x78,0xc8]
@@ -129,7 +129,7 @@ declare <16 x i16> @llvm.x86.avx512.pbroadcastw.256(<8 x i16>, <16 x i16>, i16)
define <16 x i16>@test_int_x86_avx512_pbroadcastw_256(<8 x i16> %x0, <16 x i16> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x79,0xc8]
@@ -149,7 +149,7 @@ declare <8 x i16> @llvm.x86.avx512.pbroadcastw.128(<8 x i16>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_pbroadcastw_128(<8 x i16> %x0, <8 x i16> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x79,0xc8]
@@ -169,7 +169,7 @@ declare <64 x i8> @llvm.x86.avx512.pbroadcastb.512(<16 x i8>, <64 x i8>, i64)
define <64 x i8>@test_int_x86_avx512_pbroadcastb_512(<16 x i8> %x0, <64 x i8> %x1, i64 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x78,0xd0]
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x78,0xc8]
@@ -189,7 +189,7 @@ declare <32 x i16> @llvm.x86.avx512.pbroadcastw.512(<8 x i16>, <32 x i16>, i32)
define <32 x i16>@test_int_x86_avx512_pbroadcastw_512(<8 x i16> %x0, <32 x i16> %x1, i32 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastw_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x79,0xc8]
@@ -209,7 +209,7 @@ declare void @llvm.x86.avx512.mask.storeu.b.128(i8*, <16 x i8>, i16)
define void@test_int_x86_avx512_mask_storeu_b_128(i8* %ptr1, i8* %ptr2, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu8 %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7f,0x09,0x7f,0x07]
; CHECK-NEXT: vmovdqu %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x06]
@@ -223,7 +223,7 @@ declare void @llvm.x86.avx512.mask.storeu.b.256(i8*, <32 x i8>, i32)
define void@test_int_x86_avx512_mask_storeu_b_256(i8* %ptr1, i8* %ptr2, <32 x i8> %x1, i32 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu8 %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7f,0x29,0x7f,0x07]
; CHECK-NEXT: vmovdqu %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x7f,0x06]
@@ -238,7 +238,7 @@ declare void @llvm.x86.avx512.mask.storeu.w.128(i8*, <8 x i16>, i8)
define void@test_int_x86_avx512_mask_storeu_w_128(i8* %ptr1, i8* %ptr2, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu16 %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x7f,0x07]
; CHECK-NEXT: vmovdqu %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x06]
@@ -252,7 +252,7 @@ declare void @llvm.x86.avx512.mask.storeu.w.256(i8*, <16 x i16>, i16)
define void@test_int_x86_avx512_mask_storeu_w_256(i8* %ptr1, i8* %ptr2, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu16 %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x7f,0x07]
; CHECK-NEXT: vmovdqu %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x7f,0x06]
@@ -267,7 +267,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.loadu.w.128(i8*, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_loadu_w_128(i8* %ptr, i8* %ptr2, <8 x i16> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_loadu_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x07]
; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu16 (%rsi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x6f,0x06]
@@ -285,7 +285,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.loadu.w.256(i8*, <16 x i16>, i16)
define <16 x i16>@test_int_x86_avx512_mask_loadu_w_256(i8* %ptr, i8* %ptr2, <16 x i16> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_loadu_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x6f,0x07]
; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu16 (%rsi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x6f,0x06]
@@ -303,7 +303,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.loadu.b.128(i8*, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_loadu_b_128(i8* %ptr, i8* %ptr2, <16 x i8> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_loadu_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x07]
; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu8 (%rsi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7f,0x09,0x6f,0x06]
@@ -321,7 +321,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.loadu.b.256(i8*, <32 x i8>, i32)
define <32 x i8>@test_int_x86_avx512_mask_loadu_b_256(i8* %ptr, i8* %ptr2, <32 x i8> %x1, i32 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_loadu_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x6f,0x07]
; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu8 (%rsi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7f,0x29,0x6f,0x06]
@@ -339,7 +339,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8>, <16 x i8>, i32, <
define <16 x i8>@test_int_x86_avx512_mask_palignr_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_palignr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0f,0xd9,0x02]
; CHECK-NEXT: ## xmm3 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -362,7 +362,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8>, <32 x i8>, i32, <
define <32 x i8>@test_int_x86_avx512_mask_palignr_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_palignr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x0f,0xd9,0x02]
; CHECK-NEXT: ## ymm3 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -385,7 +385,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pshufh.w.128(<8 x i16>, i32, <8 x i16>,
define <8 x i16>@test_int_x86_avx512_mask_pshufh_w_128(<8 x i16> %x0, i32 %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshufh_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshufhw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x70,0xd0,0x03]
; CHECK-NEXT: ## xmm2 = xmm0[0,1,2,3,7,4,4,4]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
@@ -408,7 +408,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pshufh.w.256(<16 x i16>, i32, <16 x i16
define <16 x i16>@test_int_x86_avx512_mask_pshufh_w_256(<16 x i16> %x0, i32 %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshufh_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshufhw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x70,0xd0,0x03]
; CHECK-NEXT: ## ymm2 = ymm0[0,1,2,3,7,4,4,4,8,9,10,11,15,12,12,12]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
@@ -431,7 +431,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pshufl.w.128(<8 x i16>, i32, <8 x i16>,
define <8 x i16>@test_int_x86_avx512_mask_pshufl_w_128(<8 x i16> %x0, i32 %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshufl_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshuflw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x70,0xd0,0x03]
; CHECK-NEXT: ## xmm2 = xmm0[3,0,0,0,4,5,6,7]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
@@ -454,7 +454,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pshufl.w.256(<16 x i16>, i32, <16 x i16
define <16 x i16>@test_int_x86_avx512_mask_pshufl_w_256(<16 x i16> %x0, i32 %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshufl_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshuflw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xff,0x70,0xd0,0x03]
; CHECK-NEXT: ## ymm2 = ymm0[3,0,0,0,4,5,6,7,11,8,8,8,12,13,14,15]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
@@ -475,7 +475,7 @@ define <16 x i16>@test_int_x86_avx512_mask_pshufl_w_256(<16 x i16> %x0, i32 %x1,
define i32 @test_pcmpeq_b_256(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: test_pcmpeq_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
@@ -486,7 +486,7 @@ define i32 @test_pcmpeq_b_256(<32 x i8> %a, <32 x i8> %b) {
define i32 @test_mask_pcmpeq_b_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
@@ -500,7 +500,7 @@ declare i32 @llvm.x86.avx512.mask.pcmpeq.b.256(<32 x i8>, <32 x i8>, i32)
define i16 @test_pcmpeq_w_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_pcmpeq_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -512,7 +512,7 @@ define i16 @test_pcmpeq_w_256(<16 x i16> %a, <16 x i16> %b) {
define i16 @test_mask_pcmpeq_w_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
@@ -527,7 +527,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16>, <16 x i16>, i16)
define i32 @test_pcmpgt_b_256(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: test_pcmpgt_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
@@ -538,7 +538,7 @@ define i32 @test_pcmpgt_b_256(<32 x i8> %a, <32 x i8> %b) {
define i32 @test_mask_pcmpgt_b_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
@@ -552,7 +552,7 @@ declare i32 @llvm.x86.avx512.mask.pcmpgt.b.256(<32 x i8>, <32 x i8>, i32)
define i16 @test_pcmpgt_w_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_pcmpgt_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -564,7 +564,7 @@ define i16 @test_pcmpgt_w_256(<16 x i16> %a, <16 x i16> %b) {
define i16 @test_mask_pcmpgt_w_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
@@ -579,7 +579,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16>, <16 x i16>, i16)
define i16 @test_pcmpeq_b_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_pcmpeq_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -590,7 +590,7 @@ define i16 @test_pcmpeq_b_128(<16 x i8> %a, <16 x i8> %b) {
define i16 @test_mask_pcmpeq_b_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
@@ -604,7 +604,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8>, <16 x i8>, i16)
define i8 @test_pcmpeq_w_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_pcmpeq_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -615,7 +615,7 @@ define i8 @test_pcmpeq_w_128(<8 x i16> %a, <8 x i16> %b) {
define i8 @test_mask_pcmpeq_w_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
@@ -629,7 +629,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16>, <8 x i16>, i8)
define i16 @test_pcmpgt_b_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_pcmpgt_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -640,7 +640,7 @@ define i16 @test_pcmpgt_b_128(<16 x i8> %a, <16 x i8> %b) {
define i16 @test_mask_pcmpgt_b_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
@@ -654,7 +654,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8>, <16 x i8>, i16)
define i8 @test_pcmpgt_w_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_pcmpgt_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -665,7 +665,7 @@ define i8 @test_pcmpgt_w_128(<8 x i16> %a, <8 x i16> %b) {
define i8 @test_mask_pcmpgt_w_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
@@ -681,7 +681,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.punpckhb.w.128(<16 x i8>, <16 x i8>, <16
define <16 x i8>@test_int_x86_avx512_mask_punpckhb_w_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhb_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhbw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x68,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -699,7 +699,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.punpcklb.w.128(<16 x i8>, <16 x i8>, <16
define <16 x i8>@test_int_x86_avx512_mask_punpcklb_w_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpcklb_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpcklbw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x60,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -717,7 +717,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.punpckhb.w.256(<32 x i8>, <32 x i8>, <32
define <32 x i8>@test_int_x86_avx512_mask_punpckhb_w_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhb_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhbw %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x68,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -735,7 +735,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.punpcklb.w.256(<32 x i8>, <32 x i8>, <32
define <32 x i8>@test_int_x86_avx512_mask_punpcklb_w_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpcklb_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpcklbw %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x60,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -753,7 +753,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.punpcklw.d.128(<8 x i16>, <8 x i16>, <8
define <8 x i16>@test_int_x86_avx512_mask_punpcklw_d_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpcklw_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpcklwd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x61,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -771,7 +771,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.punpckhw.d.128(<8 x i16>, <8 x i16>, <8
define <8 x i16>@test_int_x86_avx512_mask_punpckhw_d_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhw_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhwd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x69,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -789,7 +789,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.punpcklw.d.256(<16 x i16>, <16 x i16>,
define <16 x i16>@test_int_x86_avx512_mask_punpcklw_d_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpcklw_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpcklwd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x61,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -807,7 +807,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.punpckhw.d.256(<16 x i16>, <16 x i16>,
define <16 x i16>@test_int_x86_avx512_mask_punpckhw_d_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhw_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhwd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x69,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -823,7 +823,7 @@ define <16 x i16>@test_int_x86_avx512_mask_punpckhw_d_256(<16 x i16> %x0, <16 x
define <8 x i16> @test_mask_add_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_add_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.padd.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -832,7 +832,7 @@ define <8 x i16> @test_mask_add_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_add_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfd,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -843,7 +843,7 @@ define <8 x i16> @test_mask_add_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i
define <8 x i16> @test_mask_add_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -853,7 +853,7 @@ define <8 x i16> @test_mask_add_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %m
define <8 x i16> @test_mask_add_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -863,7 +863,7 @@ define <8 x i16> @test_mask_add_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_add_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfd,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -875,7 +875,7 @@ define <8 x i16> @test_mask_add_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <
define <8 x i16> @test_mask_add_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -888,7 +888,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.padd.w.128(<8 x i16>, <8 x i16>, <8 x i1
define <16 x i16> @test_mask_add_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_add_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.padd.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -897,7 +897,7 @@ define <16 x i16> @test_mask_add_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_add_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfd,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -908,7 +908,7 @@ define <16 x i16> @test_mask_add_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16
define <16 x i16> @test_mask_add_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -918,7 +918,7 @@ define <16 x i16> @test_mask_add_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i1
define <16 x i16> @test_mask_add_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -928,7 +928,7 @@ define <16 x i16> @test_mask_add_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b)
define <16 x i16> @test_mask_add_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfd,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -940,7 +940,7 @@ define <16 x i16> @test_mask_add_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_add_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -953,7 +953,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.padd.w.256(<16 x i16>, <16 x i16>, <16
define <8 x i16> @test_mask_sub_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_sub_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psub.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -962,7 +962,7 @@ define <8 x i16> @test_mask_sub_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_sub_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf9,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -973,7 +973,7 @@ define <8 x i16> @test_mask_sub_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i
define <8 x i16> @test_mask_sub_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xf9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -983,7 +983,7 @@ define <8 x i16> @test_mask_sub_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %m
define <8 x i16> @test_mask_sub_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -993,7 +993,7 @@ define <8 x i16> @test_mask_sub_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_sub_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf9,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1005,7 +1005,7 @@ define <8 x i16> @test_mask_sub_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <
define <8 x i16> @test_mask_sub_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xf9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1018,7 +1018,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psub.w.128(<8 x i16>, <8 x i16>, <8 x i1
define <16 x i16> @test_mask_sub_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_sub_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psub.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -1027,7 +1027,7 @@ define <16 x i16> @test_mask_sub_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_sub_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf9,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1038,7 +1038,7 @@ define <16 x i16> @test_mask_sub_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16
define <16 x i16> @test_mask_sub_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xf9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1048,7 +1048,7 @@ define <16 x i16> @test_mask_sub_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i1
define <16 x i16> @test_mask_sub_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -1058,7 +1058,7 @@ define <16 x i16> @test_mask_sub_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b)
define <16 x i16> @test_mask_sub_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf9,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1070,7 +1070,7 @@ define <16 x i16> @test_mask_sub_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_sub_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xf9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1083,7 +1083,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psub.w.256(<16 x i16>, <16 x i16>, <16
define <32 x i16> @test_mask_add_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; CHECK-LABEL: test_mask_add_epi16_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i16> @llvm.x86.avx512.mask.padd.w.512(<32 x i16> %a, <32 x i16> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -1092,7 +1092,7 @@ define <32 x i16> @test_mask_add_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @test_mask_add_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xfd,0xd1]
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
@@ -1103,7 +1103,7 @@ define <32 x i16> @test_mask_add_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32
define <32 x i16> @test_mask_add_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1113,7 +1113,7 @@ define <32 x i16> @test_mask_add_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i3
define <32 x i16> @test_mask_add_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi16_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddw (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i16>, <32 x i16>* %ptr_b
@@ -1123,7 +1123,7 @@ define <32 x i16> @test_mask_add_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b)
define <32 x i16> @test_mask_add_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xfd,0x0f]
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
@@ -1135,7 +1135,7 @@ define <32 x i16> @test_mask_add_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b
define <32 x i16> @test_mask_add_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xfd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1148,7 +1148,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.padd.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_mask_sub_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; CHECK-LABEL: test_mask_sub_epi16_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubw %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xf9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i16> @llvm.x86.avx512.mask.psub.w.512(<32 x i16> %a, <32 x i16> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -1157,7 +1157,7 @@ define <32 x i16> @test_mask_sub_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @test_mask_sub_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xf9,0xd1]
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
@@ -1168,7 +1168,7 @@ define <32 x i16> @test_mask_sub_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32
define <32 x i16> @test_mask_sub_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xf9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1178,7 +1178,7 @@ define <32 x i16> @test_mask_sub_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i3
define <32 x i16> @test_mask_sub_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi16_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubw (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xf9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i16>, <32 x i16>* %ptr_b
@@ -1188,7 +1188,7 @@ define <32 x i16> @test_mask_sub_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b)
define <32 x i16> @test_mask_sub_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xf9,0x0f]
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
@@ -1200,7 +1200,7 @@ define <32 x i16> @test_mask_sub_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b
define <32 x i16> @test_mask_sub_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xf9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1213,7 +1213,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psub.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_mask_mullo_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; CHECK-LABEL: test_mask_mullo_epi16_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xd5,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i16> @llvm.x86.avx512.mask.pmull.w.512(<32 x i16> %a, <32 x i16> %b, <32 x i16> zeroinitializer, i32 -1)
@@ -1222,7 +1222,7 @@ define <32 x i16> @test_mask_mullo_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
define <32 x i16> @test_mask_mullo_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xd5,0xd1]
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
@@ -1233,7 +1233,7 @@ define <32 x i16> @test_mask_mullo_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <
define <32 x i16> @test_mask_mullo_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xd5,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1243,7 +1243,7 @@ define <32 x i16> @test_mask_mullo_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b,
define <32 x i16> @test_mask_mullo_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi16_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullw (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xd5,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i16>, <32 x i16>* %ptr_b
@@ -1253,7 +1253,7 @@ define <32 x i16> @test_mask_mullo_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_
define <32 x i16> @test_mask_mullo_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0xd5,0x0f]
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
@@ -1265,7 +1265,7 @@ define <32 x i16> @test_mask_mullo_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr
define <32 x i16> @test_mask_mullo_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xd5,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1278,7 +1278,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmull.w.512(<32 x i16>, <32 x i16>, <32
define <8 x i16> @test_mask_mullo_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_mullo_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd5,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.pmull.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -1287,7 +1287,7 @@ define <8 x i16> @test_mask_mullo_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_mullo_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd5,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1298,7 +1298,7 @@ define <8 x i16> @test_mask_mullo_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_mullo_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd5,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1308,7 +1308,7 @@ define <8 x i16> @test_mask_mullo_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8
define <8 x i16> @test_mask_mullo_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd5,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -1318,7 +1318,7 @@ define <8 x i16> @test_mask_mullo_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b)
define <8 x i16> @test_mask_mullo_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd5,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1330,7 +1330,7 @@ define <8 x i16> @test_mask_mullo_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_mullo_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd5,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1343,7 +1343,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmull.w.128(<8 x i16>, <8 x i16>, <8 x i
define <16 x i16> @test_mask_mullo_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_mullo_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd5,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.pmull.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -1352,7 +1352,7 @@ define <16 x i16> @test_mask_mullo_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_mullo_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd5,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1363,7 +1363,7 @@ define <16 x i16> @test_mask_mullo_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <
define <16 x i16> @test_mask_mullo_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd5,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1373,7 +1373,7 @@ define <16 x i16> @test_mask_mullo_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b,
define <16 x i16> @test_mask_mullo_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd5,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -1383,7 +1383,7 @@ define <16 x i16> @test_mask_mullo_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_mullo_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd5,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1395,7 +1395,7 @@ define <16 x i16> @test_mask_mullo_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr
define <16 x i16> @test_mask_mullo_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd5,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1410,7 +1410,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmaxs.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pmaxs_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3c,0xd1]
; CHECK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x3c,0xc1]
@@ -1426,7 +1426,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmaxs.b.256(<32 x i8>, <32 x i8>, <32 x
define <32 x i8>@test_int_x86_avx512_mask_pmaxs_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x3c,0xd1]
@@ -1442,7 +1442,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmaxs.w.128(<8 x i16>, <8 x i16>, <8 x i
define <8 x i16>@test_int_x86_avx512_mask_pmaxs_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xee,0xd1]
@@ -1458,7 +1458,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmaxs.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pmaxs_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xee,0xd1]
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xee,0xc1]
@@ -1474,7 +1474,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmaxu.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pmaxu_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2,i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxub %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xde,0xd1]
; CHECK-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xde,0xc1]
@@ -1490,7 +1490,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmaxu.b.256(<32 x i8>, <32 x i8>, <32 x
define <32 x i8>@test_int_x86_avx512_mask_pmaxu_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxub %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxub %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xde,0xd1]
@@ -1506,7 +1506,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmaxu.w.128(<8 x i16>, <8 x i16>, <8 x i
define <8 x i16>@test_int_x86_avx512_mask_pmaxu_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3e,0xd1]
@@ -1522,7 +1522,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmaxu.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pmaxu_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x3e,0xd1]
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x3e,0xc1]
@@ -1538,7 +1538,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmins.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pmins_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x38,0xd1]
; CHECK-NEXT: vpminsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x38,0xc1]
@@ -1554,7 +1554,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmins.b.256(<32 x i8>, <32 x i8>, <32 x
define <32 x i8>@test_int_x86_avx512_mask_pmins_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsb %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminsb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x38,0xd1]
@@ -1570,7 +1570,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmins.w.128(<8 x i16>, <8 x i16>, <8 x i
define <8 x i16>@test_int_x86_avx512_mask_pmins_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xea,0xd1]
@@ -1586,7 +1586,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmins.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pmins_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xea,0xd1]
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xea,0xc1]
@@ -1602,7 +1602,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pminu.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pminu_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminub %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xda,0xd1]
; CHECK-NEXT: vpminub %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xda,0xc1]
@@ -1618,7 +1618,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pminu.b.256(<32 x i8>, <32 x i8>, <32 x
define <32 x i8>@test_int_x86_avx512_mask_pminu_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminub %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminub %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xda,0xd1]
@@ -1634,7 +1634,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pminu.w.128(<8 x i16>, <8 x i16>, <8 x i
define <8 x i16>@test_int_x86_avx512_mask_pminu_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminuw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3a,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminuw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3a,0xd1]
@@ -1650,7 +1650,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pminu.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pminu_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x3a,0xd1]
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x3a,0xc1]
@@ -1666,7 +1666,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psrl.w.128(<8 x i16>, <8 x i16>, <8 x i1
define <8 x i16>@test_int_x86_avx512_mask_psrl_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd1,0xd1]
@@ -1686,7 +1686,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psrl.w.256(<16 x i16>, <8 x i16>, <16 x
define <16 x i16>@test_int_x86_avx512_mask_psrl_w_256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd1,0xd1]
@@ -1706,7 +1706,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psra.w.128(<8 x i16>, <8 x i16>, <8 x i1
define <8 x i16>@test_int_x86_avx512_mask_psra_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsraw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe1,0xd1]
@@ -1726,7 +1726,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psra.w.256(<16 x i16>, <8 x i16>, <16 x
define <16 x i16>@test_int_x86_avx512_mask_psra_w_256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsraw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe1,0xd1]
@@ -1746,7 +1746,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psll.w.128(<8 x i16>, <8 x i16>, <8 x i1
define <8 x i16>@test_int_x86_avx512_mask_psll_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsllw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf1,0xd1]
@@ -1766,7 +1766,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psll.w.256(<16 x i16>, <8 x i16>, <16 x
define <16 x i16>@test_int_x86_avx512_mask_psll_w_256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsllw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf1,0xd1]
@@ -1786,7 +1786,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psrl.wi.128(<8 x i16>, i32, <8 x i16>, i
define <8 x i16>@test_int_x86_avx512_mask_psrl_wi_128(<8 x i16> %x0, i32 %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_wi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xd0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xd0,0x03]
@@ -1806,7 +1806,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psrl.wi.256(<16 x i16>, i32, <16 x i16>
define <16 x i16>@test_int_x86_avx512_mask_psrl_wi_256(<16 x i16> %x0, i32 %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_wi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xd0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xd0,0x03]
@@ -1826,7 +1826,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psra.wi.128(<8 x i16>, i32, <8 x i16>, i
define <8 x i16>@test_int_x86_avx512_mask_psra_wi_128(<8 x i16> %x0, i32 %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_wi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xe0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsraw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xe0,0x03]
@@ -1846,7 +1846,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psra.wi.256(<16 x i16>, i32, <16 x i16>
define <16 x i16>@test_int_x86_avx512_mask_psra_wi_256(<16 x i16> %x0, i32 %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_wi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xe0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsraw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xe0,0x03]
@@ -1866,7 +1866,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psll.wi.128(<8 x i16>, i32, <8 x i16>, i
define <8 x i16>@test_int_x86_avx512_mask_psll_wi_128(<8 x i16> %x0, i32 %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_wi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xf0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsllw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xf0,0x03]
@@ -1886,7 +1886,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psll.wi.256(<16 x i16>, i32, <16 x i16>
define <16 x i16>@test_int_x86_avx512_mask_psll_wi_256(<16 x i16> %x0, i32 %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_wi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xf0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsllw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xf0,0x03]
@@ -1906,7 +1906,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pshuf.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pshuf_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshuf_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshufb %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x00,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpshufb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x00,0xd1]
@@ -1922,7 +1922,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pshuf.b.256(<32 x i8>, <32 x i8>, <32 x
define <32 x i8>@test_int_x86_avx512_mask_pshuf_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshuf_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshufb %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpshufb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x00,0xd1]
@@ -1938,7 +1938,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovzxb.w.128(<16 x i8>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovzxb_w_128(<16 x i8> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x30,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -1961,7 +1961,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmovzxb.w.256(<16 x i8>, <16 x i16>, i1
define <16 x i16>@test_int_x86_avx512_mask_pmovzxb_w_256(<16 x i8> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x30,0xd0]
; CHECK-NEXT: ## ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
@@ -1985,7 +1985,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovsxb.w.128(<16 x i8>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovsxb_w_128(<16 x i8> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x20,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovsxbw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x20,0xc8]
@@ -2005,7 +2005,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmovsxb.w.256(<16 x i8>, <16 x i16>, i1
define <16 x i16>@test_int_x86_avx512_mask_pmovsxb_w_256(<16 x i8> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x20,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x20,0xc8]
@@ -2025,7 +2025,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmovsxd.q.128(<4 x i32>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_pmovsxd_q_128(<4 x i32> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxd_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxdq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x25,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovsxdq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x25,0xc8]
@@ -2045,7 +2045,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmovsxd.q.256(<4 x i32>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_pmovsxd_q_256(<4 x i32> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxd_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x25,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x25,0xc8]
@@ -2066,7 +2066,7 @@ declare <16 x i8> @llvm.x86.avx512.cvtmask2b.128(i16)
define <16 x i8>@test_int_x86_avx512_cvtmask2b_128(i16 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
; CHECK-NEXT: vpmovm2b %k0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2078,7 +2078,7 @@ declare <32 x i8> @llvm.x86.avx512.cvtmask2b.256(i32)
define <32 x i8>@test_int_x86_avx512_cvtmask2b_256(i32 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
; CHECK-NEXT: vpmovm2b %k0, %ymm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x28,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2090,7 +2090,7 @@ declare <8 x i16> @llvm.x86.avx512.cvtmask2w.128(i8)
define <8 x i16>@test_int_x86_avx512_cvtmask2w_128(i8 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
; CHECK-NEXT: vpmovm2w %k0, %xmm0 ## encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2102,7 +2102,7 @@ declare <16 x i16> @llvm.x86.avx512.cvtmask2w.256(i16)
define <16 x i16>@test_int_x86_avx512_cvtmask2w_256(i16 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
; CHECK-NEXT: vpmovm2w %k0, %ymm0 ## encoding: [0x62,0xf2,0xfe,0x28,0x28,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2111,7 +2111,7 @@ define <16 x i16>@test_int_x86_avx512_cvtmask2w_256(i16 %x0) {
}
define <8 x i16> @test_mask_packs_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_packs_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -2120,7 +2120,7 @@ define <8 x i16> @test_mask_packs_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <8 x i16> @test_mask_packs_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6b,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -2131,7 +2131,7 @@ define <8 x i16> @test_mask_packs_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x
define <8 x i16> @test_mask_packs_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2141,7 +2141,7 @@ define <8 x i16> @test_mask_packs_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8
define <8 x i16> @test_mask_packs_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -2151,7 +2151,7 @@ define <8 x i16> @test_mask_packs_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b)
define <8 x i16> @test_mask_packs_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2163,7 +2163,7 @@ define <8 x i16> @test_mask_packs_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b,
define <8 x i16> @test_mask_packs_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2174,7 +2174,7 @@ define <8 x i16> @test_mask_packs_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b
define <8 x i16> @test_mask_packs_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2186,7 +2186,7 @@ define <8 x i16> @test_mask_packs_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <8 x i16> @test_mask_packs_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2200,7 +2200,7 @@ define <8 x i16> @test_mask_packs_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x
define <8 x i16> @test_mask_packs_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2215,7 +2215,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32>, <4 x i32>, <8 x
define <16 x i16> @test_mask_packs_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_packs_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -2224,7 +2224,7 @@ define <16 x i16> @test_mask_packs_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <16 x i16> @test_mask_packs_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6b,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2235,7 +2235,7 @@ define <16 x i16> @test_mask_packs_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16
define <16 x i16> @test_mask_packs_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2245,7 +2245,7 @@ define <16 x i16> @test_mask_packs_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i1
define <16 x i16> @test_mask_packs_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -2255,7 +2255,7 @@ define <16 x i16> @test_mask_packs_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b)
define <16 x i16> @test_mask_packs_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2267,7 +2267,7 @@ define <16 x i16> @test_mask_packs_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b
define <16 x i16> @test_mask_packs_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2278,7 +2278,7 @@ define <16 x i16> @test_mask_packs_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_
define <16 x i16> @test_mask_packs_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2290,7 +2290,7 @@ define <16 x i16> @test_mask_packs_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <16 x i16> @test_mask_packs_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2304,7 +2304,7 @@ define <16 x i16> @test_mask_packs_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16
define <16 x i16> @test_mask_packs_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2319,7 +2319,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32>, <8 x i32>, <16
define <16 x i8> @test_mask_packs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_packs_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
@@ -2328,7 +2328,7 @@ define <16 x i8> @test_mask_packs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <16 x i8> @test_mask_packs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x63,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -2339,7 +2339,7 @@ define <16 x i8> @test_mask_packs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16
define <16 x i8> @test_mask_packs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2349,7 +2349,7 @@ define <16 x i8> @test_mask_packs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16
define <16 x i8> @test_mask_packs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -2359,7 +2359,7 @@ define <16 x i8> @test_mask_packs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b)
define <16 x i8> @test_mask_packs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x63,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2371,7 +2371,7 @@ define <16 x i8> @test_mask_packs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <16 x i8> @test_mask_packs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2384,7 +2384,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16>, <8 x i16>, <16 x
define <32 x i8> @test_mask_packs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_packs_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
@@ -2393,7 +2393,7 @@ define <32 x i8> @test_mask_packs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <32 x i8> @test_mask_packs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x63,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2404,7 +2404,7 @@ define <32 x i8> @test_mask_packs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <3
define <32 x i8> @test_mask_packs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i32 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2414,7 +2414,7 @@ define <32 x i8> @test_mask_packs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i
define <32 x i8> @test_mask_packs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -2424,7 +2424,7 @@ define <32 x i8> @test_mask_packs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <32 x i8> @test_mask_packs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x63,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2436,7 +2436,7 @@ define <32 x i8> @test_mask_packs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <32 x i8> @test_mask_packs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2450,7 +2450,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16>, <16 x i16>, <32
define <8 x i16> @test_mask_packus_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_packus_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -2459,7 +2459,7 @@ define <8 x i16> @test_mask_packus_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <8 x i16> @test_mask_packus_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2b,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -2470,7 +2470,7 @@ define <8 x i16> @test_mask_packus_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8
define <8 x i16> @test_mask_packus_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2480,7 +2480,7 @@ define <8 x i16> @test_mask_packus_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8
define <8 x i16> @test_mask_packus_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -2490,7 +2490,7 @@ define <8 x i16> @test_mask_packus_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b)
define <8 x i16> @test_mask_packus_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2502,7 +2502,7 @@ define <8 x i16> @test_mask_packus_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b
define <8 x i16> @test_mask_packus_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2513,7 +2513,7 @@ define <8 x i16> @test_mask_packus_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_
define <8 x i16> @test_mask_packus_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x18,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2525,7 +2525,7 @@ define <8 x i16> @test_mask_packus_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <8 x i16> @test_mask_packus_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2539,7 +2539,7 @@ define <8 x i16> @test_mask_packus_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8
define <8 x i16> @test_mask_packus_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x99,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2554,7 +2554,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32>, <4 x i32>, <8 x
define <16 x i16> @test_mask_packus_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_packus_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -2563,7 +2563,7 @@ define <16 x i16> @test_mask_packus_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <16 x i16> @test_mask_packus_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2b,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2574,7 +2574,7 @@ define <16 x i16> @test_mask_packus_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <1
define <16 x i16> @test_mask_packus_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2584,7 +2584,7 @@ define <16 x i16> @test_mask_packus_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i
define <16 x i16> @test_mask_packus_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -2594,7 +2594,7 @@ define <16 x i16> @test_mask_packus_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b
define <16 x i16> @test_mask_packus_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2606,7 +2606,7 @@ define <16 x i16> @test_mask_packus_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_
define <16 x i16> @test_mask_packus_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2617,7 +2617,7 @@ define <16 x i16> @test_mask_packus_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr
define <16 x i16> @test_mask_packus_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x38,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2629,7 +2629,7 @@ define <16 x i16> @test_mask_packus_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <16 x i16> @test_mask_packus_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x39,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2643,7 +2643,7 @@ define <16 x i16> @test_mask_packus_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <1
define <16 x i16> @test_mask_packus_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xb9,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2658,7 +2658,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32>, <8 x i32>, <16
define <16 x i8> @test_mask_packus_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_packus_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
@@ -2667,7 +2667,7 @@ define <16 x i8> @test_mask_packus_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <16 x i8> @test_mask_packus_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x67,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -2678,7 +2678,7 @@ define <16 x i8> @test_mask_packus_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16
define <16 x i8> @test_mask_packus_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2688,7 +2688,7 @@ define <16 x i8> @test_mask_packus_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i1
define <16 x i8> @test_mask_packus_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -2698,7 +2698,7 @@ define <16 x i8> @test_mask_packus_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b)
define <16 x i8> @test_mask_packus_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x67,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2710,7 +2710,7 @@ define <16 x i8> @test_mask_packus_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b
define <16 x i8> @test_mask_packus_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2723,7 +2723,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16>, <8 x i16>, <16 x
define <32 x i8> @test_mask_packus_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_packus_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
@@ -2732,7 +2732,7 @@ define <32 x i8> @test_mask_packus_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <32 x i8> @test_mask_packus_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x67,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2743,7 +2743,7 @@ define <32 x i8> @test_mask_packus_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <
define <32 x i8> @test_mask_packus_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i32 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2753,7 +2753,7 @@ define <32 x i8> @test_mask_packus_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b,
define <32 x i8> @test_mask_packus_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -2763,7 +2763,7 @@ define <32 x i8> @test_mask_packus_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_
define <32 x i8> @test_mask_packus_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x67,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2775,7 +2775,7 @@ define <32 x i8> @test_mask_packus_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr
define <32 x i8> @test_mask_packus_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2788,7 +2788,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16>, <16 x i16>, <32
define <8 x i32> @test_cmp_b_256(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_cmp_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %r8d ## encoding: [0xc5,0x7b,0x93,0xc0]
; CHECK-NEXT: vpcmpgtb %ymm0, %ymm1, %k0 ## encoding: [0x62,0xf1,0x75,0x28,0x64,0xc0]
@@ -2837,7 +2837,7 @@ define <8 x i32> @test_cmp_b_256(<32 x i8> %a0, <32 x i8> %a1) {
define <8 x i32> @test_mask_cmp_b_256(<32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
; CHECK-LABEL: test_mask_cmp_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %r8d ## encoding: [0xc5,0x7b,0x93,0xc0]
@@ -2886,7 +2886,7 @@ declare i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8>, <32 x i8>, i32, i32) noun
define <8 x i32> @test_ucmp_b_256(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_ucmp_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %r8d ## encoding: [0xc5,0x7b,0x93,0xc0]
; CHECK-NEXT: vpcmpltub %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x3e,0xc1,0x01]
@@ -2935,7 +2935,7 @@ define <8 x i32> @test_ucmp_b_256(<32 x i8> %a0, <32 x i8> %a1) {
define <8 x i32> @test_mask_ucmp_b_256(<32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
; CHECK-LABEL: test_mask_ucmp_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %r8d ## encoding: [0xc5,0x7b,0x93,0xc0]
@@ -2984,7 +2984,7 @@ declare i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8>, <32 x i8>, i32, i32) nou
define <8 x i16> @test_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_cmp_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1]
; CHECK-NEXT: vpcmpgtw %ymm0, %ymm1, %k1 ## encoding: [0x62,0xf1,0x75,0x28,0x65,0xc8]
; CHECK-NEXT: vpcmplew %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xd1,0x02]
@@ -3030,7 +3030,7 @@ define <8 x i16> @test_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
define <8 x i16> @test_mask_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1]
; CHECK-NEXT: vpcmpgtw %ymm0, %ymm1, %k2 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x65,0xd0]
@@ -3079,7 +3079,7 @@ declare i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16>, <16 x i16>, i32, i16) no
define <8 x i16> @test_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_ucmp_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1]
; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xc9,0x01]
; CHECK-NEXT: vpcmpleuw %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xd1,0x02]
@@ -3125,7 +3125,7 @@ define <8 x i16> @test_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
define <8 x i16> @test_mask_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1]
; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xd1,0x01]
@@ -3174,7 +3174,7 @@ declare i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16>, <16 x i16>, i32, i16) n
define <8 x i16> @test_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_cmp_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
; CHECK-NEXT: vpcmpgtb %xmm0, %xmm1, %k1 ## encoding: [0x62,0xf1,0x75,0x08,0x64,0xc8]
; CHECK-NEXT: vpcmpleb %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xd1,0x02]
@@ -3219,7 +3219,7 @@ define <8 x i16> @test_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @test_mask_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1]
; CHECK-NEXT: vpcmpgtb %xmm0, %xmm1, %k2 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x64,0xd0]
@@ -3267,7 +3267,7 @@ declare i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8>, <16 x i8>, i32, i16) noun
define <8 x i16> @test_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_ucmp_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xc9,0x01]
; CHECK-NEXT: vpcmpleub %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xd1,0x02]
@@ -3312,7 +3312,7 @@ define <8 x i16> @test_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @test_mask_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1]
; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xd1,0x01]
@@ -3360,7 +3360,7 @@ declare i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8>, <16 x i8>, i32, i16) nou
define <8 x i8> @test_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_cmp_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
; CHECK-NEXT: vpcmpgtw %xmm0, %xmm1, %k1 ## encoding: [0x62,0xf1,0x75,0x08,0x65,0xc8]
; CHECK-NEXT: vpcmplew %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xd1,0x02]
@@ -3405,7 +3405,7 @@ define <8 x i8> @test_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i8> @test_mask_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1]
; CHECK-NEXT: vpcmpgtw %xmm0, %xmm1, %k2 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x65,0xd0]
@@ -3453,7 +3453,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16>, <8 x i16>, i32, i8) nounwi
define <8 x i8> @test_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_ucmp_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xc9,0x01]
; CHECK-NEXT: vpcmpleuw %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xd1,0x02]
@@ -3498,7 +3498,7 @@ define <8 x i8> @test_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i8> @test_mask_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1]
; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xd1,0x01]
@@ -3546,7 +3546,7 @@ declare i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16>, <8 x i16>, i32, i8) nounw
define <16 x i8>@mm_mask_avg_epu8(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: mm_mask_avg_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpavgb %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe0,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpavgb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe0,0xd1]
@@ -3562,7 +3562,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pabs.b.128(<16 x i8>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pabs_b_128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsb %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1c,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpabsb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x1c,0xc8]
@@ -3578,7 +3578,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pavg.b.128(<16 x i8>, <16 x i8>, <16 x i
define <32 x i8>@mm256_mask_avg_epu8(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: mm256_mask_avg_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpavgb %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe0,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpavgb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe0,0xd1]
@@ -3594,7 +3594,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pabs.b.256(<32 x i8>, <32 x i8>, i32)
define <32 x i8>@test_int_x86_avx512_mask_pabs_b_256(<32 x i8> %x0, <32 x i8> %x1, i32 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsb %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1c,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpabsb %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x1c,0xc8]
@@ -3610,7 +3610,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pavg.b.256(<32 x i8>, <32 x i8>, <32 x i
define <8 x i16>@mm_mask_avg_epu16(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: mm_mask_avg_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpavgw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe3,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpavgw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe3,0xd1]
@@ -3626,7 +3626,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pabs.w.128(<8 x i16>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pabs_w_128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1d,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpabsw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x1d,0xc8]
@@ -3642,7 +3642,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pavg.w.128(<8 x i16>, <8 x i16>, <8 x i1
define <16 x i16>@mm256_mask_avg_epu16(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: mm256_mask_avg_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpavgw %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe3,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpavgw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe3,0xd1]
@@ -3658,7 +3658,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pabs.w.256(<16 x i16>, <16 x i16>, i16)
define <16 x i16>@test_int_x86_avx512_mask_pabs_w_256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsw %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1d,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpabsw %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x1d,0xc8]
@@ -3676,7 +3676,7 @@ declare i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8>, <16 x i8>, i16)
define i16@test_int_x86_avx512_ptestm_b_128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x08,0x26,0xc1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestmb %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x26,0xc9]
@@ -3695,7 +3695,7 @@ declare i32 @llvm.x86.avx512.ptestm.b.256(<32 x i8>, <32 x i8>, i32)
define i32@test_int_x86_avx512_ptestm_b_256(<32 x i8> %x0, <32 x i8> %x1, i32 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmb %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x28,0x26,0xc1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestmb %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x26,0xc9]
@@ -3714,7 +3714,7 @@ declare i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16>, <8 x i16>, i8)
define i8@test_int_x86_avx512_ptestm_w_128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x26,0xc1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestmw %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x26,0xc9]
@@ -3733,7 +3733,7 @@ declare i16 @llvm.x86.avx512.ptestm.w.256(<16 x i16>, <16 x i16>, i16)
define i16@test_int_x86_avx512_ptestm_w_256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x26,0xc1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestmw %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x26,0xc9]
@@ -3753,7 +3753,7 @@ declare i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8>, <16 x i8>, i16)
define i16@test_int_x86_avx512_ptestnm_b_128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x26,0xc1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestnmb %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x26,0xc9]
@@ -3772,7 +3772,7 @@ declare i32 @llvm.x86.avx512.ptestnm.b.256(<32 x i8>, <32 x i8>, i32)
define i32@test_int_x86_avx512_ptestnm_b_256(<32 x i8> %x0, <32 x i8> %x1, i32 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmb %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x26,0xc1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestnmb %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x26,0xc9]
@@ -3791,7 +3791,7 @@ declare i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16>, <8 x i16>, i8 %x2)
define i8@test_int_x86_avx512_ptestnm_w_128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x26,0xc1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestnmw %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf2,0xfe,0x09,0x26,0xc9]
@@ -3810,7 +3810,7 @@ declare i16 @llvm.x86.avx512.ptestnm.w.256(<16 x i16>, <16 x i16>, i16 %x2)
define i16@test_int_x86_avx512_ptestnm_w_256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x26,0xc1]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestnmw %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf2,0xfe,0x29,0x26,0xc9]
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
index 89bbe36282b..45b01da8378 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -3,7 +3,7 @@
define <8 x i16> @test_mask_packs_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_packs_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
@@ -12,7 +12,7 @@ define <8 x i16> @test_mask_packs_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <8 x i16> @test_mask_packs_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6b,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -25,7 +25,7 @@ define <8 x i16> @test_mask_packs_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x
define <8 x i16> @test_mask_packs_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -37,7 +37,7 @@ define <8 x i16> @test_mask_packs_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8
define <8 x i16> @test_mask_packs_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -47,7 +47,7 @@ define <8 x i16> @test_mask_packs_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b)
define <8 x i16> @test_mask_packs_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -61,7 +61,7 @@ define <8 x i16> @test_mask_packs_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b,
define <8 x i16> @test_mask_packs_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -74,7 +74,7 @@ define <8 x i16> @test_mask_packs_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b
define <8 x i16> @test_mask_packs_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -86,7 +86,7 @@ define <8 x i16> @test_mask_packs_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <8 x i16> @test_mask_packs_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -102,7 +102,7 @@ define <8 x i16> @test_mask_packs_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x
define <8 x i16> @test_mask_packs_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -119,7 +119,7 @@ declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
define <16 x i16> @test_mask_packs_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_packs_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
@@ -128,7 +128,7 @@ define <16 x i16> @test_mask_packs_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <16 x i16> @test_mask_packs_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6b,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -141,7 +141,7 @@ define <16 x i16> @test_mask_packs_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16
define <16 x i16> @test_mask_packs_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -153,7 +153,7 @@ define <16 x i16> @test_mask_packs_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i1
define <16 x i16> @test_mask_packs_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -163,7 +163,7 @@ define <16 x i16> @test_mask_packs_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b)
define <16 x i16> @test_mask_packs_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -177,7 +177,7 @@ define <16 x i16> @test_mask_packs_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b
define <16 x i16> @test_mask_packs_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -190,7 +190,7 @@ define <16 x i16> @test_mask_packs_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_
define <16 x i16> @test_mask_packs_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -202,7 +202,7 @@ define <16 x i16> @test_mask_packs_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <16 x i16> @test_mask_packs_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -218,7 +218,7 @@ define <16 x i16> @test_mask_packs_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16
define <16 x i16> @test_mask_packs_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -235,7 +235,7 @@ declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>)
define <16 x i8> @test_mask_packs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_packs_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a, <8 x i16> %b)
@@ -244,7 +244,7 @@ define <16 x i8> @test_mask_packs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <16 x i8> @test_mask_packs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x63,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -257,7 +257,7 @@ define <16 x i8> @test_mask_packs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16
define <16 x i8> @test_mask_packs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -269,7 +269,7 @@ define <16 x i8> @test_mask_packs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16
define <16 x i8> @test_mask_packs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -279,7 +279,7 @@ define <16 x i8> @test_mask_packs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b)
define <16 x i8> @test_mask_packs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x63,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -293,7 +293,7 @@ define <16 x i8> @test_mask_packs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <16 x i8> @test_mask_packs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -308,7 +308,7 @@ declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>)
define <32 x i8> @test_mask_packs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_packs_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)
@@ -317,7 +317,7 @@ define <32 x i8> @test_mask_packs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <32 x i8> @test_mask_packs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x63,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -330,7 +330,7 @@ define <32 x i8> @test_mask_packs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <3
define <32 x i8> @test_mask_packs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i32 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -342,7 +342,7 @@ define <32 x i8> @test_mask_packs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i
define <32 x i8> @test_mask_packs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_packs_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -352,7 +352,7 @@ define <32 x i8> @test_mask_packs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <32 x i8> @test_mask_packs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x63,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -366,7 +366,7 @@ define <32 x i8> @test_mask_packs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <32 x i8> @test_mask_packs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -382,7 +382,7 @@ declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>)
define <8 x i16> @test_mask_packus_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_packus_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
@@ -391,7 +391,7 @@ define <8 x i16> @test_mask_packus_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <8 x i16> @test_mask_packus_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2b,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -404,7 +404,7 @@ define <8 x i16> @test_mask_packus_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8
define <8 x i16> @test_mask_packus_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -416,7 +416,7 @@ define <8 x i16> @test_mask_packus_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8
define <8 x i16> @test_mask_packus_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -426,7 +426,7 @@ define <8 x i16> @test_mask_packus_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b)
define <8 x i16> @test_mask_packus_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -440,7 +440,7 @@ define <8 x i16> @test_mask_packus_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b
define <8 x i16> @test_mask_packus_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -453,7 +453,7 @@ define <8 x i16> @test_mask_packus_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_
define <8 x i16> @test_mask_packus_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x18,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -465,7 +465,7 @@ define <8 x i16> @test_mask_packus_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <8 x i16> @test_mask_packus_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -481,7 +481,7 @@ define <8 x i16> @test_mask_packus_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8
define <8 x i16> @test_mask_packus_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x99,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -498,7 +498,7 @@ declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>)
define <16 x i16> @test_mask_packus_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_packus_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
@@ -507,7 +507,7 @@ define <16 x i16> @test_mask_packus_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <16 x i16> @test_mask_packus_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2b,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -520,7 +520,7 @@ define <16 x i16> @test_mask_packus_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <1
define <16 x i16> @test_mask_packus_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -532,7 +532,7 @@ define <16 x i16> @test_mask_packus_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i
define <16 x i16> @test_mask_packus_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -542,7 +542,7 @@ define <16 x i16> @test_mask_packus_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b
define <16 x i16> @test_mask_packus_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -556,7 +556,7 @@ define <16 x i16> @test_mask_packus_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_
define <16 x i16> @test_mask_packus_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -569,7 +569,7 @@ define <16 x i16> @test_mask_packus_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr
define <16 x i16> @test_mask_packus_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x38,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -581,7 +581,7 @@ define <16 x i16> @test_mask_packus_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <16 x i16> @test_mask_packus_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x39,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -597,7 +597,7 @@ define <16 x i16> @test_mask_packus_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <1
define <16 x i16> @test_mask_packus_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xb9,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -614,7 +614,7 @@ declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>)
define <16 x i8> @test_mask_packus_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_packus_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
@@ -623,7 +623,7 @@ define <16 x i8> @test_mask_packus_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <16 x i8> @test_mask_packus_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x67,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -636,7 +636,7 @@ define <16 x i8> @test_mask_packus_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16
define <16 x i8> @test_mask_packus_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -648,7 +648,7 @@ define <16 x i8> @test_mask_packus_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i1
define <16 x i8> @test_mask_packus_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -658,7 +658,7 @@ define <16 x i8> @test_mask_packus_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b)
define <16 x i8> @test_mask_packus_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x67,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -672,7 +672,7 @@ define <16 x i8> @test_mask_packus_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b
define <16 x i8> @test_mask_packus_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -687,7 +687,7 @@ declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>)
define <32 x i8> @test_mask_packus_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_packus_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b)
@@ -696,7 +696,7 @@ define <32 x i8> @test_mask_packus_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <32 x i8> @test_mask_packus_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x67,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -709,7 +709,7 @@ define <32 x i8> @test_mask_packus_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <
define <32 x i8> @test_mask_packus_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i32 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -721,7 +721,7 @@ define <32 x i8> @test_mask_packus_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b,
define <32 x i8> @test_mask_packus_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_packus_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -731,7 +731,7 @@ define <32 x i8> @test_mask_packus_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_
define <32 x i8> @test_mask_packus_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x67,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -745,7 +745,7 @@ define <32 x i8> @test_mask_packus_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr
define <32 x i8> @test_mask_packus_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -760,7 +760,7 @@ declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>)
define <8 x i16> @test_mask_adds_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_adds_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.padds.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -769,7 +769,7 @@ define <8 x i16> @test_mask_adds_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_adds_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xed,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -780,7 +780,7 @@ define <8 x i16> @test_mask_adds_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_adds_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xed,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -790,7 +790,7 @@ define <8 x i16> @test_mask_adds_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %
define <8 x i16> @test_mask_adds_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_adds_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -800,7 +800,7 @@ define <8 x i16> @test_mask_adds_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_adds_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xed,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -812,7 +812,7 @@ define <8 x i16> @test_mask_adds_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_adds_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xed,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -825,7 +825,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.padds.w.128(<8 x i16>, <8 x i16>, <8 x i
define <16 x i16> @test_mask_adds_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_adds_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.padds.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -834,7 +834,7 @@ define <16 x i16> @test_mask_adds_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_adds_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xed,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -845,7 +845,7 @@ define <16 x i16> @test_mask_adds_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <1
define <16 x i16> @test_mask_adds_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xed,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -855,7 +855,7 @@ define <16 x i16> @test_mask_adds_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i
define <16 x i16> @test_mask_adds_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_adds_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -865,7 +865,7 @@ define <16 x i16> @test_mask_adds_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_adds_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xed,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -877,7 +877,7 @@ define <16 x i16> @test_mask_adds_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_adds_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xed,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -890,7 +890,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.padds.w.256(<16 x i16>, <16 x i16>, <16
define <8 x i16> @test_mask_subs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_subs_epi16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psubs.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -899,7 +899,7 @@ define <8 x i16> @test_mask_subs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_subs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe9,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -910,7 +910,7 @@ define <8 x i16> @test_mask_subs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_subs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -920,7 +920,7 @@ define <8 x i16> @test_mask_subs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %
define <8 x i16> @test_mask_subs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_subs_epi16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -930,7 +930,7 @@ define <8 x i16> @test_mask_subs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_subs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe9,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -942,7 +942,7 @@ define <8 x i16> @test_mask_subs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_subs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -955,7 +955,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psubs.w.128(<8 x i16>, <8 x i16>, <8 x i
define <16 x i16> @test_mask_subs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_subs_epi16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psubs.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -964,7 +964,7 @@ define <16 x i16> @test_mask_subs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_subs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe9,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -975,7 +975,7 @@ define <16 x i16> @test_mask_subs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <1
define <16 x i16> @test_mask_subs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -985,7 +985,7 @@ define <16 x i16> @test_mask_subs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i
define <16 x i16> @test_mask_subs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_subs_epi16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -995,7 +995,7 @@ define <16 x i16> @test_mask_subs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_subs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe9,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1007,7 +1007,7 @@ define <16 x i16> @test_mask_subs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_subs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1020,7 +1020,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psubs.w.256(<16 x i16>, <16 x i16>, <16
define <8 x i16> @test_mask_adds_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_adds_epu16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.paddus.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -1029,7 +1029,7 @@ define <8 x i16> @test_mask_adds_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_adds_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdd,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1040,7 +1040,7 @@ define <8 x i16> @test_mask_adds_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_adds_epu16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1050,7 +1050,7 @@ define <8 x i16> @test_mask_adds_epu16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %
define <8 x i16> @test_mask_adds_epu16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_adds_epu16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -1060,7 +1060,7 @@ define <8 x i16> @test_mask_adds_epu16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_adds_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdd,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1072,7 +1072,7 @@ define <8 x i16> @test_mask_adds_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_adds_epu16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1085,7 +1085,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.paddus.w.128(<8 x i16>, <8 x i16>, <8 x
define <16 x i16> @test_mask_adds_epu16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_adds_epu16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.paddus.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -1094,7 +1094,7 @@ define <16 x i16> @test_mask_adds_epu16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_adds_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdd,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1105,7 +1105,7 @@ define <16 x i16> @test_mask_adds_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <1
define <16 x i16> @test_mask_adds_epu16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1115,7 +1115,7 @@ define <16 x i16> @test_mask_adds_epu16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i
define <16 x i16> @test_mask_adds_epu16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_adds_epu16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -1125,7 +1125,7 @@ define <16 x i16> @test_mask_adds_epu16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_adds_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdd,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1137,7 +1137,7 @@ define <16 x i16> @test_mask_adds_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_adds_epu16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1150,7 +1150,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.paddus.w.256(<16 x i16>, <16 x i16>, <1
define <8 x i16> @test_mask_subs_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_subs_epu16_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psubus.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 -1)
@@ -1159,7 +1159,7 @@ define <8 x i16> @test_mask_subs_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_subs_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd9,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1170,7 +1170,7 @@ define <8 x i16> @test_mask_subs_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_subs_epu16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1180,7 +1180,7 @@ define <8 x i16> @test_mask_subs_epu16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %
define <8 x i16> @test_mask_subs_epu16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_subs_epu16_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -1190,7 +1190,7 @@ define <8 x i16> @test_mask_subs_epu16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_subs_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd9,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1202,7 +1202,7 @@ define <8 x i16> @test_mask_subs_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_subs_epu16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1215,7 +1215,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psubus.w.128(<8 x i16>, <8 x i16>, <8 x
define <16 x i16> @test_mask_subs_epu16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_subs_epu16_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psubus.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 -1)
@@ -1224,7 +1224,7 @@ define <16 x i16> @test_mask_subs_epu16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_subs_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd9,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1235,7 +1235,7 @@ define <16 x i16> @test_mask_subs_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <1
define <16 x i16> @test_mask_subs_epu16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1245,7 +1245,7 @@ define <16 x i16> @test_mask_subs_epu16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i
define <16 x i16> @test_mask_subs_epu16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
; CHECK-LABEL: test_mask_subs_epu16_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -1255,7 +1255,7 @@ define <16 x i16> @test_mask_subs_epu16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_subs_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd9,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1267,7 +1267,7 @@ define <16 x i16> @test_mask_subs_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_subs_epu16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1280,7 +1280,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psubus.w.256(<16 x i16>, <16 x i16>, <1
define <16 x i8> @test_mask_adds_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_mask_adds_epi8_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.padds.b.128(<16 x i8> %a, <16 x i8> %b, <16 x i8> zeroinitializer, i16 -1)
@@ -1289,7 +1289,7 @@ define <16 x i8> @test_mask_adds_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @test_mask_adds_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xec,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1300,7 +1300,7 @@ define <16 x i8> @test_mask_adds_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x
define <16 x i8> @test_mask_adds_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xec,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1310,7 +1310,7 @@ define <16 x i8> @test_mask_adds_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %
define <16 x i8> @test_mask_adds_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
; CHECK-LABEL: test_mask_adds_epi8_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
@@ -1320,7 +1320,7 @@ define <16 x i8> @test_mask_adds_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
define <16 x i8> @test_mask_adds_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xec,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1332,7 +1332,7 @@ define <16 x i8> @test_mask_adds_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <
define <16 x i8> @test_mask_adds_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xec,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1345,7 +1345,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.padds.b.128(<16 x i8>, <16 x i8>, <16 x
define <32 x i8> @test_mask_adds_epi8_rr_256(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: test_mask_adds_epi8_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.mask.padds.b.256(<32 x i8> %a, <32 x i8> %b, <32 x i8> zeroinitializer, i32 -1)
@@ -1354,7 +1354,7 @@ define <32 x i8> @test_mask_adds_epi8_rr_256(<32 x i8> %a, <32 x i8> %b) {
define <32 x i8> @test_mask_adds_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xec,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1365,7 +1365,7 @@ define <32 x i8> @test_mask_adds_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x
define <32 x i8> @test_mask_adds_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xec,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1375,7 +1375,7 @@ define <32 x i8> @test_mask_adds_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %
define <32 x i8> @test_mask_adds_epi8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
; CHECK-LABEL: test_mask_adds_epi8_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
@@ -1385,7 +1385,7 @@ define <32 x i8> @test_mask_adds_epi8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
define <32 x i8> @test_mask_adds_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xec,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1397,7 +1397,7 @@ define <32 x i8> @test_mask_adds_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <
define <32 x i8> @test_mask_adds_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xec,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1410,7 +1410,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.padds.b.256(<32 x i8>, <32 x i8>, <32 x
define <16 x i8> @test_mask_subs_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_mask_subs_epi8_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.psubs.b.128(<16 x i8> %a, <16 x i8> %b, <16 x i8> zeroinitializer, i16 -1)
@@ -1419,7 +1419,7 @@ define <16 x i8> @test_mask_subs_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @test_mask_subs_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe8,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1430,7 +1430,7 @@ define <16 x i8> @test_mask_subs_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x
define <16 x i8> @test_mask_subs_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1440,7 +1440,7 @@ define <16 x i8> @test_mask_subs_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %
define <16 x i8> @test_mask_subs_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
; CHECK-LABEL: test_mask_subs_epi8_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
@@ -1450,7 +1450,7 @@ define <16 x i8> @test_mask_subs_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
define <16 x i8> @test_mask_subs_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe8,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1462,7 +1462,7 @@ define <16 x i8> @test_mask_subs_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <
define <16 x i8> @test_mask_subs_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1475,7 +1475,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.psubs.b.128(<16 x i8>, <16 x i8>, <16 x
define <32 x i8> @test_mask_subs_epi8_rr_256(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: test_mask_subs_epi8_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.mask.psubs.b.256(<32 x i8> %a, <32 x i8> %b, <32 x i8> zeroinitializer, i32 -1)
@@ -1484,7 +1484,7 @@ define <32 x i8> @test_mask_subs_epi8_rr_256(<32 x i8> %a, <32 x i8> %b) {
define <32 x i8> @test_mask_subs_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe8,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1495,7 +1495,7 @@ define <32 x i8> @test_mask_subs_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x
define <32 x i8> @test_mask_subs_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1505,7 +1505,7 @@ define <32 x i8> @test_mask_subs_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %
define <32 x i8> @test_mask_subs_epi8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
; CHECK-LABEL: test_mask_subs_epi8_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
@@ -1515,7 +1515,7 @@ define <32 x i8> @test_mask_subs_epi8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
define <32 x i8> @test_mask_subs_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe8,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1527,7 +1527,7 @@ define <32 x i8> @test_mask_subs_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <
define <32 x i8> @test_mask_subs_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1540,7 +1540,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.psubs.b.256(<32 x i8>, <32 x i8>, <32 x
define <16 x i8> @test_mask_adds_epu8_rr_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_mask_adds_epu8_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.paddus.b.128(<16 x i8> %a, <16 x i8> %b, <16 x i8> zeroinitializer, i16 -1)
@@ -1549,7 +1549,7 @@ define <16 x i8> @test_mask_adds_epu8_rr_128(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @test_mask_adds_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdc,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1560,7 +1560,7 @@ define <16 x i8> @test_mask_adds_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x
define <16 x i8> @test_mask_adds_epu8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1570,7 +1570,7 @@ define <16 x i8> @test_mask_adds_epu8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %
define <16 x i8> @test_mask_adds_epu8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
; CHECK-LABEL: test_mask_adds_epu8_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
@@ -1580,7 +1580,7 @@ define <16 x i8> @test_mask_adds_epu8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
define <16 x i8> @test_mask_adds_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdc,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1592,7 +1592,7 @@ define <16 x i8> @test_mask_adds_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <
define <16 x i8> @test_mask_adds_epu8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1605,7 +1605,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.paddus.b.128(<16 x i8>, <16 x i8>, <16 x
define <32 x i8> @test_mask_adds_epu8_rr_256(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: test_mask_adds_epu8_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.mask.paddus.b.256(<32 x i8> %a, <32 x i8> %b, <32 x i8> zeroinitializer, i32 -1)
@@ -1614,7 +1614,7 @@ define <32 x i8> @test_mask_adds_epu8_rr_256(<32 x i8> %a, <32 x i8> %b) {
define <32 x i8> @test_mask_adds_epu8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdc,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1625,7 +1625,7 @@ define <32 x i8> @test_mask_adds_epu8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x
define <32 x i8> @test_mask_adds_epu8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1635,7 +1635,7 @@ define <32 x i8> @test_mask_adds_epu8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %
define <32 x i8> @test_mask_adds_epu8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
; CHECK-LABEL: test_mask_adds_epu8_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
@@ -1645,7 +1645,7 @@ define <32 x i8> @test_mask_adds_epu8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
define <32 x i8> @test_mask_adds_epu8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdc,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1657,7 +1657,7 @@ define <32 x i8> @test_mask_adds_epu8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <
define <32 x i8> @test_mask_adds_epu8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1670,7 +1670,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.paddus.b.256(<32 x i8>, <32 x i8>, <32 x
define <16 x i8> @test_mask_subs_epu8_rr_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_mask_subs_epu8_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.psubus.b.128(<16 x i8> %a, <16 x i8> %b, <16 x i8> zeroinitializer, i16 -1)
@@ -1679,7 +1679,7 @@ define <16 x i8> @test_mask_subs_epu8_rr_128(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @test_mask_subs_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd8,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1690,7 +1690,7 @@ define <16 x i8> @test_mask_subs_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x
define <16 x i8> @test_mask_subs_epu8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1700,7 +1700,7 @@ define <16 x i8> @test_mask_subs_epu8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %
define <16 x i8> @test_mask_subs_epu8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
; CHECK-LABEL: test_mask_subs_epu8_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
@@ -1710,7 +1710,7 @@ define <16 x i8> @test_mask_subs_epu8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
define <16 x i8> @test_mask_subs_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd8,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1722,7 +1722,7 @@ define <16 x i8> @test_mask_subs_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <
define <16 x i8> @test_mask_subs_epu8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1735,7 +1735,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.psubus.b.128(<16 x i8>, <16 x i8>, <16 x
define <32 x i8> @test_mask_subs_epu8_rr_256(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: test_mask_subs_epu8_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.mask.psubus.b.256(<32 x i8> %a, <32 x i8> %b, <32 x i8> zeroinitializer, i32 -1)
@@ -1744,7 +1744,7 @@ define <32 x i8> @test_mask_subs_epu8_rr_256(<32 x i8> %a, <32 x i8> %b) {
define <32 x i8> @test_mask_subs_epu8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd8,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1755,7 +1755,7 @@ define <32 x i8> @test_mask_subs_epu8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x
define <32 x i8> @test_mask_subs_epu8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1765,7 +1765,7 @@ define <32 x i8> @test_mask_subs_epu8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %
define <32 x i8> @test_mask_subs_epu8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
; CHECK-LABEL: test_mask_subs_epu8_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <32 x i8>, <32 x i8>* %ptr_b
@@ -1775,7 +1775,7 @@ define <32 x i8> @test_mask_subs_epu8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
define <32 x i8> @test_mask_subs_epu8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd8,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1787,7 +1787,7 @@ define <32 x i8> @test_mask_subs_epu8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <
define <32 x i8> @test_mask_subs_epu8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1802,7 +1802,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.128(<8 x i16>, <8 x i16>,
define <8 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_hi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x7d,0xda]
@@ -1819,7 +1819,7 @@ declare <8 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.128(<8 x i16>, <8 x i16>,
define <8 x i16>@test_int_x86_avx512_maskz_vpermt2var_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_hi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x7d,0xda]
@@ -1836,7 +1836,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.256(<16 x i16>, <16 x i16
define <16 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_hi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x7d,0xda]
@@ -1853,7 +1853,7 @@ declare <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16>, <16 x i1
define <16 x i16>@test_int_x86_avx512_maskz_vpermt2var_hi_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_hi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x7d,0xda]
@@ -1870,7 +1870,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.128(<8 x i16>, <8 x i16>,
define <8 x i16>@test_int_x86_avx512_mask_vpermi2var_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_hi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
; CHECK-NEXT: vpermi2w %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x75,0xda]
@@ -1887,7 +1887,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16>, <16 x i16
define <16 x i16>@test_int_x86_avx512_mask_vpermi2var_hi_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_hi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x75,0xda]
@@ -1904,7 +1904,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmulhu.w.128(<8 x i16>, <8 x i16>, <8 x
define <8 x i16>@test_int_x86_avx512_mask_pmulhu_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulhu_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe4,0xd1]
; CHECK-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
@@ -1920,7 +1920,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmulhu.w.256(<16 x i16>, <16 x i16>, <1
define <16 x i16>@test_int_x86_avx512_mask_pmulhu_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulhu_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhuw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe4,0xd1]
; CHECK-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
@@ -1936,7 +1936,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmulh.w.128(<8 x i16>, <8 x i16>, <8 x i
define <8 x i16>@test_int_x86_avx512_mask_pmulh_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulh_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe5,0xd1]
; CHECK-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
@@ -1952,7 +1952,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmulh.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pmulh_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulh_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe5,0xd1]
; CHECK-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
@@ -1968,7 +1968,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmul.hr.sw.128(<8 x i16>, <8 x i16>, <8
define <8 x i16>@test_int_x86_avx512_mask_pmulhr_sw_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulhr_sw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x0b,0xd1]
; CHECK-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0b,0xc1]
@@ -1984,7 +1984,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmul.hr.sw.256(<16 x i16>, <16 x i16>,
define <16 x i16>@test_int_x86_avx512_mask_pmulhr_sw_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulhr_sw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x0b,0xd1]
; CHECK-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
@@ -2000,7 +2000,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.wb.128(<8 x i16>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmov_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_wb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovwb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x30,0xc2]
; CHECK-NEXT: vpmovwb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x30,0xc1]
@@ -2020,7 +2020,7 @@ declare void @llvm.x86.avx512.mask.pmov.wb.mem.128(i8* %ptr, <8 x i16>, i8)
define void @test_int_x86_avx512_mask_pmov_wb_mem_128(i8* %ptr, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_wb_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovwb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x30,0x07]
; CHECK-NEXT: vpmovwb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x30,0x07]
@@ -2034,7 +2034,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.wb.128(<8 x i16>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_wb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovswb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x20,0xc2]
; CHECK-NEXT: vpmovswb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x20,0xc1]
@@ -2054,7 +2054,7 @@ declare void @llvm.x86.avx512.mask.pmovs.wb.mem.128(i8* %ptr, <8 x i16>, i8)
define void @test_int_x86_avx512_mask_pmovs_wb_mem_128(i8* %ptr, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_wb_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovswb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x20,0x07]
; CHECK-NEXT: vpmovswb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x20,0x07]
@@ -2068,7 +2068,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.128(<8 x i16>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovus_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_wb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovuswb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x10,0xc2]
; CHECK-NEXT: vpmovuswb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x10,0xc1]
@@ -2088,7 +2088,7 @@ declare void @llvm.x86.avx512.mask.pmovus.wb.mem.128(i8* %ptr, <8 x i16>, i8)
define void @test_int_x86_avx512_mask_pmovus_wb_mem_128(i8* %ptr, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_wb_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovuswb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x10,0x07]
; CHECK-NEXT: vpmovuswb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x10,0x07]
@@ -2102,7 +2102,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.wb.256(<16 x i16>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pmov_wb_256(<16 x i16> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_wb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovwb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x30,0xc2]
; CHECK-NEXT: vpmovwb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x30,0xc1]
@@ -2122,7 +2122,7 @@ declare void @llvm.x86.avx512.mask.pmov.wb.mem.256(i8* %ptr, <16 x i16>, i16)
define void @test_int_x86_avx512_mask_pmov_wb_mem_256(i8* %ptr, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_wb_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovwb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x30,0x07]
; CHECK-NEXT: vpmovwb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x30,0x07]
@@ -2136,7 +2136,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.wb.256(<16 x i16>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_wb_256(<16 x i16> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_wb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovswb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x20,0xc2]
; CHECK-NEXT: vpmovswb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x20,0xc1]
@@ -2156,7 +2156,7 @@ declare void @llvm.x86.avx512.mask.pmovs.wb.mem.256(i8* %ptr, <16 x i16>, i16)
define void @test_int_x86_avx512_mask_pmovs_wb_mem_256(i8* %ptr, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_wb_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovswb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x20,0x07]
; CHECK-NEXT: vpmovswb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x20,0x07]
@@ -2170,7 +2170,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.256(<16 x i16>, <16 x i8>, i16
define <16 x i8>@test_int_x86_avx512_mask_pmovus_wb_256(<16 x i16> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_wb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovuswb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x10,0xc2]
; CHECK-NEXT: vpmovuswb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x10,0xc1]
@@ -2190,7 +2190,7 @@ declare void @llvm.x86.avx512.mask.pmovus.wb.mem.256(i8* %ptr, <16 x i16>, i16)
define void @test_int_x86_avx512_mask_pmovus_wb_mem_256(i8* %ptr, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_wb_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovuswb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x10,0x07]
; CHECK-NEXT: vpmovuswb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x10,0x07]
@@ -2204,7 +2204,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmaddw.d.128(<8 x i16>, <8 x i16>, <4 x
define <4 x i32>@test_int_x86_avx512_mask_pmaddw_d_128(<8 x i16> %x0, <8 x i16> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaddw_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaddwd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf5,0xd1]
; CHECK-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
@@ -2220,7 +2220,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmaddw.d.256(<16 x i16>, <16 x i16>, <8
define <8 x i32>@test_int_x86_avx512_mask_pmaddw_d_256(<16 x i16> %x0, <16 x i16> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaddw_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaddwd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf5,0xd1]
; CHECK-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
@@ -2236,7 +2236,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmaddubs.w.128(<16 x i8>, <16 x i8>, <8
define <8 x i16>@test_int_x86_avx512_mask_pmaddubs_w_128(<16 x i8> %x0, <16 x i8> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaddubs_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x04,0xd1]
; CHECK-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x04,0xc1]
@@ -2252,7 +2252,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmaddubs.w.256(<32 x i8>, <32 x i8>, <1
define <16 x i16>@test_int_x86_avx512_mask_pmaddubs_w_256(<32 x i8> %x0, <32 x i8> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaddubs_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x04,0xd1]
; CHECK-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
@@ -2268,7 +2268,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8>, <16 x i8>, i32,
define <8 x i16>@test_int_x86_avx512_mask_dbpsadbw_128(<16 x i8> %x0, <16 x i8> %x1, <8 x i16> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_dbpsadbw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf3,0x7d,0x08,0x42,0xd9,0x02]
; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x42,0xd1,0x02]
@@ -2288,7 +2288,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.dbpsadbw.256(<32 x i8>, <32 x i8>, i32,
define <16 x i16>@test_int_x86_avx512_mask_dbpsadbw_256(<32 x i8> %x0, <32 x i8> %x1, <16 x i16> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_dbpsadbw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf3,0x7d,0x28,0x42,0xd9,0x02]
; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x42,0xd1,0x02]
@@ -2308,7 +2308,7 @@ declare i16 @llvm.x86.avx512.cvtb2mask.128(<16 x i8>)
define i16@test_int_x86_avx512_cvtb2mask_128(<16 x i8> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtb2mask_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovb2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -2321,7 +2321,7 @@ declare i32 @llvm.x86.avx512.cvtb2mask.256(<32 x i8>)
define i32@test_int_x86_avx512_cvtb2mask_256(<32 x i8> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtb2mask_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovb2m %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2333,7 +2333,7 @@ declare i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16>)
define i8@test_int_x86_avx512_cvtw2mask_128(<8 x i16> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtw2mask_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovw2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -2346,7 +2346,7 @@ declare i16 @llvm.x86.avx512.cvtw2mask.256(<16 x i16>)
define i16@test_int_x86_avx512_cvtw2mask_256(<16 x i16> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtw2mask_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovw2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -2359,7 +2359,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psrlv16.hi(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_psrlv16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrlv16_hi:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvw %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x10,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlvw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x10,0xd1]
@@ -2379,7 +2379,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psrlv8.hi(<8 x i16>, <8 x i16>, <8 x i16
define <8 x i16>@test_int_x86_avx512_mask_psrlv8_hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrlv8_hi:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvw %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x10,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlvw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x10,0xd1]
@@ -2399,7 +2399,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psrav16.hi(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_psrav16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrav16_hi:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravw %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x11,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsravw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x11,0xd1]
@@ -2419,7 +2419,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psrav8.hi(<8 x i16>, <8 x i16>, <8 x i16
define <8 x i16>@test_int_x86_avx512_mask_psrav8_hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrav8_hi:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravw %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x11,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsravw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x11,0xd1]
@@ -2439,7 +2439,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.psllv16.hi(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_psllv16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psllv16_hi:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvw %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x12,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsllvw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x12,0xd1]
@@ -2459,7 +2459,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.psllv8.hi(<8 x i16>, <8 x i16>, <8 x i16
define <8 x i16>@test_int_x86_avx512_mask_psllv8_hi(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psllv8_hi:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvw %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x12,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsllvw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x12,0xd1]
@@ -2479,7 +2479,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16>, <8 x i16>, <8
define <8 x i16>@test_int_x86_avx512_mask_permvar_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_hi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpermw %xmm0, %xmm1, %xmm3 ## encoding: [0x62,0xf2,0xf5,0x08,0x8d,0xd8]
; CHECK-NEXT: vpermw %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0x8d,0xd0]
@@ -2499,7 +2499,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16>, <16 x i16>,
define <16 x i16>@test_int_x86_avx512_mask_permvar_hi_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_hi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm3 ## encoding: [0x62,0xf2,0xf5,0x28,0x8d,0xd8]
; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0x8d,0xd0]
diff --git a/test/CodeGen/X86/avx512bwvl-mov.ll b/test/CodeGen/X86/avx512bwvl-mov.ll
index 92c8504da2f..1826890d49c 100644
--- a/test/CodeGen/X86/avx512bwvl-mov.ll
+++ b/test/CodeGen/X86/avx512bwvl-mov.ll
@@ -3,7 +3,7 @@
define <32 x i8> @test_256_1(i8 * %addr) {
; CHECK-LABEL: test_256_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <32 x i8>*
@@ -13,7 +13,7 @@ define <32 x i8> @test_256_1(i8 * %addr) {
define void @test_256_2(i8 * %addr, <32 x i8> %data) {
; CHECK-LABEL: test_256_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <32 x i8>*
@@ -23,7 +23,7 @@ define void @test_256_2(i8 * %addr, <32 x i8> %data) {
define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
; CHECK-LABEL: test_256_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqb %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x3f,0xca,0x04]
; CHECK-NEXT: vmovdqu8 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7f,0x29,0x6f,0x07]
@@ -37,7 +37,7 @@ define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
define <32 x i8> @test_256_4(i8 * %addr, <32 x i8> %mask1) {
; CHECK-LABEL: test_256_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqb %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x3f,0xc9,0x04]
; CHECK-NEXT: vmovdqu8 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7f,0xa9,0x6f,0x07]
@@ -51,7 +51,7 @@ define <32 x i8> @test_256_4(i8 * %addr, <32 x i8> %mask1) {
define <16 x i16> @test_256_5(i8 * %addr) {
; CHECK-LABEL: test_256_5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x i16>*
@@ -61,7 +61,7 @@ define <16 x i16> @test_256_5(i8 * %addr) {
define void @test_256_6(i8 * %addr, <16 x i16> %data) {
; CHECK-LABEL: test_256_6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x i16>*
@@ -71,7 +71,7 @@ define void @test_256_6(i8 * %addr, <16 x i16> %data) {
define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
; CHECK-LABEL: test_256_7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqw %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x3f,0xca,0x04]
; CHECK-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x6f,0x07]
@@ -85,7 +85,7 @@ define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
define <16 x i16> @test_256_8(i8 * %addr, <16 x i16> %mask1) {
; CHECK-LABEL: test_256_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqw %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xc9,0x04]
; CHECK-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0xa9,0x6f,0x07]
@@ -99,7 +99,7 @@ define <16 x i16> @test_256_8(i8 * %addr, <16 x i16> %mask1) {
define <16 x i8> @test_128_1(i8 * %addr) {
; CHECK-LABEL: test_128_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x i8>*
@@ -109,7 +109,7 @@ define <16 x i8> @test_128_1(i8 * %addr) {
define void @test_128_2(i8 * %addr, <16 x i8> %data) {
; CHECK-LABEL: test_128_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <16 x i8>*
@@ -119,7 +119,7 @@ define void @test_128_2(i8 * %addr, <16 x i8> %data) {
define <16 x i8> @test_128_3(i8 * %addr, <16 x i8> %old, <16 x i8> %mask1) {
; CHECK-LABEL: test_128_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqb %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x3f,0xca,0x04]
; CHECK-NEXT: vmovdqu8 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7f,0x09,0x6f,0x07]
@@ -133,7 +133,7 @@ define <16 x i8> @test_128_3(i8 * %addr, <16 x i8> %old, <16 x i8> %mask1) {
define <16 x i8> @test_128_4(i8 * %addr, <16 x i8> %mask1) {
; CHECK-LABEL: test_128_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqb %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xc9,0x04]
; CHECK-NEXT: vmovdqu8 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7f,0x89,0x6f,0x07]
@@ -147,7 +147,7 @@ define <16 x i8> @test_128_4(i8 * %addr, <16 x i8> %mask1) {
define <8 x i16> @test_128_5(i8 * %addr) {
; CHECK-LABEL: test_128_5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i16>*
@@ -157,7 +157,7 @@ define <8 x i16> @test_128_5(i8 * %addr) {
define void @test_128_6(i8 * %addr, <8 x i16> %data) {
; CHECK-LABEL: test_128_6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i16>*
@@ -167,7 +167,7 @@ define void @test_128_6(i8 * %addr, <8 x i16> %data) {
define <8 x i16> @test_128_7(i8 * %addr, <8 x i16> %old, <8 x i16> %mask1) {
; CHECK-LABEL: test_128_7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqw %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x3f,0xca,0x04]
; CHECK-NEXT: vmovdqu16 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x6f,0x07]
@@ -181,7 +181,7 @@ define <8 x i16> @test_128_7(i8 * %addr, <8 x i16> %old, <8 x i16> %mask1) {
define <8 x i16> @test_128_8(i8 * %addr, <8 x i16> %mask1) {
; CHECK-LABEL: test_128_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqw %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xc9,0x04]
; CHECK-NEXT: vmovdqu16 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x6f,0x07]
diff --git a/test/CodeGen/X86/avx512bwvl-vec-cmp.ll b/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
index 17e581bbb50..bdaa1587e0a 100644
--- a/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
@@ -3,7 +3,7 @@
define <32 x i8> @test256_1(<32 x i8> %x, <32 x i8> %y) nounwind {
; CHECK-LABEL: test256_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -14,7 +14,7 @@ define <32 x i8> @test256_1(<32 x i8> %x, <32 x i8> %y) nounwind {
define <32 x i8> @test256_2(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind {
; CHECK-LABEL: test256_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -25,7 +25,7 @@ define <32 x i8> @test256_2(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind
define <16 x i16> @test256_3(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1) nounwind {
; CHECK-LABEL: test256_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k1
; CHECK-NEXT: vpblendmw %ymm2, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -36,7 +36,7 @@ define <16 x i16> @test256_3(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1) nounw
define <32 x i8> @test256_4(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind {
; CHECK-LABEL: test256_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpnleub %ymm1, %ymm0, %k1
; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -47,7 +47,7 @@ define <32 x i8> @test256_4(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind
define <16 x i16> @test256_5(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %yp) nounwind {
; CHECK-LABEL: test256_5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k1
; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -59,7 +59,7 @@ define <16 x i16> @test256_5(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %yp) nou
define <16 x i16> @test256_6(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test256_6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k1
; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -71,7 +71,7 @@ define <16 x i16> @test256_6(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
define <16 x i16> @test256_7(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test256_7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew (%rdi), %ymm0, %k1
; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -83,7 +83,7 @@ define <16 x i16> @test256_7(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
define <16 x i16> @test256_8(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test256_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleuw (%rdi), %ymm0, %k1
; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -95,7 +95,7 @@ define <16 x i16> @test256_8(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
define <16 x i16> @test256_9(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1, <16 x i16> %y1) nounwind {
; CHECK-LABEL: test256_9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 {%k1}
; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
@@ -109,7 +109,7 @@ define <16 x i16> @test256_9(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1, <16 x
define <32 x i8> @test256_10(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1, <32 x i8> %y1) nounwind {
; CHECK-LABEL: test256_10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpleb %ymm2, %ymm3, %k1 {%k1}
; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
@@ -123,7 +123,7 @@ define <32 x i8> @test256_10(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1, <32 x i8
define <32 x i8> @test256_11(<32 x i8> %x, <32 x i8>* %y.ptr, <32 x i8> %x1, <32 x i8> %y1) nounwind {
; CHECK-LABEL: test256_11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %ymm0, %k1 {%k1}
; CHECK-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
@@ -138,7 +138,7 @@ define <32 x i8> @test256_11(<32 x i8> %x, <32 x i8>* %y.ptr, <32 x i8> %x1, <32
define <16 x i16> @test256_12(<16 x i16> %x, <16 x i16>* %y.ptr, <16 x i16> %x1, <16 x i16> %y1) nounwind {
; CHECK-LABEL: test256_12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %ymm0, %k1 {%k1}
; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
@@ -153,7 +153,7 @@ define <16 x i16> @test256_12(<16 x i16> %x, <16 x i16>* %y.ptr, <16 x i16> %x1,
define <16 x i8> @test128_1(<16 x i8> %x, <16 x i8> %y) nounwind {
; CHECK-LABEL: test128_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -164,7 +164,7 @@ define <16 x i8> @test128_1(<16 x i8> %x, <16 x i8> %y) nounwind {
define <16 x i8> @test128_2(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind {
; CHECK-LABEL: test128_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -175,7 +175,7 @@ define <16 x i8> @test128_2(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind
define <8 x i16> @test128_3(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1) nounwind {
; CHECK-LABEL: test128_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k1
; CHECK-NEXT: vpblendmw %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -186,7 +186,7 @@ define <8 x i16> @test128_3(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1) nounwind
define <16 x i8> @test128_4(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind {
; CHECK-LABEL: test128_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpnleub %xmm1, %xmm0, %k1
; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -197,7 +197,7 @@ define <16 x i8> @test128_4(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind
define <8 x i16> @test128_5(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %yp) nounwind {
; CHECK-LABEL: test128_5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k1
; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -209,7 +209,7 @@ define <8 x i16> @test128_5(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %yp) nounwin
define <8 x i16> @test128_6(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test128_6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k1
; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -221,7 +221,7 @@ define <8 x i16> @test128_6(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
define <8 x i16> @test128_7(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test128_7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew (%rdi), %xmm0, %k1
; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -233,7 +233,7 @@ define <8 x i16> @test128_7(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
define <8 x i16> @test128_8(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
; CHECK-LABEL: test128_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleuw (%rdi), %xmm0, %k1
; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -245,7 +245,7 @@ define <8 x i16> @test128_8(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
define <8 x i16> @test128_9(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1, <8 x i16> %y1) nounwind {
; CHECK-LABEL: test128_9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 {%k1}
; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
@@ -259,7 +259,7 @@ define <8 x i16> @test128_9(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1, <8 x i16>
define <16 x i8> @test128_10(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1, <16 x i8> %y1) nounwind {
; CHECK-LABEL: test128_10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpleb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpleb %xmm2, %xmm3, %k1 {%k1}
; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
@@ -273,7 +273,7 @@ define <16 x i8> @test128_10(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1, <16 x i8
define <16 x i8> @test128_11(<16 x i8> %x, <16 x i8>* %y.ptr, <16 x i8> %x1, <16 x i8> %y1) nounwind {
; CHECK-LABEL: test128_11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
@@ -288,7 +288,7 @@ define <16 x i8> @test128_11(<16 x i8> %x, <16 x i8>* %y.ptr, <16 x i8> %x1, <16
define <8 x i16> @test128_12(<8 x i16> %x, <8 x i16>* %y.ptr, <8 x i16> %x1, <8 x i16> %y1) nounwind {
; CHECK-LABEL: test128_12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmplew %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %xmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
diff --git a/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll b/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
index 73135c27630..190d58d84b1 100644
--- a/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
+++ b/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
@@ -4,7 +4,7 @@
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm_test_epi8_mask(<2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm_test_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -20,7 +20,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm_mask_test_epi8_mask(i16 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm_mask_test_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -39,7 +39,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_test_epi16_mask(<2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm_test_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -55,7 +55,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_mask_test_epi16_mask(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm_mask_test_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -74,7 +74,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm_testn_epi8_mask(<2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm_testn_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -90,7 +90,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm_mask_testn_epi8_mask(i16 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm_mask_testn_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -109,7 +109,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_testn_epi16_mask(<2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm_testn_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -125,7 +125,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_mask_testn_epi16_mask(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm_mask_testn_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -144,7 +144,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i32 @TEST_mm256_test_epi8_mask(<4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm256_test_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmb %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: vzeroupper
@@ -160,7 +160,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i32 @TEST_mm256_mask_test_epi8_mask(i32 %__U, <4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm256_mask_test_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmb %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -179,7 +179,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm256_test_epi16_mask(<4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm256_test_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -196,7 +196,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm256_mask_test_epi16_mask(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm256_mask_test_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -216,7 +216,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i32 @TEST_mm256_testn_epi8_mask(<4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm256_testn_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmb %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: vzeroupper
@@ -232,7 +232,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define i32 @TEST_mm256_mask_testn_epi8_mask(i32 %__U, <4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm256_mask_testn_epi8_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmb %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -251,7 +251,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm256_testn_epi16_mask(<4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm256_testn_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -268,7 +268,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm256_mask_testn_epi16_mask(i16 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm256_mask_testn_epi16_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
diff --git a/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll
index ca5e5523a9d..a4f4c837dc0 100644
--- a/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll
@@ -3,7 +3,7 @@
define <8 x i64> @test_mm512_broadcastmb_epi64(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mm512_broadcastmb_epi64:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: vpbroadcastmb2q %k0, %zmm0
; CHECK-NEXT: retq
@@ -18,7 +18,7 @@ entry:
define <8 x i64> @test_mm512_broadcastmw_epi32(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mm512_broadcastmw_epi32:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: vpbroadcastmw2d %k0, %zmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll
index 92dfe1e087a..da4ba9e1009 100644
--- a/test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll
@@ -3,7 +3,7 @@
define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
; CHECK-LABEL: test_lzcnt_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntd %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
@@ -14,7 +14,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32>, <16 x i32>, i16
define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
; CHECK-LABEL: test_lzcnt_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntq %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
@@ -26,7 +26,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64>, <8 x i64>, i8) no
define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_lzcnt_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -37,7 +37,7 @@ define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_lzcnt_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -48,7 +48,7 @@ define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
define <16 x i32> @test_x86_vbroadcastmw_512(i16 %a0) {
; CHECK-LABEL: test_x86_vbroadcastmw_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movzwl %di, %eax
; CHECK-NEXT: vpbroadcastd %eax, %zmm0
; CHECK-NEXT: retq
@@ -59,7 +59,7 @@ declare <16 x i32> @llvm.x86.avx512.broadcastmw.512(i16)
define <8 x i64> @test_x86_broadcastmb_512(i8 %a0) {
; CHECK-LABEL: test_x86_broadcastmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: vpbroadcastq %rax, %zmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512cd-intrinsics.ll b/test/CodeGen/X86/avx512cd-intrinsics.ll
index ab8c80f8dd3..7f0c761991e 100644
--- a/test/CodeGen/X86/avx512cd-intrinsics.ll
+++ b/test/CodeGen/X86/avx512cd-intrinsics.ll
@@ -5,7 +5,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32>, <16 x i32>,
define <8 x i64> @test_conflict_q(<8 x i64> %a) {
; CHECK-LABEL: test_conflict_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpconflictq %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
@@ -16,7 +16,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64>, <8 x i64>, i8)
define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
; CHECK-LABEL: test_maskz_conflict_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpconflictd %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -26,7 +26,7 @@ define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_conflict_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpconflictq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -37,7 +37,7 @@ define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
; CHECK-LABEL: test_lzcnt_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntd %zmm0, %zmm0
; CHECK-NEXT: retq
%1 = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
@@ -47,7 +47,7 @@ declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1) #0
define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
; CHECK-LABEL: test_lzcnt_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntq %zmm0, %zmm0
; CHECK-NEXT: retq
%1 = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
@@ -57,7 +57,7 @@ declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) #0
define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_lzcnt_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -70,7 +70,7 @@ define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_lzcnt_q:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
index 0e310be3489..6070ea294d5 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
@@ -5,7 +5,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
@@ -25,7 +25,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.lzcnt.d.256(<8 x i32>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_vplzcnt_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntd %ymm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %ymm0, %ymm1 {%k1}
@@ -41,7 +41,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.lzcnt.q.128(<2 x i64>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_vplzcnt_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntq %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntq %xmm0, %xmm1 {%k1}
@@ -57,7 +57,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.lzcnt.q.256(<4 x i64>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_vplzcnt_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntq %ymm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntq %ymm0, %ymm1 {%k1}
@@ -71,7 +71,7 @@ define <4 x i64>@test_int_x86_avx512_mask_vplzcnt_q_256(<4 x i64> %x0, <4 x i64>
define <8 x i32> @test_x86_vbroadcastmw_256(i16 %a0) {
; CHECK-LABEL: test_x86_vbroadcastmw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movzwl %di, %eax
; CHECK-NEXT: vpbroadcastd %eax, %ymm0
; CHECK-NEXT: retq
@@ -82,7 +82,7 @@ declare <8 x i32> @llvm.x86.avx512.broadcastmw.256(i16)
define <4 x i32> @test_x86_vbroadcastmw_128(i16 %a0) {
; CHECK-LABEL: test_x86_vbroadcastmw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movzwl %di, %eax
; CHECK-NEXT: vpbroadcastd %eax, %xmm0
; CHECK-NEXT: retq
@@ -93,7 +93,7 @@ declare <4 x i32> @llvm.x86.avx512.broadcastmw.128(i16)
define <4 x i64> @test_x86_broadcastmb_256(i8 %a0) {
; CHECK-LABEL: test_x86_broadcastmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: vpbroadcastq %rax, %ymm0
; CHECK-NEXT: retq
@@ -104,7 +104,7 @@ declare <4 x i64> @llvm.x86.avx512.broadcastmb.256(i8)
define <2 x i64> @test_x86_broadcastmb_128(i8 %a0) {
; CHECK-LABEL: test_x86_broadcastmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: vpbroadcastq %rax, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics.ll b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
index 2fb50297c62..3530d321b02 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
@@ -3,7 +3,7 @@
define <4 x i32> @test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
@@ -28,7 +28,7 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) #0
define <8 x i32> @test_int_x86_avx512_mask_vplzcnt_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntd %ymm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %ymm0, %ymm1 {%k1}
@@ -45,7 +45,7 @@ declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1) #0
define <2 x i64> @test_int_x86_avx512_mask_vplzcnt_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntq %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntq %xmm0, %xmm1 {%k1}
@@ -63,7 +63,7 @@ declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) #0
define <4 x i64> @test_int_x86_avx512_mask_vplzcnt_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vplzcntq %ymm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntq %ymm0, %ymm1 {%k1}
@@ -83,7 +83,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.conflict.d.128(<4 x i32>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_vpconflict_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpconflict_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpconflictd %xmm0, %xmm2 {%k1} {z}
; CHECK-NEXT: vpconflictd %xmm0, %xmm1 {%k1}
@@ -103,7 +103,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.conflict.d.256(<8 x i32>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_vpconflict_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpconflict_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpconflictd %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vpconflictd %ymm0, %ymm0
@@ -119,7 +119,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.conflict.q.128(<2 x i64>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_vpconflict_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpconflict_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpconflictq %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vpconflictq %xmm0, %xmm0
@@ -135,7 +135,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.conflict.q.256(<4 x i64>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_vpconflict_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpconflict_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpconflictq %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vpconflictq %ymm0, %ymm0
diff --git a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
index 04ea615fed6..ba75a351130 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -5,7 +5,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vextractf64x2.512(<8 x double>, i32,
define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_512(<8 x double> %x0, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vextractf64x2_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: kshiftlb $7, %k0, %k1
@@ -35,7 +35,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vextractf32x8.512(<16 x float>, i32, <
define <8 x float>@test_int_x86_avx512_mask_vextractf32x8(<16 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vextractf32x8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
@@ -55,7 +55,7 @@ declare <16 x float> @llvm.x86.avx512.mask.insertf32x8.512(<16 x float>, <8 x fl
define <16 x float>@test_int_x86_avx512_mask_insertf32x8_512(<16 x float> %x0, <8 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x8_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
@@ -75,7 +75,7 @@ declare <8 x double> @llvm.x86.avx512.mask.insertf64x2.512(<8 x double>, <2 x do
define <8 x double>@test_int_x86_avx512_mask_insertf64x2_512(<8 x double> %x0, <2 x double> %x1,<8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf64x2_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -95,7 +95,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.inserti32x8.512(<16 x i32>, <8 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_inserti32x8_512(<16 x i32> %x0, <8 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x8_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
@@ -115,7 +115,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.inserti64x2.512(<8 x i64>, <2 x i64>, i3
define <8 x i64>@test_int_x86_avx512_mask_inserti64x2_512(<8 x i64> %x0, <2 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti64x2_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -136,7 +136,7 @@ declare <16 x i32> @llvm.x86.avx512.cvtmask2d.512(i16)
define <16 x i32>@test_int_x86_avx512_cvtmask2d_512(i16 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: vpmovm2d %k0, %zmm0
; CHECK-NEXT: retq
@@ -148,7 +148,7 @@ declare <8 x i64> @llvm.x86.avx512.cvtmask2q.512(i8)
define <8 x i64>@test_int_x86_avx512_cvtmask2q_512(i8 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: vpmovm2q %k0, %zmm0
; CHECK-NEXT: retq
@@ -160,7 +160,7 @@ declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x8.512(<8 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x8_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -180,7 +180,7 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0,
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512_load(<8 x float>* %x0ptr, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x8_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; CHECK-NEXT: retq
@@ -194,7 +194,7 @@ declare <8 x double> @llvm.x86.avx512.mask.broadcastf64x2.512(<2 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
@@ -215,7 +215,7 @@ define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512_load(<2 x double>* %x0ptr, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
; CHECK-NEXT: retq
@@ -229,7 +229,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x8.512(<8 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x8_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -249,7 +249,7 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512_load(<8 x i32>* %x0ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x8_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; CHECK-NEXT: retq
@@ -263,7 +263,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.broadcasti64x2.512(<2 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
@@ -284,7 +284,7 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512_load(<2 x i64>* %x0ptr, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1]
; CHECK-NEXT: retq
@@ -298,7 +298,7 @@ declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x2.512(<4 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
@@ -320,7 +320,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x2.512(<4 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
diff --git a/test/CodeGen/X86/avx512dq-intrinsics.ll b/test/CodeGen/X86/avx512dq-intrinsics.ll
index 6e3358e4a17..56aee74faaa 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -6,7 +6,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvtpd2qq.512(<8 x double>, <8 x i64>, i8
define <8 x i64>@test_int_x86_avx512_mask_cvt_pd2qq_512(<8 x double> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2qq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtpd2qq {ru-sae}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtpd2qq {rn-sae}, %zmm0, %zmm0
@@ -22,7 +22,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvtpd2uqq.512(<8 x double>, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_cvt_pd2uqq_512(<8 x double> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2uqq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtpd2uqq {ru-sae}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtpd2uqq {rn-sae}, %zmm0, %zmm0
@@ -38,7 +38,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvtps2qq.512(<8 x float>, <8 x i64>, i8,
define <8 x i64>@test_int_x86_avx512_mask_cvt_ps2qq_512(<8 x float> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2qq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2qq {ru-sae}, %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtps2qq {rn-sae}, %ymm0, %zmm0
@@ -54,7 +54,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvtps2uqq.512(<8 x float>, <8 x i64>, i8
define <8 x i64>@test_int_x86_avx512_mask_cvt_ps2uqq_512(<8 x float> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2uqq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2uqq {ru-sae}, %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtps2uqq {rn-sae}, %ymm0, %zmm0
@@ -70,7 +70,7 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtqq2pd.512(<8 x i64>, <8 x double>,
define <8 x double>@test_int_x86_avx512_mask_cvt_qq2pd_512(<8 x i64> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtqq2pd %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtqq2pd {rn-sae}, %zmm0, %zmm0
@@ -86,7 +86,7 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtqq2ps.512(<8 x i64>, <8 x float>, i
define <8 x float>@test_int_x86_avx512_mask_cvt_qq2ps_512(<8 x i64> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtqq2ps %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtqq2ps {rn-sae}, %zmm0, %ymm0
@@ -102,7 +102,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvttpd2qq.512(<8 x double>, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_cvtt_pd2qq_512(<8 x double> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2qq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttpd2qq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttpd2qq {sae}, %zmm0, %zmm0
@@ -118,7 +118,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvttpd2uqq.512(<8 x double>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_mask_cvtt_pd2uqq_512(<8 x double> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2uqq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttpd2uqq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttpd2uqq {sae}, %zmm0, %zmm0
@@ -134,7 +134,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvttps2qq.512(<8 x float>, <8 x i64>, i8
define <8 x i64>@test_int_x86_avx512_mask_cvtt_ps2qq_512(<8 x float> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2qq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttps2qq %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttps2qq {sae}, %ymm0, %zmm0
@@ -150,7 +150,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvttps2uqq.512(<8 x float>, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_cvtt_ps2uqq_512(<8 x float> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2uqq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttps2uqq %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttps2uqq {sae}, %ymm0, %zmm0
@@ -166,7 +166,7 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtuqq2pd.512(<8 x i64>, <8 x double>
define <8 x double>@test_int_x86_avx512_mask_cvt_uqq2pd_512(<8 x i64> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtuqq2pd %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtuqq2pd {rn-sae}, %zmm0, %zmm0
@@ -182,7 +182,7 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtuqq2ps.512(<8 x i64>, <8 x float>,
define <8 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_512(<8 x i64> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtuqq2ps %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtuqq2ps {rn-sae}, %zmm0, %ymm0
@@ -198,7 +198,7 @@ declare <8 x double> @llvm.x86.avx512.mask.reduce.pd.512(<8 x double>, i32, <8 x
define <8 x double>@test_int_x86_avx512_mask_reduce_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducepd $8, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vreducepd $4, {sae}, %zmm0, %zmm0
@@ -214,7 +214,7 @@ declare <16 x float> @llvm.x86.avx512.mask.reduce.ps.512(<16 x float>, i32, <16
define <16 x float>@test_int_x86_avx512_mask_reduce_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreduceps $44, {sae}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vreduceps $11, %zmm0, %zmm0
@@ -230,7 +230,7 @@ declare <8 x double> @llvm.x86.avx512.mask.range.pd.512(<8 x double>, <8 x doubl
define <8 x double>@test_int_x86_avx512_mask_range_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangepd $8, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vrangepd $4, {sae}, %zmm1, %zmm0, %zmm0
@@ -246,7 +246,7 @@ declare <16 x float> @llvm.x86.avx512.mask.range.ps.512(<16 x float>, <16 x floa
define <16 x float>@test_int_x86_avx512_mask_range_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangeps $88, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vrangeps $4, {sae}, %zmm1, %zmm0, %zmm0
@@ -262,7 +262,7 @@ declare <4 x float> @llvm.x86.avx512.mask.reduce.ss(<4 x float>, <4 x float>,<4
define <4 x float>@test_int_x86_avx512_mask_reduce_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducess $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vreducess $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -278,7 +278,7 @@ declare <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float>, <4 x float>,<4 x
define <4 x float>@test_int_x86_avx512_mask_range_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; AVX512DQ-LABEL: test_int_x86_avx512_mask_range_ss:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; AVX512DQ-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm3
@@ -288,7 +288,7 @@ define <4 x float>@test_int_x86_avx512_mask_range_ss(<4 x float> %x0, <4 x float
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_int_x86_avx512_mask_range_ss:
-; AVX512DQVL: ## BB#0:
+; AVX512DQVL: ## %bb.0:
; AVX512DQVL-NEXT: kmovw %edi, %k1
; AVX512DQVL-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; AVX512DQVL-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm3
@@ -308,7 +308,7 @@ declare <2 x double> @llvm.x86.avx512.mask.reduce.sd(<2 x double>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_reduce_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducesd $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vreducesd $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -324,7 +324,7 @@ declare <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double>, <2 x double>,<
define <2 x double>@test_int_x86_avx512_mask_range_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm2 {%k1}
@@ -344,7 +344,7 @@ declare i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_pd_512(<8 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclasspd $2, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
@@ -362,7 +362,7 @@ declare i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float>, i32, i16)
define i16@test_int_x86_avx512_mask_fpclass_ps_512(<16 x float> %x0, i16 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclassps $4, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
@@ -381,7 +381,7 @@ declare i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_sd(<2 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclasssd $2, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
@@ -398,7 +398,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_sd(<2 x double> %x0, i8 %x1) {
define i8 @test_int_x86_avx512_mask_fpclass_sd_load(<2 x double>* %x0ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_sd_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfpclasssd $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -412,7 +412,7 @@ declare i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_ss(<4 x float> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
@@ -429,7 +429,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_ss(<4 x float> %x0, i8 %x1) {
define i8 @test_int_x86_avx512_mask_fpclass_ss_load(<4 x float>* %x0ptr, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ss_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfpclassss $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -443,7 +443,7 @@ declare i16 @llvm.x86.avx512.cvtd2mask.512(<16 x i32>)
define i16@test_int_x86_avx512_cvtd2mask_512(<16 x i32> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtd2mask_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -456,7 +456,7 @@ declare i8 @llvm.x86.avx512.cvtq2mask.512(<8 x i64>)
define i8@test_int_x86_avx512_cvtq2mask_512(<8 x i64> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtq2mask_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
diff --git a/test/CodeGen/X86/avx512dq-mask-op.ll b/test/CodeGen/X86/avx512dq-mask-op.ll
index ec7672912a1..2a56532dd9d 100644
--- a/test/CodeGen/X86/avx512dq-mask-op.ll
+++ b/test/CodeGen/X86/avx512dq-mask-op.ll
@@ -3,7 +3,7 @@
define i8 @mask8(i8 %x) {
; CHECK-LABEL: mask8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovd %k0, %eax
@@ -17,7 +17,7 @@ define i8 @mask8(i8 %x) {
define void @mask8_mem(i8* %ptr) {
; CHECK-LABEL: mask8_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovb (%rdi), %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovb %k0, (%rdi)
@@ -32,7 +32,7 @@ define void @mask8_mem(i8* %ptr) {
define i8 @mand8(i8 %x, i8 %y) {
; CHECK-LABEL: mand8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl %esi, %eax
; CHECK-NEXT: andl %esi, %edi
@@ -50,7 +50,7 @@ define i8 @mand8(i8 %x, i8 %y) {
define i8 @mand8_mem(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: mand8_mem:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovb (%rdi), %k0
; CHECK-NEXT: kmovb (%rsi), %k1
; CHECK-NEXT: kandb %k1, %k0, %k2
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
index dc1dd08e6b4..78fd5d57e40 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -3,7 +3,7 @@
define <4 x float> @test_mask_andnot_ps_rr_128(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test_mask_andnot_ps_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x55,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.andn.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 -1)
@@ -12,7 +12,7 @@ define <4 x float> @test_mask_andnot_ps_rr_128(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mask_andnot_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x55,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -23,7 +23,7 @@ define <4 x float> @test_mask_andnot_ps_rrk_128(<4 x float> %a, <4 x float> %b,
define <4 x float> @test_mask_andnot_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x55,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -33,7 +33,7 @@ define <4 x float> @test_mask_andnot_ps_rrkz_128(<4 x float> %a, <4 x float> %b,
define <4 x float> @test_mask_andnot_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_ps_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x float>, <4 x float>* %ptr_b
@@ -43,7 +43,7 @@ define <4 x float> @test_mask_andnot_ps_rm_128(<4 x float> %a, <4 x float>* %ptr
define <4 x float> @test_mask_andnot_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x55,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -55,7 +55,7 @@ define <4 x float> @test_mask_andnot_ps_rmk_128(<4 x float> %a, <4 x float>* %pt
define <4 x float> @test_mask_andnot_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -66,7 +66,7 @@ define <4 x float> @test_mask_andnot_ps_rmkz_128(<4 x float> %a, <4 x float>* %p
define <4 x float> @test_mask_andnot_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_ps_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -78,7 +78,7 @@ define <4 x float> @test_mask_andnot_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
define <4 x float> @test_mask_andnot_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x55,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -92,7 +92,7 @@ define <4 x float> @test_mask_andnot_ps_rmbk_128(<4 x float> %a, float* %ptr_b,
define <4 x float> @test_mask_andnot_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -107,7 +107,7 @@ declare <4 x float> @llvm.x86.avx512.mask.andn.ps.128(<4 x float>, <4 x float>,
define <8 x float> @test_mask_andnot_ps_rr_256(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: test_mask_andnot_ps_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x55,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.andn.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 -1)
@@ -116,7 +116,7 @@ define <8 x float> @test_mask_andnot_ps_rr_256(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test_mask_andnot_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x55,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -127,7 +127,7 @@ define <8 x float> @test_mask_andnot_ps_rrk_256(<8 x float> %a, <8 x float> %b,
define <8 x float> @test_mask_andnot_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x55,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -137,7 +137,7 @@ define <8 x float> @test_mask_andnot_ps_rrkz_256(<8 x float> %a, <8 x float> %b,
define <8 x float> @test_mask_andnot_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_ps_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x float>, <8 x float>* %ptr_b
@@ -147,7 +147,7 @@ define <8 x float> @test_mask_andnot_ps_rm_256(<8 x float> %a, <8 x float>* %ptr
define <8 x float> @test_mask_andnot_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x55,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -159,7 +159,7 @@ define <8 x float> @test_mask_andnot_ps_rmk_256(<8 x float> %a, <8 x float>* %pt
define <8 x float> @test_mask_andnot_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -170,7 +170,7 @@ define <8 x float> @test_mask_andnot_ps_rmkz_256(<8 x float> %a, <8 x float>* %p
define <8 x float> @test_mask_andnot_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_ps_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -182,7 +182,7 @@ define <8 x float> @test_mask_andnot_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
define <8 x float> @test_mask_andnot_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x55,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -196,7 +196,7 @@ define <8 x float> @test_mask_andnot_ps_rmbk_256(<8 x float> %a, float* %ptr_b,
define <8 x float> @test_mask_andnot_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -211,7 +211,7 @@ declare <8 x float> @llvm.x86.avx512.mask.andn.ps.256(<8 x float>, <8 x float>,
define <16 x float> @test_mask_andnot_ps_rr_512(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: test_mask_andnot_ps_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x55,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.mask.andn.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> zeroinitializer, i16 -1)
@@ -220,7 +220,7 @@ define <16 x float> @test_mask_andnot_ps_rr_512(<16 x float> %a, <16 x float> %b
define <16 x float> @test_mask_andnot_ps_rrk_512(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x55,0xd1]
; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
@@ -231,7 +231,7 @@ define <16 x float> @test_mask_andnot_ps_rrk_512(<16 x float> %a, <16 x float> %
define <16 x float> @test_mask_andnot_ps_rrkz_512(<16 x float> %a, <16 x float> %b, i16 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x55,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -241,7 +241,7 @@ define <16 x float> @test_mask_andnot_ps_rrkz_512(<16 x float> %a, <16 x float>
define <16 x float> @test_mask_andnot_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_ps_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x float>, <16 x float>* %ptr_b
@@ -251,7 +251,7 @@ define <16 x float> @test_mask_andnot_ps_rm_512(<16 x float> %a, <16 x float>* %
define <16 x float> @test_mask_andnot_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr_b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x55,0x0f]
; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -263,7 +263,7 @@ define <16 x float> @test_mask_andnot_ps_rmk_512(<16 x float> %a, <16 x float>*
define <16 x float> @test_mask_andnot_ps_rmkz_512(<16 x float> %a, <16 x float>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -274,7 +274,7 @@ define <16 x float> @test_mask_andnot_ps_rmkz_512(<16 x float> %a, <16 x float>*
define <16 x float> @test_mask_andnot_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_ps_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandnps (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x58,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -286,7 +286,7 @@ define <16 x float> @test_mask_andnot_ps_rmb_512(<16 x float> %a, float* %ptr_b)
define <16 x float> @test_mask_andnot_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x55,0x0f]
; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -300,7 +300,7 @@ define <16 x float> @test_mask_andnot_ps_rmbk_512(<16 x float> %a, float* %ptr_b
define <16 x float> @test_mask_andnot_ps_rmbkz_512(<16 x float> %a, float* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -315,7 +315,7 @@ declare <16 x float> @llvm.x86.avx512.mask.andn.ps.512(<16 x float>, <16 x float
define <4 x float> @test_mask_and_ps_rr_128(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test_mask_and_ps_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.and.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 -1)
@@ -324,7 +324,7 @@ define <4 x float> @test_mask_and_ps_rr_128(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mask_and_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x54,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -335,7 +335,7 @@ define <4 x float> @test_mask_and_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4
define <4 x float> @test_mask_and_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x54,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -345,7 +345,7 @@ define <4 x float> @test_mask_and_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8
define <4 x float> @test_mask_and_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_and_ps_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x float>, <4 x float>* %ptr_b
@@ -355,7 +355,7 @@ define <4 x float> @test_mask_and_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
define <4 x float> @test_mask_and_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x54,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -367,7 +367,7 @@ define <4 x float> @test_mask_and_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b
define <4 x float> @test_mask_and_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -378,7 +378,7 @@ define <4 x float> @test_mask_and_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_
define <4 x float> @test_mask_and_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_and_ps_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -390,7 +390,7 @@ define <4 x float> @test_mask_and_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
define <4 x float> @test_mask_and_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x54,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -404,7 +404,7 @@ define <4 x float> @test_mask_and_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4
define <4 x float> @test_mask_and_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -419,7 +419,7 @@ declare <4 x float> @llvm.x86.avx512.mask.and.ps.128(<4 x float>, <4 x float>, <
define <8 x float> @test_mask_and_ps_rr_256(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: test_mask_and_ps_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x54,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.and.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 -1)
@@ -428,7 +428,7 @@ define <8 x float> @test_mask_and_ps_rr_256(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test_mask_and_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x54,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -439,7 +439,7 @@ define <8 x float> @test_mask_and_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8
define <8 x float> @test_mask_and_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x54,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -449,7 +449,7 @@ define <8 x float> @test_mask_and_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8
define <8 x float> @test_mask_and_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_and_ps_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x float>, <8 x float>* %ptr_b
@@ -459,7 +459,7 @@ define <8 x float> @test_mask_and_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
define <8 x float> @test_mask_and_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x54,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -471,7 +471,7 @@ define <8 x float> @test_mask_and_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b
define <8 x float> @test_mask_and_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -482,7 +482,7 @@ define <8 x float> @test_mask_and_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_
define <8 x float> @test_mask_and_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_and_ps_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -494,7 +494,7 @@ define <8 x float> @test_mask_and_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
define <8 x float> @test_mask_and_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x54,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -508,7 +508,7 @@ define <8 x float> @test_mask_and_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8
define <8 x float> @test_mask_and_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -523,7 +523,7 @@ declare <8 x float> @llvm.x86.avx512.mask.and.ps.256(<8 x float>, <8 x float>, <
define <16 x float> @test_mask_and_ps_rr_512(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: test_mask_and_ps_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x54,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.mask.and.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> zeroinitializer, i16 -1)
@@ -532,7 +532,7 @@ define <16 x float> @test_mask_and_ps_rr_512(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_mask_and_ps_rrk_512(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x54,0xd1]
; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
@@ -543,7 +543,7 @@ define <16 x float> @test_mask_and_ps_rrk_512(<16 x float> %a, <16 x float> %b,
define <16 x float> @test_mask_and_ps_rrkz_512(<16 x float> %a, <16 x float> %b, i16 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x54,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -553,7 +553,7 @@ define <16 x float> @test_mask_and_ps_rrkz_512(<16 x float> %a, <16 x float> %b,
define <16 x float> @test_mask_and_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_and_ps_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x float>, <16 x float>* %ptr_b
@@ -563,7 +563,7 @@ define <16 x float> @test_mask_and_ps_rm_512(<16 x float> %a, <16 x float>* %ptr
define <16 x float> @test_mask_and_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr_b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x54,0x0f]
; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -575,7 +575,7 @@ define <16 x float> @test_mask_and_ps_rmk_512(<16 x float> %a, <16 x float>* %pt
define <16 x float> @test_mask_and_ps_rmkz_512(<16 x float> %a, <16 x float>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -586,7 +586,7 @@ define <16 x float> @test_mask_and_ps_rmkz_512(<16 x float> %a, <16 x float>* %p
define <16 x float> @test_mask_and_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_and_ps_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x58,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -598,7 +598,7 @@ define <16 x float> @test_mask_and_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
define <16 x float> @test_mask_and_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x54,0x0f]
; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -612,7 +612,7 @@ define <16 x float> @test_mask_and_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <
define <16 x float> @test_mask_and_ps_rmbkz_512(<16 x float> %a, float* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -627,7 +627,7 @@ declare <16 x float> @llvm.x86.avx512.mask.and.ps.512(<16 x float>, <16 x float>
define <4 x float> @test_mask_or_ps_rr_128(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test_mask_or_ps_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.or.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 -1)
@@ -636,7 +636,7 @@ define <4 x float> @test_mask_or_ps_rr_128(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mask_or_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x56,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -647,7 +647,7 @@ define <4 x float> @test_mask_or_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x
define <4 x float> @test_mask_or_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x56,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -657,7 +657,7 @@ define <4 x float> @test_mask_or_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8
define <4 x float> @test_mask_or_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_or_ps_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x float>, <4 x float>* %ptr_b
@@ -667,7 +667,7 @@ define <4 x float> @test_mask_or_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
define <4 x float> @test_mask_or_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x56,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -679,7 +679,7 @@ define <4 x float> @test_mask_or_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b,
define <4 x float> @test_mask_or_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -690,7 +690,7 @@ define <4 x float> @test_mask_or_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b
define <4 x float> @test_mask_or_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_or_ps_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -702,7 +702,7 @@ define <4 x float> @test_mask_or_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
define <4 x float> @test_mask_or_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x56,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -716,7 +716,7 @@ define <4 x float> @test_mask_or_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x
define <4 x float> @test_mask_or_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -731,7 +731,7 @@ declare <4 x float> @llvm.x86.avx512.mask.or.ps.128(<4 x float>, <4 x float>, <4
define <8 x float> @test_mask_or_ps_rr_256(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: test_mask_or_ps_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x56,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.or.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 -1)
@@ -740,7 +740,7 @@ define <8 x float> @test_mask_or_ps_rr_256(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test_mask_or_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x56,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -751,7 +751,7 @@ define <8 x float> @test_mask_or_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x
define <8 x float> @test_mask_or_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x56,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -761,7 +761,7 @@ define <8 x float> @test_mask_or_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8
define <8 x float> @test_mask_or_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_or_ps_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x float>, <8 x float>* %ptr_b
@@ -771,7 +771,7 @@ define <8 x float> @test_mask_or_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
define <8 x float> @test_mask_or_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x56,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -783,7 +783,7 @@ define <8 x float> @test_mask_or_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b,
define <8 x float> @test_mask_or_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -794,7 +794,7 @@ define <8 x float> @test_mask_or_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b
define <8 x float> @test_mask_or_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_or_ps_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -806,7 +806,7 @@ define <8 x float> @test_mask_or_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
define <8 x float> @test_mask_or_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x56,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -820,7 +820,7 @@ define <8 x float> @test_mask_or_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x
define <8 x float> @test_mask_or_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -835,7 +835,7 @@ declare <8 x float> @llvm.x86.avx512.mask.or.ps.256(<8 x float>, <8 x float>, <8
define <16 x float> @test_mask_or_ps_rr_512(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: test_mask_or_ps_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x56,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.mask.or.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> zeroinitializer, i16 -1)
@@ -844,7 +844,7 @@ define <16 x float> @test_mask_or_ps_rr_512(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_mask_or_ps_rrk_512(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x56,0xd1]
; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
@@ -855,7 +855,7 @@ define <16 x float> @test_mask_or_ps_rrk_512(<16 x float> %a, <16 x float> %b, <
define <16 x float> @test_mask_or_ps_rrkz_512(<16 x float> %a, <16 x float> %b, i16 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x56,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -865,7 +865,7 @@ define <16 x float> @test_mask_or_ps_rrkz_512(<16 x float> %a, <16 x float> %b,
define <16 x float> @test_mask_or_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_or_ps_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x float>, <16 x float>* %ptr_b
@@ -875,7 +875,7 @@ define <16 x float> @test_mask_or_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_
define <16 x float> @test_mask_or_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr_b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x56,0x0f]
; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -887,7 +887,7 @@ define <16 x float> @test_mask_or_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr
define <16 x float> @test_mask_or_ps_rmkz_512(<16 x float> %a, <16 x float>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -898,7 +898,7 @@ define <16 x float> @test_mask_or_ps_rmkz_512(<16 x float> %a, <16 x float>* %pt
define <16 x float> @test_mask_or_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_or_ps_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vorps (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x58,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -910,7 +910,7 @@ define <16 x float> @test_mask_or_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
define <16 x float> @test_mask_or_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x56,0x0f]
; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -924,7 +924,7 @@ define <16 x float> @test_mask_or_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <1
define <16 x float> @test_mask_or_ps_rmbkz_512(<16 x float> %a, float* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -939,7 +939,7 @@ declare <16 x float> @llvm.x86.avx512.mask.or.ps.512(<16 x float>, <16 x float>,
define <4 x float> @test_mask_xor_ps_rr_128(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test_mask_xor_ps_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.xor.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 -1)
@@ -948,7 +948,7 @@ define <4 x float> @test_mask_xor_ps_rr_128(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mask_xor_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x57,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -959,7 +959,7 @@ define <4 x float> @test_mask_xor_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4
define <4 x float> @test_mask_xor_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x57,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -969,7 +969,7 @@ define <4 x float> @test_mask_xor_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8
define <4 x float> @test_mask_xor_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_xor_ps_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x float>, <4 x float>* %ptr_b
@@ -979,7 +979,7 @@ define <4 x float> @test_mask_xor_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
define <4 x float> @test_mask_xor_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x57,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -991,7 +991,7 @@ define <4 x float> @test_mask_xor_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b
define <4 x float> @test_mask_xor_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1002,7 +1002,7 @@ define <4 x float> @test_mask_xor_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_
define <4 x float> @test_mask_xor_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_xor_ps_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -1014,7 +1014,7 @@ define <4 x float> @test_mask_xor_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
define <4 x float> @test_mask_xor_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x57,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -1028,7 +1028,7 @@ define <4 x float> @test_mask_xor_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4
define <4 x float> @test_mask_xor_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1043,7 +1043,7 @@ declare <4 x float> @llvm.x86.avx512.mask.xor.ps.128(<4 x float>, <4 x float>, <
define <8 x float> @test_mask_xor_ps_rr_256(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: test_mask_xor_ps_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x57,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.xor.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 -1)
@@ -1052,7 +1052,7 @@ define <8 x float> @test_mask_xor_ps_rr_256(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test_mask_xor_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x57,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -1063,7 +1063,7 @@ define <8 x float> @test_mask_xor_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8
define <8 x float> @test_mask_xor_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x57,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1073,7 +1073,7 @@ define <8 x float> @test_mask_xor_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8
define <8 x float> @test_mask_xor_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_xor_ps_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x float>, <8 x float>* %ptr_b
@@ -1083,7 +1083,7 @@ define <8 x float> @test_mask_xor_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
define <8 x float> @test_mask_xor_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x57,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -1095,7 +1095,7 @@ define <8 x float> @test_mask_xor_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b
define <8 x float> @test_mask_xor_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1106,7 +1106,7 @@ define <8 x float> @test_mask_xor_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_
define <8 x float> @test_mask_xor_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_xor_ps_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -1118,7 +1118,7 @@ define <8 x float> @test_mask_xor_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
define <8 x float> @test_mask_xor_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x57,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -1132,7 +1132,7 @@ define <8 x float> @test_mask_xor_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8
define <8 x float> @test_mask_xor_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1147,7 +1147,7 @@ declare <8 x float> @llvm.x86.avx512.mask.xor.ps.256(<8 x float>, <8 x float>, <
define <16 x float> @test_mask_xor_ps_rr_512(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: test_mask_xor_ps_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x57,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.mask.xor.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> zeroinitializer, i16 -1)
@@ -1156,7 +1156,7 @@ define <16 x float> @test_mask_xor_ps_rr_512(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_mask_xor_ps_rrk_512(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x57,0xd1]
; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
@@ -1167,7 +1167,7 @@ define <16 x float> @test_mask_xor_ps_rrk_512(<16 x float> %a, <16 x float> %b,
define <16 x float> @test_mask_xor_ps_rrkz_512(<16 x float> %a, <16 x float> %b, i16 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x57,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1177,7 +1177,7 @@ define <16 x float> @test_mask_xor_ps_rrkz_512(<16 x float> %a, <16 x float> %b,
define <16 x float> @test_mask_xor_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_b) {
; CHECK-LABEL: test_mask_xor_ps_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x float>, <16 x float>* %ptr_b
@@ -1187,7 +1187,7 @@ define <16 x float> @test_mask_xor_ps_rm_512(<16 x float> %a, <16 x float>* %ptr
define <16 x float> @test_mask_xor_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr_b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x57,0x0f]
; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -1199,7 +1199,7 @@ define <16 x float> @test_mask_xor_ps_rmk_512(<16 x float> %a, <16 x float>* %pt
define <16 x float> @test_mask_xor_ps_rmkz_512(<16 x float> %a, <16 x float>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1210,7 +1210,7 @@ define <16 x float> @test_mask_xor_ps_rmkz_512(<16 x float> %a, <16 x float>* %p
define <16 x float> @test_mask_xor_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
; CHECK-LABEL: test_mask_xor_ps_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x58,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -1222,7 +1222,7 @@ define <16 x float> @test_mask_xor_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
define <16 x float> @test_mask_xor_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <16 x float> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x57,0x0f]
; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -1236,7 +1236,7 @@ define <16 x float> @test_mask_xor_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <
define <16 x float> @test_mask_xor_ps_rmbkz_512(<16 x float> %a, float* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1251,7 +1251,7 @@ declare <16 x float> @llvm.x86.avx512.mask.xor.ps.512(<16 x float>, <16 x float>
define <8 x i64> @test_mask_mullo_epi64_rr_512(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mask_mullo_epi64_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x48,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.avx512.mask.pmull.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -1260,7 +1260,7 @@ define <8 x i64> @test_mask_mullo_epi64_rr_512(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @test_mask_mullo_epi64_rrk_512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x40,0xd1]
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
@@ -1271,7 +1271,7 @@ define <8 x i64> @test_mask_mullo_epi64_rrk_512(<8 x i64> %a, <8 x i64> %b, <8 x
define <8 x i64> @test_mask_mullo_epi64_rrkz_512(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1281,7 +1281,7 @@ define <8 x i64> @test_mask_mullo_epi64_rrkz_512(<8 x i64> %a, <8 x i64> %b, i8
define <8 x i64> @test_mask_mullo_epi64_rm_512(<8 x i64> %a, <8 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi64_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x48,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i64>, <8 x i64>* %ptr_b
@@ -1291,7 +1291,7 @@ define <8 x i64> @test_mask_mullo_epi64_rm_512(<8 x i64> %a, <8 x i64>* %ptr_b)
define <8 x i64> @test_mask_mullo_epi64_rmk_512(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x40,0x0f]
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
@@ -1303,7 +1303,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmk_512(<8 x i64> %a, <8 x i64>* %ptr_b,
define <8 x i64> @test_mask_mullo_epi64_rmkz_512(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1314,7 +1314,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmkz_512(<8 x i64> %a, <8 x i64>* %ptr_b
define <8 x i64> @test_mask_mullo_epi64_rmb_512(<8 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi64_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq (%rdi){1to8}, %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x58,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -1326,7 +1326,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmb_512(<8 x i64> %a, i64* %ptr_b) {
define <8 x i64> @test_mask_mullo_epi64_rmbk_512(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to8}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x59,0x40,0x0f]
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
@@ -1340,7 +1340,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmbk_512(<8 x i64> %a, i64* %ptr_b, <8 x
define <8 x i64> @test_mask_mullo_epi64_rmbkz_512(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xd9,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1354,7 +1354,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmull.q.512(<8 x i64>, <8 x i64>, <8 x i
define <4 x i64> @test_mask_mullo_epi64_rr_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_mask_mullo_epi64_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.pmull.q.256(<4 x i64> %a, <4 x i64> %b, <4 x i64> zeroinitializer, i8 -1)
@@ -1363,7 +1363,7 @@ define <4 x i64> @test_mask_mullo_epi64_rr_256(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @test_mask_mullo_epi64_rrk_256(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x40,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1374,7 +1374,7 @@ define <4 x i64> @test_mask_mullo_epi64_rrk_256(<4 x i64> %a, <4 x i64> %b, <4 x
define <4 x i64> @test_mask_mullo_epi64_rrkz_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1384,7 +1384,7 @@ define <4 x i64> @test_mask_mullo_epi64_rrkz_256(<4 x i64> %a, <4 x i64> %b, i8
define <4 x i64> @test_mask_mullo_epi64_rm_256(<4 x i64> %a, <4 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi64_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq (%rdi), %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i64>, <4 x i64>* %ptr_b
@@ -1394,7 +1394,7 @@ define <4 x i64> @test_mask_mullo_epi64_rm_256(<4 x i64> %a, <4 x i64>* %ptr_b)
define <4 x i64> @test_mask_mullo_epi64_rmk_256(<4 x i64> %a, <4 x i64>* %ptr_b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x40,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1406,7 +1406,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmk_256(<4 x i64> %a, <4 x i64>* %ptr_b,
define <4 x i64> @test_mask_mullo_epi64_rmkz_256(<4 x i64> %a, <4 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1417,7 +1417,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmkz_256(<4 x i64> %a, <4 x i64>* %ptr_b
define <4 x i64> @test_mask_mullo_epi64_rmb_256(<4 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi64_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x38,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -1429,7 +1429,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmb_256(<4 x i64> %a, i64* %ptr_b) {
define <4 x i64> @test_mask_mullo_epi64_rmbk_256(<4 x i64> %a, i64* %ptr_b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to4}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x39,0x40,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1443,7 +1443,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmbk_256(<4 x i64> %a, i64* %ptr_b, <4 x
define <4 x i64> @test_mask_mullo_epi64_rmbkz_256(<4 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xb9,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1458,7 +1458,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmull.q.256(<4 x i64>, <4 x i64>, <4 x i
define <2 x i64> @test_mask_mullo_epi64_rr_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_mask_mullo_epi64_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.pmull.q.128(<2 x i64> %a, <2 x i64> %b, <2 x i64> zeroinitializer, i8 -1)
@@ -1467,7 +1467,7 @@ define <2 x i64> @test_mask_mullo_epi64_rr_128(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @test_mask_mullo_epi64_rrk_128(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x40,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1478,7 +1478,7 @@ define <2 x i64> @test_mask_mullo_epi64_rrk_128(<2 x i64> %a, <2 x i64> %b, <2 x
define <2 x i64> @test_mask_mullo_epi64_rrkz_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1488,7 +1488,7 @@ define <2 x i64> @test_mask_mullo_epi64_rrkz_128(<2 x i64> %a, <2 x i64> %b, i8
define <2 x i64> @test_mask_mullo_epi64_rm_128(<2 x i64> %a, <2 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi64_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq (%rdi), %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <2 x i64>, <2 x i64>* %ptr_b
@@ -1498,7 +1498,7 @@ define <2 x i64> @test_mask_mullo_epi64_rm_128(<2 x i64> %a, <2 x i64>* %ptr_b)
define <2 x i64> @test_mask_mullo_epi64_rmk_128(<2 x i64> %a, <2 x i64>* %ptr_b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x40,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1510,7 +1510,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmk_128(<2 x i64> %a, <2 x i64>* %ptr_b,
define <2 x i64> @test_mask_mullo_epi64_rmkz_128(<2 x i64> %a, <2 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1521,7 +1521,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmkz_128(<2 x i64> %a, <2 x i64>* %ptr_b
define <2 x i64> @test_mask_mullo_epi64_rmb_128(<2 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi64_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmullq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x18,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -1533,7 +1533,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmb_128(<2 x i64> %a, i64* %ptr_b) {
define <2 x i64> @test_mask_mullo_epi64_rmbk_128(<2 x i64> %a, i64* %ptr_b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to2}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0x40,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1547,7 +1547,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmbk_128(<2 x i64> %a, i64* %ptr_b, <2 x
define <2 x i64> @test_mask_mullo_epi64_rmbkz_128(<2 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x99,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1564,7 +1564,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double>, i32,
define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_256(<4 x double> %x0, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vextractf64x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc2,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x19,0xc1,0x01]
@@ -1584,7 +1584,7 @@ declare <4 x double> @llvm.x86.avx512.mask.insertf64x2.256(<4 x double>, <2 x do
define <4 x double>@test_int_x86_avx512_mask_insertf64x2_256(<4 x double> %x0, <2 x double> %x1, <4 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf64x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xd1,0x01]
@@ -1604,7 +1604,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.inserti64x2.256(<4 x i64>, <2 x i64>, i3
define <4 x i64>@test_int_x86_avx512_mask_inserti64x2_256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti64x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xd1,0x01]
@@ -1624,7 +1624,7 @@ declare <4 x i32> @llvm.x86.avx512.cvtmask2d.128(i8)
define <4 x i32>@test_int_x86_avx512_cvtmask2d_128(i8 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
; CHECK-NEXT: vpmovm2d %k0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1636,7 +1636,7 @@ declare <8 x i32> @llvm.x86.avx512.cvtmask2d.256(i8)
define <8 x i32>@test_int_x86_avx512_cvtmask2d_256(i8 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
; CHECK-NEXT: vpmovm2d %k0, %ymm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x38,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1648,7 +1648,7 @@ declare <2 x i64> @llvm.x86.avx512.cvtmask2q.128(i8)
define <2 x i64>@test_int_x86_avx512_cvtmask2q_128(i8 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
; CHECK-NEXT: vpmovm2q %k0, %xmm0 ## encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1660,7 +1660,7 @@ declare <4 x i64> @llvm.x86.avx512.cvtmask2q.256(i8)
define <4 x i64>@test_int_x86_avx512_cvtmask2q_256(i8 %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
; CHECK-NEXT: vpmovm2q %k0, %ymm0 ## encoding: [0x62,0xf2,0xfe,0x28,0x38,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1672,7 +1672,7 @@ declare <4 x double> @llvm.x86.avx512.mask.broadcastf64x2.256(<2 x double>, <4 x
define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0, <4 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1692,7 +1692,7 @@ define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0
define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256_load(<2 x double>* %x0ptr, <4 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vbroadcastf64x2 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x1a,0x07]
; CHECK-NEXT: ## ymm0 {%k1} = mem[0,1,0,1]
@@ -1707,7 +1707,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.broadcasti64x2.256(<2 x i64>, <4 x i64>,
define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1727,7 +1727,7 @@ define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x
define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256_load(<2 x i64>* %x0ptr, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vbroadcasti64x2 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x5a,0x07]
; CHECK-NEXT: ## ymm0 {%k1} = mem[0,1,0,1]
@@ -1742,7 +1742,7 @@ declare <8 x float> @llvm.x86.avx512.mask.broadcastf32x2.256(<4 x float>, <8 x f
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1763,7 +1763,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.broadcasti32x2.256(<4 x i32>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x2_256(<4 x i32> %x0, <8 x i32> %x2, i8 %x3, i64 * %y_ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vmovq (%rsi), %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x16]
; CHECK-NEXT: ## xmm2 = mem[0],zero
@@ -1789,7 +1789,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.broadcasti32x2.128(<4 x i32>, <4 x i32>,
define <4 x i32>@test_int_x86_avx512_mask_broadcasti32x2_128(<4 x i32> %x0, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6f,0xc8]
; CHECK-NEXT: vmovdqa32 %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6f,0xd0]
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics.ll b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
index 863d2341737..71e8fb67568 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
@@ -5,7 +5,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvtpd2qq.128(<2 x double>, <2 x i64>, i8
define <2 x i64>@test_int_x86_avx512_mask_cvt_pd2qq_128(<2 x double> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2qq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2qq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x7b,0xc8]
; CHECK-NEXT: vcvtpd2qq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x7b,0xc0]
@@ -21,7 +21,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvtpd2qq.256(<4 x double>, <4 x i64>, i8
define <4 x i64>@test_int_x86_avx512_mask_cvt_pd2qq_256(<4 x double> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2qq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2qq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x7b,0xc8]
; CHECK-NEXT: vcvtpd2qq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x7b,0xc0]
@@ -37,7 +37,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvtpd2uqq.128(<2 x double>, <2 x i64>, i
define <2 x i64>@test_int_x86_avx512_mask_cvt_pd2uqq_128(<2 x double> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2uqq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2uqq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x79,0xc8]
; CHECK-NEXT: vcvtpd2uqq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x79,0xc0]
@@ -53,7 +53,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvtpd2uqq.256(<4 x double>, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_cvt_pd2uqq_256(<4 x double> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2uqq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2uqq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x79,0xc8]
; CHECK-NEXT: vcvtpd2uqq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x79,0xc0]
@@ -69,7 +69,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvtps2qq.128(<4 x float>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_cvt_ps2qq_128(<4 x float> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2qq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2qq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x7b,0xc8]
; CHECK-NEXT: vcvtps2qq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x7b,0xc0]
@@ -85,7 +85,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvtps2qq.256(<4 x float>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_cvt_ps2qq_256(<4 x float> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2qq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2qq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x7b,0xc8]
; CHECK-NEXT: vcvtps2qq %xmm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x7b,0xc0]
@@ -101,7 +101,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvtps2uqq.128(<4 x float>, <2 x i64>, i8
define <2 x i64>@test_int_x86_avx512_mask_cvt_ps2uqq_128(<4 x float> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2uqq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2uqq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x79,0xc8]
; CHECK-NEXT: vcvtps2uqq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x79,0xc0]
@@ -117,7 +117,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvtps2uqq.256(<4 x float>, <4 x i64>, i8
define <4 x i64>@test_int_x86_avx512_mask_cvt_ps2uqq_256(<4 x float> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2uqq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2uqq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x79,0xc8]
; CHECK-NEXT: vcvtps2uqq %xmm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x79,0xc0]
@@ -133,7 +133,7 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtqq2pd.128(<2 x i64>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_cvt_qq2pd_128(<2 x i64> %x0, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0xe6,0xc8]
; CHECK-NEXT: vcvtqq2pd %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfe,0x08,0xe6,0xc0]
@@ -149,7 +149,7 @@ declare <4 x double> @llvm.x86.avx512.mask.cvtqq2pd.256(<4 x i64>, <4 x double>,
define <4 x double>@test_int_x86_avx512_mask_cvt_qq2pd_256(<4 x i64> %x0, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2pd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0xe6,0xc8]
; CHECK-NEXT: vcvtqq2pd %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfe,0x28,0xe6,0xc0]
@@ -165,7 +165,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtqq2ps.128(<2 x i64>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_cvt_qq2ps_128(<2 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x09,0x5b,0xc8]
; CHECK-NEXT: vcvtqq2ps %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfc,0x08,0x5b,0xc0]
@@ -179,7 +179,7 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_qq2ps_128(<2 x i64> %x0, <4 x fl
define <4 x float>@test_int_x86_avx512_mask_cvt_qq2ps_128_zext(<2 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2ps_128_zext:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x09,0x5b,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
@@ -199,7 +199,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtqq2ps.256(<4 x i64>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_cvt_qq2ps_256(<4 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2ps %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x29,0x5b,0xc8]
; CHECK-NEXT: vcvtqq2ps %ymm0, %xmm0 ## encoding: [0x62,0xf1,0xfc,0x28,0x5b,0xc0]
@@ -215,7 +215,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvttpd2qq.128(<2 x double>, <2 x i64>, i
define <2 x i64>@test_int_x86_avx512_mask_cvtt_pd2qq_128(<2 x double> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2qq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2qq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvttpd2qq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x7a,0xc0]
@@ -231,7 +231,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvttpd2qq.256(<4 x double>, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_cvtt_pd2qq_256(<4 x double> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2qq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2qq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvttpd2qq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x7a,0xc0]
@@ -247,7 +247,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvttpd2uqq.128(<2 x double>, <2 x i64>,
define <2 x i64>@test_int_x86_avx512_mask_cvtt_pd2uqq_128(<2 x double> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2uqq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2uqq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x78,0xc8]
; CHECK-NEXT: vcvttpd2uqq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x78,0xc0]
@@ -263,7 +263,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvttpd2uqq.256(<4 x double>, <4 x i64>,
define <4 x i64>@test_int_x86_avx512_mask_cvtt_pd2uqq_256(<4 x double> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2uqq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2uqq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x78,0xc8]
; CHECK-NEXT: vcvttpd2uqq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x78,0xc0]
@@ -279,7 +279,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvttps2qq.128(<4 x float>, <2 x i64>, i8
define <2 x i64>@test_int_x86_avx512_mask_cvtt_ps2qq_128(<4 x float> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2qq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2qq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvttps2qq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x7a,0xc0]
@@ -295,7 +295,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvttps2qq.256(<4 x float>, <4 x i64>, i8
define <4 x i64>@test_int_x86_avx512_mask_cvtt_ps2qq_256(<4 x float> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2qq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2qq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvttps2qq %xmm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x7a,0xc0]
@@ -311,7 +311,7 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtuqq2pd.128(<2 x i64>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask_cvt_uqq2pd_128(<2 x i64> %x0, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvtuqq2pd %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfe,0x08,0x7a,0xc0]
@@ -327,7 +327,7 @@ declare <4 x double> @llvm.x86.avx512.mask.cvtuqq2pd.256(<4 x i64>, <4 x double>
define <4 x double>@test_int_x86_avx512_mask_cvt_uqq2pd_256(<4 x i64> %x0, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2pd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvtuqq2pd %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfe,0x28,0x7a,0xc0]
@@ -343,7 +343,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtuqq2ps.128(<2 x i64>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_128(<2 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvtuqq2ps %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x7a,0xc0]
@@ -357,7 +357,7 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_128(<2 x i64> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_128_zext(<2 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2ps_128_zext:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x7a,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
@@ -377,7 +377,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtuqq2ps.256(<4 x i64>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_256(<4 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2ps %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvtuqq2ps %ymm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x28,0x7a,0xc0]
@@ -393,7 +393,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvttps2uqq.128(<4 x float>, <2 x i64>, i
define <2 x i64>@test_int_x86_avx512_mask_cvtt_ps2uqq_128(<4 x float> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2uqq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2uqq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x78,0xc8]
; CHECK-NEXT: vcvttps2uqq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x78,0xc0]
@@ -409,7 +409,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvttps2uqq.256(<4 x float>, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_cvtt_ps2uqq_256(<4 x float> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2uqq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2uqq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x78,0xc8]
; CHECK-NEXT: vcvttps2uqq %xmm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x78,0xc0]
@@ -425,7 +425,7 @@ declare <2 x double> @llvm.x86.avx512.mask.reduce.pd.128(<2 x double>, i32, <2 x
define <2 x double>@test_int_x86_avx512_mask_reduce_pd_128(<2 x double> %x0, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vreducepd $4, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x56,0xc8,0x04]
; CHECK-NEXT: vreducepd $8, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x56,0xc0,0x08]
@@ -441,7 +441,7 @@ declare <4 x double> @llvm.x86.avx512.mask.reduce.pd.256(<4 x double>, i32, <4 x
define <4 x double>@test_int_x86_avx512_mask_reduce_pd_256(<4 x double> %x0, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vreducepd $4, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x56,0xc8,0x04]
; CHECK-NEXT: vreducepd $0, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x56,0xc0,0x00]
@@ -457,7 +457,7 @@ declare <4 x float> @llvm.x86.avx512.mask.reduce.ps.128(<4 x float>, i32, <4 x f
define <4 x float>@test_int_x86_avx512_mask_reduce_ps_128(<4 x float> %x0, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vreduceps $4, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x56,0xc8,0x04]
; CHECK-NEXT: vreduceps $88, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x56,0xc0,0x58]
@@ -473,7 +473,7 @@ declare <8 x float> @llvm.x86.avx512.mask.reduce.ps.256(<8 x float>, i32, <8 x f
define <8 x float>@test_int_x86_avx512_mask_reduce_ps_256(<8 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vreduceps $11, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x56,0xc8,0x0b]
; CHECK-NEXT: vreduceps $11, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x56,0xc0,0x0b]
@@ -489,7 +489,7 @@ declare <2 x double> @llvm.x86.avx512.mask.range.pd.128(<2 x double>, <2 x doubl
define <2 x double>@test_int_x86_avx512_mask_range_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrangepd $4, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x50,0xd1,0x04]
; CHECK-NEXT: vrangepd $8, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x50,0xc1,0x08]
@@ -505,7 +505,7 @@ declare <4 x double> @llvm.x86.avx512.mask.range.pd.256(<4 x double>, <4 x doubl
define <4 x double>@test_int_x86_avx512_mask_range_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrangepd $4, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x50,0xd1,0x04]
; CHECK-NEXT: vrangepd $88, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x50,0xc1,0x58]
@@ -521,7 +521,7 @@ declare <4 x float> @llvm.x86.avx512.mask.range.ps.128(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_range_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrangeps $4, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x50,0xd1,0x04]
; CHECK-NEXT: vrangeps $88, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x50,0xc1,0x58]
@@ -537,7 +537,7 @@ declare <8 x float> @llvm.x86.avx512.mask.range.ps.256(<8 x float>, <8 x float>,
define <8 x float>@test_int_x86_avx512_mask_range_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrangeps $4, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x50,0xd1,0x04]
; CHECK-NEXT: vrangeps $88, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x50,0xc1,0x58]
@@ -553,7 +553,7 @@ declare i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_ps_128(<4 x float> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclassps $2, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x66,0xc0,0x02]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
@@ -572,7 +572,7 @@ declare i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_ps_256(<8 x float> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclassps $2, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x66,0xc0,0x02]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
@@ -591,7 +591,7 @@ declare i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_pd_128(<2 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclasspd $4, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
@@ -610,7 +610,7 @@ declare i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_pd_256(<4 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclasspd $2, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x66,0xc0,0x02]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
@@ -629,7 +629,7 @@ declare i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32>)
define i8@test_int_x86_avx512_cvtd2mask_128(<4 x i32> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtd2mask_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -642,7 +642,7 @@ declare i8 @llvm.x86.avx512.cvtd2mask.256(<8 x i32>)
define i8@test_int_x86_avx512_cvtd2mask_256(<8 x i32> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtd2mask_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -655,7 +655,7 @@ declare i8 @llvm.x86.avx512.cvtq2mask.128(<2 x i64>)
define i8@test_int_x86_avx512_cvtq2mask_128(<2 x i64> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtq2mask_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -668,7 +668,7 @@ declare i8 @llvm.x86.avx512.cvtq2mask.256(<4 x i64>)
define i8@test_int_x86_avx512_cvtq2mask_256(<4 x i64> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtq2mask_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
diff --git a/test/CodeGen/X86/avx512er-intrinsics.ll b/test/CodeGen/X86/avx512er-intrinsics.ll
index 2e71b9aecd3..cbb06dfbea6 100644
--- a/test/CodeGen/X86/avx512er-intrinsics.ll
+++ b/test/CodeGen/X86/avx512er-intrinsics.ll
@@ -3,7 +3,7 @@
define <16 x float> @test_rsqrt28_ps(<16 x float> %a0) {
; CHECK-LABEL: test_rsqrt28_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcc,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
@@ -12,7 +12,7 @@ define <16 x float> @test_rsqrt28_ps(<16 x float> %a0) {
define <16 x float> @test1_rsqrt28_ps(<16 x float> %a0, <16 x float> %a1) {
; CHECK-LABEL: test1_rsqrt28_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcc,0xc8]
@@ -24,7 +24,7 @@ define <16 x float> @test1_rsqrt28_ps(<16 x float> %a0, <16 x float> %a1) {
define <16 x float> @test2_rsqrt28_ps(<16 x float> %a0) {
; CHECK-LABEL: test2_rsqrt28_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; CHECK-NEXT: vrsqrt28ps %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xcc,0xc0]
@@ -35,7 +35,7 @@ define <16 x float> @test2_rsqrt28_ps(<16 x float> %a0) {
define <16 x float> @test3_rsqrt28_ps(<16 x float> %a0) {
; CHECK-LABEL: test3_rsqrt28_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; CHECK-NEXT: vrsqrt28ps %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xcc,0xc0]
@@ -46,7 +46,7 @@ define <16 x float> @test3_rsqrt28_ps(<16 x float> %a0) {
define <16 x float> @test4_rsqrt28_ps(<16 x float> %a0) {
; CHECK-LABEL: test4_rsqrt28_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcc,0xc0]
@@ -60,7 +60,7 @@ declare <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float>, <16 x float>, i16
define <16 x float> @test_rcp28_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_rcp28_ps_512:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrcp28ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xca,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
@@ -70,7 +70,7 @@ declare <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float>, <16 x float>, i16,
define <8 x double> @test_rcp28_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_rcp28_pd_512:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrcp28pd {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x18,0xca,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
@@ -80,7 +80,7 @@ declare <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double>, <8 x double>, i8, i
define <16 x float> @test_exp2_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_exp2_ps_512:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vexp2ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xc8,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.exp2.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
@@ -90,7 +90,7 @@ declare <16 x float> @llvm.x86.avx512.exp2.ps(<16 x float>, <16 x float>, i16, i
define <8 x double> @test_exp2_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_exp2_pd_512:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vexp2pd {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x18,0xc8,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x double> @llvm.x86.avx512.exp2.pd(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
@@ -100,7 +100,7 @@ declare <8 x double> @llvm.x86.avx512.exp2.pd(<8 x double>, <8 x double>, i8, i3
define <4 x float> @test_rsqrt28_ss(<4 x float> %a0) {
; CHECK-LABEL: test_rsqrt28_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
@@ -110,7 +110,7 @@ declare <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float>, <4 x float>, <4 x f
define <4 x float> @test_rcp28_ss(<4 x float> %a0) {
; CHECK-LABEL: test_rcp28_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrcp28ss {sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcb,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
@@ -120,7 +120,7 @@ declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x flo
define <4 x float> @test_rcp28_ss_load(<4 x float> %a0, <4 x float>* %a1ptr) {
; CHECK-LABEL: test_rcp28_ss_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrcp28ss (%rdi), %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0xcb,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
%a1 = load <4 x float>, <4 x float>* %a1ptr
@@ -130,7 +130,7 @@ define <4 x float> @test_rcp28_ss_load(<4 x float> %a0, <4 x float>* %a1ptr) {
define <4 x float> @test_rsqrt28_ss_load(<4 x float> %a0, <4 x float>* %a1ptr) {
; CHECK-LABEL: test_rsqrt28_ss_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vrsqrt28ss (%rdi), %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0xcd,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
%a1 = load <4 x float>, <4 x float>* %a1ptr
@@ -140,7 +140,7 @@ define <4 x float> @test_rsqrt28_ss_load(<4 x float> %a0, <4 x float>* %a1ptr) {
define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_maskz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -150,7 +150,7 @@ define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_mask:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
@@ -161,7 +161,7 @@ define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x
define <2 x double> @test_rcp28_sd_mask_load(<2 x double> %a0, <2 x double>* %a1ptr, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_rcp28_sd_mask_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrcp28sd %xmm0, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xcb,0xc8]
; CHECK-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
@@ -174,7 +174,7 @@ declare <2 x double> @llvm.x86.avx512.rcp28.sd(<2 x double>, <2 x double>, <2 x
define <2 x double> @test_rsqrt28_sd_maskz_load(<2 x double> %a0, <2 x double>* %a1ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -185,7 +185,7 @@ define <2 x double> @test_rsqrt28_sd_maskz_load(<2 x double> %a0, <2 x double>*
define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -195,7 +195,7 @@ define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
define <2 x double> @test_rsqrt28_sd_mask(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_mask:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
@@ -208,7 +208,7 @@ declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2
define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -220,7 +220,7 @@ define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i
define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem_offset:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
; CHECK-NEXT: retq # encoding: [0xc3]
diff --git a/test/CodeGen/X86/avx512f-vec-test-testn.ll b/test/CodeGen/X86/avx512f-vec-test-testn.ll
index 7067c902d37..07dc9b8116b 100644
--- a/test/CodeGen/X86/avx512f-vec-test-testn.ll
+++ b/test/CodeGen/X86/avx512f-vec-test-testn.ll
@@ -4,7 +4,7 @@
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm512_test_epi64_mask(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_test_epi64_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -20,7 +20,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm512_test_epi32_mask(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_test_epi32_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -37,7 +37,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm512_mask_test_epi64_mask(i8 %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_mask_test_epi64_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -56,7 +56,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm512_mask_test_epi32_mask(i16 %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_mask_test_epi32_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -76,7 +76,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm512_testn_epi64_mask(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_testn_epi64_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -92,7 +92,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm512_testn_epi32_mask(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_testn_epi32_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -109,7 +109,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm512_mask_testn_epi64_mask(i8 %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_mask_testn_epi64_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
@@ -128,7 +128,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i16 @TEST_mm512_mask_testn_epi32_mask(i16 %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
; CHECK-LABEL: TEST_mm512_mask_testn_epi32_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
diff --git a/test/CodeGen/X86/avx512ifma-intrinsics.ll b/test/CodeGen/X86/avx512ifma-intrinsics.ll
index 8a0f8d9df62..1217138b226 100644
--- a/test/CodeGen/X86/avx512ifma-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifma-intrinsics.ll
@@ -5,7 +5,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64>, <8 x i64>, <
define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm3
@@ -33,7 +33,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm3
@@ -61,7 +61,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64>, <8 x i64>, <
define <8 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm3
@@ -89,7 +89,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm3
@@ -115,7 +115,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_512(<8 x i64> %x0, <8 x
define <8 x i64>@test_int_x86_avx512_vpmadd52h_uq_512_load(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2ptr) {
; CHECK-LABEL: test_int_x86_avx512_vpmadd52h_uq_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmadd52huq (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -126,7 +126,7 @@ define <8 x i64>@test_int_x86_avx512_vpmadd52h_uq_512_load(<8 x i64> %x0, <8 x i
define <8 x i64>@test_int_x86_avx512_vpmadd52h_uq_512_load_bcast(<8 x i64> %x0, <8 x i64> %x1, i64* %x2ptr) {
; CHECK-LABEL: test_int_x86_avx512_vpmadd52h_uq_512_load_bcast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmadd52huq (%rdi){1to8}, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -139,7 +139,7 @@ define <8 x i64>@test_int_x86_avx512_vpmadd52h_uq_512_load_bcast(<8 x i64> %x0,
define <8 x i64>@test_int_x86_avx512_vpmadd52h_uq_512_load_commute(<8 x i64> %x0, <8 x i64>* %x1ptr, <8 x i64> %x2) {
; CHECK-LABEL: test_int_x86_avx512_vpmadd52h_uq_512_load_commute:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmadd52huq (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -150,7 +150,7 @@ define <8 x i64>@test_int_x86_avx512_vpmadd52h_uq_512_load_commute(<8 x i64> %x0
define <8 x i64>@test_int_x86_avx512_vpmadd52h_uq_512_load_commute_bcast(<8 x i64> %x0, i64* %x1ptr, <8 x i64> %x2) {
; CHECK-LABEL: test_int_x86_avx512_vpmadd52h_uq_512_load_commute_bcast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmadd52huq (%rdi){1to8}, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -163,7 +163,7 @@ define <8 x i64>@test_int_x86_avx512_vpmadd52h_uq_512_load_commute_bcast(<8 x i6
define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512_load(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmadd52huq (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -175,7 +175,7 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512_load(<8 x i64> %x0, <
define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512_load_bcast(<8 x i64> %x0, <8 x i64> %x1, i64* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_512_load_bcast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmadd52huq (%rdi){1to8}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -189,7 +189,7 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512_load_bcast(<8 x i64>
define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512_load_commute(<8 x i64> %x0, <8 x i64>* %x1ptr, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_512_load_commute:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmadd52huq (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -201,7 +201,7 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512_load_commute(<8 x i64
define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512_load_commute_bcast(<8 x i64> %x0, i64* %x1ptr, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_512_load_commute_bcast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmadd52huq (%rdi){1to8}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -215,7 +215,7 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512_load_commute_bcast(<8
define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512_load(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_512_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmadd52huq (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -227,7 +227,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512_load(<8 x i64> %x0,
define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512_load_bcast(<8 x i64> %x0, <8 x i64> %x1, i64* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_512_load_bcast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmadd52huq (%rdi){1to8}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -241,7 +241,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512_load_bcast(<8 x i64>
define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512_load_commute(<8 x i64> %x0, <8 x i64>* %x1ptr, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_512_load_commute:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmadd52huq (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -253,7 +253,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512_load_commute(<8 x i6
define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512_load_commute_bcast(<8 x i64> %x0, i64* %x1ptr, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_512_load_commute_bcast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmadd52huq (%rdi){1to8}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
index 69930746f22..40312c9f524 100644
--- a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
@@ -5,7 +5,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <
define <2 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm3
@@ -33,7 +33,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <
define <4 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm3
@@ -61,7 +61,7 @@ declare <2 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.128(<2 x i64>, <2 x i64>,
define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm3
@@ -89,7 +89,7 @@ declare <4 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.256(<4 x i64>, <4 x i64>,
define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm3
@@ -117,7 +117,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <
define <2 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm3
@@ -145,7 +145,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <
define <4 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm3
@@ -173,7 +173,7 @@ declare <2 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.128(<2 x i64>, <2 x i64>,
define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm3
@@ -201,7 +201,7 @@ declare <4 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.256(<4 x i64>, <4 x i64>,
define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm3
diff --git a/test/CodeGen/X86/avx512vbmi-intrinsics.ll b/test/CodeGen/X86/avx512vbmi-intrinsics.ll
index 12a422a69cf..25f62a49784 100644
--- a/test/CodeGen/X86/avx512vbmi-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmi-intrinsics.ll
@@ -4,7 +4,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8>, <64 x i8>, <64
define <64 x i8>@test_int_x86_avx512_mask_permvar_qi_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_qi_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vpermb %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vpermb %zmm0, %zmm1, %zmm3 {%k1} {z}
@@ -24,7 +24,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmultishift.qb.512(<64 x i8>, <64 x i8>,
define <64 x i8>@test_int_x86_avx512_mask_pmultishift_qb_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmultishift_qb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vpmultishiftqb %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpmultishiftqb %zmm1, %zmm0, %zmm3 {%k1} {z}
@@ -44,7 +44,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8>, <64 x i8>,
define <64 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_qi_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
; CHECK-NEXT: vpermi2b %zmm2, %zmm0, %zmm3 {%k1}
@@ -66,7 +66,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.512(<64 x i8>, <64 x i8>,
define <64 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_qi_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
; CHECK-NEXT: vpermt2b %zmm2, %zmm0, %zmm3 {%k1}
@@ -88,7 +88,7 @@ declare <64 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.512(<64 x i8>, <64 x i8>,
define <64 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vpermi2b %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512vbmi2-intrinsics.ll b/test/CodeGen/X86/avx512vbmi2-intrinsics.ll
index 742340d8939..7d307a82867 100644
--- a/test/CodeGen/X86/avx512vbmi2-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmi2-intrinsics.ll
@@ -3,7 +3,7 @@
define <32 x i16> @test_expand_load_w_512(i8* %addr, <32 x i16> %data, i32 %mask) {
; CHECK-LABEL: test_expand_load_w_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandw (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -14,7 +14,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.expand.load.w.512(i8* %addr, <32 x i16>
define void @test_compress_store_w_512(i8* %addr, <32 x i16> %data, i32 %mask) {
; CHECK-LABEL: test_compress_store_w_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressw %zmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -25,7 +25,7 @@ declare void @llvm.x86.avx512.mask.compress.store.w.512(i8* %addr, <32 x i16> %d
define <64 x i8> @test_expand_load_b_512(i8* %addr, <64 x i8> %data, i64 %mask) {
; CHECK-LABEL: test_expand_load_b_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: vpexpandb (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -36,7 +36,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.expand.load.b.512(i8* %addr, <64 x i8> %
define void @test_compress_store_b_512(i8* %addr, <64 x i8> %data, i64 %mask) {
; CHECK-LABEL: test_compress_store_b_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: vpcompressb %zmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -47,7 +47,7 @@ declare void @llvm.x86.avx512.mask.compress.store.b.512(i8* %addr, <64 x i8> %da
define <32 x i16> @test_compress_w_512(<32 x i16> %data, <32 x i16> %src, i32 %mask) {
; CHECK-LABEL: test_compress_w_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressw %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -59,7 +59,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.compress.w.512(<32 x i16>, <32 x i16>,
define <64 x i8> @test_compress_b_512(<64 x i8> %data, <64 x i8> %src, i64 %mask) {
; CHECK-LABEL: test_compress_b_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vpcompressb %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -71,7 +71,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.compress.b.512(<64 x i8>, <64 x i8>, i64
define <32 x i16> @test_expand_w_512(i8* %addr, <32 x i16> %data, i32 %mask) {
; CHECK-LABEL: test_expand_w_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandw %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -82,7 +82,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.expand.w.512(<32 x i16>, <32 x i16>, i3
define <64 x i8> @test_expand_b_512(i8* %addr, <64 x i8> %data, i64 %mask) {
; CHECK-LABEL: test_expand_b_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: vpexpandb %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -93,7 +93,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.expand.b.512(<64 x i8>, <64 x i8>, i64)
define <16 x i32>@test_int_x86_avx512_mask_vpshld_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldd $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshldd $22, %zmm1, %zmm0, %zmm0
@@ -108,7 +108,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32>, <16 x i32>, i3
define <8 x i64>@test_int_x86_avx512_mask_vpshld_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldq $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshldq $22, %zmm1, %zmm0, %zmm0
@@ -123,7 +123,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpshld.q.512(<8 x i64>, <8 x i64>, i32,
define <32 x i16>@test_int_x86_avx512_mask_vpshld_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_w_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldw $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshldw $22, %zmm1, %zmm0, %zmm0
@@ -138,7 +138,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16>, <32 x i16>, i3
define <16 x i32>@test_int_x86_avx512_mask_vpshrd_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdd $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshrdd $22, %zmm1, %zmm0, %zmm0
@@ -153,7 +153,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpshrd.d.512(<16 x i32>, <16 x i32>, i3
define <8 x i64>@test_int_x86_avx512_mask_vpshrd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdq $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshrdq $22, %zmm1, %zmm0, %zmm0
@@ -168,7 +168,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpshrd.q.512(<8 x i64>, <8 x i64>, i32,
define <32 x i16>@test_int_x86_avx512_mask_vpshrd_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_w_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdw $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshrdw $22, %zmm1, %zmm0, %zmm0
@@ -186,7 +186,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_vpshrdv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshrdvd (%rdi), %zmm1, %zmm3 {%k1}
@@ -210,7 +210,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_mask_vpshrdv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshrdvq (%rdi), %zmm1, %zmm3 {%k1}
@@ -234,7 +234,7 @@ declare <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16>, <32 x i16>,
define <32 x i16>@test_int_x86_avx512_mask_vpshrdv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_w_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshrdvw (%rdi), %zmm1, %zmm3 {%k1}
@@ -258,7 +258,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_vpshldv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_d_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshldvd (%rdi), %zmm1, %zmm3 {%k1}
@@ -282,7 +282,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_mask_vpshldv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_q_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshldvq (%rdi), %zmm1, %zmm3 {%k1}
@@ -306,7 +306,7 @@ declare <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16>, <32 x i16>,
define <32 x i16>@test_int_x86_avx512_mask_vpshldv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_w_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshldvw (%rdi), %zmm1, %zmm3 {%k1}
diff --git a/test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll b/test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll
index c1b0f962f17..800568b9ff3 100644
--- a/test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll
@@ -3,7 +3,7 @@
define <16 x i16> @test_compress_w_256(<16 x i16> %src, <16 x i16> %data, i16 %mask) {
; CHECK-LABEL: test_compress_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressw %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -14,7 +14,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16>, <16 x i16>,
define <8 x i16> @test_compress_w_128(<8 x i16> %data, i8 %mask) {
; CHECK-LABEL: test_compress_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressw %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -25,7 +25,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16>, <8 x i16>, i8)
define <32 x i8> @test_compress_b_256(<32 x i8> %src, <32 x i8> %data, i32 %mask) {
; CHECK-LABEL: test_compress_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressb %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -36,7 +36,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8>, <32 x i8>, i32
define <16 x i8> @test_compress_b_128(<16 x i8> %data, i16 %mask) {
; CHECK-LABEL: test_compress_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -47,7 +47,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8>, <16 x i8>, i16
define <32 x i8> @test_expand_b_256(<32 x i8> %data, <32 x i8> %src, i32 %mask) {
; CHECK-LABEL: test_expand_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpexpandb %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -59,7 +59,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8>, <32 x i8>, i32)
define <16 x i8> @test_expand_b_128(<16 x i8> %data, i16 %mask) {
; CHECK-LABEL: test_expand_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpexpandb %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -70,7 +70,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8>, <16 x i8>, i16)
define <16 x i16> @test_expand_w_256(<16 x i16> %data, <16 x i16> %src, i16 %mask) {
; CHECK-LABEL: test_expand_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpexpandw %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -82,7 +82,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16>, <16 x i16>, i1
define <8 x i16> @test_expand_w_128(<8 x i16> %data, i8 %mask) {
; CHECK-LABEL: test_expand_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpexpandw %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -93,7 +93,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16>, <8 x i16>, i8)
define <16 x i16> @test_expand_load_w_256(i8* %addr, <16 x i16> %data, i16 %mask) {
; CHECK-LABEL: test_expand_load_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandw (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -104,7 +104,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.expand.load.w.256(i8* %addr, <16 x i16>
define <8 x i16> @test_expand_load_w_128(i8* %addr, <8 x i16> %data, i8 %mask) {
; CHECK-LABEL: test_expand_load_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandw (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -115,7 +115,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.expand.load.w.128(i8* %addr, <8 x i16> %
define void @test_compress_store_w_256(i8* %addr, <16 x i16> %data, i16 %mask) {
; CHECK-LABEL: test_compress_store_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressw %ymm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -126,7 +126,7 @@ declare void @llvm.x86.avx512.mask.compress.store.w.256(i8* %addr, <16 x i16> %d
define void @test_compress_store_w_128(i8* %addr, <8 x i16> %data, i8 %mask) {
; CHECK-LABEL: test_compress_store_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressw %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -137,7 +137,7 @@ declare void @llvm.x86.avx512.mask.compress.store.w.128(i8* %addr, <8 x i16> %da
define <32 x i8> @test_expand_load_b_256(i8* %addr, <32 x i8> %data, i32 %mask) {
; CHECK-LABEL: test_expand_load_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandb (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -148,7 +148,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.expand.load.b.256(i8* %addr, <32 x i8> %
define <16 x i8> @test_expand_load_b_128(i8* %addr, <16 x i8> %data, i16 %mask) {
; CHECK-LABEL: test_expand_load_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandb (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -159,7 +159,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.expand.load.b.128(i8* %addr, <16 x i8> %
define void @test_compress_store_b_256(i8* %addr, <32 x i8> %data, i32 %mask) {
; CHECK-LABEL: test_compress_store_b_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressb %ymm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -170,7 +170,7 @@ declare void @llvm.x86.avx512.mask.compress.store.b.256(i8* %addr, <32 x i8> %da
define void @test_compress_store_b_128(i8* %addr, <16 x i8> %data, i16 %mask) {
; CHECK-LABEL: test_compress_store_b_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressb %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -181,7 +181,7 @@ declare void @llvm.x86.avx512.mask.compress.store.b.128(i8* %addr, <16 x i8> %da
define <4 x i32>@test_int_x86_avx512_mask_vpshld_d_128(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldd $22, %xmm1, %xmm0, %xmm3 {%k1} {z}
; CHECK-NEXT: vpshldd $22, %xmm1, %xmm0, %xmm2 {%k1}
@@ -200,7 +200,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32>, <4 x i32>, i32,
define <8 x i32>@test_int_x86_avx512_mask_vpshld_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldd $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshldd $22, %ymm1, %ymm0, %ymm0
@@ -215,7 +215,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.vpshld.d.256(<8 x i32>, <8 x i32>, i32,
define <2 x i64>@test_int_x86_avx512_mask_vpshld_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldq $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshldq $22, %xmm1, %xmm0, %xmm0
@@ -230,7 +230,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.vpshld.q.128(<2 x i64>, <2 x i64>, i32,
define <4 x i64>@test_int_x86_avx512_mask_vpshld_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldq $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshldq $22, %ymm1, %ymm0, %ymm0
@@ -245,7 +245,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.vpshld.q.256(<4 x i64>, <4 x i64>, i32,
define <8 x i16>@test_int_x86_avx512_mask_vpshld_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldw $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshldw $22, %xmm1, %xmm0, %xmm0
@@ -260,7 +260,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16>, <8 x i16>, i32,
define <16 x i16>@test_int_x86_avx512_mask_vpshld_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldw $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshldw $22, %ymm1, %ymm0, %ymm0
@@ -275,7 +275,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16>, <16 x i16>, i3
define <4 x i32>@test_int_x86_avx512_mask_vpshrd_d_128(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdd $22, %xmm1, %xmm0, %xmm3 {%k1} {z}
; CHECK-NEXT: vpshrdd $22, %xmm1, %xmm0, %xmm2 {%k1}
@@ -294,7 +294,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32>, <4 x i32>, i32,
define <8 x i32>@test_int_x86_avx512_mask_vpshrd_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdd $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshrdd $22, %ymm1, %ymm0, %ymm0
@@ -309,7 +309,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.vpshrd.d.256(<8 x i32>, <8 x i32>, i32,
define <2 x i64>@test_int_x86_avx512_mask_vpshrd_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdq $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshrdq $22, %xmm1, %xmm0, %xmm0
@@ -324,7 +324,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.vpshrd.q.128(<2 x i64>, <2 x i64>, i32,
define <4 x i64>@test_int_x86_avx512_mask_vpshrd_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdq $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshrdq $22, %ymm1, %ymm0, %ymm0
@@ -339,7 +339,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.vpshrd.q.256(<4 x i64>, <4 x i64>, i32,
define <8 x i16>@test_int_x86_avx512_mask_vpshrd_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdw $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshrdw $22, %xmm1, %xmm0, %xmm0
@@ -354,7 +354,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16>, <8 x i16>, i32,
define <16 x i16>@test_int_x86_avx512_mask_vpshrd_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdw $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshrdw $22, %ymm1, %ymm0, %ymm0
@@ -372,7 +372,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_mask_vpshrdv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshrdvd (%rdi), %ymm1, %ymm3 {%k1}
@@ -396,7 +396,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_mask_vpshrdv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshrdvd (%rdi), %xmm1, %xmm3 {%k1}
@@ -420,7 +420,7 @@ declare <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4
define <4 x i64>@test_int_x86_avx512_mask_vpshrdv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshrdvq (%rdi), %ymm1, %ymm3 {%k1}
@@ -444,7 +444,7 @@ declare <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2
define <2 x i64>@test_int_x86_avx512_mask_vpshrdv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshrdvq (%rdi), %xmm1, %xmm3 {%k1}
@@ -468,7 +468,7 @@ declare <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16>, <16 x i16>,
define <16 x i16>@test_int_x86_avx512_mask_vpshrdv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshrdvw (%rdi), %ymm1, %ymm3 {%k1}
@@ -492,7 +492,7 @@ declare <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8
define <8 x i16>@test_int_x86_avx512_mask_vpshrdv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshrdvw (%rdi), %xmm1, %xmm3 {%k1}
@@ -516,7 +516,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_mask_vpshldv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshldvd (%rdi), %ymm1, %ymm3 {%k1}
@@ -540,7 +540,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_mask_vpshldv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshldvd (%rdi), %xmm1, %xmm3 {%k1}
@@ -564,7 +564,7 @@ declare <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64>, <4 x i64>, <4
define <4 x i64>@test_int_x86_avx512_mask_vpshldv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshldvq (%rdi), %ymm1, %ymm3 {%k1}
@@ -588,7 +588,7 @@ declare <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64>, <2 x i64>, <2
define <2 x i64>@test_int_x86_avx512_mask_vpshldv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshldvq (%rdi), %xmm1, %xmm3 {%k1}
@@ -612,7 +612,7 @@ declare <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16>, <16 x i16>,
define <16 x i16>@test_int_x86_avx512_mask_vpshldv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_w_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshldvw (%rdi), %ymm1, %ymm3 {%k1}
@@ -636,7 +636,7 @@ declare <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16>, <8 x i16>, <8
define <8 x i16>@test_int_x86_avx512_mask_vpshldv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_w_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshldvw (%rdi), %xmm1, %xmm3 {%k1}
diff --git a/test/CodeGen/X86/avx512vbmivl-intrinsics.ll b/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
index 8b5a2dceb2f..bb15ed190dd 100644
--- a/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
@@ -5,7 +5,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8>, <16 x i8>, <16
define <16 x i8>@test_int_x86_avx512_mask_permvar_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpermb %xmm0, %xmm1, %xmm3 ## encoding: [0x62,0xf2,0x75,0x08,0x8d,0xd8]
; CHECK-NEXT: vpermb %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0x8d,0xd0]
@@ -25,7 +25,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8>, <32 x i8>, <32
define <32 x i8>@test_int_x86_avx512_mask_permvar_qi_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpermb %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0x8d,0xd0]
; CHECK-NEXT: vpermb %ymm0, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0x8d,0xd8]
@@ -45,7 +45,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_mask_pmultishift_qb_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmultishift_qb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmultishiftqb %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x83,0xd9]
; CHECK-NEXT: vpmultishiftqb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x83,0xd1]
@@ -65,7 +65,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmultishift.qb.256(<32 x i8>, <32 x i8>,
define <32 x i8>@test_int_x86_avx512_mask_pmultishift_qb_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmultishift_qb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmultishiftqb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x83,0xd1]
; CHECK-NEXT: vpmultishiftqb %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x83,0xd9]
@@ -85,7 +85,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
; CHECK-NEXT: vpermi2b %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x75,0xda]
@@ -107,7 +107,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8>, <32 x i8>,
define <32 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
; CHECK-NEXT: vpermi2b %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x75,0xda]
@@ -129,7 +129,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
; CHECK-NEXT: vpermt2b %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x7d,0xda]
@@ -151,7 +151,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.256(<32 x i8>, <32 x i8>,
define <32 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
; CHECK-NEXT: vpermt2b %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7d,0xda]
@@ -173,7 +173,7 @@ declare <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpermi2b %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0x75,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -185,7 +185,7 @@ declare <32 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.256(<32 x i8>, <32 x i8>,
define <32 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpermi2b %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0x75,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
diff --git a/test/CodeGen/X86/avx512vl-arith.ll b/test/CodeGen/X86/avx512vl-arith.ll
index d0ba6e102ae..beaefe92aac 100755
--- a/test/CodeGen/X86/avx512vl-arith.ll
+++ b/test/CodeGen/X86/avx512vl-arith.ll
@@ -5,7 +5,7 @@
define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq256_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <4 x i64> %i, %j
@@ -14,7 +14,7 @@ define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind {
; CHECK-LABEL: vpaddq256_fold_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load <4 x i64>, <4 x i64>* %j, align 4
@@ -24,7 +24,7 @@ define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind {
define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind {
; CHECK-LABEL: vpaddq256_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI2_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -34,7 +34,7 @@ define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind {
define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind {
; CHECK-LABEL: vpaddq256_broadcast2_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%j = load i64, i64* %j.ptr
@@ -46,7 +46,7 @@ define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind
define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd256_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <8 x i32> %i, %j
@@ -55,7 +55,7 @@ define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind {
; CHECK-LABEL: vpaddd256_fold_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load <8 x i32>, <8 x i32>* %j, align 4
@@ -65,7 +65,7 @@ define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind {
define <8 x i32> @vpaddd256_broadcast_test(<8 x i32> %i) nounwind {
; CHECK-LABEL: vpaddd256_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI6_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -75,7 +75,7 @@ define <8 x i32> @vpaddd256_broadcast_test(<8 x i32> %i) nounwind {
define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_mask_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0xc1]
@@ -88,7 +88,7 @@ define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mas
define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_maskz_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0xc1]
@@ -101,7 +101,7 @@ define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %ma
define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_mask_fold_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0x07]
@@ -115,7 +115,7 @@ define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x
define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_mask_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfe,0x05,A,A,A,A]
@@ -129,7 +129,7 @@ define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1)
define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_maskz_fold_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0x07]
@@ -143,7 +143,7 @@ define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8
define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_maskz_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfe,0x05,A,A,A,A]
@@ -157,7 +157,7 @@ define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1)
define <4 x i64> @vpsubq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq256_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = sub <4 x i64> %i, %j
@@ -166,7 +166,7 @@ define <4 x i64> @vpsubq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @vpsubd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd256_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfa,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = sub <8 x i32> %i, %j
@@ -175,7 +175,7 @@ define <8 x i32> @vpsubd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <8 x i32> @vpmulld256_test(<8 x i32> %i, <8 x i32> %j) {
; CHECK-LABEL: vpmulld256_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmulld %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = mul <8 x i32> %i, %j
@@ -184,7 +184,7 @@ define <8 x i32> @vpmulld256_test(<8 x i32> %i, <8 x i32> %j) {
define <4 x double> @test_vaddpd_256(<4 x double> %y, <4 x double> %x) {
; CHECK-LABEL: test_vaddpd_256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
entry:
@@ -194,7 +194,7 @@ entry:
define <4 x double> @test_fold_vaddpd_256(<4 x double> %y) {
; CHECK-LABEL: test_fold_vaddpd_256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI17_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -205,7 +205,7 @@ entry:
define <8 x float> @test_broadcast_vaddpd_256(<8 x float> %a) nounwind {
; CHECK-LABEL: test_broadcast_vaddpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x58,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI18_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -215,7 +215,7 @@ define <8 x float> @test_broadcast_vaddpd_256(<8 x float> %a) nounwind {
define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vaddps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x58,0xc2]
@@ -228,7 +228,7 @@ define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, <8 x
define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmulps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vmulps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x59,0xc2]
@@ -241,7 +241,7 @@ define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, <8 x
define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1)nounwind readnone {
; CHECK-LABEL: test_mask_vminps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vminps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5d,0xc2]
@@ -255,7 +255,7 @@ define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, <8 x
define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmaxps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vmaxps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5f,0xc2]
@@ -269,7 +269,7 @@ define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, <8 x
define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vsubps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vsubps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5c,0xc2]
@@ -282,7 +282,7 @@ define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, <8 x
define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vdivps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vdivps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5e,0xc2]
@@ -295,7 +295,7 @@ define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, <8 x
define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmulpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vmulpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x59,0xc2]
@@ -308,7 +308,7 @@ define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, <4
define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vminpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vminpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5d,0xc2]
@@ -322,7 +322,7 @@ define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, <4
define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmaxpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vmaxpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5f,0xc2]
@@ -336,7 +336,7 @@ define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, <4
define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vsubpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vsubpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5c,0xc2]
@@ -349,7 +349,7 @@ define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, <4
define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vdivpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vdivpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5e,0xc2]
@@ -362,7 +362,7 @@ define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, <4
define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vaddpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0xc2]
@@ -375,7 +375,7 @@ define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4
define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_maskz_vaddpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT: vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0xc1]
@@ -388,7 +388,7 @@ define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, <4
define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind {
; CHECK-LABEL: test_mask_fold_vaddpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT: vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
; CHECK-NEXT: vaddpd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0x07]
@@ -402,7 +402,7 @@ define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %
define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind {
; CHECK-LABEL: test_maskz_fold_vaddpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vaddpd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0x07]
@@ -416,7 +416,7 @@ define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %
define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind {
; CHECK-LABEL: test_broadcast2_vaddpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0x58,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load double, double* %j
@@ -428,7 +428,7 @@ define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nou
define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
; CHECK-LABEL: test_mask_broadcast_vaddpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpcmpneqq %ymm0, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xc8,0x04]
; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm1, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x39,0x58,0x0f]
@@ -445,7 +445,7 @@ define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x doub
define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
; CHECK-LABEL: test_maskz_broadcast_vaddpd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x58,0x07]
@@ -463,7 +463,7 @@ define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j
define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq128_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <2 x i64> %i, %j
@@ -472,7 +472,7 @@ define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
; CHECK-LABEL: vpaddq128_fold_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load <2 x i64>, <2 x i64>* %j, align 4
@@ -482,7 +482,7 @@ define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
; CHECK-LABEL: vpaddq128_broadcast2_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0xd4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load i64, i64* %j
@@ -494,7 +494,7 @@ define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd128_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <4 x i32> %i, %j
@@ -503,7 +503,7 @@ define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind {
; CHECK-LABEL: vpaddd128_fold_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load <4 x i32>, <4 x i32>* %j, align 4
@@ -513,7 +513,7 @@ define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind {
define <4 x i32> @vpaddd128_broadcast_test(<4 x i32> %i) nounwind {
; CHECK-LABEL: vpaddd128_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI42_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -523,7 +523,7 @@ define <4 x i32> @vpaddd128_broadcast_test(<4 x i32> %i) nounwind {
define <4 x i32> @vpaddd128_mask_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_mask_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0xc1]
@@ -536,7 +536,7 @@ define <4 x i32> @vpaddd128_mask_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mas
define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_maskz_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0xc1]
@@ -549,7 +549,7 @@ define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %ma
define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_mask_fold_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0x07]
@@ -563,7 +563,7 @@ define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x
define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_mask_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xfe,0x05,A,A,A,A]
@@ -577,7 +577,7 @@ define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1)
define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_maskz_fold_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0x07]
@@ -591,7 +591,7 @@ define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4
define <4 x i32> @vpaddd128_maskz_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_maskz_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xfe,0x05,A,A,A,A]
@@ -605,7 +605,7 @@ define <4 x i32> @vpaddd128_maskz_broadcast_test(<4 x i32> %i, <4 x i32> %mask1)
define <2 x i64> @vpsubq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq128_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = sub <2 x i64> %i, %j
@@ -614,7 +614,7 @@ define <2 x i64> @vpsubq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
define <4 x i32> @vpsubd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd128_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = sub <4 x i32> %i, %j
@@ -623,7 +623,7 @@ define <4 x i32> @vpsubd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
define <4 x i32> @vpmulld128_test(<4 x i32> %i, <4 x i32> %j) {
; CHECK-LABEL: vpmulld128_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%x = mul <4 x i32> %i, %j
@@ -632,7 +632,7 @@ define <4 x i32> @vpmulld128_test(<4 x i32> %i, <4 x i32> %j) {
define <2 x double> @test_vaddpd_128(<2 x double> %y, <2 x double> %x) {
; CHECK-LABEL: test_vaddpd_128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
entry:
@@ -642,7 +642,7 @@ entry:
define <2 x double> @test_fold_vaddpd_128(<2 x double> %y) {
; CHECK-LABEL: test_fold_vaddpd_128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vaddpd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI53_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -653,7 +653,7 @@ entry:
define <4 x float> @test_broadcast_vaddpd_128(<4 x float> %a) nounwind {
; CHECK-LABEL: test_broadcast_vaddpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x58,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI54_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -663,7 +663,7 @@ define <4 x float> @test_broadcast_vaddpd_128(<4 x float> %a) nounwind {
define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vaddps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x58,0xc2]
@@ -676,7 +676,7 @@ define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, <4 x
define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmulps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vmulps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x59,0xc2]
@@ -689,7 +689,7 @@ define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, <4 x
define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vminps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vminps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5d,0xc2]
@@ -703,7 +703,7 @@ define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, <4 x
define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmaxps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vmaxps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5f,0xc2]
@@ -717,7 +717,7 @@ define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, <4 x
define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vsubps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vsubps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5c,0xc2]
@@ -731,7 +731,7 @@ define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, <4 x
define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vdivps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vdivps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5e,0xc2]
@@ -744,7 +744,7 @@ define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, <4 x
define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmulpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vmulpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x59,0xc2]
@@ -757,7 +757,7 @@ define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, <2
define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vminpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vminpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5d,0xc2]
@@ -771,7 +771,7 @@ define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, <2
define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmaxpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vmaxpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5f,0xc2]
@@ -785,7 +785,7 @@ define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, <2
define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vsubpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vsubpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5c,0xc2]
@@ -798,7 +798,7 @@ define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, <2
define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vdivpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vdivpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5e,0xc2]
@@ -811,7 +811,7 @@ define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, <2
define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vaddpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0xc2]
@@ -824,7 +824,7 @@ define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2
define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j,
; CHECK-LABEL: test_maskz_vaddpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04]
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0xc1]
@@ -838,7 +838,7 @@ define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j,
define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind {
; CHECK-LABEL: test_mask_fold_vaddpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04]
; CHECK-NEXT: vaddpd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0x07]
@@ -852,7 +852,7 @@ define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %
define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind {
; CHECK-LABEL: test_maskz_fold_vaddpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vaddpd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0x07]
@@ -866,7 +866,7 @@ define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %
define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind {
; CHECK-LABEL: test_broadcast2_vaddpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0x58,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load double, double* %j
@@ -878,7 +878,7 @@ define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nou
define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i, double* %j, <2 x i64> %mask1) nounwind {
; CHECK-LABEL: test_mask_broadcast_vaddpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpcmpneqq %xmm0, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xc8,0x04]
; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm1, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x19,0x58,0x0f]
@@ -895,7 +895,7 @@ define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x doub
define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j, <2 x i64> %mask1) nounwind {
; CHECK-LABEL: test_maskz_broadcast_vaddpd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x99,0x58,0x07]
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
index c58c704ae26..fdd6f712645 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define <8 x float> @test_mm256_shuffle_f32x4(<8 x float> %__A, <8 x float> %__B) {
; X32-LABEL: test_mm256_shuffle_f32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_shuffle_f32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; X64-NEXT: retq
entry:
@@ -21,14 +21,14 @@ entry:
define <8 x float> @test_mm256_mask_shuffle_f32x4(<8 x float> %__W, i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; X32-LABEL: test_mm256_mask_shuffle_f32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} = ymm1[4,5,6,7],ymm2[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_shuffle_f32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} = ymm1[4,5,6,7],ymm2[4,5,6,7]
; X64-NEXT: retq
@@ -41,14 +41,14 @@ entry:
define <8 x float> @test_mm256_maskz_shuffle_f32x4(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; X32-LABEL: test_mm256_maskz_shuffle_f32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_shuffle_f32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshuff32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7]
; X64-NEXT: retq
@@ -61,12 +61,12 @@ entry:
define <4 x double> @test_mm256_shuffle_f64x2(<4 x double> %__A, <4 x double> %__B) {
; X32-LABEL: test_mm256_shuffle_f64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_shuffle_f64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; X64-NEXT: retq
entry:
@@ -76,14 +76,14 @@ entry:
define <4 x double> @test_mm256_mask_shuffle_f64x2(<4 x double> %__W, i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; X32-LABEL: test_mm256_mask_shuffle_f64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} = ymm1[2,3],ymm2[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_shuffle_f64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} = ymm1[2,3],ymm2[2,3]
; X64-NEXT: retq
@@ -97,14 +97,14 @@ entry:
define <4 x double> @test_mm256_maskz_shuffle_f64x2(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; X32-LABEL: test_mm256_maskz_shuffle_f64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_shuffle_f64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshuff64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3]
; X64-NEXT: retq
@@ -118,12 +118,12 @@ entry:
define <4 x i64> @test_mm256_shuffle_i32x4(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_shuffle_i32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_shuffle_i32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; X64-NEXT: retq
entry:
@@ -133,14 +133,14 @@ entry:
define <4 x i64> @test_mm256_mask_shuffle_i32x4(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_shuffle_i32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} = ymm1[4,5,6,7],ymm2[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_shuffle_i32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} = ymm1[4,5,6,7],ymm2[4,5,6,7]
; X64-NEXT: retq
@@ -156,14 +156,14 @@ entry:
define <4 x i64> @test_mm256_maskz_shuffle_i32x4(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_maskz_shuffle_i32x4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_shuffle_i32x4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufi32x4 {{.*#+}} ymm0 {%k1} {z} = ymm0[4,5,6,7],ymm1[4,5,6,7]
; X64-NEXT: retq
@@ -178,12 +178,12 @@ entry:
define <4 x i64> @test_mm256_shuffle_i64x2(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_shuffle_i64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_shuffle_i64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; X64-NEXT: retq
entry:
@@ -193,14 +193,14 @@ entry:
define <4 x i64> @test_mm256_mask_shuffle_i64x2(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_shuffle_i64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} = ymm1[2,3],ymm2[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_shuffle_i64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} = ymm1[2,3],ymm2[2,3]
; X64-NEXT: retq
@@ -214,14 +214,14 @@ entry:
define <4 x i64> @test_mm256_maskz_shuffle_i64x2(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_maskz_shuffle_i64x2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_shuffle_i64x2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufi64x2 {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3],ymm1[2,3]
; X64-NEXT: retq
@@ -235,14 +235,14 @@ entry:
define zeroext i8 @test_mm_test_epi32_mask(<2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_test_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmd %xmm0, %xmm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_test_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmd %xmm0, %xmm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -258,7 +258,7 @@ entry:
define zeroext i8 @test_mm_mask_test_epi32_mask(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask_test_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
@@ -267,7 +267,7 @@ define zeroext i8 @test_mm_mask_test_epi32_mask(i8 zeroext %__U, <2 x i64> %__A,
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_test_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -287,7 +287,7 @@ entry:
define zeroext i8 @test_mm256_test_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_test_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmd %ymm0, %ymm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
@@ -295,7 +295,7 @@ define zeroext i8 @test_mm256_test_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_test_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmd %ymm0, %ymm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -311,7 +311,7 @@ entry:
define zeroext i8 @test_mm256_mask_test_epi32_mask(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_test_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
@@ -321,7 +321,7 @@ define zeroext i8 @test_mm256_mask_test_epi32_mask(i8 zeroext %__U, <4 x i64> %_
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_test_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -340,14 +340,14 @@ entry:
define zeroext i8 @test_mm_test_epi64_mask(<2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_test_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmq %xmm0, %xmm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_test_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmq %xmm0, %xmm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -362,7 +362,7 @@ entry:
define zeroext i8 @test_mm_mask_test_epi64_mask(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask_test_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
@@ -371,7 +371,7 @@ define zeroext i8 @test_mm_mask_test_epi64_mask(i8 zeroext %__U, <2 x i64> %__A,
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_test_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -390,7 +390,7 @@ entry:
define zeroext i8 @test_mm256_test_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_test_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestmq %ymm0, %ymm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
@@ -398,7 +398,7 @@ define zeroext i8 @test_mm256_test_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_test_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestmq %ymm0, %ymm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -414,7 +414,7 @@ entry:
define zeroext i8 @test_mm256_mask_test_epi64_mask(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_test_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
@@ -424,7 +424,7 @@ define zeroext i8 @test_mm256_mask_test_epi64_mask(i8 zeroext %__U, <4 x i64> %_
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_test_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -444,14 +444,14 @@ entry:
define zeroext i8 @test_mm_testn_epi32_mask(<2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_testn_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmd %xmm0, %xmm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testn_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmd %xmm0, %xmm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -467,7 +467,7 @@ entry:
define zeroext i8 @test_mm_mask_testn_epi32_mask(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask_testn_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
@@ -476,7 +476,7 @@ define zeroext i8 @test_mm_mask_testn_epi32_mask(i8 zeroext %__U, <2 x i64> %__A
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_testn_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -496,7 +496,7 @@ entry:
define zeroext i8 @test_mm256_testn_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_testn_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmd %ymm0, %ymm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
@@ -504,7 +504,7 @@ define zeroext i8 @test_mm256_testn_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testn_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmd %ymm0, %ymm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -520,7 +520,7 @@ entry:
define zeroext i8 @test_mm256_mask_testn_epi32_mask(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_testn_epi32_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
@@ -530,7 +530,7 @@ define zeroext i8 @test_mm256_mask_testn_epi32_mask(i8 zeroext %__U, <4 x i64> %
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_testn_epi32_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -549,14 +549,14 @@ entry:
define zeroext i8 @test_mm_testn_epi64_mask(<2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_testn_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmq %xmm0, %xmm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testn_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmq %xmm0, %xmm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -571,7 +571,7 @@ entry:
define zeroext i8 @test_mm_mask_testn_epi64_mask(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
; X32-LABEL: test_mm_mask_testn_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
@@ -580,7 +580,7 @@ define zeroext i8 @test_mm_mask_testn_epi64_mask(i8 zeroext %__U, <2 x i64> %__A
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_testn_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -599,7 +599,7 @@ entry:
define zeroext i8 @test_mm256_testn_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_testn_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vptestnmq %ymm0, %ymm1, %k0
; X32-NEXT: kmovw %k0, %eax
; X32-NEXT: movzbl %al, %eax
@@ -607,7 +607,7 @@ define zeroext i8 @test_mm256_testn_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testn_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vptestnmq %ymm0, %ymm1, %k0
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: movzbl %al, %eax
@@ -623,7 +623,7 @@ entry:
define zeroext i8 @test_mm256_mask_testn_epi64_mask(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
; X32-LABEL: test_mm256_mask_testn_epi64_mask:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
@@ -633,7 +633,7 @@ define zeroext i8 @test_mm256_mask_testn_epi64_mask(i8 zeroext %__U, <4 x i64> %
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_testn_epi64_mask:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
@@ -653,14 +653,14 @@ entry:
define <2 x i64> @test_mm_mask_set1_epi32(<2 x i64> %__O, i8 zeroext %__M) {
; X32-LABEL: test_mm_mask_set1_epi32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_set1_epi32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1}
; X64-NEXT: retq
@@ -675,14 +675,14 @@ entry:
define <2 x i64> @test_mm_maskz_set1_epi32(i8 zeroext %__M) {
; X32-LABEL: test_mm_maskz_set1_epi32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_set1_epi32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
; X64-NEXT: retq
@@ -696,14 +696,14 @@ entry:
define <4 x i64> @test_mm256_mask_set1_epi32(<4 x i64> %__O, i8 zeroext %__M) {
; X32-LABEL: test_mm256_mask_set1_epi32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_set1_epi32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1}
; X64-NEXT: retq
@@ -717,14 +717,14 @@ entry:
define <4 x i64> @test_mm256_maskz_set1_epi32(i8 zeroext %__M) {
; X32-LABEL: test_mm256_maskz_set1_epi32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_set1_epi32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
; X64-NEXT: retq
@@ -737,7 +737,7 @@ entry:
define <2 x i64> @test_mm_mask_set1_epi64(<2 x i64> %__O, i8 zeroext %__M, i64 %__A) {
; X32-LABEL: test_mm_mask_set1_epi64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: vmovd %eax, %xmm1
@@ -747,7 +747,7 @@ define <2 x i64> @test_mm_mask_set1_epi64(<2 x i64> %__O, i8 zeroext %__M, i64 %
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_set1_epi64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovd %esi, %xmm1
; X64-NEXT: vpbroadcastb %xmm1, %xmm1
; X64-NEXT: kmovw %edi, %k1
@@ -766,7 +766,7 @@ entry:
define <2 x i64> @test_mm_maskz_set1_epi64(i8 zeroext %__M, i64 %__A) {
; X32-LABEL: test_mm_maskz_set1_epi64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: vmovd %eax, %xmm0
@@ -776,7 +776,7 @@ define <2 x i64> @test_mm_maskz_set1_epi64(i8 zeroext %__M, i64 %__A) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_set1_epi64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovd %esi, %xmm0
; X64-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-NEXT: kmovw %edi, %k1
@@ -796,7 +796,7 @@ entry:
define <4 x i64> @test_mm256_mask_set1_epi64(<4 x i64> %__O, i8 zeroext %__M, i64 %__A) {
; X32-LABEL: test_mm256_mask_set1_epi64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movb {{[0-9]+}}(%esp), %dl
@@ -810,7 +810,7 @@ define <4 x i64> @test_mm256_mask_set1_epi64(<4 x i64> %__O, i8 zeroext %__M, i6
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_set1_epi64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastq %rsi, %ymm0 {%k1}
; X64-NEXT: retq
@@ -825,7 +825,7 @@ entry:
define <4 x i64> @test_mm256_maskz_set1_epi64(i8 zeroext %__M, i64 %__A) {
; X32-LABEL: test_mm256_maskz_set1_epi64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movb {{[0-9]+}}(%esp), %dl
@@ -839,7 +839,7 @@ define <4 x i64> @test_mm256_maskz_set1_epi64(i8 zeroext %__M, i64 %__A) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_set1_epi64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastq %rsi, %ymm0 {%k1} {z}
; X64-NEXT: retq
@@ -854,12 +854,12 @@ entry:
define <2 x i64> @test_mm_broadcastd_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -870,7 +870,7 @@ define <2 x i64> @test_mm_broadcastd_epi32(<2 x i64> %a0) {
define <2 x i64> @test_mm_mask_broadcastd_epi32(<2 x i64> %a0, i8 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_mask_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -883,7 +883,7 @@ define <2 x i64> @test_mm_mask_broadcastd_epi32(<2 x i64> %a0, i8 %a1, <2 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -902,7 +902,7 @@ define <2 x i64> @test_mm_mask_broadcastd_epi32(<2 x i64> %a0, i8 %a1, <2 x i64>
define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maskz_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -915,7 +915,7 @@ define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -933,12 +933,12 @@ define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
define <4 x i64> @test_mm256_broadcastd_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -949,14 +949,14 @@ define <4 x i64> @test_mm256_broadcastd_epi32(<2 x i64> %a0) {
define <4 x i64> @test_mm256_mask_broadcastd_epi32(<4 x i64> %a0, i8 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm256_mask_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastd %xmm1, %ymm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd %xmm1, %ymm0 {%k1}
; X64-NEXT: retq
@@ -971,14 +971,14 @@ define <4 x i64> @test_mm256_mask_broadcastd_epi32(<4 x i64> %a0, i8 %a1, <2 x i
define <4 x i64> @test_mm256_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastd %xmm0, %ymm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpbroadcastd %xmm0, %ymm0 {%k1} {z}
; X64-NEXT: retq
@@ -992,12 +992,12 @@ define <4 x i64> @test_mm256_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastq %xmm0, %xmm0
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -1006,7 +1006,7 @@ define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_mask_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1019,7 +1019,7 @@ define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $3, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1035,7 +1035,7 @@ define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64>
define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maskz_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1048,7 +1048,7 @@ define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $3, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1064,12 +1064,12 @@ define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
define <4 x i64> @test_mm256_broadcastq_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> zeroinitializer
@@ -1078,7 +1078,7 @@ define <4 x i64> @test_mm256_broadcastq_epi64(<2 x i64> %a0) {
define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm256_mask_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1091,7 +1091,7 @@ define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1107,7 +1107,7 @@ define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i
define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1120,7 +1120,7 @@ define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1136,12 +1136,12 @@ define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
define <2 x double> @test_mm_broadcastsd_pd(<2 x double> %a0) {
; X32-LABEL: test_mm_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
@@ -1150,7 +1150,7 @@ define <2 x double> @test_mm_broadcastsd_pd(<2 x double> %a0) {
define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x double> %a2) {
; X32-LABEL: test_mm_mask_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1163,7 +1163,7 @@ define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $3, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1179,7 +1179,7 @@ define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x
define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_maskz_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1192,7 +1192,7 @@ define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $3, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1208,12 +1208,12 @@ define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
define <4 x double> @test_mm256_broadcastsd_pd(<2 x double> %a0) {
; X32-LABEL: test_mm256_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32> zeroinitializer
@@ -1222,7 +1222,7 @@ define <4 x double> @test_mm256_broadcastsd_pd(<2 x double> %a0) {
define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2 x double> %a2) {
; X32-LABEL: test_mm256_mask_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1235,7 +1235,7 @@ define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1251,7 +1251,7 @@ define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2
define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastsd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1264,7 +1264,7 @@ define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastsd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1280,12 +1280,12 @@ define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
define <4 x float> @test_mm_broadcastss_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> zeroinitializer
@@ -1294,7 +1294,7 @@ define <4 x float> @test_mm_broadcastss_ps(<4 x float> %a0) {
define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x float> %a2) {
; X32-LABEL: test_mm_mask_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1307,7 +1307,7 @@ define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x fl
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1323,7 +1323,7 @@ define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x fl
define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_maskz_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1336,7 +1336,7 @@ define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1352,12 +1352,12 @@ define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
define <8 x float> @test_mm256_broadcastss_ps(<4 x float> %a0) {
; X32-LABEL: test_mm256_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32> zeroinitializer
@@ -1366,14 +1366,14 @@ define <8 x float> @test_mm256_broadcastss_ps(<4 x float> %a0) {
define <8 x float> @test_mm256_mask_broadcastss_ps(<8 x float> %a0, i8 %a1, <4 x float> %a2) {
; X32-LABEL: test_mm256_mask_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vbroadcastss %xmm1, %ymm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vbroadcastss %xmm1, %ymm0 {%k1}
; X64-NEXT: retq
@@ -1385,14 +1385,14 @@ define <8 x float> @test_mm256_mask_broadcastss_ps(<8 x float> %a0, i8 %a1, <4 x
define <8 x float> @test_mm256_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastss_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastss_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z}
; X64-NEXT: retq
@@ -1404,12 +1404,12 @@ define <8 x float> @test_mm256_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
define <2 x double> @test_mm_movddup_pd(<2 x double> %a0) {
; X32-LABEL: test_mm_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
@@ -1418,7 +1418,7 @@ define <2 x double> @test_mm_movddup_pd(<2 x double> %a0) {
define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x double> %a2) {
; X32-LABEL: test_mm_mask_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1431,7 +1431,7 @@ define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x doub
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $3, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1447,7 +1447,7 @@ define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x doub
define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_maskz_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1460,7 +1460,7 @@ define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $3, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1476,12 +1476,12 @@ define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) {
define <4 x double> @test_mm256_movddup_pd(<4 x double> %a0) {
; X32-LABEL: test_mm256_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -1490,7 +1490,7 @@ define <4 x double> @test_mm256_movddup_pd(<4 x double> %a0) {
define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x double> %a2) {
; X32-LABEL: test_mm256_mask_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1503,7 +1503,7 @@ define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x d
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1519,7 +1519,7 @@ define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x d
define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) {
; X32-LABEL: test_mm256_maskz_movddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1532,7 +1532,7 @@ define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_movddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1548,12 +1548,12 @@ define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) {
define <4 x float> @test_mm_movehdup_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -1562,7 +1562,7 @@ define <4 x float> @test_mm_movehdup_ps(<4 x float> %a0) {
define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float> %a2) {
; X32-LABEL: test_mm_mask_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1575,7 +1575,7 @@ define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1591,7 +1591,7 @@ define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float
define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_maskz_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1604,7 +1604,7 @@ define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1620,12 +1620,12 @@ define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) {
define <8 x float> @test_mm256_movehdup_ps(<8 x float> %a0) {
; X32-LABEL: test_mm256_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -1634,14 +1634,14 @@ define <8 x float> @test_mm256_movehdup_ps(<8 x float> %a0) {
define <8 x float> @test_mm256_mask_movehdup_ps(<8 x float> %a0, i8 %a1, <8 x float> %a2) {
; X32-LABEL: test_mm256_mask_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = ymm1[1,1,3,3,5,5,7,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = ymm1[1,1,3,3,5,5,7,7]
; X64-NEXT: retq
@@ -1653,14 +1653,14 @@ define <8 x float> @test_mm256_mask_movehdup_ps(<8 x float> %a0, i8 %a1, <8 x fl
define <8 x float> @test_mm256_maskz_movehdup_ps(i8 %a0, <8 x float> %a1) {
; X32-LABEL: test_mm256_maskz_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
; X64-NEXT: retq
@@ -1672,12 +1672,12 @@ define <8 x float> @test_mm256_maskz_movehdup_ps(i8 %a0, <8 x float> %a1) {
define <4 x float> @test_mm_moveldup_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -1686,7 +1686,7 @@ define <4 x float> @test_mm_moveldup_ps(<4 x float> %a0) {
define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float> %a2) {
; X32-LABEL: test_mm_mask_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1699,7 +1699,7 @@ define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1715,7 +1715,7 @@ define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float
define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_maskz_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1728,7 +1728,7 @@ define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1744,12 +1744,12 @@ define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) {
define <8 x float> @test_mm256_moveldup_ps(<8 x float> %a0) {
; X32-LABEL: test_mm256_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -1758,14 +1758,14 @@ define <8 x float> @test_mm256_moveldup_ps(<8 x float> %a0) {
define <8 x float> @test_mm256_mask_moveldup_ps(<8 x float> %a0, i8 %a1, <8 x float> %a2) {
; X32-LABEL: test_mm256_mask_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = ymm1[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = ymm1[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
@@ -1777,14 +1777,14 @@ define <8 x float> @test_mm256_mask_moveldup_ps(<8 x float> %a0, i8 %a1, <8 x fl
define <8 x float> @test_mm256_maskz_moveldup_ps(i8 %a0, <8 x float> %a1) {
; X32-LABEL: test_mm256_maskz_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
@@ -1796,12 +1796,12 @@ define <8 x float> @test_mm256_maskz_moveldup_ps(i8 %a0, <8 x float> %a1) {
define <4 x i64> @test_mm256_permutex_epi64(<4 x i64> %a0) {
; X32-LABEL: test_mm256_permutex_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permutex_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,0,0]
; X64-NEXT: retq
%res = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
@@ -1810,7 +1810,7 @@ define <4 x i64> @test_mm256_permutex_epi64(<4 x i64> %a0) {
define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64> %a2) {
; X32-LABEL: test_mm256_mask_permutex_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1823,7 +1823,7 @@ define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_permutex_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1839,7 +1839,7 @@ define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64
define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_maskz_permutex_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1852,7 +1852,7 @@ define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_permutex_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1868,12 +1868,12 @@ define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) {
define <4 x double> @test_mm256_permutex_pd(<4 x double> %a0) {
; X32-LABEL: test_mm256_permutex_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permutex_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,0,0]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
@@ -1882,7 +1882,7 @@ define <4 x double> @test_mm256_permutex_pd(<4 x double> %a0) {
define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x double> %a2) {
; X32-LABEL: test_mm256_mask_permutex_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1895,7 +1895,7 @@ define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_permutex_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1911,7 +1911,7 @@ define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x
define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) {
; X32-LABEL: test_mm256_maskz_permutex_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1924,7 +1924,7 @@ define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_permutex_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1940,12 +1940,12 @@ define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) {
define <2 x double> @test_mm_shuffle_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
@@ -1954,7 +1954,7 @@ define <2 x double> @test_mm_shuffle_pd(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 %a1, <2 x double> %a2, <2 x double> %a3) {
; X32-LABEL: test_mm_mask_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1967,7 +1967,7 @@ define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 %a1, <2 x doub
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $3, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -1983,7 +1983,7 @@ define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 %a1, <2 x doub
define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x double> %a2) {
; X32-LABEL: test_mm_maskz_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -1996,7 +1996,7 @@ define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x dou
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $3, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -2012,12 +2012,12 @@ define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x dou
define <4 x double> @test_mm256_shuffle_pd(<4 x double> %a0, <4 x double> %a1) {
; X32-LABEL: test_mm256_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[2],ymm1[2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[2],ymm1[2]
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
@@ -2026,7 +2026,7 @@ define <4 x double> @test_mm256_shuffle_pd(<4 x double> %a0, <4 x double> %a1) {
define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x double> %a2, <4 x double> %a3) {
; X32-LABEL: test_mm256_mask_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -2039,7 +2039,7 @@ define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x d
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -2055,7 +2055,7 @@ define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x d
define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x double> %a2) {
; X32-LABEL: test_mm256_maskz_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -2068,7 +2068,7 @@ define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -2084,12 +2084,12 @@ define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x
define <4 x float> @test_mm_shuffle_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_shuffle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shuffle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
@@ -2098,7 +2098,7 @@ define <4 x float> @test_mm_shuffle_ps(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float> %a2, <4 x float> %a3) {
; X32-LABEL: test_mm_mask_shuffle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -2111,7 +2111,7 @@ define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float>
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_shuffle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -2127,7 +2127,7 @@ define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float>
define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float> %a2) {
; X32-LABEL: test_mm_maskz_shuffle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
@@ -2140,7 +2140,7 @@ define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_shuffle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $15, %dil
; X64-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -2156,12 +2156,12 @@ define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float
define <8 x float> @test_mm256_shuffle_ps(<8 x float> %a0, <8 x float> %a1) {
; X32-LABEL: test_mm256_shuffle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,0],ymm0[4,5],ymm1[4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_shuffle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,0],ymm0[4,5],ymm1[4,4]
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
@@ -2170,14 +2170,14 @@ define <8 x float> @test_mm256_shuffle_ps(<8 x float> %a0, <8 x float> %a1) {
define <8 x float> @test_mm256_mask_shuffle_ps(<8 x float> %a0, i8 %a1, <8 x float> %a2, <8 x float> %a3) {
; X32-LABEL: test_mm256_mask_shuffle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufps {{.*#+}} ymm0 {%k1} = ymm1[0,1],ymm2[0,0],ymm1[4,5],ymm2[4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_shuffle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufps {{.*#+}} ymm0 {%k1} = ymm1[0,1],ymm2[0,0],ymm1[4,5],ymm2[4,4]
; X64-NEXT: retq
@@ -2189,14 +2189,14 @@ define <8 x float> @test_mm256_mask_shuffle_ps(<8 x float> %a0, i8 %a1, <8 x flo
define <8 x float> @test_mm256_maskz_shuffle_ps(i8 %a0, <8 x float> %a1, <8 x float> %a2) {
; X32-LABEL: test_mm256_maskz_shuffle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1],ymm1[0,0],ymm0[4,5],ymm1[4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_shuffle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1],ymm1[0,0],ymm0[4,5],ymm1[4,4]
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 2fad69e4b71..6d8e019a0ee 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -5,7 +5,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pbroadcast.d.gpr.128(i32, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pbroadcast_d_gpr_128(i32 %x0, <4 x i32> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_d_gpr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastd %edi, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7c,0xcf]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7c,0xc7]
@@ -26,7 +26,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pbroadcast.q.gpr.128(i64, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_pbroadcast_q_gpr_128(i64 %x0, <2 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_q_gpr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastq %rdi, %xmm1 ## encoding: [0x62,0xf2,0xfd,0x08,0x7c,0xcf]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x7c,0xc7]
@@ -47,7 +47,7 @@ define <2 x i64>@test_int_x86_avx512_mask_pbroadcast_q_gpr_128(i64 %x0, <2 x i64
define <8 x i32>@test_int_x86_avx512_mask_pbroadcast_d_gpr_256(i32 %x0, <8 x i32> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_d_gpr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastd %edi, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7c,0xcf]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7c,0xc7]
@@ -67,7 +67,7 @@ define <2 x i64>@test_int_x86_avx512_mask_pbroadcast_q_gpr_128(i64 %x0, <2 x i64
define <4 x i64>@test_int_x86_avx512_mask_pbroadcast_q_gpr_256(i64 %x0, <4 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_q_gpr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastq %rdi, %ymm1 ## encoding: [0x62,0xf2,0xfd,0x28,0x7c,0xcf]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x7c,0xc7]
@@ -89,7 +89,7 @@ declare <8 x i32> @llvm.x86.avx512.pbroadcastd.256(<4 x i32>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_pbroadcastd_256(<4 x i32> %x0, <8 x i32> %x1, i8 %mask, i32 * %y_ptr) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x58,0xc8]
; CHECK-NEXT: vpbroadcastd %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x58,0xc0]
@@ -110,7 +110,7 @@ declare <4 x i32> @llvm.x86.avx512.pbroadcastd.128(<4 x i32>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_pbroadcastd_128(<4 x i32> %x0, <4 x i32> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x58,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x58,0xc8]
@@ -130,7 +130,7 @@ declare <4 x i64> @llvm.x86.avx512.pbroadcastq.256(<2 x i64>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_pbroadcastq_256(<2 x i64> %x0, <4 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x59,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x59,0xc8]
@@ -150,7 +150,7 @@ declare <2 x i64> @llvm.x86.avx512.pbroadcastq.128(<2 x i64>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_pbroadcastq_128(<2 x i64> %x0, <2 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x59,0xc8]
@@ -170,7 +170,7 @@ declare <4 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.256(<2 x double>, <4
define <4 x double> @test_x86_vbroadcast_sd_pd_256(<2 x double> %a0, <4 x double> %a1, i8 %mask ) {
; CHECK-LABEL: test_x86_vbroadcast_sd_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x19,0xc8]
@@ -190,7 +190,7 @@ declare <8 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.256(<4 x float>, <8 x
define <8 x float> @test_x86_vbroadcast_ss_ps_256(<4 x float> %a0, <8 x float> %a1, i8 %mask ) {
; CHECK-LABEL: test_x86_vbroadcast_ss_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x18,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x18,0xc8]
@@ -210,7 +210,7 @@ declare <4 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.128(<4 x float>, <4 x
define <4 x float> @test_x86_vbroadcast_ss_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask ) {
; CHECK-LABEL: test_x86_vbroadcast_ss_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x18,0xc8]
@@ -230,7 +230,7 @@ declare <4 x float> @llvm.x86.avx512.mask.movsldup.128(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_movsldup_128(<4 x float> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movsldup_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovsldup %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x12,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0,0,2,2]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -253,7 +253,7 @@ declare <8 x float> @llvm.x86.avx512.mask.movsldup.256(<8 x float>, <8 x float>,
define <8 x float>@test_int_x86_avx512_mask_movsldup_256(<8 x float> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movsldup_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovsldup %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x12,0xd0]
; CHECK-NEXT: ## ymm2 = ymm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -276,7 +276,7 @@ declare <4 x float> @llvm.x86.avx512.mask.movshdup.128(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_movshdup_128(<4 x float> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movshdup_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovshdup %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x16,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[1,1,3,3]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -299,7 +299,7 @@ declare <8 x float> @llvm.x86.avx512.mask.movshdup.256(<8 x float>, <8 x float>,
define <8 x float>@test_int_x86_avx512_mask_movshdup_256(<8 x float> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movshdup_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovshdup %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x16,0xd0]
; CHECK-NEXT: ## ymm2 = ymm0[1,1,3,3,5,5,7,7]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -321,7 +321,7 @@ declare <2 x double> @llvm.x86.avx512.mask.movddup.128(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask_movddup_128(<2 x double> %x0, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movddup_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovddup %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0,0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -344,7 +344,7 @@ declare <4 x double> @llvm.x86.avx512.mask.movddup.256(<4 x double>, <4 x double
define <4 x double>@test_int_x86_avx512_mask_movddup_256(<4 x double> %x0, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movddup_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovddup %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xff,0x12,0xd0]
; CHECK-NEXT: ## ymm2 = ymm0[0,0,2,2]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -367,7 +367,7 @@ declare <4 x double> @llvm.x86.avx512.mask.vpermil.pd.256(<4 x double>, i32, <4
define <4 x double>@test_int_x86_avx512_mask_vpermil_pd_256(<4 x double> %x0, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilpd $6, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x05,0xd0,0x06]
; CHECK-NEXT: ## ymm2 = ymm0[0,1,3,2]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -390,7 +390,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vpermil.pd.128(<2 x double>, i32, <2
define <2 x double>@test_int_x86_avx512_mask_vpermil_pd_128(<2 x double> %x0, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilpd $1, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0xd0,0x01]
; CHECK-NEXT: ## xmm2 = xmm0[1,0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -413,7 +413,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vpermil.ps.256(<8 x float>, i32, <8 x
define <8 x float>@test_int_x86_avx512_mask_vpermil_ps_256(<8 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps $22, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x04,0xd0,0x16]
; CHECK-NEXT: ## ymm2 = ymm0[2,1,1,0,6,5,5,4]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -436,7 +436,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vpermil.ps.128(<4 x float>, i32, <4 x
define <4 x float>@test_int_x86_avx512_mask_vpermil_ps_128(<4 x float> %x0, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps $22, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0xd0,0x16]
; CHECK-NEXT: ## xmm2 = xmm0[2,1,1,0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -459,7 +459,7 @@ declare <4 x double> @llvm.x86.avx512.mask.perm.df.256(<4 x double>, i32, <4 x d
define <4 x double>@test_int_x86_avx512_mask_perm_df_256(<4 x double> %x0, i32 %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_perm_df_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermpd $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xfd,0x01,0xd0,0x03]
; CHECK-NEXT: ## ymm2 = ymm0[3,0,0,0]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
@@ -482,7 +482,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.perm.di.256(<4 x i64>, i32, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_perm_di_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_perm_di_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermq $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xfd,0x00,0xd0,0x03]
; CHECK-NEXT: ## ymm2 = ymm0[3,0,0,0]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
@@ -505,7 +505,7 @@ declare void @llvm.x86.avx512.mask.store.pd.128(i8*, <2 x double>, i8)
define void@test_int_x86_avx512_mask_store_pd_128(i8* %ptr1, i8* %ptr2, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovapd %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x29,0x07]
; CHECK-NEXT: vmovapd %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x06]
@@ -519,7 +519,7 @@ declare void @llvm.x86.avx512.mask.store.pd.256(i8*, <4 x double>, i8)
define void@test_int_x86_avx512_mask_store_pd_256(i8* %ptr1, i8* %ptr2, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovapd %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x29,0x07]
; CHECK-NEXT: vmovapd %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x29,0x06]
@@ -533,7 +533,7 @@ declare void @llvm.x86.avx512.mask.storeu.pd.128(i8*, <2 x double>, i8)
define void@test_int_x86_avx512_mask_storeu_pd_128(i8* %ptr1, i8* %ptr2, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovupd %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x11,0x07]
; CHECK-NEXT: vmovupd %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x06]
@@ -547,7 +547,7 @@ declare void @llvm.x86.avx512.mask.storeu.pd.256(i8*, <4 x double>, i8)
define void@test_int_x86_avx512_mask_storeu_pd_256(i8* %ptr1, i8* %ptr2, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovupd %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x11,0x07]
; CHECK-NEXT: vmovupd %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x11,0x06]
@@ -561,7 +561,7 @@ declare void @llvm.x86.avx512.mask.store.ps.128(i8*, <4 x float>, i8)
define void@test_int_x86_avx512_mask_store_ps_128(i8* %ptr1, i8* %ptr2, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovaps %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x29,0x07]
; CHECK-NEXT: vmovaps %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x06]
@@ -575,7 +575,7 @@ declare void @llvm.x86.avx512.mask.store.ps.256(i8*, <8 x float>, i8)
define void@test_int_x86_avx512_mask_store_ps_256(i8* %ptr1, i8* %ptr2, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovaps %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x29,0x07]
; CHECK-NEXT: vmovaps %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x06]
@@ -589,7 +589,7 @@ declare void @llvm.x86.avx512.mask.storeu.ps.128(i8*, <4 x float>, i8)
define void@test_int_x86_avx512_mask_storeu_ps_128(i8* %ptr1, i8* %ptr2, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x11,0x07]
; CHECK-NEXT: vmovups %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x06]
@@ -603,7 +603,7 @@ declare void @llvm.x86.avx512.mask.storeu.ps.256(i8*, <8 x float>, i8)
define void@test_int_x86_avx512_mask_storeu_ps_256(i8* %ptr1, i8* %ptr2, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovups %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x11,0x07]
; CHECK-NEXT: vmovups %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x06]
@@ -617,7 +617,7 @@ declare void @llvm.x86.avx512.mask.storeu.q.128(i8*, <2 x i64>, i8)
define void@test_int_x86_avx512_mask_storeu_q_128(i8* %ptr1, i8* %ptr2, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqu64 %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0x7f,0x07]
; CHECK-NEXT: vmovdqu %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x06]
@@ -631,7 +631,7 @@ declare void @llvm.x86.avx512.mask.storeu.q.256(i8*, <4 x i64>, i8)
define void@test_int_x86_avx512_mask_storeu_q_256(i8* %ptr1, i8* %ptr2, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqu64 %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0x7f,0x07]
; CHECK-NEXT: vmovdqu %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x7f,0x06]
@@ -645,7 +645,7 @@ declare void @llvm.x86.avx512.mask.storeu.d.128(i8*, <4 x i32>, i8)
define void@test_int_x86_avx512_mask_storeu_d_128(i8* %ptr1, i8* %ptr2, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqu32 %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x7f,0x07]
; CHECK-NEXT: vmovdqu %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x06]
@@ -659,7 +659,7 @@ declare void @llvm.x86.avx512.mask.storeu.d.256(i8*, <8 x i32>, i8)
define void@test_int_x86_avx512_mask_storeu_d_256(i8* %ptr1, i8* %ptr2, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqu32 %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x7f,0x07]
; CHECK-NEXT: vmovdqu %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x7f,0x06]
@@ -673,7 +673,7 @@ declare void @llvm.x86.avx512.mask.store.q.128(i8*, <2 x i64>, i8)
define void@test_int_x86_avx512_mask_store_q_128(i8* %ptr1, i8* %ptr2, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqa64 %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x7f,0x07]
; CHECK-NEXT: vmovdqa %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7f,0x06]
@@ -687,7 +687,7 @@ declare void @llvm.x86.avx512.mask.store.q.256(i8*, <4 x i64>, i8)
define void@test_int_x86_avx512_mask_store_q_256(i8* %ptr1, i8* %ptr2, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqa64 %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x7f,0x07]
; CHECK-NEXT: vmovdqa %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x7f,0x06]
@@ -701,7 +701,7 @@ declare void @llvm.x86.avx512.mask.store.d.128(i8*, <4 x i32>, i8)
define void@test_int_x86_avx512_mask_store_d_128(i8* %ptr1, i8* %ptr2, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqa32 %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x7f,0x07]
; CHECK-NEXT: vmovdqa %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7f,0x06]
@@ -715,7 +715,7 @@ declare void @llvm.x86.avx512.mask.store.d.256(i8*, <8 x i32>, i8)
define void@test_int_x86_avx512_mask_store_d_256(i8* %ptr1, i8* %ptr2, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqa32 %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x7f,0x07]
; CHECK-NEXT: vmovdqa %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x7f,0x06]
@@ -727,7 +727,7 @@ define void@test_int_x86_avx512_mask_store_d_256(i8* %ptr1, i8* %ptr2, <8 x i32>
define <8 x float> @test_mask_load_aligned_ps_256(<8 x float> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovaps (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x28,0x07]
@@ -745,7 +745,7 @@ declare <8 x float> @llvm.x86.avx512.mask.load.ps.256(i8*, <8 x float>, i8)
define <8 x float> @test_mask_load_unaligned_ps_256(<8 x float> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovups (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x10,0x07]
@@ -763,7 +763,7 @@ declare <8 x float> @llvm.x86.avx512.mask.loadu.ps.256(i8*, <8 x float>, i8)
define <4 x double> @test_mask_load_aligned_pd_256(<4 x double> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovapd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x28,0x07]
@@ -781,7 +781,7 @@ declare <4 x double> @llvm.x86.avx512.mask.load.pd.256(i8*, <4 x double>, i8)
define <4 x double> @test_mask_load_unaligned_pd_256(<4 x double> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovupd (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x10,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovupd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x10,0x07]
@@ -799,7 +799,7 @@ declare <4 x double> @llvm.x86.avx512.mask.loadu.pd.256(i8*, <4 x double>, i8)
define <4 x float> @test_mask_load_aligned_ps_128(<4 x float> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovaps (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x28,0x07]
@@ -817,7 +817,7 @@ declare <4 x float> @llvm.x86.avx512.mask.load.ps.128(i8*, <4 x float>, i8)
define <4 x float> @test_mask_load_unaligned_ps_128(<4 x float> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovups (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x10,0x07]
@@ -835,7 +835,7 @@ declare <4 x float> @llvm.x86.avx512.mask.loadu.ps.128(i8*, <4 x float>, i8)
define <2 x double> @test_mask_load_aligned_pd_128(<2 x double> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovapd (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x28,0x07]
@@ -853,7 +853,7 @@ declare <2 x double> @llvm.x86.avx512.mask.load.pd.128(i8*, <2 x double>, i8)
define <2 x double> @test_mask_load_unaligned_pd_128(<2 x double> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovupd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x10,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovupd (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x10,0x07]
@@ -873,7 +873,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.loadu.d.128(i8*, <4 x i32>, i8)
define <4 x i32> @test_mask_load_unaligned_d_128(i8* %ptr, i8* %ptr2, <4 x i32> %data, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x07]
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqu32 (%rsi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x6f,0x06]
@@ -891,7 +891,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.loadu.d.256(i8*, <8 x i32>, i8)
define <8 x i32> @test_mask_load_unaligned_d_256(i8* %ptr, i8* %ptr2, <8 x i32> %data, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x6f,0x07]
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqu32 (%rsi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x6f,0x06]
@@ -909,7 +909,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.loadu.q.128(i8*, <2 x i64>, i8)
define <2 x i64> @test_mask_load_unaligned_q_128(i8* %ptr, i8* %ptr2, <2 x i64> %data, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x07]
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqu64 (%rsi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0x6f,0x06]
@@ -927,7 +927,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.loadu.q.256(i8*, <4 x i64>, i8)
define <4 x i64> @test_mask_load_unaligned_q_256(i8* %ptr, i8* %ptr2, <4 x i64> %data, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqu (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x6f,0x07]
; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
; CHECK-NEXT: vmovdqu64 (%rsi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0x6f,0x06]
@@ -945,7 +945,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.load.d.128(i8*, <4 x i32>, i8)
define <4 x i32> @test_mask_load_aligned_d_128(<4 x i32> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovdqa32 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6f,0x07]
@@ -963,7 +963,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.load.d.256(i8*, <8 x i32>, i8)
define <8 x i32> @test_mask_load_aligned_d_256(<8 x i32> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovdqa32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6f,0x07]
@@ -981,7 +981,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.load.q.128(i8*, <2 x i64>, i8)
define <2 x i64> @test_mask_load_aligned_q_128(<2 x i64> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovdqa64 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x6f,0x07]
@@ -999,7 +999,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.load.q.256(i8*, <4 x i64>, i8)
define <4 x i64> @test_mask_load_aligned_q_256(<4 x i64> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqa (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x07]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vmovdqa64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x6f,0x07]
@@ -1017,7 +1017,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pshuf.d.128(<4 x i32>, i32, <4 x i32>, i
define <4 x i32>@test_int_x86_avx512_mask_pshuf_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshuf_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshufd $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x70,0xd0,0x03]
; CHECK-NEXT: ## xmm2 = xmm0[3,0,0,0]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
@@ -1040,7 +1040,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pshuf.d.256(<8 x i32>, i32, <8 x i32>, i
define <8 x i32>@test_int_x86_avx512_mask_pshuf_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshuf_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpshufd $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x70,0xd0,0x03]
; CHECK-NEXT: ## ymm2 = ymm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
@@ -1061,7 +1061,7 @@ define <8 x i32>@test_int_x86_avx512_mask_pshuf_d_256(<8 x i32> %x0, i32 %x1, <8
define i8 @test_pcmpeq_d_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_pcmpeq_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -1072,7 +1072,7 @@ define i8 @test_pcmpeq_d_256(<8 x i32> %a, <8 x i32> %b) {
define i8 @test_mask_pcmpeq_d_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
@@ -1086,7 +1086,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32>, <8 x i32>, i8)
define i8 @test_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_pcmpeq_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -1097,7 +1097,7 @@ define i8 @test_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b) {
define i8 @test_mask_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
@@ -1111,7 +1111,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64>, <4 x i64>, i8)
define i8 @test_pcmpgt_d_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_pcmpgt_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -1122,7 +1122,7 @@ define i8 @test_pcmpgt_d_256(<8 x i32> %a, <8 x i32> %b) {
define i8 @test_mask_pcmpgt_d_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
@@ -1136,7 +1136,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32>, <8 x i32>, i8)
define i8 @test_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_pcmpgt_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -1147,7 +1147,7 @@ define i8 @test_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b) {
define i8 @test_mask_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
@@ -1161,7 +1161,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64>, <4 x i64>, i8)
define i8 @test_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_pcmpeq_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -1172,7 +1172,7 @@ define i8 @test_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b) {
define i8 @test_mask_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
@@ -1186,7 +1186,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32>, <4 x i32>, i8)
define i8 @test_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_pcmpeq_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -1197,7 +1197,7 @@ define i8 @test_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b) {
define i8 @test_mask_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
@@ -1211,7 +1211,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64>, <2 x i64>, i8)
define i8 @test_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_pcmpgt_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -1222,7 +1222,7 @@ define i8 @test_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b) {
define i8 @test_mask_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
@@ -1236,7 +1236,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32>, <4 x i32>, i8)
define i8 @test_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_pcmpgt_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -1247,7 +1247,7 @@ define i8 @test_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b) {
define i8 @test_mask_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
@@ -1263,7 +1263,7 @@ declare <2 x double> @llvm.x86.avx512.mask.unpckh.pd.128(<2 x double>, <2 x doub
define <2 x double>@test_int_x86_avx512_mask_unpckh_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpckhpd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x15,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[1],xmm1[1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1281,7 +1281,7 @@ declare <4 x double> @llvm.x86.avx512.mask.unpckh.pd.256(<4 x double>, <4 x doub
define <4 x double>@test_int_x86_avx512_mask_unpckh_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpckhpd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x15,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1299,7 +1299,7 @@ declare <4 x float> @llvm.x86.avx512.mask.unpckh.ps.128(<4 x float>, <4 x float>
define <4 x float>@test_int_x86_avx512_mask_unpckh_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpckhps %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x15,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1317,7 +1317,7 @@ declare <8 x float> @llvm.x86.avx512.mask.unpckh.ps.256(<8 x float>, <8 x float>
define <8 x float>@test_int_x86_avx512_mask_unpckh_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpckhps %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x15,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1335,7 +1335,7 @@ declare <2 x double> @llvm.x86.avx512.mask.unpckl.pd.128(<2 x double>, <2 x doub
define <2 x double>@test_int_x86_avx512_mask_unpckl_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpcklpd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x14,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[0],xmm1[0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1353,7 +1353,7 @@ declare <4 x double> @llvm.x86.avx512.mask.unpckl.pd.256(<4 x double>, <4 x doub
define <4 x double>@test_int_x86_avx512_mask_unpckl_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpcklpd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x14,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1371,7 +1371,7 @@ declare <4 x float> @llvm.x86.avx512.mask.unpckl.ps.128(<4 x float>, <4 x float>
define <4 x float>@test_int_x86_avx512_mask_unpckl_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpcklps %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x14,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1389,7 +1389,7 @@ declare <8 x float> @llvm.x86.avx512.mask.unpckl.ps.256(<8 x float>, <8 x float>
define <8 x float>@test_int_x86_avx512_mask_unpckl_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vunpcklps %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x14,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1407,7 +1407,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.punpckhd.q.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_mask_punpckhd_q_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhd_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhdq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6a,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1425,7 +1425,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.punpckld.q.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_mask_punpckld_q_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckld_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckldq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x62,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1443,7 +1443,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.punpckhd.q.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_mask_punpckhd_q_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhd_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhdq %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6a,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1461,7 +1461,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.punpckld.q.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_mask_punpckld_q_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckld_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckldq %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x62,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1479,7 +1479,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.punpckhqd.q.128(<2 x i64>, <2 x i64>, <2
define <2 x i64>@test_int_x86_avx512_mask_punpckhqd_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhqd_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhqdq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6d,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[1],xmm1[1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1497,7 +1497,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.punpcklqd.q.128(<2 x i64>, <2 x i64>, <2
define <2 x i64>@test_int_x86_avx512_mask_punpcklqd_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpcklqd_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpcklqdq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6c,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[0],xmm1[0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1515,7 +1515,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.punpcklqd.q.256(<4 x i64>, <4 x i64>, <4
define <4 x i64>@test_int_x86_avx512_mask_punpcklqd_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpcklqd_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpcklqdq %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6c,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1533,7 +1533,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.punpckhqd.q.256(<4 x i64>, <4 x i64>, <4
define <4 x i64>@test_int_x86_avx512_mask_punpckhqd_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhqd_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhqdq %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6d,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1549,7 +1549,7 @@ define <4 x i64>@test_int_x86_avx512_mask_punpckhqd_q_256(<4 x i64> %x0, <4 x i6
define <4 x i32> @test_mask_and_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_and_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpand %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.pand.d.128(<4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer, i8 -1)
@@ -1558,7 +1558,7 @@ define <4 x i32> @test_mask_and_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test_mask_and_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdb,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1569,7 +1569,7 @@ define <4 x i32> @test_mask_and_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i
define <4 x i32> @test_mask_and_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1579,7 +1579,7 @@ define <4 x i32> @test_mask_and_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %m
define <4 x i32> @test_mask_and_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_and_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpand (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -1589,7 +1589,7 @@ define <4 x i32> @test_mask_and_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
define <4 x i32> @test_mask_and_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandd (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdb,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1601,7 +1601,7 @@ define <4 x i32> @test_mask_and_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <
define <4 x i32> @test_mask_and_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1612,7 +1612,7 @@ define <4 x i32> @test_mask_and_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b,
define <4 x i32> @test_mask_and_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_and_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandd (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xdb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -1624,7 +1624,7 @@ define <4 x i32> @test_mask_and_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <4 x i32> @test_mask_and_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandd (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xdb,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1638,7 +1638,7 @@ define <4 x i32> @test_mask_and_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i
define <4 x i32> @test_mask_and_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandd (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xdb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1653,7 +1653,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pand.d.128(<4 x i32>, <4 x i32>, <4 x i3
define <8 x i32> @test_mask_and_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_and_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpand %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.pand.d.256(<8 x i32> %a, <8 x i32> %b, <8 x i32> zeroinitializer, i8 -1)
@@ -1662,7 +1662,7 @@ define <8 x i32> @test_mask_and_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @test_mask_and_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdb,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1673,7 +1673,7 @@ define <8 x i32> @test_mask_and_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i
define <8 x i32> @test_mask_and_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1683,7 +1683,7 @@ define <8 x i32> @test_mask_and_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %m
define <8 x i32> @test_mask_and_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_and_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpand (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -1693,7 +1693,7 @@ define <8 x i32> @test_mask_and_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
define <8 x i32> @test_mask_and_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandd (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdb,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1705,7 +1705,7 @@ define <8 x i32> @test_mask_and_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <
define <8 x i32> @test_mask_and_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1716,7 +1716,7 @@ define <8 x i32> @test_mask_and_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b,
define <8 x i32> @test_mask_and_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_and_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandd (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xdb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -1728,7 +1728,7 @@ define <8 x i32> @test_mask_and_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <8 x i32> @test_mask_and_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandd (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xdb,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1742,7 +1742,7 @@ define <8 x i32> @test_mask_and_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i
define <8 x i32> @test_mask_and_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandd (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xdb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1757,7 +1757,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pand.d.256(<8 x i32>, <8 x i32>, <8 x i3
define <4 x i32> @test_mask_or_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_or_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpor %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xeb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.por.d.128(<4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer, i8 -1)
@@ -1766,7 +1766,7 @@ define <4 x i32> @test_mask_or_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test_mask_or_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpord %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xeb,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1777,7 +1777,7 @@ define <4 x i32> @test_mask_or_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @test_mask_or_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpord %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xeb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1787,7 +1787,7 @@ define <4 x i32> @test_mask_or_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %ma
define <4 x i32> @test_mask_or_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_or_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpor (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xeb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -1797,7 +1797,7 @@ define <4 x i32> @test_mask_or_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
define <4 x i32> @test_mask_or_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpord (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xeb,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1809,7 +1809,7 @@ define <4 x i32> @test_mask_or_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <4
define <4 x i32> @test_mask_or_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpord (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xeb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1820,7 +1820,7 @@ define <4 x i32> @test_mask_or_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i
define <4 x i32> @test_mask_or_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_or_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpord (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xeb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -1832,7 +1832,7 @@ define <4 x i32> @test_mask_or_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <4 x i32> @test_mask_or_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpord (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xeb,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -1846,7 +1846,7 @@ define <4 x i32> @test_mask_or_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i3
define <4 x i32> @test_mask_or_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpord (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xeb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1861,7 +1861,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.por.d.128(<4 x i32>, <4 x i32>, <4 x i32
define <8 x i32> @test_mask_or_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_or_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpor %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xeb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.por.d.256(<8 x i32> %a, <8 x i32> %b, <8 x i32> zeroinitializer, i8 -1)
@@ -1870,7 +1870,7 @@ define <8 x i32> @test_mask_or_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @test_mask_or_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpord %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xeb,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1881,7 +1881,7 @@ define <8 x i32> @test_mask_or_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i3
define <8 x i32> @test_mask_or_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpord %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xeb,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1891,7 +1891,7 @@ define <8 x i32> @test_mask_or_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %ma
define <8 x i32> @test_mask_or_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_or_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpor (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xeb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -1901,7 +1901,7 @@ define <8 x i32> @test_mask_or_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
define <8 x i32> @test_mask_or_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpord (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xeb,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1913,7 +1913,7 @@ define <8 x i32> @test_mask_or_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <8
define <8 x i32> @test_mask_or_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpord (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xeb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1924,7 +1924,7 @@ define <8 x i32> @test_mask_or_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i
define <8 x i32> @test_mask_or_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_or_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpord (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xeb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -1936,7 +1936,7 @@ define <8 x i32> @test_mask_or_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <8 x i32> @test_mask_or_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpord (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xeb,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1950,7 +1950,7 @@ define <8 x i32> @test_mask_or_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i3
define <8 x i32> @test_mask_or_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpord (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xeb,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1965,7 +1965,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.por.d.256(<8 x i32>, <8 x i32>, <8 x i32
define <4 x i32> @test_mask_xor_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_xor_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.pxor.d.128(<4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer, i8 -1)
@@ -1974,7 +1974,7 @@ define <4 x i32> @test_mask_xor_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test_mask_xor_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpxord %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xef,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1985,7 +1985,7 @@ define <4 x i32> @test_mask_xor_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i
define <4 x i32> @test_mask_xor_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpxord %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xef,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1995,7 +1995,7 @@ define <4 x i32> @test_mask_xor_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %m
define <4 x i32> @test_mask_xor_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_xor_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -2005,7 +2005,7 @@ define <4 x i32> @test_mask_xor_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
define <4 x i32> @test_mask_xor_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpxord (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xef,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2017,7 +2017,7 @@ define <4 x i32> @test_mask_xor_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <
define <4 x i32> @test_mask_xor_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpxord (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xef,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2028,7 +2028,7 @@ define <4 x i32> @test_mask_xor_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b,
define <4 x i32> @test_mask_xor_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_xor_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxord (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xef,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2040,7 +2040,7 @@ define <4 x i32> @test_mask_xor_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <4 x i32> @test_mask_xor_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpxord (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xef,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2054,7 +2054,7 @@ define <4 x i32> @test_mask_xor_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i
define <4 x i32> @test_mask_xor_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpxord (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xef,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2069,7 +2069,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pxor.d.128(<4 x i32>, <4 x i32>, <4 x i3
define <8 x i32> @test_mask_xor_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_xor_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.pxor.d.256(<8 x i32> %a, <8 x i32> %b, <8 x i32> zeroinitializer, i8 -1)
@@ -2078,7 +2078,7 @@ define <8 x i32> @test_mask_xor_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @test_mask_xor_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpxord %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xef,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2089,7 +2089,7 @@ define <8 x i32> @test_mask_xor_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i
define <8 x i32> @test_mask_xor_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpxord %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xef,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2099,7 +2099,7 @@ define <8 x i32> @test_mask_xor_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %m
define <8 x i32> @test_mask_xor_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_xor_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -2109,7 +2109,7 @@ define <8 x i32> @test_mask_xor_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
define <8 x i32> @test_mask_xor_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpxord (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xef,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2121,7 +2121,7 @@ define <8 x i32> @test_mask_xor_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <
define <8 x i32> @test_mask_xor_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpxord (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xef,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2132,7 +2132,7 @@ define <8 x i32> @test_mask_xor_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b,
define <8 x i32> @test_mask_xor_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_xor_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxord (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xef,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2144,7 +2144,7 @@ define <8 x i32> @test_mask_xor_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <8 x i32> @test_mask_xor_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpxord (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xef,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2158,7 +2158,7 @@ define <8 x i32> @test_mask_xor_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i
define <8 x i32> @test_mask_xor_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpxord (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xef,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2173,7 +2173,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pxor.d.256(<8 x i32>, <8 x i32>, <8 x i3
define <4 x i32> @test_mask_andnot_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_andnot_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.pandn.d.128(<4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer, i8 -1)
@@ -2182,7 +2182,7 @@ define <4 x i32> @test_mask_andnot_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test_mask_andnot_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandnd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdf,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -2193,7 +2193,7 @@ define <4 x i32> @test_mask_andnot_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4
define <4 x i32> @test_mask_andnot_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandnd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2203,7 +2203,7 @@ define <4 x i32> @test_mask_andnot_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8
define <4 x i32> @test_mask_andnot_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandn (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -2213,7 +2213,7 @@ define <4 x i32> @test_mask_andnot_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b)
define <4 x i32> @test_mask_andnot_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnd (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdf,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2225,7 +2225,7 @@ define <4 x i32> @test_mask_andnot_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b
define <4 x i32> @test_mask_andnot_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2236,7 +2236,7 @@ define <4 x i32> @test_mask_andnot_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_
define <4 x i32> @test_mask_andnot_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandnd (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2248,7 +2248,7 @@ define <4 x i32> @test_mask_andnot_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <4 x i32> @test_mask_andnot_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnd (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xdf,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2262,7 +2262,7 @@ define <4 x i32> @test_mask_andnot_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4
define <4 x i32> @test_mask_andnot_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnd (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2277,7 +2277,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pandn.d.128(<4 x i32>, <4 x i32>, <4 x i
define <8 x i32> @test_mask_andnot_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_andnot_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.pandn.d.256(<8 x i32> %a, <8 x i32> %b, <8 x i32> zeroinitializer, i8 -1)
@@ -2286,7 +2286,7 @@ define <8 x i32> @test_mask_andnot_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @test_mask_andnot_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandnd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdf,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2297,7 +2297,7 @@ define <8 x i32> @test_mask_andnot_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8
define <8 x i32> @test_mask_andnot_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandnd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2307,7 +2307,7 @@ define <8 x i32> @test_mask_andnot_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8
define <8 x i32> @test_mask_andnot_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandn (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -2317,7 +2317,7 @@ define <8 x i32> @test_mask_andnot_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b)
define <8 x i32> @test_mask_andnot_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnd (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdf,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2329,7 +2329,7 @@ define <8 x i32> @test_mask_andnot_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b
define <8 x i32> @test_mask_andnot_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2340,7 +2340,7 @@ define <8 x i32> @test_mask_andnot_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_
define <8 x i32> @test_mask_andnot_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandnd (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2352,7 +2352,7 @@ define <8 x i32> @test_mask_andnot_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <8 x i32> @test_mask_andnot_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnd (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xdf,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2366,7 +2366,7 @@ define <8 x i32> @test_mask_andnot_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8
define <8 x i32> @test_mask_andnot_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnd (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2381,7 +2381,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pandn.d.256(<8 x i32>, <8 x i32>, <8 x i
define <2 x i64> @test_mask_andnot_epi64_rr_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_mask_andnot_epi64_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.pandn.q.128(<2 x i64> %a, <2 x i64> %b, <2 x i64> zeroinitializer, i8 -1)
@@ -2390,7 +2390,7 @@ define <2 x i64> @test_mask_andnot_epi64_rr_128(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @test_mask_andnot_epi64_rrk_128(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandnq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xdf,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -2401,7 +2401,7 @@ define <2 x i64> @test_mask_andnot_epi64_rrk_128(<2 x i64> %a, <2 x i64> %b, <2
define <2 x i64> @test_mask_andnot_epi64_rrkz_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandnq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2411,7 +2411,7 @@ define <2 x i64> @test_mask_andnot_epi64_rrkz_128(<2 x i64> %a, <2 x i64> %b, i8
define <2 x i64> @test_mask_andnot_epi64_rm_128(<2 x i64> %a, <2 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi64_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandn (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <2 x i64>, <2 x i64>* %ptr_b
@@ -2421,7 +2421,7 @@ define <2 x i64> @test_mask_andnot_epi64_rm_128(<2 x i64> %a, <2 x i64>* %ptr_b)
define <2 x i64> @test_mask_andnot_epi64_rmk_128(<2 x i64> %a, <2 x i64>* %ptr_b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnq (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xdf,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2433,7 +2433,7 @@ define <2 x i64> @test_mask_andnot_epi64_rmk_128(<2 x i64> %a, <2 x i64>* %ptr_b
define <2 x i64> @test_mask_andnot_epi64_rmkz_128(<2 x i64> %a, <2 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnq (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2444,7 +2444,7 @@ define <2 x i64> @test_mask_andnot_epi64_rmkz_128(<2 x i64> %a, <2 x i64>* %ptr_
define <2 x i64> @test_mask_andnot_epi64_rmb_128(<2 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi64_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandnq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -2456,7 +2456,7 @@ define <2 x i64> @test_mask_andnot_epi64_rmb_128(<2 x i64> %a, i64* %ptr_b) {
define <2 x i64> @test_mask_andnot_epi64_rmbk_128(<2 x i64> %a, i64* %ptr_b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnq (%rdi){1to2}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x19,0xdf,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2470,7 +2470,7 @@ define <2 x i64> @test_mask_andnot_epi64_rmbk_128(<2 x i64> %a, i64* %ptr_b, <2
define <2 x i64> @test_mask_andnot_epi64_rmbkz_128(<2 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnq (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x99,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2485,7 +2485,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pandn.q.128(<2 x i64>, <2 x i64>, <2 x i
define <4 x i64> @test_mask_andnot_epi64_rr_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_mask_andnot_epi64_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.pandn.q.256(<4 x i64> %a, <4 x i64> %b, <4 x i64> zeroinitializer, i8 -1)
@@ -2494,7 +2494,7 @@ define <4 x i64> @test_mask_andnot_epi64_rr_256(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @test_mask_andnot_epi64_rrk_256(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandnq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xdf,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2505,7 +2505,7 @@ define <4 x i64> @test_mask_andnot_epi64_rrk_256(<4 x i64> %a, <4 x i64> %b, <4
define <4 x i64> @test_mask_andnot_epi64_rrkz_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpandnq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2515,7 +2515,7 @@ define <4 x i64> @test_mask_andnot_epi64_rrkz_256(<4 x i64> %a, <4 x i64> %b, i8
define <4 x i64> @test_mask_andnot_epi64_rm_256(<4 x i64> %a, <4 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi64_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandn (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i64>, <4 x i64>* %ptr_b
@@ -2525,7 +2525,7 @@ define <4 x i64> @test_mask_andnot_epi64_rm_256(<4 x i64> %a, <4 x i64>* %ptr_b)
define <4 x i64> @test_mask_andnot_epi64_rmk_256(<4 x i64> %a, <4 x i64>* %ptr_b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnq (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xdf,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2537,7 +2537,7 @@ define <4 x i64> @test_mask_andnot_epi64_rmk_256(<4 x i64> %a, <4 x i64>* %ptr_b
define <4 x i64> @test_mask_andnot_epi64_rmkz_256(<4 x i64> %a, <4 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnq (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2548,7 +2548,7 @@ define <4 x i64> @test_mask_andnot_epi64_rmkz_256(<4 x i64> %a, <4 x i64>* %ptr_
define <4 x i64> @test_mask_andnot_epi64_rmb_256(<4 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi64_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpandnq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -2560,7 +2560,7 @@ define <4 x i64> @test_mask_andnot_epi64_rmb_256(<4 x i64> %a, i64* %ptr_b) {
define <4 x i64> @test_mask_andnot_epi64_rmbk_256(<4 x i64> %a, i64* %ptr_b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnq (%rdi){1to4}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x39,0xdf,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2574,7 +2574,7 @@ define <4 x i64> @test_mask_andnot_epi64_rmbk_256(<4 x i64> %a, i64* %ptr_b, <4
define <4 x i64> @test_mask_andnot_epi64_rmbkz_256(<4 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_epi64_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpandnq (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2589,7 +2589,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pandn.q.256(<4 x i64>, <4 x i64>, <4 x i
define <4 x i32> @test_mask_add_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_add_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.padd.d.128(<4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer, i8 -1)
@@ -2598,7 +2598,7 @@ define <4 x i32> @test_mask_add_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test_mask_add_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -2609,7 +2609,7 @@ define <4 x i32> @test_mask_add_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i
define <4 x i32> @test_mask_add_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2619,7 +2619,7 @@ define <4 x i32> @test_mask_add_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %m
define <4 x i32> @test_mask_add_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -2629,7 +2629,7 @@ define <4 x i32> @test_mask_add_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
define <4 x i32> @test_mask_add_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2641,7 +2641,7 @@ define <4 x i32> @test_mask_add_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <
define <4 x i32> @test_mask_add_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2652,7 +2652,7 @@ define <4 x i32> @test_mask_add_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b,
define <4 x i32> @test_mask_add_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2664,7 +2664,7 @@ define <4 x i32> @test_mask_add_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <4 x i32> @test_mask_add_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpaddd (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xfe,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2678,7 +2678,7 @@ define <4 x i32> @test_mask_add_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i
define <4 x i32> @test_mask_add_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpaddd (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2693,7 +2693,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.padd.d.128(<4 x i32>, <4 x i32>, <4 x i3
define <4 x i32> @test_mask_sub_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_sub_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.psub.d.128(<4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer, i8 -1)
@@ -2702,7 +2702,7 @@ define <4 x i32> @test_mask_sub_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test_mask_sub_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfa,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -2713,7 +2713,7 @@ define <4 x i32> @test_mask_sub_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <4 x i
define <4 x i32> @test_mask_sub_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfa,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2723,7 +2723,7 @@ define <4 x i32> @test_mask_sub_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %m
define <4 x i32> @test_mask_sub_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
@@ -2733,7 +2733,7 @@ define <4 x i32> @test_mask_sub_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
define <4 x i32> @test_mask_sub_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsubd (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfa,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2745,7 +2745,7 @@ define <4 x i32> @test_mask_sub_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <
define <4 x i32> @test_mask_sub_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsubd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfa,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2756,7 +2756,7 @@ define <4 x i32> @test_mask_sub_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b,
define <4 x i32> @test_mask_sub_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xfa,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2768,7 +2768,7 @@ define <4 x i32> @test_mask_sub_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
define <4 x i32> @test_mask_sub_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsubd (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xfa,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -2782,7 +2782,7 @@ define <4 x i32> @test_mask_sub_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <4 x i
define <4 x i32> @test_mask_sub_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsubd (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xfa,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2797,7 +2797,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psub.d.128(<4 x i32>, <4 x i32>, <4 x i3
define <8 x i32> @test_mask_sub_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_sub_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfa,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psub.d.256(<8 x i32> %a, <8 x i32> %b, <8 x i32> zeroinitializer, i8 -1)
@@ -2806,7 +2806,7 @@ define <8 x i32> @test_mask_sub_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @test_mask_sub_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfa,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2817,7 +2817,7 @@ define <8 x i32> @test_mask_sub_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i
define <8 x i32> @test_mask_sub_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfa,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2827,7 +2827,7 @@ define <8 x i32> @test_mask_sub_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %m
define <8 x i32> @test_mask_sub_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfa,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -2837,7 +2837,7 @@ define <8 x i32> @test_mask_sub_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
define <8 x i32> @test_mask_sub_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsubd (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfa,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2849,7 +2849,7 @@ define <8 x i32> @test_mask_sub_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <
define <8 x i32> @test_mask_sub_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsubd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfa,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2860,7 +2860,7 @@ define <8 x i32> @test_mask_sub_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b,
define <8 x i32> @test_mask_sub_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xfa,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2872,7 +2872,7 @@ define <8 x i32> @test_mask_sub_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <8 x i32> @test_mask_sub_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsubd (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfa,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2886,7 +2886,7 @@ define <8 x i32> @test_mask_sub_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i
define <8 x i32> @test_mask_sub_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsubd (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfa,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2901,7 +2901,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psub.d.256(<8 x i32>, <8 x i32>, <8 x i3
define <8 x i32> @test_mask_add_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_add_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.padd.d.256(<8 x i32> %a, <8 x i32> %b, <8 x i32> zeroinitializer, i8 -1)
@@ -2910,7 +2910,7 @@ define <8 x i32> @test_mask_add_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @test_mask_add_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -2921,7 +2921,7 @@ define <8 x i32> @test_mask_add_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <8 x i
define <8 x i32> @test_mask_add_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2931,7 +2931,7 @@ define <8 x i32> @test_mask_add_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8 %m
define <8 x i32> @test_mask_add_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
@@ -2941,7 +2941,7 @@ define <8 x i32> @test_mask_add_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
define <8 x i32> @test_mask_add_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2953,7 +2953,7 @@ define <8 x i32> @test_mask_add_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <
define <8 x i32> @test_mask_add_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2964,7 +2964,7 @@ define <8 x i32> @test_mask_add_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b,
define <8 x i32> @test_mask_add_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddd (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
@@ -2976,7 +2976,7 @@ define <8 x i32> @test_mask_add_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
define <8 x i32> @test_mask_add_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i32> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpaddd (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfe,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -2990,7 +2990,7 @@ define <8 x i32> @test_mask_add_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <8 x i
define <8 x i32> @test_mask_add_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpaddd (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfe,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3005,7 +3005,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.padd.d.256(<8 x i32>, <8 x i32>, <8 x i3
define <8 x float> @test_mm512_maskz_add_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_add_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3015,7 +3015,7 @@ define <8 x float> @test_mm512_maskz_add_ps_256(<8 x float> %a0, <8 x float> %a1
define <8 x float> @test_mm512_mask_add_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_add_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x58,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -3026,7 +3026,7 @@ define <8 x float> @test_mm512_mask_add_ps_256(<8 x float> %a0, <8 x float> %a1,
define <8 x float> @test_mm512_add_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_add_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.add.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
@@ -3036,7 +3036,7 @@ declare <8 x float> @llvm.x86.avx512.mask.add.ps.256(<8 x float>, <8 x float>, <
define <4 x float> @test_mm512_maskz_add_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_add_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3046,7 +3046,7 @@ define <4 x float> @test_mm512_maskz_add_ps_128(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mm512_mask_add_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_add_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x58,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -3057,7 +3057,7 @@ define <4 x float> @test_mm512_mask_add_ps_128(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mm512_add_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_add_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.add.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
@@ -3067,7 +3067,7 @@ declare <4 x float> @llvm.x86.avx512.mask.add.ps.128(<4 x float>, <4 x float>, <
define <8 x float> @test_mm512_maskz_sub_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_sub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vsubps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5c,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3077,7 +3077,7 @@ define <8 x float> @test_mm512_maskz_sub_ps_256(<8 x float> %a0, <8 x float> %a1
define <8 x float> @test_mm512_mask_sub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_sub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vsubps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5c,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -3088,7 +3088,7 @@ define <8 x float> @test_mm512_mask_sub_ps_256(<8 x float> %a0, <8 x float> %a1,
define <8 x float> @test_mm512_sub_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_sub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5c,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.sub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
@@ -3098,7 +3098,7 @@ declare <8 x float> @llvm.x86.avx512.mask.sub.ps.256(<8 x float>, <8 x float>, <
define <4 x float> @test_mm512_maskz_sub_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_sub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vsubps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5c,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3108,7 +3108,7 @@ define <4 x float> @test_mm512_maskz_sub_ps_128(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mm512_mask_sub_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_sub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vsubps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5c,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -3119,7 +3119,7 @@ define <4 x float> @test_mm512_mask_sub_ps_128(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mm512_sub_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_sub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vsubps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5c,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.sub.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
@@ -3129,7 +3129,7 @@ declare <4 x float> @llvm.x86.avx512.mask.sub.ps.128(<4 x float>, <4 x float>, <
define <8 x float> @test_mm512_maskz_mul_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_mul_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmulps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x59,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3139,7 +3139,7 @@ define <8 x float> @test_mm512_maskz_mul_ps_256(<8 x float> %a0, <8 x float> %a1
define <8 x float> @test_mm512_mask_mul_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_mul_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmulps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x59,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -3150,7 +3150,7 @@ define <8 x float> @test_mm512_mask_mul_ps_256(<8 x float> %a0, <8 x float> %a1,
define <8 x float> @test_mm512_mul_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_mul_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmulps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x59,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.mul.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
@@ -3160,7 +3160,7 @@ declare <8 x float> @llvm.x86.avx512.mask.mul.ps.256(<8 x float>, <8 x float>, <
define <4 x float> @test_mm512_maskz_mul_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_mul_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmulps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x59,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3170,7 +3170,7 @@ define <4 x float> @test_mm512_maskz_mul_ps_128(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mm512_mask_mul_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_mul_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmulps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x59,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -3181,7 +3181,7 @@ define <4 x float> @test_mm512_mask_mul_ps_128(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mm512_mul_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_mul_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmulps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x59,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.mul.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
@@ -3191,7 +3191,7 @@ declare <4 x float> @llvm.x86.avx512.mask.mul.ps.128(<4 x float>, <4 x float>, <
define <8 x float> @test_mm512_maskz_div_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_div_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vdivps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5e,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3201,7 +3201,7 @@ define <8 x float> @test_mm512_maskz_div_ps_256(<8 x float> %a0, <8 x float> %a1
define <8 x float> @test_mm512_mask_div_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_div_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vdivps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5e,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -3212,7 +3212,7 @@ define <8 x float> @test_mm512_mask_div_ps_256(<8 x float> %a0, <8 x float> %a1,
define <8 x float> @test_mm512_div_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_div_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vdivps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5e,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.div.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
@@ -3222,7 +3222,7 @@ declare <8 x float> @llvm.x86.avx512.mask.div.ps.256(<8 x float>, <8 x float>, <
define <4 x float> @test_mm512_maskz_div_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_div_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vdivps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5e,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3232,7 +3232,7 @@ define <4 x float> @test_mm512_maskz_div_ps_128(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mm512_mask_div_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_div_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vdivps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5e,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -3243,7 +3243,7 @@ define <4 x float> @test_mm512_mask_div_ps_128(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mm512_div_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_div_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vdivps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5e,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.div.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
@@ -3255,7 +3255,7 @@ declare <8 x float> @llvm.x86.avx512.mask.shuf.f32x4.256(<8 x float>, <8 x float
define <8 x float>@test_int_x86_avx512_mask_shuf_f32x4_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f32x4_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendpd $12, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0d,0xc1,0x0c]
; CHECK-NEXT: ## ymm0 = ymm0[0,1],ymm1[2,3]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -3276,7 +3276,7 @@ declare <4 x double> @llvm.x86.avx512.mask.shuf.f64x2.256(<4 x double>, <4 x dou
define <4 x double>@test_int_x86_avx512_mask_shuf_f64x2_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f64x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendpd $12, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0d,0xc1,0x0c]
; CHECK-NEXT: ## ymm0 = ymm0[0,1],ymm1[2,3]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -3297,7 +3297,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.shuf.i32x4.256(<8 x i32>, <8 x i32>, i32
define <8 x i32>@test_int_x86_avx512_mask_shuf_i32x4_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_i32x4_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendd $240, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x02,0xc1,0xf0]
; CHECK-NEXT: ## ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -3314,7 +3314,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.shuf.i64x2.256(<4 x i64>, <4 x i64>, i32
define <4 x i64>@test_int_x86_avx512_mask_shuf_i64x2_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_i64x2_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendd $240, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x02,0xc1,0xf0]
; CHECK-NEXT: ## ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -3331,7 +3331,7 @@ declare <2 x double> @llvm.x86.avx512.mask.shuf.pd.128(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask_shuf_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xd9,0x01]
; CHECK-NEXT: ## xmm3 = xmm0[1],xmm1[0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -3354,7 +3354,7 @@ declare <4 x double> @llvm.x86.avx512.mask.shuf.pd.256(<4 x double>, <4 x double
define <4 x double>@test_int_x86_avx512_mask_shuf_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshufpd $6, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xc6,0xd9,0x06]
; CHECK-NEXT: ## ymm3 = ymm0[0],ymm1[1],ymm0[3],ymm1[2]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -3372,7 +3372,7 @@ declare <4 x float> @llvm.x86.avx512.mask.shuf.ps.128(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_shuf_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshufps $22, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0xc6,0xd9,0x16]
; CHECK-NEXT: ## xmm3 = xmm0[2,1],xmm1[1,0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -3390,7 +3390,7 @@ declare <8 x float> @llvm.x86.avx512.mask.shuf.ps.256(<8 x float>, <8 x float>,
define <8 x float>@test_int_x86_avx512_mask_shuf_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vshufps $22, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0xc6,0xd9,0x16]
; CHECK-NEXT: ## ymm3 = ymm0[2,1],ymm1[1,0],ymm0[6,5],ymm1[5,4]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -3408,7 +3408,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmaxs.d.128(<4 x i32>, <4 x i32>, <4 x i
define <4 x i32>@test_int_x86_avx512_mask_pmaxs_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3d,0xd1]
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x3d,0xc1]
@@ -3424,7 +3424,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmaxs.d.256(<8 x i32>, <8 x i32>, <8 x i
define <8 x i32>@test_int_x86_avx512_mask_pmaxs_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmaxsd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x3d,0xd1]
@@ -3440,7 +3440,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmaxs.q.128(<2 x i64>, <2 x i64>, <2 x i
define <2 x i64>@test_int_x86_avx512_mask_pmaxs_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x3d,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmaxsq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x3d,0xd1]
@@ -3456,7 +3456,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmaxs.q.256(<4 x i64>, <4 x i64>, <4 x i
define <4 x i64>@test_int_x86_avx512_mask_pmaxs_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmaxsq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x3d,0xd1]
; CHECK-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x3d,0xc1]
@@ -3472,7 +3472,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmaxu.d.128(<4 x i32>, <4 x i32>, <4 x i
define <4 x i32>@test_int_x86_avx512_mask_pmaxu_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2,i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmaxud %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3f,0xd1]
; CHECK-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x3f,0xc1]
@@ -3488,7 +3488,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmaxu.d.256(<8 x i32>, <8 x i32>, <8 x i
define <8 x i32>@test_int_x86_avx512_mask_pmaxu_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x3f,0xd1]
@@ -3504,7 +3504,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmaxu.q.128(<2 x i64>, <2 x i64>, <2 x i
define <2 x i64>@test_int_x86_avx512_mask_pmaxu_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxuq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x3f,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmaxuq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x3f,0xd1]
@@ -3520,7 +3520,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmaxu.q.256(<4 x i64>, <4 x i64>, <4 x i
define <4 x i64>@test_int_x86_avx512_mask_pmaxu_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmaxuq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x3f,0xd1]
; CHECK-NEXT: vpmaxuq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x3f,0xc1]
@@ -3536,7 +3536,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmins.d.128(<4 x i32>, <4 x i32>, <4 x i
define <4 x i32>@test_int_x86_avx512_mask_pmins_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x39,0xd1]
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x39,0xc1]
@@ -3552,7 +3552,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmins.d.256(<8 x i32>, <8 x i32>, <8 x i
define <8 x i32>@test_int_x86_avx512_mask_pmins_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpminsd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x39,0xd1]
@@ -3568,7 +3568,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmins.q.128(<2 x i64>, <2 x i64>, <2 x i
define <2 x i64>@test_int_x86_avx512_mask_pmins_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x39,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpminsq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x39,0xd1]
@@ -3584,7 +3584,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmins.q.256(<4 x i64>, <4 x i64>, <4 x i
define <4 x i64>@test_int_x86_avx512_mask_pmins_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpminsq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x39,0xd1]
; CHECK-NEXT: vpminsq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x39,0xc1]
@@ -3600,7 +3600,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pminu.d.128(<4 x i32>, <4 x i32>, <4 x i
define <4 x i32>@test_int_x86_avx512_mask_pminu_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpminud %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3b,0xd1]
; CHECK-NEXT: vpminud %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x3b,0xc1]
@@ -3616,7 +3616,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pminu.d.256(<8 x i32>, <8 x i32>, <8 x i
define <8 x i32>@test_int_x86_avx512_mask_pminu_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminud %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpminud %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x3b,0xd1]
@@ -3632,7 +3632,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pminu.q.128(<2 x i64>, <2 x i64>, <2 x i
define <2 x i64>@test_int_x86_avx512_mask_pminu_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminuq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x3b,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpminuq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x3b,0xd1]
@@ -3648,7 +3648,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pminu.q.256(<4 x i64>, <4 x i64>, <4 x i
define <4 x i64>@test_int_x86_avx512_mask_pminu_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpminuq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x3b,0xd1]
; CHECK-NEXT: vpminuq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x3b,0xc1]
@@ -3664,7 +3664,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.psrl.q.128(<2 x i64>, <2 x i64>, <2 x i6
define <2 x i64>@test_int_x86_avx512_mask_psrl_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xd3,0xd1]
@@ -3684,7 +3684,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.psrl.q.256(<4 x i64>, <2 x i64>, <4 x i6
define <4 x i64>@test_int_x86_avx512_mask_psrl_q_256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xd3,0xd1]
@@ -3704,7 +3704,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psrl.d.128(<4 x i32>, <4 x i32>, <4 x i3
define <4 x i32>@test_int_x86_avx512_mask_psrl_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd2,0xd1]
@@ -3724,7 +3724,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psrl.d.256(<8 x i32>, <4 x i32>, <8 x i3
define <8 x i32>@test_int_x86_avx512_mask_psrl_d_256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd2,0xd1]
@@ -3744,7 +3744,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psra.d.128(<4 x i32>, <4 x i32>, <4 x i3
define <4 x i32>@test_int_x86_avx512_mask_psra_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrad %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe2,0xd1]
@@ -3764,7 +3764,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psra.d.256(<8 x i32>, <4 x i32>, <8 x i3
define <8 x i32>@test_int_x86_avx512_mask_psra_d_256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrad %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe2,0xd1]
@@ -3784,7 +3784,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psll.d.128(<4 x i32>, <4 x i32>, <4 x i3
define <4 x i32>@test_int_x86_avx512_mask_psll_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpslld %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf2,0xd1]
@@ -3804,7 +3804,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psll.d.256(<8 x i32>, <4 x i32>, <8 x i3
define <8 x i32>@test_int_x86_avx512_mask_psll_d_256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpslld %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf2,0xd1]
@@ -3824,7 +3824,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.psll.q.256(<4 x i64>, <2 x i64>, <4 x i6
define <4 x i64>@test_int_x86_avx512_mask_psll_q_256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllq %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsllq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xf3,0xd1]
@@ -3844,7 +3844,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.psrl.qi.128(<2 x i64>, i32, <2 x i64>, i
define <2 x i64>@test_int_x86_avx512_mask_psrl_qi_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x73,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x73,0xd0,0x03]
@@ -3864,7 +3864,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.psrl.qi.256(<4 x i64>, i32, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_psrl_qi_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x73,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x73,0xd0,0x03]
@@ -3884,7 +3884,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psrl.di.128(<4 x i32>, i32, <4 x i32>, i
define <4 x i32>@test_int_x86_avx512_mask_psrl_di_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_di_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x72,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xd0,0x03]
@@ -3904,7 +3904,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psrl.di.256(<8 x i32>, i32, <8 x i32>, i
define <8 x i32>@test_int_x86_avx512_mask_psrl_di_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_di_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x72,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xd0,0x03]
@@ -3924,7 +3924,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psll.di.128(<4 x i32>, i32, <4 x i32>, i
define <4 x i32>@test_int_x86_avx512_mask_psll_di_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_di_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x72,0xf0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpslld $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xf0,0x03]
@@ -3944,7 +3944,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psll.di.256(<8 x i32>, i32, <8 x i32>, i
define <8 x i32>@test_int_x86_avx512_mask_psll_di_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_di_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x72,0xf0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpslld $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xf0,0x03]
@@ -3964,7 +3964,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.psrlv2.di(<2 x i64>, <2 x i64>, <2 x i64
define <2 x i64>@test_int_x86_avx512_mask_psrlv2_di(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrlv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlvq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x45,0xd1]
@@ -3984,7 +3984,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.psrlv4.di(<4 x i64>, <4 x i64>, <4 x i64
define <4 x i64>@test_int_x86_avx512_mask_psrlv4_di(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrlv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvq %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlvq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x45,0xd1]
@@ -4004,7 +4004,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psrlv4.si(<4 x i32>, <4 x i32>, <4 x i32
define <4 x i32>@test_int_x86_avx512_mask_psrlv4_si(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrlv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlvd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x45,0xd1]
@@ -4024,7 +4024,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psrlv8.si(<8 x i32>, <8 x i32>, <8 x i32
define <8 x i32>@test_int_x86_avx512_mask_psrlv8_si(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrlv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlvd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x45,0xd1]
@@ -4044,7 +4044,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psrav4.si(<4 x i32>, <4 x i32>, <4 x i32
define <4 x i32>@test_int_x86_avx512_mask_psrav4_si(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrav4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsravd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x46,0xd1]
@@ -4064,7 +4064,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psrav8.si(<8 x i32>, <8 x i32>, <8 x i32
define <8 x i32>@test_int_x86_avx512_mask_psrav8_si(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrav8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsravd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x46,0xd1]
@@ -4082,7 +4082,7 @@ define <8 x i32>@test_int_x86_avx512_mask_psrav8_si(<8 x i32> %x0, <8 x i32> %x1
define <8 x i32>@test_int_x86_avx512_mask_psrav8_si_const() {
; CHECK-LABEL: test_int_x86_avx512_mask_psrav8_si_const:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqa {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; CHECK-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI284_0-4, kind: reloc_riprel_4byte
@@ -4097,7 +4097,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.psllv2.di(<2 x i64>, <2 x i64>, <2 x i64
define <2 x i64>@test_int_x86_avx512_mask_psllv2_di(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psllv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsllvq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x47,0xd1]
@@ -4117,7 +4117,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.psllv4.di(<4 x i64>, <4 x i64>, <4 x i64
define <4 x i64>@test_int_x86_avx512_mask_psllv4_di(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psllv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvq %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsllvq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x47,0xd1]
@@ -4137,7 +4137,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.psllv4.si(<4 x i32>, <4 x i32>, <4 x i32
define <4 x i32>@test_int_x86_avx512_mask_psllv4_si(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psllv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsllvd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x47,0xd1]
@@ -4157,7 +4157,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.psllv8.si(<8 x i32>, <8 x i32>, <8 x i32
define <8 x i32>@test_int_x86_avx512_mask_psllv8_si(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psllv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsllvd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x47,0xd1]
@@ -4177,7 +4177,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmovzxb.d.128(<16 x i8>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmovzxb_d_128(<16 x i8> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x31,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4200,7 +4200,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovzxb.d.256(<16 x i8>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_pmovzxb_d_256(<16 x i8> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x31,0xd0]
; CHECK-NEXT: ## ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4223,7 +4223,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmovzxb.q.128(<16 x i8>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_pmovzxb_q_128(<16 x i8> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x32,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4246,7 +4246,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmovzxb.q.256(<16 x i8>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_pmovzxb_q_256(<16 x i8> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x32,0xd0]
; CHECK-NEXT: ## ymm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4269,7 +4269,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmovzxd.q.128(<4 x i32>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_pmovzxd_q_128(<4 x i32> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxd_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxdq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x35,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0],zero,xmm0[1],zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4292,7 +4292,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmovzxd.q.256(<4 x i32>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_pmovzxd_q_256(<4 x i32> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxd_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxdq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x35,0xd0]
; CHECK-NEXT: ## ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4315,7 +4315,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmovzxw.d.128(<8 x i16>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmovzxw_d_128(<8 x i16> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x33,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4338,7 +4338,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovzxw.d.256(<8 x i16>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_pmovzxw_d_256(<8 x i16> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x33,0xd0]
; CHECK-NEXT: ## ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4361,7 +4361,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmovzxw.q.128(<8 x i16>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_pmovzxw_q_128(<8 x i16> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x34,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4384,7 +4384,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmovzxw.q.256(<8 x i16>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_pmovzxw_q_256(<8 x i16> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x34,0xd0]
; CHECK-NEXT: ## ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4407,7 +4407,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmovsxb.d.128(<16 x i8>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmovsxb_d_128(<16 x i8> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x21,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsxbd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x21,0xc8]
@@ -4427,7 +4427,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovsxb.d.256(<16 x i8>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_pmovsxb_d_256(<16 x i8> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x21,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsxbd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x21,0xc8]
@@ -4447,7 +4447,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmovsxb.q.128(<16 x i8>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_pmovsxb_q_128(<16 x i8> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x22,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsxbq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x22,0xc8]
@@ -4467,7 +4467,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmovsxb.q.256(<16 x i8>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_pmovsxb_q_256(<16 x i8> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x22,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsxbq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x22,0xc8]
@@ -4487,7 +4487,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmovsxw.d.128(<8 x i16>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmovsxw_d_128(<8 x i16> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x23,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsxwd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x23,0xc8]
@@ -4507,7 +4507,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovsxw.d.256(<8 x i16>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_pmovsxw_d_256(<8 x i16> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x23,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsxwd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x23,0xc8]
@@ -4527,7 +4527,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pmovsxw.q.128(<8 x i16>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_pmovsxw_q_128(<8 x i16> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x24,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsxwq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x24,0xc8]
@@ -4547,7 +4547,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmovsxw.q.256(<8 x i16>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_pmovsxw_q_256(<8 x i16> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x24,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsxwq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x24,0xc8]
@@ -4567,7 +4567,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 x i64>, <2 x i64>, <2 x i6
define <2 x i64>@test_int_x86_avx512_mask_psra_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf1,0xfd,0x08,0xe2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xe2,0xd1]
@@ -4587,7 +4587,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64>, <2 x i64>, <4 x i6
define <4 x i64>@test_int_x86_avx512_mask_psra_q_256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm3 ## encoding: [0x62,0xf1,0xfd,0x28,0xe2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xe2,0xd1]
@@ -4607,7 +4607,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64>, i32, <2 x i64>, i
define <2 x i64>@test_int_x86_avx512_mask_psra_qi_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq $3, %xmm0, %xmm2 ## encoding: [0x62,0xf1,0xed,0x08,0x72,0xe0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsraq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xe0,0x03]
@@ -4627,7 +4627,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64>, i32, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_psra_qi_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq $3, %ymm0, %ymm2 ## encoding: [0x62,0xf1,0xed,0x28,0x72,0xe0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsraq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xe0,0x03]
@@ -4647,7 +4647,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.psrav.q.128(<2 x i64>, <2 x i64>, <2 x i
define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrav_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x46,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsravq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x46,0xd1]
@@ -4665,7 +4665,7 @@ define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128(<2 x i64> %x0, <2 x i64> %
define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128_const(i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrav_q_128_const:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovdqa {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [2,18446744073709551607]
; CHECK-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI312_0-4, kind: reloc_riprel_4byte
@@ -4680,7 +4680,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.psrav.q.256(<4 x i64>, <4 x i64>, <4 x i
define <4 x i64>@test_int_x86_avx512_mask_psrav_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrav_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravq %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x46,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsravq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x46,0xd1]
@@ -4700,7 +4700,7 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtdq2pd.128(<4 x i32>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_cvt_dq2pd_128(<4 x i32> %x0, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtdq2pd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0xe6,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtdq2pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0xe6,0xc8]
@@ -4716,7 +4716,7 @@ declare <4 x double> @llvm.x86.avx512.mask.cvtdq2pd.256(<4 x i32>, <4 x double>,
define <4 x double>@test_int_x86_avx512_mask_cvt_dq2pd_256(<4 x i32> %x0, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0xe6,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0xe6,0xc8]
@@ -4732,7 +4732,7 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtudq2pd.128(<4 x i32>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask_cvt_udq2pd_128(<4 x i32> %x0, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtudq2pd %xmm0, %xmm2 ## encoding: [0x62,0xf1,0x7e,0x08,0x7a,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtudq2pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x7a,0xc8]
@@ -4748,7 +4748,7 @@ declare <4 x double> @llvm.x86.avx512.mask.cvtudq2pd.256(<4 x i32>, <4 x double>
define <4 x double>@test_int_x86_avx512_mask_cvt_udq2pd_256(<4 x i32> %x0, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtudq2pd %xmm0, %ymm2 ## encoding: [0x62,0xf1,0x7e,0x28,0x7a,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtudq2pd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x7a,0xc8]
@@ -4764,7 +4764,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.valign.d.128(<4 x i32>, <4 x i32>, i32,
define <4 x i32>@test_int_x86_avx512_mask_valign_d_128(<4 x i32> %x0, <4 x i32> %x1,<4 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_valign_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpalignr $8, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0f,0xd9,0x08]
; CHECK-NEXT: ## xmm3 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4787,7 +4787,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.valign.d.256(<8 x i32>, <8 x i32>, i32,
define <8 x i32>@test_int_x86_avx512_mask_valign_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_valign_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: valignq $3, %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf3,0xfd,0x28,0x03,0xd9,0x03]
; CHECK-NEXT: ## ymm3 = ymm1[3],ymm0[0,1,2]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4805,7 +4805,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.valign.q.128(<2 x i64>, <2 x i64>, i32,
define <2 x i64>@test_int_x86_avx512_mask_valign_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_valign_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpalignr $8, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0f,0xd9,0x08]
; CHECK-NEXT: ## xmm3 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4823,7 +4823,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.valign.q.256(<4 x i64>, <4 x i64>, i32,
define <4 x i64>@test_int_x86_avx512_mask_valign_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_valign_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: valignq $3, %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf3,0xfd,0x28,0x03,0xd9,0x03]
; CHECK-NEXT: ## ymm3 = ymm1[3],ymm0[0,1,2]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -4841,7 +4841,7 @@ declare <4 x double> @llvm.x86.avx512.mask.vpermilvar.pd.256(<4 x double>, <4 x
define <4 x double>@test_int_x86_avx512_mask_vpermilvar_pd_256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilpd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0d,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermilpd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x0d,0xd1]
@@ -4861,7 +4861,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vpermilvar.pd.128(<2 x double>, <2 x
define <2 x double>@test_int_x86_avx512_mask_vpermilvar_pd_128(<2 x double> %x0, <2 x i64> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilpd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0d,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermilpd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x0d,0xd1]
@@ -4881,7 +4881,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vpermilvar.ps.256(<8 x float>, <8 x i3
define <8 x float>@test_int_x86_avx512_mask_vpermilvar_ps_256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0c,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermilps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x0c,0xd1]
@@ -4901,7 +4901,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vpermilvar.ps.128(<4 x float>, <4 x i3
define <4 x float>@test_int_x86_avx512_mask_vpermilvar_ps_128(<4 x float> %x0, <4 x i32> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermilps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x0c,0xd1]
@@ -4921,7 +4921,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.256(<8 x float>, i32, <4
define <4 x float>@test_int_x86_avx512_mask_vextractf32x4_256(<8 x float> %x0, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vextractf32x4_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc2,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vextractf32x4 $1, %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x19,0xc1,0x01]
@@ -4941,7 +4941,7 @@ declare <8 x float> @llvm.x86.avx512.mask.insertf32x4.256(<8 x float>, <4 x floa
define <8 x float>@test_int_x86_avx512_mask_insertf32x4_256(<8 x float> %x0, <4 x float> %x1, <8 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xd1,0x01]
@@ -4961,7 +4961,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.inserti32x4.256(<8 x i32>, <4 x i32>, i3
define <8 x i32>@test_int_x86_avx512_mask_inserti32x4_256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xd1,0x01]
@@ -4980,7 +4980,7 @@ define <8 x i32>@test_int_x86_avx512_mask_inserti32x4_256(<8 x i32> %x0, <4 x i3
define <8 x float> @test_mm512_maskz_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_max_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4990,7 +4990,7 @@ define <8 x float> @test_mm512_maskz_max_ps_256(<8 x float> %a0, <8 x float> %a1
define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_max_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5f,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -5001,7 +5001,7 @@ define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1,
define <8 x float> @test_mm512_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_max_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
@@ -5011,7 +5011,7 @@ declare <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float>, <8 x float>, <
define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_max_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5021,7 +5021,7 @@ define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_max_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5f,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -5032,7 +5032,7 @@ define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mm512_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_max_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
@@ -5042,7 +5042,7 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float>, <4 x float>, <
define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_min_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5052,7 +5052,7 @@ define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1
define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_min_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5d,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -5063,7 +5063,7 @@ define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1,
define <8 x float> @test_mm512_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_min_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
@@ -5073,7 +5073,7 @@ declare <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float>, <8 x float>, <
define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_min_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5083,7 +5083,7 @@ define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_min_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5d,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -5094,7 +5094,7 @@ define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mm512_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_min_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
@@ -5104,7 +5104,7 @@ declare <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float>, <4 x float>, <
define <8 x i8> @test_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
; CHECK-NEXT: vpcmpgtd %ymm0, %ymm1, %k1 ## encoding: [0x62,0xf1,0x75,0x28,0x66,0xc8]
; CHECK-NEXT: vpcmpled %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xd1,0x02]
@@ -5149,7 +5149,7 @@ define <8 x i8> @test_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
define <8 x i8> @test_mask_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
; CHECK-NEXT: vpcmpgtd %ymm0, %ymm1, %k2 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x66,0xd0]
@@ -5197,7 +5197,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32>, <8 x i32>, i32, i8) nounwi
define <8 x i8> @test_ucmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xc9,0x01]
; CHECK-NEXT: vpcmpleud %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xd1,0x02]
@@ -5242,7 +5242,7 @@ define <8 x i8> @test_ucmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
define <8 x i8> @test_mask_ucmp_d_256(<8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xd1,0x01]
@@ -5290,7 +5290,7 @@ declare i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32>, <8 x i32>, i32, i8) nounw
define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k1 ## encoding: [0x62,0xf2,0xf5,0x28,0x37,0xc8]
; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd1,0x02]
@@ -5334,7 +5334,7 @@ define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k6} ## encoding: [0x62,0xf2,0xfd,0x2e,0x29,0xc1]
; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k7 {%k6} ## encoding: [0x62,0xf2,0xf5,0x2e,0x37,0xf8]
@@ -5385,7 +5385,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64>, <4 x i64>, i32, i8) nounwi
define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xc9,0x01]
; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xd1,0x02]
@@ -5429,7 +5429,7 @@ define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k6} ## encoding: [0x62,0xf2,0xfd,0x2e,0x29,0xc1]
; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k7 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xf9,0x01]
@@ -5480,7 +5480,7 @@ declare i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64>, <4 x i64>, i32, i8) nounw
define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k1 ## encoding: [0x62,0xf1,0x75,0x08,0x66,0xc8]
; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd1,0x02]
@@ -5524,7 +5524,7 @@ define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i8> @test_mask_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k6} ## encoding: [0x62,0xf1,0x7d,0x0e,0x76,0xc1]
; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k7 {%k6} ## encoding: [0x62,0xf1,0x75,0x0e,0x66,0xf8]
@@ -5575,7 +5575,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32>, <4 x i32>, i32, i8) nounwi
define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xc9,0x01]
; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xd1,0x02]
@@ -5619,7 +5619,7 @@ define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i8> @test_mask_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k6} ## encoding: [0x62,0xf1,0x7d,0x0e,0x76,0xc1]
; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k7 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xf9,0x01]
@@ -5670,7 +5670,7 @@ declare i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32>, <4 x i32>, i32, i8) nounw
define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k1 ## encoding: [0x62,0xf2,0xf5,0x08,0x37,0xc8]
; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd1,0x02]
@@ -5714,7 +5714,7 @@ define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf2,0xfd,0x0f,0x29,0xc1]
; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k6 {%k7} ## encoding: [0x62,0xf2,0xf5,0x0f,0x37,0xf0]
@@ -5767,7 +5767,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64>, <2 x i64>, i32, i8) nounwi
define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xc9,0x01]
; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xd1,0x02]
@@ -5811,7 +5811,7 @@ define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf2,0xfd,0x0f,0x29,0xc1]
; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k6 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xf1,0x01]
@@ -5866,7 +5866,7 @@ declare <8 x float> @llvm.x86.avx512.mask.broadcastf32x4.256(<4 x float>, <8 x f
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256(<4 x float> %x0, <8 x float> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -5885,7 +5885,7 @@ define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256(<4 x float> %x0,
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256_load(<4 x float>* %x0ptr, <8 x float> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_256_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vbroadcastf32x4 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x1a,0x07]
; CHECK-NEXT: ## ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -5899,7 +5899,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.broadcasti32x4.256(<4 x i32>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -5918,7 +5918,7 @@ define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256_load(<4 x i32>* %x0ptr, <8 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_256_load:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vbroadcasti32x4 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x5a,0x07]
; CHECK-NEXT: ## ymm0 {%k1} = mem[0,1,2,3,0,1,2,3]
@@ -5932,7 +5932,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pabs.q.128(<2 x i64>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_pabs_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsq %xmm0, %xmm2 ## encoding: [0x62,0xf2,0xfd,0x08,0x1f,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpabsq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x1f,0xc8]
@@ -5948,7 +5948,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pabs.q.256(<4 x i64>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_pabs_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsq %ymm0, %ymm2 ## encoding: [0x62,0xf2,0xfd,0x28,0x1f,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpabsq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x1f,0xc8]
@@ -5964,7 +5964,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pabs.d.128(<4 x i32>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pabs_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpabsd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x1e,0xc8]
@@ -5980,7 +5980,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pabs.d.256(<8 x i32>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_pabs_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsd %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1e,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpabsd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x1e,0xc8]
@@ -5996,7 +5996,7 @@ declare i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32>, <4 x i32>,i8)
define i8@test_int_x86_avx512_ptestm_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x08,0x27,0xc1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vptestmd %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x27,0xc9]
@@ -6015,7 +6015,7 @@ declare i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32>, <8 x i32>, i8)
define i8@test_int_x86_avx512_ptestm_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x28,0x27,0xc1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vptestmd %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x27,0xc9]
@@ -6034,7 +6034,7 @@ declare i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64>, <2 x i64>, i8)
define i8@test_int_x86_avx512_ptestm_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x27,0xc1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vptestmq %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x27,0xc9]
@@ -6053,7 +6053,7 @@ declare i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64>, <4 x i64>, i8)
define i8@test_int_x86_avx512_ptestm_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x27,0xc1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vptestmq %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x27,0xc9]
@@ -6072,7 +6072,7 @@ declare i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32>, <4 x i32>, i8 %x2)
define i8@test_int_x86_avx512_ptestnm_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x27,0xc1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vptestnmd %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x27,0xc9]
@@ -6091,7 +6091,7 @@ declare i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32>, <8 x i32>, i8 %x2)
define i8@test_int_x86_avx512_ptestnm_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x27,0xc1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vptestnmd %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x27,0xc9]
@@ -6110,7 +6110,7 @@ declare i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64>, <2 x i64>, i8 %x2)
define i8@test_int_x86_avx512_ptestnm_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x27,0xc1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vptestnmq %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf2,0xfe,0x09,0x27,0xc9]
@@ -6129,7 +6129,7 @@ declare i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64>, <4 x i64>, i8 %x2)
define i8@test_int_x86_avx512_ptestnm_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x27,0xc1]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vptestnmq %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf2,0xfe,0x29,0x27,0xc9]
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index 0eb0cab1326..2cd118832d3 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -3,7 +3,7 @@
define void @compr1(i8* %addr, <8 x double> %data, i8 %mask) {
; CHECK-LABEL: compr1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vcompresspd %zmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x8a,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -15,7 +15,7 @@ declare void @llvm.x86.avx512.mask.compress.store.pd.512(i8* %addr, <8 x double>
define void @compr2(i8* %addr, <4 x double> %data, i8 %mask) {
; CHECK-LABEL: compr2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vcompresspd %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x8a,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -27,7 +27,7 @@ declare void @llvm.x86.avx512.mask.compress.store.pd.256(i8* %addr, <4 x double>
define void @compr3(i8* %addr, <4 x float> %data, i8 %mask) {
; CHECK-LABEL: compr3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vcompressps %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x8a,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -39,7 +39,7 @@ declare void @llvm.x86.avx512.mask.compress.store.ps.128(i8* %addr, <4 x float>
define <8 x double> @compr4(i8* %addr, <8 x double> %data, i8 %mask) {
; CHECK-LABEL: compr4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vcompresspd %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x8a,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -51,7 +51,7 @@ declare <8 x double> @llvm.x86.avx512.mask.compress.pd.512(<8 x double> %data, <
define <4 x double> @compr5(<4 x double> %data, <4 x double> %src0, i8 %mask) {
; CHECK-LABEL: compr5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcompresspd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x8a,0xc1]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -64,7 +64,7 @@ declare <4 x double> @llvm.x86.avx512.mask.compress.pd.256(<4 x double> %data, <
define <4 x float> @compr6(<4 x float> %data, i8 %mask) {
; CHECK-LABEL: compr6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcompressps %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x8a,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -76,7 +76,7 @@ declare <4 x float> @llvm.x86.avx512.mask.compress.ps.128(<4 x float> %data, <4
define void @compr7(i8* %addr, <8 x double> %data) {
; CHECK-LABEL: compr7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
call void @llvm.x86.avx512.mask.compress.store.pd.512(i8* %addr, <8 x double> %data, i8 -1)
@@ -85,7 +85,7 @@ define void @compr7(i8* %addr, <8 x double> %data) {
define <4 x float> @compr8(<4 x float> %data) {
; CHECK-LABEL: compr8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.compress.ps.128(<4 x float> %data, <4 x float>zeroinitializer, i8 -1)
ret <4 x float> %res
@@ -93,7 +93,7 @@ define <4 x float> @compr8(<4 x float> %data) {
define void @compr9(i8* %addr, <8 x i64> %data, i8 %mask) {
; CHECK-LABEL: compr9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpcompressq %zmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x8b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -105,7 +105,7 @@ declare void @llvm.x86.avx512.mask.compress.store.q.512(i8* %addr, <8 x i64> %da
define <4 x i32> @compr10(<4 x i32> %data, i8 %mask) {
; CHECK-LABEL: compr10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x8b,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -119,7 +119,7 @@ define <4 x i32> @compr10(<4 x i32> %data, i8 %mask) {
define i32 @compr11() {
; CHECK-LABEL: compr11:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movq _xmm@{{.*}}(%rip), %rax ## encoding: [0x48,0x8b,0x05,A,A,A,A]
; CHECK-NEXT: ## fixup A - offset: 3, value: _xmm@GOTPCREL-4, kind: reloc_riprel_4byte_movq_load
; CHECK-NEXT: vmovdqa (%rax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x00]
@@ -150,7 +150,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.compress.d.128(<4 x i32> %data, <4 x i32
define <8 x double> @expand1(i8* %addr, <8 x double> %data, i8 %mask) {
; CHECK-LABEL: expand1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vexpandpd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x88,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -162,7 +162,7 @@ declare <8 x double> @llvm.x86.avx512.mask.expand.load.pd.512(i8* %addr, <8 x do
define <4 x double> @expand2(i8* %addr, <4 x double> %data, i8 %mask) {
; CHECK-LABEL: expand2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vexpandpd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x88,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -174,7 +174,7 @@ declare <4 x double> @llvm.x86.avx512.mask.expand.load.pd.256(i8* %addr, <4 x do
define <4 x float> @expand3(i8* %addr, <4 x float> %data, i8 %mask) {
; CHECK-LABEL: expand3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vexpandps (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x88,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -186,7 +186,7 @@ declare <4 x float> @llvm.x86.avx512.mask.expand.load.ps.128(i8* %addr, <4 x flo
define <8 x double> @expand4(i8* %addr, <8 x double> %data, i8 %mask) {
; CHECK-LABEL: expand4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x88,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -198,7 +198,7 @@ declare <8 x double> @llvm.x86.avx512.mask.expand.pd.512(<8 x double> %data, <8
define <4 x double> @expand5(<4 x double> %data, <4 x double> %src0, i8 %mask) {
; CHECK-LABEL: expand5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vexpandpd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x88,0xc8]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -211,7 +211,7 @@ declare <4 x double> @llvm.x86.avx512.mask.expand.pd.256(<4 x double> %data, <4
define <4 x float> @expand6(<4 x float> %data, i8 %mask) {
; CHECK-LABEL: expand6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vexpandps %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x88,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -223,7 +223,7 @@ declare <4 x float> @llvm.x86.avx512.mask.expand.ps.128(<4 x float> %data, <4 x
define <8 x double> @expand7(i8* %addr, <8 x double> %data) {
; CHECK-LABEL: expand7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x double> @llvm.x86.avx512.mask.expand.load.pd.512(i8* %addr, <8 x double> %data, i8 -1)
@@ -232,7 +232,7 @@ define <8 x double> @expand7(i8* %addr, <8 x double> %data) {
define <4 x float> @expand8(<4 x float> %data) {
; CHECK-LABEL: expand8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.expand.ps.128(<4 x float> %data, <4 x float>zeroinitializer, i8 -1)
ret <4 x float> %res
@@ -240,7 +240,7 @@ define <4 x float> @expand8(<4 x float> %data) {
define <8 x i64> @expand9(i8* %addr, <8 x i64> %data, i8 %mask) {
; CHECK-LABEL: expand9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpexpandq (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x89,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -252,7 +252,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.expand.load.q.512(i8* %addr, <8 x i64> %
define <4 x i32> @expand10(<4 x i32> %data, i8 %mask) {
; CHECK-LABEL: expand10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpexpandd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x89,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -264,7 +264,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.expand.d.128(<4 x i32> %data, <4 x i32>
define <8 x i64> @expand11(i8* %addr) {
; CHECK-LABEL: expand11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.avx512.mask.expand.load.q.512(i8* %addr, <8 x i64> undef, i8 -1)
@@ -273,7 +273,7 @@ define <8 x i64> @expand11(i8* %addr) {
define <8 x i64> @expand12(i8* %addr, i8 %mask) {
; CHECK-LABEL: expand12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpexpandq (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x89,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -287,7 +287,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.expand.q.512(<8 x i64> , <8 x i64>, i8)
define < 2 x i64> @test_mask_mul_epi32_rr_128(< 4 x i32> %a, < 4 x i32> %b) {
; CHECK-LABEL: test_mask_mul_epi32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call < 2 x i64> @llvm.x86.avx512.mask.pmul.dq.128(< 4 x i32> %a, < 4 x i32> %b, < 2 x i64> zeroinitializer, i8 -1)
@@ -296,7 +296,7 @@ define < 2 x i64> @test_mask_mul_epi32_rr_128(< 4 x i32> %a, < 4 x i32> %b) {
define < 2 x i64> @test_mask_mul_epi32_rrk_128(< 4 x i32> %a, < 4 x i32> %b, < 2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmuldq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x28,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -307,7 +307,7 @@ define < 2 x i64> @test_mask_mul_epi32_rrk_128(< 4 x i32> %a, < 4 x i32> %b, < 2
define < 2 x i64> @test_mask_mul_epi32_rrkz_128(< 4 x i32> %a, < 4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -317,7 +317,7 @@ define < 2 x i64> @test_mask_mul_epi32_rrkz_128(< 4 x i32> %a, < 4 x i32> %b, i8
define < 2 x i64> @test_mask_mul_epi32_rm_128(< 4 x i32> %a, < 4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epi32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load < 4 x i32>, < 4 x i32>* %ptr_b
@@ -327,7 +327,7 @@ define < 2 x i64> @test_mask_mul_epi32_rm_128(< 4 x i32> %a, < 4 x i32>* %ptr_b)
define < 2 x i64> @test_mask_mul_epi32_rmk_128(< 4 x i32> %a, < 4 x i32>* %ptr_b, < 2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuldq (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x28,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -339,7 +339,7 @@ define < 2 x i64> @test_mask_mul_epi32_rmk_128(< 4 x i32> %a, < 4 x i32>* %ptr_b
define < 2 x i64> @test_mask_mul_epi32_rmkz_128(< 4 x i32> %a, < 4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -350,7 +350,7 @@ define < 2 x i64> @test_mask_mul_epi32_rmkz_128(< 4 x i32> %a, < 4 x i32>* %ptr_
define < 2 x i64> @test_mask_mul_epi32_rmb_128(< 4 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epi32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x18,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -363,7 +363,7 @@ define < 2 x i64> @test_mask_mul_epi32_rmb_128(< 4 x i32> %a, i64* %ptr_b) {
define < 2 x i64> @test_mask_mul_epi32_rmbk_128(< 4 x i32> %a, i64* %ptr_b, < 2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuldq (%rdi){1to2}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0x28,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -378,7 +378,7 @@ define < 2 x i64> @test_mask_mul_epi32_rmbk_128(< 4 x i32> %a, i64* %ptr_b, < 2
define < 2 x i64> @test_mask_mul_epi32_rmbkz_128(< 4 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuldq (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x99,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -394,7 +394,7 @@ declare < 2 x i64> @llvm.x86.avx512.mask.pmul.dq.128(< 4 x i32>, < 4 x i32>, < 2
define < 4 x i64> @test_mask_mul_epi32_rr_256(< 8 x i32> %a, < 8 x i32> %b) {
; CHECK-LABEL: test_mask_mul_epi32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call < 4 x i64> @llvm.x86.avx512.mask.pmul.dq.256(< 8 x i32> %a, < 8 x i32> %b, < 4 x i64> zeroinitializer, i8 -1)
@@ -403,7 +403,7 @@ define < 4 x i64> @test_mask_mul_epi32_rr_256(< 8 x i32> %a, < 8 x i32> %b) {
define < 4 x i64> @test_mask_mul_epi32_rrk_256(< 8 x i32> %a, < 8 x i32> %b, < 4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmuldq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x28,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -414,7 +414,7 @@ define < 4 x i64> @test_mask_mul_epi32_rrk_256(< 8 x i32> %a, < 8 x i32> %b, < 4
define < 4 x i64> @test_mask_mul_epi32_rrkz_256(< 8 x i32> %a, < 8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -424,7 +424,7 @@ define < 4 x i64> @test_mask_mul_epi32_rrkz_256(< 8 x i32> %a, < 8 x i32> %b, i8
define < 4 x i64> @test_mask_mul_epi32_rm_256(< 8 x i32> %a, < 8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epi32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load < 8 x i32>, < 8 x i32>* %ptr_b
@@ -434,7 +434,7 @@ define < 4 x i64> @test_mask_mul_epi32_rm_256(< 8 x i32> %a, < 8 x i32>* %ptr_b)
define < 4 x i64> @test_mask_mul_epi32_rmk_256(< 8 x i32> %a, < 8 x i32>* %ptr_b, < 4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuldq (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x28,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -446,7 +446,7 @@ define < 4 x i64> @test_mask_mul_epi32_rmk_256(< 8 x i32> %a, < 8 x i32>* %ptr_b
define < 4 x i64> @test_mask_mul_epi32_rmkz_256(< 8 x i32> %a, < 8 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -457,7 +457,7 @@ define < 4 x i64> @test_mask_mul_epi32_rmkz_256(< 8 x i32> %a, < 8 x i32>* %ptr_
define < 4 x i64> @test_mask_mul_epi32_rmb_256(< 8 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epi32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x38,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -470,7 +470,7 @@ define < 4 x i64> @test_mask_mul_epi32_rmb_256(< 8 x i32> %a, i64* %ptr_b) {
define < 4 x i64> @test_mask_mul_epi32_rmbk_256(< 8 x i32> %a, i64* %ptr_b, < 4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuldq (%rdi){1to4}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x39,0x28,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -485,7 +485,7 @@ define < 4 x i64> @test_mask_mul_epi32_rmbk_256(< 8 x i32> %a, i64* %ptr_b, < 4
define < 4 x i64> @test_mask_mul_epi32_rmbkz_256(< 8 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuldq (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xb9,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -501,7 +501,7 @@ declare < 4 x i64> @llvm.x86.avx512.mask.pmul.dq.256(< 8 x i32>, < 8 x i32>, < 4
define < 2 x i64> @test_mask_mul_epu32_rr_128(< 4 x i32> %a, < 4 x i32> %b) {
; CHECK-LABEL: test_mask_mul_epu32_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call < 2 x i64> @llvm.x86.avx512.mask.pmulu.dq.128(< 4 x i32> %a, < 4 x i32> %b, < 2 x i64> zeroinitializer, i8 -1)
@@ -510,7 +510,7 @@ define < 2 x i64> @test_mask_mul_epu32_rr_128(< 4 x i32> %a, < 4 x i32> %b) {
define < 2 x i64> @test_mask_mul_epu32_rrk_128(< 4 x i32> %a, < 4 x i32> %b, < 2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmuludq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xf4,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -521,7 +521,7 @@ define < 2 x i64> @test_mask_mul_epu32_rrk_128(< 4 x i32> %a, < 4 x i32> %b, < 2
define < 2 x i64> @test_mask_mul_epu32_rrkz_128(< 4 x i32> %a, < 4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xf4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -531,7 +531,7 @@ define < 2 x i64> @test_mask_mul_epu32_rrkz_128(< 4 x i32> %a, < 4 x i32> %b, i8
define < 2 x i64> @test_mask_mul_epu32_rm_128(< 4 x i32> %a, < 4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epu32_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load < 4 x i32>, < 4 x i32>* %ptr_b
@@ -541,7 +541,7 @@ define < 2 x i64> @test_mask_mul_epu32_rm_128(< 4 x i32> %a, < 4 x i32>* %ptr_b)
define < 2 x i64> @test_mask_mul_epu32_rmk_128(< 4 x i32> %a, < 4 x i32>* %ptr_b, < 2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuludq (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xf4,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -553,7 +553,7 @@ define < 2 x i64> @test_mask_mul_epu32_rmk_128(< 4 x i32> %a, < 4 x i32>* %ptr_b
define < 2 x i64> @test_mask_mul_epu32_rmkz_128(< 4 x i32> %a, < 4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xf4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -564,7 +564,7 @@ define < 2 x i64> @test_mask_mul_epu32_rmkz_128(< 4 x i32> %a, < 4 x i32>* %ptr_
define < 2 x i64> @test_mask_mul_epu32_rmb_128(< 4 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epu32_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0xf4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -577,7 +577,7 @@ define < 2 x i64> @test_mask_mul_epu32_rmb_128(< 4 x i32> %a, i64* %ptr_b) {
define < 2 x i64> @test_mask_mul_epu32_rmbk_128(< 4 x i32> %a, i64* %ptr_b, < 2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuludq (%rdi){1to2}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x19,0xf4,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -592,7 +592,7 @@ define < 2 x i64> @test_mask_mul_epu32_rmbk_128(< 4 x i32> %a, i64* %ptr_b, < 2
define < 2 x i64> @test_mask_mul_epu32_rmbkz_128(< 4 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuludq (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x99,0xf4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -608,7 +608,7 @@ declare < 2 x i64> @llvm.x86.avx512.mask.pmulu.dq.128(< 4 x i32>, < 4 x i32>, <
define < 4 x i64> @test_mask_mul_epu32_rr_256(< 8 x i32> %a, < 8 x i32> %b) {
; CHECK-LABEL: test_mask_mul_epu32_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call < 4 x i64> @llvm.x86.avx512.mask.pmulu.dq.256(< 8 x i32> %a, < 8 x i32> %b, < 4 x i64> zeroinitializer, i8 -1)
@@ -617,7 +617,7 @@ define < 4 x i64> @test_mask_mul_epu32_rr_256(< 8 x i32> %a, < 8 x i32> %b) {
define < 4 x i64> @test_mask_mul_epu32_rrk_256(< 8 x i32> %a, < 8 x i32> %b, < 4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmuludq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xf4,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -628,7 +628,7 @@ define < 4 x i64> @test_mask_mul_epu32_rrk_256(< 8 x i32> %a, < 8 x i32> %b, < 4
define < 4 x i64> @test_mask_mul_epu32_rrkz_256(< 8 x i32> %a, < 8 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xf4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -638,7 +638,7 @@ define < 4 x i64> @test_mask_mul_epu32_rrkz_256(< 8 x i32> %a, < 8 x i32> %b, i8
define < 4 x i64> @test_mask_mul_epu32_rm_256(< 8 x i32> %a, < 8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epu32_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load < 8 x i32>, < 8 x i32>* %ptr_b
@@ -648,7 +648,7 @@ define < 4 x i64> @test_mask_mul_epu32_rm_256(< 8 x i32> %a, < 8 x i32>* %ptr_b)
define < 4 x i64> @test_mask_mul_epu32_rmk_256(< 8 x i32> %a, < 8 x i32>* %ptr_b, < 4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuludq (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xf4,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -660,7 +660,7 @@ define < 4 x i64> @test_mask_mul_epu32_rmk_256(< 8 x i32> %a, < 8 x i32>* %ptr_b
define < 4 x i64> @test_mask_mul_epu32_rmkz_256(< 8 x i32> %a, < 8 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xf4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -671,7 +671,7 @@ define < 4 x i64> @test_mask_mul_epu32_rmkz_256(< 8 x i32> %a, < 8 x i32>* %ptr_
define < 4 x i64> @test_mask_mul_epu32_rmb_256(< 8 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epu32_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xf4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -684,7 +684,7 @@ define < 4 x i64> @test_mask_mul_epu32_rmb_256(< 8 x i32> %a, i64* %ptr_b) {
define < 4 x i64> @test_mask_mul_epu32_rmbk_256(< 8 x i32> %a, i64* %ptr_b, < 4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuludq (%rdi){1to4}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x39,0xf4,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -699,7 +699,7 @@ define < 4 x i64> @test_mask_mul_epu32_rmbk_256(< 8 x i32> %a, i64* %ptr_b, < 4
define < 4 x i64> @test_mask_mul_epu32_rmbkz_256(< 8 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmuludq (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0xf4,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -715,7 +715,7 @@ declare < 4 x i64> @llvm.x86.avx512.mask.pmulu.dq.256(< 8 x i32>, < 8 x i32>, <
define i8 @test_cmpps_256(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: test_cmpps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -727,7 +727,7 @@ define i8 @test_cmpps_256(<8 x float> %a, <8 x float> %b) {
define i8 @test_cmpps_128(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test_cmpps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -739,7 +739,7 @@ define i8 @test_cmpps_128(<4 x float> %a, <4 x float> %b) {
define i8 @test_cmppd_256(<4 x double> %a, <4 x double> %b) {
; CHECK-LABEL: test_cmppd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmplepd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -751,7 +751,7 @@ define i8 @test_cmppd_256(<4 x double> %a, <4 x double> %b) {
define i8 @test_cmppd_128(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: test_cmppd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcmplepd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
@@ -763,7 +763,7 @@ define i8 @test_cmppd_128(<2 x double> %a, <2 x double> %b) {
define <8 x float> @test_mm512_maskz_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_max_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -775,7 +775,7 @@ define <8 x float> @test_mm512_maskz_max_ps_256(<8 x float> %a0, <8 x float> %a1
define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_max_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5f,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -788,7 +788,7 @@ define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1,
define <8 x float> @test_mm512_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_max_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -798,7 +798,7 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>)
define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_max_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -811,7 +811,7 @@ define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_max_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5f,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -825,7 +825,7 @@ define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mm512_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_max_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
@@ -835,7 +835,7 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_min_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -847,7 +847,7 @@ define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1
define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_min_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5d,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -860,7 +860,7 @@ define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1,
define <8 x float> @test_mm512_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_min_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -870,7 +870,7 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>)
define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_min_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -883,7 +883,7 @@ define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
; CHECK-LABEL: test_mm512_mask_min_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5d,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -897,7 +897,7 @@ define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mm512_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_min_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
@@ -907,7 +907,7 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>)
define <4 x double> @test_sqrt_pd_256(<4 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_sqrt_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vsqrtpd %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x51,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -918,7 +918,7 @@ declare <4 x double> @llvm.x86.avx512.mask.sqrt.pd.256(<4 x double>, <4 x double
define <8 x float> @test_sqrt_ps_256(<8 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_sqrt_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vsqrtps %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x51,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -930,7 +930,7 @@ declare <8 x float> @llvm.x86.avx512.mask.sqrt.ps.256(<8 x float>, <8 x float>,
define <4 x double> @test_getexp_pd_256(<4 x double> %a0) {
; CHECK-LABEL: test_getexp_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetexppd %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x42,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.getexp.pd.256(<4 x double> %a0, <4 x double> zeroinitializer, i8 -1)
@@ -941,7 +941,7 @@ declare <4 x double> @llvm.x86.avx512.mask.getexp.pd.256(<4 x double>, <4 x doub
define <8 x float> @test_getexp_ps_256(<8 x float> %a0) {
; CHECK-LABEL: test_getexp_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vgetexpps %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x42,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.getexp.ps.256(<8 x float> %a0, <8 x float> zeroinitializer, i8 -1)
@@ -953,7 +953,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32>, <4 x i32>, <
define <4 x i32>@test_int_x86_avx512_mask_vpermt2var_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x7e,0xda]
@@ -970,7 +970,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.128(<4 x i32>, <4 x i32>,
define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x7e,0xda]
@@ -987,7 +987,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32>, <8 x i32>, <
define <8 x i32>@test_int_x86_avx512_mask_vpermt2var_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x7e,0xda]
@@ -1004,7 +1004,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.256(<8 x i32>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x7e,0xda]
@@ -1021,7 +1021,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vpermi2var.pd.128(<2 x double>, <2 x
define <2 x double>@test_int_x86_avx512_mask_vpermi2var_pd_128(<2 x double> %x0, <2 x i64> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vpermi2pd %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x77,0xda]
@@ -1038,7 +1038,7 @@ declare <4 x double> @llvm.x86.avx512.mask.vpermi2var.pd.256(<4 x double>, <4 x
define <4 x double>@test_int_x86_avx512_mask_vpermi2var_pd_256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x77,0xda]
@@ -1055,7 +1055,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vpermi2var.ps.128(<4 x float>, <4 x i3
define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128(<4 x float> %x0, <4 x i32> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vpermi2ps %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x77,0xda]
@@ -1070,7 +1070,7 @@ define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128(<4 x float> %x0, <
define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128_cast(<4 x float> %x0, <2 x i64> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_128_cast:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermi2ps %xmm2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x77,0xca]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -1084,7 +1084,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vpermi2var.ps.256(<8 x float>, <8 x i3
define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x77,0xda]
@@ -1101,7 +1101,7 @@ declare <2 x double> @llvm.x86.avx512.mask.scalef.pd.128(<2 x double>, <2 x doub
define <2 x double>@test_int_x86_avx512_mask_scalef_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_scalef_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vscalefpd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x2c,0xd1]
; CHECK-NEXT: vscalefpd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x2c,0xc1]
@@ -1117,7 +1117,7 @@ declare <4 x double> @llvm.x86.avx512.mask.scalef.pd.256(<4 x double>, <4 x doub
define <4 x double>@test_int_x86_avx512_mask_scalef_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_scalef_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vscalefpd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x2c,0xd1]
; CHECK-NEXT: vscalefpd %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x2c,0xc1]
@@ -1133,7 +1133,7 @@ declare <4 x float> @llvm.x86.avx512.mask.scalef.ps.128(<4 x float>, <4 x float>
define <4 x float>@test_int_x86_avx512_mask_scalef_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_scalef_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vscalefps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2c,0xd1]
; CHECK-NEXT: vscalefps %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x2c,0xc1]
@@ -1149,7 +1149,7 @@ declare <8 x float> @llvm.x86.avx512.mask.scalef.ps.256(<8 x float>, <8 x float>
define <8 x float>@test_int_x86_avx512_mask_scalef_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_scalef_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vscalefps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2c,0xd1]
; CHECK-NEXT: vscalefps %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x2c,0xc1]
@@ -1165,7 +1165,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.qb.128(<2 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovqb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x32,0xc2]
; CHECK-NEXT: vpmovqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x32,0xc1]
@@ -1185,7 +1185,7 @@ declare void @llvm.x86.avx512.mask.pmov.qb.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qb_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovqb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x32,0x07]
; CHECK-NEXT: vpmovqb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x32,0x07]
@@ -1199,7 +1199,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.128(<2 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsqb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x22,0xc2]
; CHECK-NEXT: vpmovsqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x22,0xc1]
@@ -1219,7 +1219,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qb.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qb_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsqb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x22,0x07]
; CHECK-NEXT: vpmovsqb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x22,0x07]
@@ -1233,7 +1233,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.128(<2 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_128(<2 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusqb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x12,0xc2]
; CHECK-NEXT: vpmovusqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x12,0xc1]
@@ -1253,7 +1253,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qb.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qb_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusqb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x12,0x07]
; CHECK-NEXT: vpmovusqb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x12,0x07]
@@ -1267,7 +1267,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.qb.256(<4 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_256(<4 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovqb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x32,0xc2]
; CHECK-NEXT: vpmovqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x32,0xc1]
@@ -1287,7 +1287,7 @@ declare void @llvm.x86.avx512.mask.pmov.qb.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qb_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovqb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x32,0x07]
; CHECK-NEXT: vpmovqb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x32,0x07]
@@ -1301,7 +1301,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.256(<4 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_256(<4 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsqb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x22,0xc2]
; CHECK-NEXT: vpmovsqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x22,0xc1]
@@ -1321,7 +1321,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qb.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qb_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsqb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x22,0x07]
; CHECK-NEXT: vpmovsqb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x22,0x07]
@@ -1335,7 +1335,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.256(<4 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_256(<4 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusqb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x12,0xc2]
; CHECK-NEXT: vpmovusqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x12,0xc1]
@@ -1355,7 +1355,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qb.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qb_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusqb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x12,0x07]
; CHECK-NEXT: vpmovusqb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x12,0x07]
@@ -1369,7 +1369,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.128(<2 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_128(<2 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovqw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x34,0xc2]
; CHECK-NEXT: vpmovqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x34,0xc1]
@@ -1389,7 +1389,7 @@ declare void @llvm.x86.avx512.mask.pmov.qw.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qw_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovqw %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x34,0x07]
; CHECK-NEXT: vpmovqw %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x34,0x07]
@@ -1403,7 +1403,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.128(<2 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_128(<2 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsqw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x24,0xc2]
; CHECK-NEXT: vpmovsqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x24,0xc1]
@@ -1423,7 +1423,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qw.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qw_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsqw %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x24,0x07]
; CHECK-NEXT: vpmovsqw %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x24,0x07]
@@ -1437,7 +1437,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.128(<2 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_128(<2 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusqw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x14,0xc2]
; CHECK-NEXT: vpmovusqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x14,0xc1]
@@ -1457,7 +1457,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qw.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qw_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusqw %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x14,0x07]
; CHECK-NEXT: vpmovusqw %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x14,0x07]
@@ -1471,7 +1471,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.256(<4 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_256(<4 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovqw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x34,0xc2]
; CHECK-NEXT: vpmovqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x34,0xc1]
@@ -1491,7 +1491,7 @@ declare void @llvm.x86.avx512.mask.pmov.qw.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qw_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovqw %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x34,0x07]
; CHECK-NEXT: vpmovqw %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x34,0x07]
@@ -1505,7 +1505,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.256(<4 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_256(<4 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsqw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x24,0xc2]
; CHECK-NEXT: vpmovsqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x24,0xc1]
@@ -1525,7 +1525,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qw.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qw_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsqw %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x24,0x07]
; CHECK-NEXT: vpmovsqw %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x24,0x07]
@@ -1539,7 +1539,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.256(<4 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_256(<4 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusqw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x14,0xc2]
; CHECK-NEXT: vpmovusqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x14,0xc1]
@@ -1559,7 +1559,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qw.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qw_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusqw %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x14,0x07]
; CHECK-NEXT: vpmovusqw %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x14,0x07]
@@ -1573,7 +1573,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmov.qd.128(<2 x i64>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmov_qd_128(<2 x i64> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovqd %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x35,0xc2]
; CHECK-NEXT: vpmovqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x35,0xc1]
@@ -1593,7 +1593,7 @@ declare void @llvm.x86.avx512.mask.pmov.qd.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qd_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovqd %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x35,0x07]
; CHECK-NEXT: vpmovqd %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x35,0x07]
@@ -1607,7 +1607,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmovs.qd.128(<2 x i64>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmovs_qd_128(<2 x i64> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsqd %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x25,0xc2]
; CHECK-NEXT: vpmovsqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x25,0xc1]
@@ -1627,7 +1627,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qd.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qd_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsqd %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x25,0x07]
; CHECK-NEXT: vpmovsqd %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x25,0x07]
@@ -1641,7 +1641,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmovus.qd.128(<2 x i64>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmovus_qd_128(<2 x i64> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusqd %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x15,0xc2]
; CHECK-NEXT: vpmovusqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x15,0xc1]
@@ -1661,7 +1661,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qd.mem.128(i8* %ptr, <2 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qd_mem_128(i8* %ptr, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusqd %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x15,0x07]
; CHECK-NEXT: vpmovusqd %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x15,0x07]
@@ -1675,7 +1675,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmov.qd.256(<4 x i64>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmov_qd_256(<4 x i64> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovqd %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x35,0xc2]
; CHECK-NEXT: vpmovqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x35,0xc1]
@@ -1695,7 +1695,7 @@ declare void @llvm.x86.avx512.mask.pmov.qd.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qd_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovqd %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x35,0x07]
; CHECK-NEXT: vpmovqd %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x35,0x07]
@@ -1709,7 +1709,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmovs.qd.256(<4 x i64>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmovs_qd_256(<4 x i64> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsqd %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x25,0xc2]
; CHECK-NEXT: vpmovsqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x25,0xc1]
@@ -1729,7 +1729,7 @@ declare void @llvm.x86.avx512.mask.pmovs.qd.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qd_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsqd %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x25,0x07]
; CHECK-NEXT: vpmovsqd %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x25,0x07]
@@ -1743,7 +1743,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmovus.qd.256(<4 x i64>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_pmovus_qd_256(<4 x i64> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusqd %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x15,0xc2]
; CHECK-NEXT: vpmovusqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x15,0xc1]
@@ -1763,7 +1763,7 @@ declare void @llvm.x86.avx512.mask.pmovus.qd.mem.256(i8* %ptr, <4 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qd_mem_256(i8* %ptr, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusqd %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x15,0x07]
; CHECK-NEXT: vpmovusqd %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x15,0x07]
@@ -1777,7 +1777,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.db.128(<4 x i32>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmov_db_128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovdb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x31,0xc2]
; CHECK-NEXT: vpmovdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x31,0xc1]
@@ -1797,7 +1797,7 @@ declare void @llvm.x86.avx512.mask.pmov.db.mem.128(i8* %ptr, <4 x i32>, i8)
define void @test_int_x86_avx512_mask_pmov_db_mem_128(i8* %ptr, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovdb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x31,0x07]
; CHECK-NEXT: vpmovdb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x31,0x07]
@@ -1811,7 +1811,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.128(<4 x i32>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsdb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x21,0xc2]
; CHECK-NEXT: vpmovsdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x21,0xc1]
@@ -1831,7 +1831,7 @@ declare void @llvm.x86.avx512.mask.pmovs.db.mem.128(i8* %ptr, <4 x i32>, i8)
define void @test_int_x86_avx512_mask_pmovs_db_mem_128(i8* %ptr, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsdb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x21,0x07]
; CHECK-NEXT: vpmovsdb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x21,0x07]
@@ -1845,7 +1845,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.128(<4 x i32>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_128(<4 x i32> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusdb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x11,0xc2]
; CHECK-NEXT: vpmovusdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x11,0xc1]
@@ -1865,7 +1865,7 @@ declare void @llvm.x86.avx512.mask.pmovus.db.mem.128(i8* %ptr, <4 x i32>, i8)
define void @test_int_x86_avx512_mask_pmovus_db_mem_128(i8* %ptr, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusdb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x11,0x07]
; CHECK-NEXT: vpmovusdb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x11,0x07]
@@ -1879,7 +1879,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.db.256(<8 x i32>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmov_db_256(<8 x i32> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovdb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x31,0xc2]
; CHECK-NEXT: vpmovdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x31,0xc1]
@@ -1899,7 +1899,7 @@ declare void @llvm.x86.avx512.mask.pmov.db.mem.256(i8* %ptr, <8 x i32>, i8)
define void @test_int_x86_avx512_mask_pmov_db_mem_256(i8* %ptr, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovdb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x31,0x07]
; CHECK-NEXT: vpmovdb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x31,0x07]
@@ -1913,7 +1913,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.256(<8 x i32>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_256(<8 x i32> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsdb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x21,0xc2]
; CHECK-NEXT: vpmovsdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x21,0xc1]
@@ -1933,7 +1933,7 @@ declare void @llvm.x86.avx512.mask.pmovs.db.mem.256(i8* %ptr, <8 x i32>, i8)
define void @test_int_x86_avx512_mask_pmovs_db_mem_256(i8* %ptr, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsdb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x21,0x07]
; CHECK-NEXT: vpmovsdb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x21,0x07]
@@ -1947,7 +1947,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.256(<8 x i32>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_256(<8 x i32> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusdb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x11,0xc2]
; CHECK-NEXT: vpmovusdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x11,0xc1]
@@ -1967,7 +1967,7 @@ declare void @llvm.x86.avx512.mask.pmovus.db.mem.256(i8* %ptr, <8 x i32>, i8)
define void @test_int_x86_avx512_mask_pmovus_db_mem_256(i8* %ptr, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusdb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x11,0x07]
; CHECK-NEXT: vpmovusdb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x11,0x07]
@@ -1981,7 +1981,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.dw.128(<4 x i32>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmov_dw_128(<4 x i32> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovdw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x33,0xc2]
; CHECK-NEXT: vpmovdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x33,0xc1]
@@ -2001,7 +2001,7 @@ declare void @llvm.x86.avx512.mask.pmov.dw.mem.128(i8* %ptr, <4 x i32>, i8)
define void @test_int_x86_avx512_mask_pmov_dw_mem_128(i8* %ptr, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovdw %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x33,0x07]
; CHECK-NEXT: vpmovdw %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x33,0x07]
@@ -2015,7 +2015,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovs.dw.128(<4 x i32>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovs_dw_128(<4 x i32> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsdw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x23,0xc2]
; CHECK-NEXT: vpmovsdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x23,0xc1]
@@ -2035,7 +2035,7 @@ declare void @llvm.x86.avx512.mask.pmovs.dw.mem.128(i8* %ptr, <4 x i32>, i8)
define void @test_int_x86_avx512_mask_pmovs_dw_mem_128(i8* %ptr, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsdw %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x23,0x07]
; CHECK-NEXT: vpmovsdw %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x23,0x07]
@@ -2049,7 +2049,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.128(<4 x i32>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovus_dw_128(<4 x i32> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusdw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x13,0xc2]
; CHECK-NEXT: vpmovusdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x13,0xc1]
@@ -2069,7 +2069,7 @@ declare void @llvm.x86.avx512.mask.pmovus.dw.mem.128(i8* %ptr, <4 x i32>, i8)
define void @test_int_x86_avx512_mask_pmovus_dw_mem_128(i8* %ptr, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusdw %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x13,0x07]
; CHECK-NEXT: vpmovusdw %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x13,0x07]
@@ -2083,7 +2083,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.dw.256(<8 x i32>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmov_dw_256(<8 x i32> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovdw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x33,0xc2]
; CHECK-NEXT: vpmovdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x33,0xc1]
@@ -2103,7 +2103,7 @@ declare void @llvm.x86.avx512.mask.pmov.dw.mem.256(i8* %ptr, <8 x i32>, i8)
define void @test_int_x86_avx512_mask_pmov_dw_mem_256(i8* %ptr, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovdw %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x33,0x07]
; CHECK-NEXT: vpmovdw %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x33,0x07]
@@ -2117,7 +2117,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovs.dw.256(<8 x i32>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovs_dw_256(<8 x i32> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovsdw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x23,0xc2]
; CHECK-NEXT: vpmovsdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x23,0xc1]
@@ -2137,7 +2137,7 @@ declare void @llvm.x86.avx512.mask.pmovs.dw.mem.256(i8* %ptr, <8 x i32>, i8)
define void @test_int_x86_avx512_mask_pmovs_dw_mem_256(i8* %ptr, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovsdw %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x23,0x07]
; CHECK-NEXT: vpmovsdw %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x23,0x07]
@@ -2151,7 +2151,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.256(<8 x i32>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovus_dw_256(<8 x i32> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmovusdw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x13,0xc2]
; CHECK-NEXT: vpmovusdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x13,0xc1]
@@ -2171,7 +2171,7 @@ declare void @llvm.x86.avx512.mask.pmovus.dw.mem.256(i8* %ptr, <8 x i32>, i8)
define void @test_int_x86_avx512_mask_pmovus_dw_mem_256(i8* %ptr, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmovusdw %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x13,0x07]
; CHECK-NEXT: vpmovusdw %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x13,0x07]
@@ -2185,7 +2185,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtdq2ps.128(<4 x i32>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_cvt_dq2ps_128(<4 x i32> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtdq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5b,0xc8]
; CHECK-NEXT: vcvtdq2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5b,0xc0]
@@ -2201,7 +2201,7 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtdq2ps.256(<8 x i32>, <8 x float>, i
define <8 x float>@test_int_x86_avx512_mask_cvt_dq2ps_256(<8 x i32> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5b,0xc8]
; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5b,0xc0]
@@ -2217,7 +2217,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvtpd2dq.128(<2 x double>, <4 x i32>, i8
define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_128(<2 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2dq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0xe6,0xc8]
; CHECK-NEXT: vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
@@ -2231,7 +2231,7 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_128(<2 x double> %x0, <4 x i
define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_128_zext(<2 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_128_zext:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2dq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0xe6,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
@@ -2251,7 +2251,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvtpd2dq.256(<4 x double>, <4 x i32>, i8
define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_256(<4 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2dq %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0xe6,0xc8]
; CHECK-NEXT: vcvtpd2dq %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xff,0xe6,0xc0]
@@ -2267,7 +2267,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtpd2ps.256(<4 x double>, <4 x float>
define <4 x float>@test_int_x86_avx512_mask_cvt_pd2ps_256(<4 x double> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2ps %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x5a,0xc8]
; CHECK-NEXT: vcvtpd2ps %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5a,0xc0]
@@ -2283,7 +2283,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtpd2ps(<2 x double>, <4 x float>, i8
define <4 x float>@test_int_x86_avx512_mask_cvt_pd2ps(<2 x double> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x5a,0xc8]
; CHECK-NEXT: vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
@@ -2297,7 +2297,7 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_pd2ps(<2 x double> %x0, <4 x flo
define <4 x float>@test_int_x86_avx512_mask_cvt_pd2ps_zext(<2 x double> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_zext:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x5a,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
@@ -2317,7 +2317,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvtpd2udq.128(<2 x double>, <4 x i32>, i
define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_128(<2 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2udq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x09,0x79,0xc8]
; CHECK-NEXT: vcvtpd2udq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfc,0x08,0x79,0xc0]
@@ -2331,7 +2331,7 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_128(<2 x double> %x0, <4 x
define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_128_zext(<2 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_128_zext:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2udq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x09,0x79,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
@@ -2351,7 +2351,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvtpd2udq.256(<4 x double>, <4 x i32>, i
define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_256(<4 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2udq %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x29,0x79,0xc8]
; CHECK-NEXT: vcvtpd2udq %ymm0, %xmm0 ## encoding: [0x62,0xf1,0xfc,0x28,0x79,0xc0]
@@ -2367,7 +2367,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvtps2dq.128(<4 x float>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_128(<4 x float> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2dq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x5b,0xc8]
; CHECK-NEXT: vcvtps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5b,0xc0]
@@ -2383,7 +2383,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvtps2dq.256(<8 x float>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_256(<8 x float> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2dq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x5b,0xc8]
; CHECK-NEXT: vcvtps2dq %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5b,0xc0]
@@ -2399,7 +2399,7 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtps2pd.128(<4 x float>, <2 x double
define <2 x double>@test_int_x86_avx512_mask_cvt_ps2pd_128(<4 x float> %x0, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5a,0xc8]
; CHECK-NEXT: vcvtps2pd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
@@ -2415,7 +2415,7 @@ declare <4 x double> @llvm.x86.avx512.mask.cvtps2pd.256(<4 x float>, <4 x double
define <4 x double>@test_int_x86_avx512_mask_cvt_ps2pd_256(<4 x float> %x0, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2pd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5a,0xc8]
; CHECK-NEXT: vcvtps2pd %xmm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0xc0]
@@ -2431,7 +2431,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvtps2udq.128(<4 x float>, <4 x i32>, i8
define <4 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_128(<4 x float> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2udq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x79,0xc8]
; CHECK-NEXT: vcvtps2udq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x08,0x79,0xc0]
@@ -2447,7 +2447,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvtps2udq.256(<8 x float>, <8 x i32>, i8
define <8 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_256(<8 x float> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2udq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x79,0xc8]
; CHECK-NEXT: vcvtps2udq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x28,0x79,0xc0]
@@ -2463,7 +2463,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvttpd2dq.128(<2 x double>, <4 x i32>, i
define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_128(<2 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2dq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xe6,0xc8]
; CHECK-NEXT: vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
@@ -2477,7 +2477,7 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_128(<2 x double> %x0, <4 x
define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_128_zext(<2 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_128_zext:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2dq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xe6,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
@@ -2497,7 +2497,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvttpd2dq.256(<4 x double>, <4 x i32>, i
define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_256(<4 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2dq %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xe6,0xc8]
; CHECK-NEXT: vcvttpd2dq %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe6,0xc0]
@@ -2513,7 +2513,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvttpd2udq.128(<2 x double>, <4 x i32>,
define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_128(<2 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2udq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x09,0x78,0xc8]
; CHECK-NEXT: vcvttpd2udq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfc,0x08,0x78,0xc0]
@@ -2527,7 +2527,7 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_128(<2 x double> %x0, <4 x
define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_128_zext(<2 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_128_zext:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2udq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x09,0x78,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
@@ -2547,7 +2547,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvttpd2udq.256(<4 x double>, <4 x i32>,
define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_256(<4 x double> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2udq %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x29,0x78,0xc8]
; CHECK-NEXT: vcvttpd2udq %ymm0, %xmm0 ## encoding: [0x62,0xf1,0xfc,0x28,0x78,0xc0]
@@ -2563,7 +2563,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvttps2dq.128(<4 x float>, <4 x i32>, i8
define <4 x i32>@test_int_x86_avx512_mask_cvtt_ps2dq_128(<4 x float> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2dq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x5b,0xc8]
; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5b,0xc0]
@@ -2579,7 +2579,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvttps2dq.256(<8 x float>, <8 x i32>, i8
define <8 x i32>@test_int_x86_avx512_mask_cvtt_ps2dq_256(<8 x float> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x5b,0xc8]
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x5b,0xc0]
@@ -2595,7 +2595,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.cvttps2udq.128(<4 x float>, <4 x i32>, i
define <4 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_128(<4 x float> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2udq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x78,0xc8]
; CHECK-NEXT: vcvttps2udq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x08,0x78,0xc0]
@@ -2611,7 +2611,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvttps2udq.256(<8 x float>, <8 x i32>, i
define <8 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_256(<8 x float> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2udq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x78,0xc8]
; CHECK-NEXT: vcvttps2udq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x28,0x78,0xc0]
@@ -2627,7 +2627,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtudq2ps.128(<4 x i32>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_cvt_udq2ps_128(<4 x i32> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtudq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7f,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvtudq2ps %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7f,0x08,0x7a,0xc0]
@@ -2643,7 +2643,7 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtudq2ps.256(<8 x i32>, <8 x float>,
define <8 x float>@test_int_x86_avx512_mask_cvt_udq2ps_256(<8 x i32> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtudq2ps %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7f,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvtudq2ps %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7f,0x28,0x7a,0xc0]
@@ -2659,7 +2659,7 @@ declare <2 x double> @llvm.x86.avx512.mask.rndscale.pd.128(<2 x double>, i32, <2
define <2 x double>@test_int_x86_avx512_mask_rndscale_pd_128(<2 x double> %x0, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_rndscale_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrndscalepd $4, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x09,0xc8,0x04]
; CHECK-NEXT: vrndscalepd $88, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x09,0xc0,0x58]
@@ -2675,7 +2675,7 @@ declare <4 x double> @llvm.x86.avx512.mask.rndscale.pd.256(<4 x double>, i32, <4
define <4 x double>@test_int_x86_avx512_mask_rndscale_pd_256(<4 x double> %x0, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_rndscale_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrndscalepd $4, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x09,0xc8,0x04]
; CHECK-NEXT: vrndscalepd $88, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x09,0xc0,0x58]
@@ -2691,7 +2691,7 @@ declare <4 x float> @llvm.x86.avx512.mask.rndscale.ps.128(<4 x float>, i32, <4 x
define <4 x float>@test_int_x86_avx512_mask_rndscale_ps_128(<4 x float> %x0, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_rndscale_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrndscaleps $88, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x08,0xc8,0x58]
; CHECK-NEXT: vrndscaleps $4, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x08,0xc0,0x04]
@@ -2707,7 +2707,7 @@ declare <8 x float> @llvm.x86.avx512.mask.rndscale.ps.256(<8 x float>, i32, <8 x
define <8 x float>@test_int_x86_avx512_mask_rndscale_ps_256(<8 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_rndscale_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrndscaleps $5, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x08,0xc8,0x05]
; CHECK-NEXT: vrndscaleps $66, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x08,0xc0,0x42]
@@ -2723,7 +2723,7 @@ declare <2 x double> @llvm.x86.avx512.mask.getmant.pd.128(<2 x double>, i32, <2
define <2 x double>@test_int_x86_avx512_mask_getmant_pd_128(<2 x double> %x0, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vgetmantpd $11, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0x89,0x26,0xd0,0x0b]
; CHECK-NEXT: vgetmantpd $11, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x26,0xc8,0x0b]
@@ -2743,7 +2743,7 @@ declare <4 x double> @llvm.x86.avx512.mask.getmant.pd.256(<4 x double>, i32, <4
define <4 x double>@test_int_x86_avx512_mask_getmant_pd_256(<4 x double> %x0, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vgetmantpd $11, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x26,0xc8,0x0b]
; CHECK-NEXT: vgetmantpd $11, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x26,0xc0,0x0b]
@@ -2759,7 +2759,7 @@ declare <4 x float> @llvm.x86.avx512.mask.getmant.ps.128(<4 x float>, i32, <4 x
define <4 x float>@test_int_x86_avx512_mask_getmant_ps_128(<4 x float> %x0, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vgetmantps $11, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x26,0xc8,0x0b]
; CHECK-NEXT: vgetmantps $11, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x26,0xc0,0x0b]
@@ -2775,7 +2775,7 @@ declare <8 x float> @llvm.x86.avx512.mask.getmant.ps.256(<8 x float>, i32, <8 x
define <8 x float>@test_int_x86_avx512_mask_getmant_ps_256(<8 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vgetmantps $11, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x26,0xc8,0x0b]
; CHECK-NEXT: vgetmantps $11, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x26,0xc0,0x0b]
@@ -2791,7 +2791,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pternlog.d.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_mask_pternlog_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0x75,0x08,0x25,0xda,0x21]
@@ -2808,7 +2808,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.pternlog.d.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_maskz_pternlog_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0x75,0x08,0x25,0xda,0x21]
@@ -2825,7 +2825,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pternlog.d.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_mask_pternlog_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x25,0xda,0x21]
@@ -2842,7 +2842,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.pternlog.d.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_maskz_pternlog_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x25,0xda,0x21]
@@ -2859,7 +2859,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pternlog.q.128(<2 x i64>, <2 x i64>, <2
define <2 x i64>@test_int_x86_avx512_mask_pternlog_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0xf5,0x08,0x25,0xda,0x21]
@@ -2876,7 +2876,7 @@ declare <2 x i64> @llvm.x86.avx512.maskz.pternlog.q.128(<2 x i64>, <2 x i64>, <2
define <2 x i64>@test_int_x86_avx512_maskz_pternlog_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0xf5,0x08,0x25,0xda,0x21]
@@ -2893,7 +2893,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pternlog.q.256(<4 x i64>, <4 x i64>, <4
define <4 x i64>@test_int_x86_avx512_mask_pternlog_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0xf5,0x28,0x25,0xda,0x21]
@@ -2910,7 +2910,7 @@ declare <4 x i64> @llvm.x86.avx512.maskz.pternlog.q.256(<4 x i64>, <4 x i64>, <4
define <4 x i64>@test_int_x86_avx512_maskz_pternlog_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0xf5,0x28,0x25,0xda,0x21]
@@ -2925,7 +2925,7 @@ define <4 x i64>@test_int_x86_avx512_maskz_pternlog_q_256(<4 x i64> %x0, <4 x i6
define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_vcvtph2ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.vcvtph2ps.128(<8 x i16> %a0, <4 x float> zeroinitializer, i8 -1)
@@ -2934,7 +2934,7 @@ define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
define <4 x float> @test_x86_vcvtph2ps_128_rrk(<8 x i16> %a0,<4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_vcvtph2ps_128_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x13,0xc8]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -2946,7 +2946,7 @@ define <4 x float> @test_x86_vcvtph2ps_128_rrk(<8 x i16> %a0,<4 x float> %a1, i8
define <4 x float> @test_x86_vcvtph2ps_128_rrkz(<8 x i16> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_vcvtph2ps_128_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x13,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2958,7 +2958,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vcvtph2ps.128(<8 x i16>, <4 x float>,
define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_vcvtph2ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtph2ps %xmm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.vcvtph2ps.256(<8 x i16> %a0, <8 x float> zeroinitializer, i8 -1)
@@ -2967,7 +2967,7 @@ define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) {
define <8 x float> @test_x86_vcvtph2ps_256_rrk(<8 x i16> %a0,<8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_vcvtph2ps_256_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtph2ps %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x13,0xc8]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -2978,7 +2978,7 @@ define <8 x float> @test_x86_vcvtph2ps_256_rrk(<8 x i16> %a0,<8 x float> %a1, i8
define <8 x float> @test_x86_vcvtph2ps_256_rrkz(<8 x i16> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_vcvtph2ps_256_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtph2ps %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x13,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2990,7 +2990,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vcvtph2ps.256(<8 x i16>, <8 x float>,
define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0, i8 %mask, <8 x i16> %src) {
; CHECK-LABEL: test_x86_vcvtps2ph_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2ph $2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1d,0xc1,0x02]
; CHECK-NEXT: vcvtps2ph $2, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x1d,0xc2,0x02]
@@ -3010,7 +3010,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.vcvtps2ph.128(<4 x float>, i32, <8 x i16
define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0, i8 %mask, <8 x i16> %src) {
; CHECK-LABEL: test_x86_vcvtps2ph_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2ph $2, %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1d,0xc1,0x02]
; CHECK-NEXT: vcvtps2ph $2, %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x1d,0xc2,0x02]
@@ -3030,7 +3030,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.vcvtps2ph.256(<8 x float>, i32, <8 x i16
define <8 x float> @test_rsqrt_ps_256_rr(<8 x float> %a0) {
; CHECK-LABEL: test_rsqrt_ps_256_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14ps %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x4e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.rsqrt14.ps.256(<8 x float> %a0, <8 x float> zeroinitializer, i8 -1)
@@ -3039,7 +3039,7 @@ define <8 x float> @test_rsqrt_ps_256_rr(<8 x float> %a0) {
define <8 x float> @test_rsqrt_ps_256_rrkz(<8 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt_ps_256_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14ps %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x4e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3049,7 +3049,7 @@ define <8 x float> @test_rsqrt_ps_256_rrkz(<8 x float> %a0, i8 %mask) {
define <8 x float> @test_rsqrt_ps_256_rrk(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_rsqrt_ps_256_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14ps %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x4e,0xc8]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -3060,7 +3060,7 @@ define <8 x float> @test_rsqrt_ps_256_rrk(<8 x float> %a0, <8 x float> %a1, i8 %
define <4 x float> @test_rsqrt_ps_128_rr(<4 x float> %a0) {
; CHECK-LABEL: test_rsqrt_ps_128_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14ps %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x4e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.rsqrt14.ps.128(<4 x float> %a0, <4 x float> zeroinitializer, i8 -1)
@@ -3069,7 +3069,7 @@ define <4 x float> @test_rsqrt_ps_128_rr(<4 x float> %a0) {
define <4 x float> @test_rsqrt_ps_128_rrkz(<4 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt_ps_128_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14ps %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x4e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3079,7 +3079,7 @@ define <4 x float> @test_rsqrt_ps_128_rrkz(<4 x float> %a0, i8 %mask) {
define <4 x float> @test_rsqrt_ps_128_rrk(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_rsqrt_ps_128_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x4e,0xc8]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -3093,7 +3093,7 @@ declare <4 x float> @llvm.x86.avx512.rsqrt14.ps.128(<4 x float>, <4 x float>, i8
define <8 x float> @test_rcp_ps_256_rr(<8 x float> %a0) {
; CHECK-LABEL: test_rcp_ps_256_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14ps %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x4c,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.rcp14.ps.256(<8 x float> %a0, <8 x float> zeroinitializer, i8 -1)
@@ -3102,7 +3102,7 @@ define <8 x float> @test_rcp_ps_256_rr(<8 x float> %a0) {
define <8 x float> @test_rcp_ps_256_rrkz(<8 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rcp_ps_256_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14ps %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x4c,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3112,7 +3112,7 @@ define <8 x float> @test_rcp_ps_256_rrkz(<8 x float> %a0, i8 %mask) {
define <8 x float> @test_rcp_ps_256_rrk(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_rcp_ps_256_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14ps %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x4c,0xc8]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -3123,7 +3123,7 @@ define <8 x float> @test_rcp_ps_256_rrk(<8 x float> %a0, <8 x float> %a1, i8 %ma
define <4 x float> @test_rcp_ps_128_rr(<4 x float> %a0) {
; CHECK-LABEL: test_rcp_ps_128_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14ps %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x4c,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.rcp14.ps.128(<4 x float> %a0, <4 x float> zeroinitializer, i8 -1)
@@ -3132,7 +3132,7 @@ define <4 x float> @test_rcp_ps_128_rr(<4 x float> %a0) {
define <4 x float> @test_rcp_ps_128_rrkz(<4 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rcp_ps_128_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14ps %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x4c,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3142,7 +3142,7 @@ define <4 x float> @test_rcp_ps_128_rrkz(<4 x float> %a0, i8 %mask) {
define <4 x float> @test_rcp_ps_128_rrk(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_rcp_ps_128_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x4c,0xc8]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -3156,7 +3156,7 @@ declare <4 x float> @llvm.x86.avx512.rcp14.ps.128(<4 x float>, <4 x float>, i8)
define <4 x double> @test_rsqrt_pd_256_rr(<4 x double> %a0) {
; CHECK-LABEL: test_rsqrt_pd_256_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14pd %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x4e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.rsqrt14.pd.256(<4 x double> %a0, <4 x double> zeroinitializer, i8 -1)
@@ -3165,7 +3165,7 @@ define <4 x double> @test_rsqrt_pd_256_rr(<4 x double> %a0) {
define <4 x double> @test_rsqrt_pd_256_rrkz(<4 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt_pd_256_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14pd %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x4e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3175,7 +3175,7 @@ define <4 x double> @test_rsqrt_pd_256_rrkz(<4 x double> %a0, i8 %mask) {
define <4 x double> @test_rsqrt_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_rsqrt_pd_256_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14pd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x4e,0xc8]
; CHECK-NEXT: vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1]
@@ -3186,7 +3186,7 @@ define <4 x double> @test_rsqrt_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i
define <2 x double> @test_rsqrt_pd_128_rr(<2 x double> %a0) {
; CHECK-LABEL: test_rsqrt_pd_128_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrsqrt14pd %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x4e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.rsqrt14.pd.128(<2 x double> %a0, <2 x double> zeroinitializer, i8 -1)
@@ -3195,7 +3195,7 @@ define <2 x double> @test_rsqrt_pd_128_rr(<2 x double> %a0) {
define <2 x double> @test_rsqrt_pd_128_rrkz(<2 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt_pd_128_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14pd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x4e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3205,7 +3205,7 @@ define <2 x double> @test_rsqrt_pd_128_rrkz(<2 x double> %a0, i8 %mask) {
define <2 x double> @test_rsqrt_pd_128_rrk(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_rsqrt_pd_128_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x4e,0xc8]
; CHECK-NEXT: vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
@@ -3219,7 +3219,7 @@ declare <2 x double> @llvm.x86.avx512.rsqrt14.pd.128(<2 x double>, <2 x double>,
define <4 x double> @test_rcp_pd_256_rr(<4 x double> %a0) {
; CHECK-LABEL: test_rcp_pd_256_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14pd %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x4c,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.rcp14.pd.256(<4 x double> %a0, <4 x double> zeroinitializer, i8 -1)
@@ -3228,7 +3228,7 @@ define <4 x double> @test_rcp_pd_256_rr(<4 x double> %a0) {
define <4 x double> @test_rcp_pd_256_rrkz(<4 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rcp_pd_256_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14pd %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x4c,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3238,7 +3238,7 @@ define <4 x double> @test_rcp_pd_256_rrkz(<4 x double> %a0, i8 %mask) {
define <4 x double> @test_rcp_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_rcp_pd_256_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14pd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x4c,0xc8]
; CHECK-NEXT: vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1]
@@ -3249,7 +3249,7 @@ define <4 x double> @test_rcp_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i8
define <2 x double> @test_rcp_pd_128_rr(<2 x double> %a0) {
; CHECK-LABEL: test_rcp_pd_128_rr:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vrcp14pd %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x4c,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.rcp14.pd.128(<2 x double> %a0, <2 x double> zeroinitializer, i8 -1)
@@ -3258,7 +3258,7 @@ define <2 x double> @test_rcp_pd_128_rr(<2 x double> %a0) {
define <2 x double> @test_rcp_pd_128_rrkz(<2 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rcp_pd_128_rrkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14pd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x4c,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3268,7 +3268,7 @@ define <2 x double> @test_rcp_pd_128_rrkz(<2 x double> %a0, i8 %mask) {
define <2 x double> @test_rcp_pd_128_rrk(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_rcp_pd_128_rrk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x4c,0xc8]
; CHECK-NEXT: vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
@@ -3284,7 +3284,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32>, <4 x i32>, <4 x i
define <4 x i32>@test_int_x86_avx512_mask_prorv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vprorvd %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
; CHECK-NEXT: vprorvd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
@@ -3304,7 +3304,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32>, <8 x i32>, <8 x i
define <8 x i32>@test_int_x86_avx512_mask_prorv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vprorvd %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
; CHECK-NEXT: vprorvd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
@@ -3324,7 +3324,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64>, <2 x i64>, <2 x i
define <2 x i64>@test_int_x86_avx512_mask_prorv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vprorvq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
; CHECK-NEXT: vprorvq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
@@ -3344,7 +3344,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64>, <4 x i64>, <4 x i
define <4 x i64>@test_int_x86_avx512_mask_prorv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vprorvq %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
; CHECK-NEXT: vprorvq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
@@ -3364,7 +3364,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32>, i32, <4 x i32>, i8
define <4 x i32>@test_int_x86_avx512_mask_prol_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prol_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprold $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
; CHECK-NEXT: vprold $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc8,0x03]
@@ -3384,7 +3384,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32>, i32, <8 x i32>, i8
define <8 x i32>@test_int_x86_avx512_mask_prol_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prol_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprold $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
; CHECK-NEXT: vprold $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc8,0x03]
@@ -3404,7 +3404,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64>, i32, <2 x i64>, i8
define <2 x i64>@test_int_x86_avx512_mask_prol_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prol_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xc8,0x03]
@@ -3424,7 +3424,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64>, i32, <4 x i64>, i8
define <4 x i64>@test_int_x86_avx512_mask_prol_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prol_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc8,0x03]
@@ -3444,7 +3444,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32>, <4 x i32>, <4 x i
define <4 x i32>@test_int_x86_avx512_mask_prolv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prolv_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vprolvd %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
; CHECK-NEXT: vprolvd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
@@ -3464,7 +3464,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32>, <8 x i32>, <8 x i
define <8 x i32>@test_int_x86_avx512_mask_prolv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prolv_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vprolvd %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
; CHECK-NEXT: vprolvd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
@@ -3484,7 +3484,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64>, <2 x i64>, <2 x i
define <2 x i64>@test_int_x86_avx512_mask_prolv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prolv_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vprolvq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
; CHECK-NEXT: vprolvq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
@@ -3504,7 +3504,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64>, <4 x i64>, <4 x i
define <4 x i64>@test_int_x86_avx512_mask_prolv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prolv_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vprolvq %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
; CHECK-NEXT: vprolvq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
@@ -3524,7 +3524,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32>, i32, <4 x i32>, i8
define <4 x i32>@test_int_x86_avx512_mask_pror_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pror_d_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprord $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
; CHECK-NEXT: vprord $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc0,0x03]
@@ -3544,7 +3544,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32>, i32, <8 x i32>, i8
define <8 x i32>@test_int_x86_avx512_mask_pror_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pror_d_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprord $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
; CHECK-NEXT: vprord $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc0,0x03]
@@ -3564,7 +3564,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64>, i32, <2 x i64>, i8
define <2 x i64>@test_int_x86_avx512_mask_pror_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pror_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xc0,0x03]
@@ -3584,7 +3584,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64>, i32, <4 x i64>, i8
define <4 x i64>@test_int_x86_avx512_mask_pror_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pror_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc0,0x03]
@@ -3604,7 +3604,7 @@ declare <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double>, <4 x i64
define <4 x double>@test_int_x86_avx512_mask_permvar_df_256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_df_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermpd %ymm0, %ymm1, %ymm3 ## encoding: [0x62,0xf2,0xf5,0x28,0x16,0xd8]
; CHECK-NEXT: vpermpd %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0x16,0xd0]
@@ -3624,7 +3624,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64>, <4 x i64>, <4
define <4 x i64>@test_int_x86_avx512_mask_permvar_di_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_di_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermq %ymm0, %ymm1, %ymm3 ## encoding: [0x62,0xf2,0xf5,0x28,0x36,0xd8]
; CHECK-NEXT: vpermq %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0x36,0xd0]
@@ -3644,7 +3644,7 @@ declare <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float>, <8 x i32>,
define <8 x float>@test_int_x86_avx512_mask_permvar_sf_256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_sf_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xd8]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0x16,0xd0]
@@ -3664,7 +3664,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_mask_permvar_si_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_si_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x36,0xd8]
; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0x36,0xd0]
@@ -3684,7 +3684,7 @@ declare <2 x double> @llvm.x86.avx512.mask.fixupimm.pd.128(<2 x double>, <2 x do
define <2 x double>@test_int_x86_avx512_mask_fixupimm_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
; CHECK-NEXT: vfixupimmpd $5, %xmm2, %xmm1, %xmm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0x54,0xda,0x05]
@@ -3706,7 +3706,7 @@ declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.pd.128(<2 x double>, <2 x d
define <2 x double>@test_int_x86_avx512_maskz_fixupimm_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
; CHECK-NEXT: vfixupimmpd $5, %xmm2, %xmm1, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0x89,0x54,0xda,0x05]
@@ -3726,7 +3726,7 @@ declare <4 x double> @llvm.x86.avx512.mask.fixupimm.pd.256(<4 x double>, <4 x do
define <4 x double>@test_int_x86_avx512_mask_fixupimm_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
; CHECK-NEXT: vfixupimmpd $4, %ymm2, %ymm1, %ymm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x54,0xda,0x04]
@@ -3748,7 +3748,7 @@ declare <4 x double> @llvm.x86.avx512.maskz.fixupimm.pd.256(<4 x double>, <4 x d
define <4 x double>@test_int_x86_avx512_maskz_fixupimm_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
; CHECK-NEXT: vfixupimmpd $5, %ymm2, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xda,0x05]
@@ -3771,7 +3771,7 @@ declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ps.128(<4 x float>, <4 x floa
define <4 x float>@test_int_x86_avx512_mask_fixupimm_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0x75,0x08,0x54,0xda,0x05]
@@ -3794,7 +3794,7 @@ declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ps.128(<4 x float>, <4 x flo
define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0x75,0x08,0x54,0xda,0x05]
@@ -3817,7 +3817,7 @@ declare <8 x float> @llvm.x86.avx512.mask.fixupimm.ps.256(<8 x float>, <8 x floa
define <8 x float>@test_int_x86_avx512_mask_fixupimm_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x54,0xda,0x05]
@@ -3840,7 +3840,7 @@ declare <8 x float> @llvm.x86.avx512.maskz.fixupimm.ps.256(<8 x float>, <8 x flo
define <8 x float>@test_int_x86_avx512_maskz_fixupimm_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x54,0xda,0x05]
@@ -3861,7 +3861,7 @@ define <8 x float>@test_int_x86_avx512_maskz_fixupimm_ps_256(<8 x float> %x0, <8
define <2 x i64> @test_x86_avx512_psra_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0xe2,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.psra.q.128(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -3869,7 +3869,7 @@ define <2 x i64> @test_x86_avx512_psra_q_128(<2 x i64> %a0, <2 x i64> %a1) {
}
define <2 x i64> @test_x86_avx512_mask_psra_q_128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xe2,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -3882,7 +3882,7 @@ define <2 x i64> @test_x86_avx512_mask_psra_q_128(<2 x i64> %a0, <2 x i64> %a1,
}
define <2 x i64> @test_x86_avx512_maskz_psra_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xe2,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3897,7 +3897,7 @@ declare <2 x i64> @llvm.x86.avx512.psra.q.128(<2 x i64>, <2 x i64>) nounwind rea
define <4 x i64> @test_x86_avx512_psra_q_256(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0xe2,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.psra.q.256(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -3905,7 +3905,7 @@ define <4 x i64> @test_x86_avx512_psra_q_256(<4 x i64> %a0, <2 x i64> %a1) {
}
define <4 x i64> @test_x86_avx512_mask_psra_q_256(<4 x i64> %a0, <2 x i64> %a1, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xe2,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -3918,7 +3918,7 @@ define <4 x i64> @test_x86_avx512_mask_psra_q_256(<4 x i64> %a0, <2 x i64> %a1,
}
define <4 x i64> @test_x86_avx512_maskz_psra_q_256(<4 x i64> %a0, <2 x i64> %a1, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xe2,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3933,7 +3933,7 @@ declare <4 x i64> @llvm.x86.avx512.psra.q.256(<4 x i64>, <2 x i64>) nounwind rea
define <2 x i64> @test_x86_avx512_psrai_q_128(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq $7, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xe0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.psrai.q.128(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -3941,7 +3941,7 @@ define <2 x i64> @test_x86_avx512_psrai_q_128(<2 x i64> %a0) {
}
define <2 x i64> @test_x86_avx512_mask_psrai_q_128(<2 x i64> %a0, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq $7, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xe0,0x07]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
@@ -3954,7 +3954,7 @@ define <2 x i64> @test_x86_avx512_mask_psrai_q_128(<2 x i64> %a0, <2 x i64> %pas
}
define <2 x i64> @test_x86_avx512_maskz_psrai_q_128(<2 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq $7, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x72,0xe0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3969,7 +3969,7 @@ declare <2 x i64> @llvm.x86.avx512.psrai.q.128(<2 x i64>, i32) nounwind readnone
define <4 x i64> @test_x86_avx512_psrai_q_256(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq $7, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xe0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.psrai.q.256(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -3977,7 +3977,7 @@ define <4 x i64> @test_x86_avx512_psrai_q_256(<4 x i64> %a0) {
}
define <4 x i64> @test_x86_avx512_mask_psrai_q_256(<4 x i64> %a0, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq $7, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xe0,0x07]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -3990,7 +3990,7 @@ define <4 x i64> @test_x86_avx512_mask_psrai_q_256(<4 x i64> %a0, <4 x i64> %pas
}
define <4 x i64> @test_x86_avx512_maskz_psrai_q_256(<4 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsraq $7, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xe0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4004,7 +4004,7 @@ declare <4 x i64> @llvm.x86.avx512.psrai.q.256(<4 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_avx512_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravq %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x46,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64> %a0, <2 x i64> %a1)
@@ -4013,7 +4013,7 @@ define <2 x i64> @test_x86_avx512_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_x86_avx512_mask_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsravq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x46,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -4027,7 +4027,7 @@ define <2 x i64> @test_x86_avx512_mask_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1,
define <2 x i64> @test_x86_avx512_maskz_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_q_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsravq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x46,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4042,7 +4042,7 @@ declare <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64>, <2 x i64>) nounwind re
define <4 x i64> @test_x86_avx512_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravq %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x46,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -4051,7 +4051,7 @@ define <4 x i64> @test_x86_avx512_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @test_x86_avx512_mask_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsravq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x46,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -4065,7 +4065,7 @@ define <4 x i64> @test_x86_avx512_mask_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1,
define <4 x i64> @test_x86_avx512_maskz_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_q_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsravq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x46,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4082,7 +4082,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float>, <8 x float>
define <8 x float> @test_mask_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd256_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4094,7 +4094,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float>, <4 x float>
define <4 x float> @test_mask_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4106,7 +4106,7 @@ declare <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double>, <4 x doub
define <4 x double> @test_mask_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask) {
; CHECK-LABEL: test_mask_fmadd256_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4118,7 +4118,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double>, <2 x doub
define <2 x double> @test_mask_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: test_mask_fmadd128_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4128,7 +4128,7 @@ define <2 x double> @test_mask_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2
define <2 x double>@test_int_x86_avx512_mask_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
@@ -4145,7 +4145,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double>, <2 x dou
define <2 x double>@test_int_x86_avx512_mask3_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
@@ -4162,7 +4162,7 @@ declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double>, <2 x dou
define <2 x double>@test_int_x86_avx512_maskz_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
@@ -4177,7 +4177,7 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmadd_pd_128(<2 x double> %x0, <2
define <4 x double>@test_int_x86_avx512_mask_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
@@ -4194,7 +4194,7 @@ declare <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double>, <4 x dou
define <4 x double>@test_int_x86_avx512_mask3_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
@@ -4211,7 +4211,7 @@ declare <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double>, <4 x dou
define <4 x double>@test_int_x86_avx512_maskz_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
@@ -4226,7 +4226,7 @@ define <4 x double>@test_int_x86_avx512_maskz_vfmadd_pd_256(<4 x double> %x0, <4
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
@@ -4243,7 +4243,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float>, <4 x float
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
@@ -4260,7 +4260,7 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float>, <4 x float
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
@@ -4275,7 +4275,7 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ps_128(<4 x float> %x0, <4 x
define <8 x float>@test_int_x86_avx512_mask_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
@@ -4292,7 +4292,7 @@ declare <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float>, <8 x float
define <8 x float>@test_int_x86_avx512_mask3_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
@@ -4309,7 +4309,7 @@ declare <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float>, <8 x float
define <8 x float>@test_int_x86_avx512_maskz_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
@@ -4327,7 +4327,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double>, <2 x dou
define <2 x double>@test_int_x86_avx512_mask3_vfmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaa,0xda]
@@ -4345,7 +4345,7 @@ declare <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double>, <4 x dou
define <4 x double>@test_int_x86_avx512_mask3_vfmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xaa,0xda]
@@ -4362,7 +4362,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float>, <4 x float
define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaa,0xda]
@@ -4379,7 +4379,7 @@ declare <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float>, <8 x float
define <8 x float>@test_int_x86_avx512_mask3_vfmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xaa,0xda]
@@ -4396,7 +4396,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float>, <8 x float
define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmadd256_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4408,7 +4408,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float>, <4 x float
define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmadd128_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4420,7 +4420,7 @@ declare <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double>, <4 x dou
define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmadd256_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4432,7 +4432,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double>, <2 x dou
define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmadd128_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4444,7 +4444,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float>, <8 x float
define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmsub256_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4456,7 +4456,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float>, <4 x float
define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmsub128_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4468,7 +4468,7 @@ declare <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double>, <4 x dou
define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmsub256_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4480,7 +4480,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double>, <2 x dou
define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmsub128_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4491,7 +4491,7 @@ define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1,
define <2 x double>@test_int_x86_avx512_mask_vfnmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xda]
@@ -4508,7 +4508,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double>, <2 x do
define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xda]
@@ -4523,7 +4523,7 @@ define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_128(<2 x double> %x0, <
define <4 x double>@test_int_x86_avx512_mask_vfnmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xda]
@@ -4540,7 +4540,7 @@ declare <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double>, <4 x do
define <4 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xda]
@@ -4555,7 +4555,7 @@ define <4 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_256(<4 x double> %x0, <
define <4 x float>@test_int_x86_avx512_mask_vfnmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xda]
@@ -4572,7 +4572,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float>, <4 x floa
define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xda]
@@ -4587,7 +4587,7 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_128(<4 x float> %x0, <4
define <8 x float>@test_int_x86_avx512_mask_vfnmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xda]
@@ -4604,7 +4604,7 @@ declare <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float>, <8 x floa
define <8 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xda]
@@ -4619,7 +4619,7 @@ define <8 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_256(<8 x float> %x0, <8
define <2 x double>@test_int_x86_avx512_mask_vfnmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfnmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xac,0xda]
@@ -4634,7 +4634,7 @@ define <2 x double>@test_int_x86_avx512_mask_vfnmadd_pd_128(<2 x double> %x0, <2
define <4 x double>@test_int_x86_avx512_mask_vfnmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfnmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xac,0xda]
@@ -4649,7 +4649,7 @@ define <4 x double>@test_int_x86_avx512_mask_vfnmadd_pd_256(<4 x double> %x0, <4
define <4 x float>@test_int_x86_avx512_mask_vfnmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xac,0xda]
@@ -4664,7 +4664,7 @@ define <4 x float>@test_int_x86_avx512_mask_vfnmadd_ps_128(<4 x float> %x0, <4 x
define <8 x float>@test_int_x86_avx512_mask_vfnmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xac,0xda]
@@ -4681,7 +4681,7 @@ declare <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float>, <8 x flo
define <8 x float> @test_mask_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask) {
; CHECK-LABEL: test_mask_fmaddsub256_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4693,7 +4693,7 @@ declare <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float>, <4 x flo
define <4 x float> @test_mask_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: test_mask_fmaddsub128_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4705,7 +4705,7 @@ declare <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double>, <4 x d
define <4 x double> @test_mask_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmaddsub256_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4717,7 +4717,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double>, <2 x d
define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmaddsub128_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4727,7 +4727,7 @@ define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a
define <2 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
@@ -4744,7 +4744,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double>, <2 x
define <2 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
@@ -4761,7 +4761,7 @@ declare <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double>, <2 x
define <2 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
@@ -4776,7 +4776,7 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_128(<2 x double> %x0,
define <4 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
@@ -4793,7 +4793,7 @@ declare <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double>, <4 x
define <4 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
@@ -4810,7 +4810,7 @@ declare <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double>, <4 x
define <4 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
@@ -4825,7 +4825,7 @@ define <4 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_256(<4 x double> %x0,
define <4 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
@@ -4842,7 +4842,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float>, <4 x fl
define <4 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
@@ -4859,7 +4859,7 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float>, <4 x fl
define <4 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
@@ -4874,7 +4874,7 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_128(<4 x float> %x0, <
define <8 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
@@ -4891,7 +4891,7 @@ declare <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float>, <8 x fl
define <8 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
@@ -4908,7 +4908,7 @@ declare <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float>, <8 x fl
define <8 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
@@ -4925,7 +4925,7 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double>, <2 x
define <2 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
; CHECK-NEXT: vfmsubadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa7,0xda]
@@ -4942,7 +4942,7 @@ declare <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double>, <4 x
define <4 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
; CHECK-NEXT: vfmsubadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa7,0xda]
@@ -4959,7 +4959,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float>, <4 x fl
define <4 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
; CHECK-NEXT: vfmsubadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa7,0xda]
@@ -4976,7 +4976,7 @@ declare <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float>, <8 x fl
define <8 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
; CHECK-NEXT: vfmsubadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa7,0xda]
@@ -4992,7 +4992,7 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_256(<8 x float> %x0, <
define <4 x float> @test_mask_vfmadd128_ps_r(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_r:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5002,7 +5002,7 @@ define <4 x float> @test_mask_vfmadd128_ps_r(<4 x float> %a0, <4 x float> %a1, <
define <4 x float> @test_mask_vfmadd128_ps_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
@@ -5011,7 +5011,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rz(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5022,7 +5022,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmka:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5033,7 +5033,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%a2 = load <4 x float>, <4 x float>* %ptr_a2
@@ -5043,7 +5043,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmkza:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%a2 = load <4 x float>, <4 x float>* %ptr_a2, align 4
@@ -5053,7 +5053,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a
define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5068,7 +5068,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmba:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5083,7 +5083,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmbz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_a2
@@ -5097,7 +5097,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmbza:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_a2, align 4
@@ -5111,7 +5111,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a
define <2 x double> @test_mask_vfmadd128_pd_r(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_pd_r:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5121,7 +5121,7 @@ define <2 x double> @test_mask_vfmadd128_pd_r(<2 x double> %a0, <2 x double> %a1
define <2 x double> @test_mask_vfmadd128_pd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; CHECK-LABEL: test_mask_vfmadd128_pd_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
@@ -5130,7 +5130,7 @@ define <2 x double> @test_mask_vfmadd128_pd_rz(<2 x double> %a0, <2 x double> %a
define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_pd_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5141,7 +5141,7 @@ define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %
define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_pd_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%a2 = load <2 x double>, <2 x double>* %ptr_a2
@@ -5151,7 +5151,7 @@ define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double>
define <4 x double> @test_mask_vfmadd256_pd_r(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd256_pd_r:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5161,7 +5161,7 @@ define <4 x double> @test_mask_vfmadd256_pd_r(<4 x double> %a0, <4 x double> %a1
define <4 x double> @test_mask_vfmadd256_pd_rz(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; CHECK-LABEL: test_mask_vfmadd256_pd_rz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
@@ -5170,7 +5170,7 @@ define <4 x double> @test_mask_vfmadd256_pd_rz(<4 x double> %a0, <4 x double> %a
define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd256_pd_rmk:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5181,7 +5181,7 @@ define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %
define <4 x double> @test_mask_vfmadd256_pd_rmkz(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd256_pd_rmkz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%a2 = load <4 x double>, <4 x double>* %ptr_a2
diff --git a/test/CodeGen/X86/avx512vl-logic.ll b/test/CodeGen/X86/avx512vl-logic.ll
index 6e697cf59a4..52b135c7c29 100644
--- a/test/CodeGen/X86/avx512vl-logic.ll
+++ b/test/CodeGen/X86/avx512vl-logic.ll
@@ -6,7 +6,7 @@
define <8 x i32> @vpandd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandd256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; CHECK-NEXT: vpand %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -19,7 +19,7 @@ entry:
define <8 x i32> @vpandnd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandnd256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm1
; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -33,7 +33,7 @@ entry:
define <8 x i32> @vpord256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpord256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; CHECK-NEXT: vpor %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -46,7 +46,7 @@ entry:
define <8 x i32> @vpxord256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpxord256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -59,7 +59,7 @@ entry:
define <4 x i64> @vpandq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandq256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0
; CHECK-NEXT: vpand %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -72,7 +72,7 @@ entry:
define <4 x i64> @vpandnq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandnq256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0
; CHECK-NEXT: vpandn %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -86,7 +86,7 @@ entry:
define <4 x i64> @vporq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vporq256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0
; CHECK-NEXT: vpor %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -99,7 +99,7 @@ entry:
define <4 x i64> @vpxorq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpxorq256:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0
; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -114,7 +114,7 @@ entry:
define <4 x i32> @vpandd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandd128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-NEXT: vpand %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -127,7 +127,7 @@ entry:
define <4 x i32> @vpandnd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandnd128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-NEXT: vpandn %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -141,7 +141,7 @@ entry:
define <4 x i32> @vpord128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpord128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-NEXT: vpor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -154,7 +154,7 @@ entry:
define <4 x i32> @vpxord128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpxord128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -167,7 +167,7 @@ entry:
define <2 x i64> @vpandq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandq128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpand %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -180,7 +180,7 @@ entry:
define <2 x i64> @vpandnq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandnq128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpandn %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -194,7 +194,7 @@ entry:
define <2 x i64> @vporq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vporq128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -207,7 +207,7 @@ entry:
define <2 x i64> @vpxorq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpxorq128:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -221,13 +221,13 @@ entry:
define <4 x double> @test_mm256_mask_andnot_pd(<4 x double> %__W, i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; KNL-LABEL: test_mm256_mask_andnot_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnq %ymm2, %ymm1, %ymm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_mask_andnot_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %ymm2, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -245,13 +245,13 @@ entry:
define <4 x double> @test_mm256_maskz_andnot_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; KNL-LABEL: test_mm256_maskz_andnot_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnq %ymm1, %ymm0, %ymm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_maskz_andnot_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %ymm1, %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -269,13 +269,13 @@ entry:
define <2 x double> @test_mm_mask_andnot_pd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; KNL-LABEL: test_mm_mask_andnot_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnq %xmm2, %xmm1, %xmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_mask_andnot_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -293,13 +293,13 @@ entry:
define <2 x double> @test_mm_maskz_andnot_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; KNL-LABEL: test_mm_maskz_andnot_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnq %xmm1, %xmm0, %xmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_maskz_andnot_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %xmm1, %xmm0, %xmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -317,13 +317,13 @@ entry:
define <8 x float> @test_mm256_mask_andnot_ps(<8 x float> %__W, i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; KNL-LABEL: test_mm256_mask_andnot_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnd %ymm2, %ymm1, %ymm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_mask_andnot_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %ymm2, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -340,13 +340,13 @@ entry:
define <8 x float> @test_mm256_maskz_andnot_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; KNL-LABEL: test_mm256_maskz_andnot_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnd %ymm1, %ymm0, %ymm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_maskz_andnot_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %ymm1, %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -363,13 +363,13 @@ entry:
define <4 x float> @test_mm_mask_andnot_ps(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; KNL-LABEL: test_mm_mask_andnot_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnd %xmm2, %xmm1, %xmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_mask_andnot_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -387,13 +387,13 @@ entry:
define <4 x float> @test_mm_maskz_andnot_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; KNL-LABEL: test_mm_maskz_andnot_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandnd %xmm1, %xmm0, %xmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_maskz_andnot_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %xmm1, %xmm0, %xmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -411,13 +411,13 @@ entry:
define <4 x double> @test_mm256_mask_and_pd(<4 x double> %__W, i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; KNL-LABEL: test_mm256_mask_and_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %ymm1, %ymm2, %ymm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_mask_and_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %ymm1, %ymm2, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -434,13 +434,13 @@ entry:
define <4 x double> @test_mm256_maskz_and_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; KNL-LABEL: test_mm256_maskz_and_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %ymm0, %ymm1, %ymm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_maskz_and_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %ymm0, %ymm1, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -457,13 +457,13 @@ entry:
define <2 x double> @test_mm_mask_and_pd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; KNL-LABEL: test_mm_mask_and_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %xmm1, %xmm2, %xmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_mask_and_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %xmm1, %xmm2, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -480,13 +480,13 @@ entry:
define <2 x double> @test_mm_maskz_and_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; KNL-LABEL: test_mm_maskz_and_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandq %xmm0, %xmm1, %xmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_maskz_and_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %xmm0, %xmm1, %xmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -503,13 +503,13 @@ entry:
define <8 x float> @test_mm256_mask_and_ps(<8 x float> %__W, i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; KNL-LABEL: test_mm256_mask_and_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %ymm1, %ymm2, %ymm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_mask_and_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %ymm1, %ymm2, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -525,13 +525,13 @@ entry:
define <8 x float> @test_mm256_maskz_and_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; KNL-LABEL: test_mm256_maskz_and_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %ymm0, %ymm1, %ymm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_maskz_and_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %ymm0, %ymm1, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -547,13 +547,13 @@ entry:
define <4 x float> @test_mm_mask_and_ps(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; KNL-LABEL: test_mm_mask_and_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %xmm1, %xmm2, %xmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_mask_and_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %xmm1, %xmm2, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -570,13 +570,13 @@ entry:
define <4 x float> @test_mm_maskz_and_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; KNL-LABEL: test_mm_maskz_and_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpandd %xmm0, %xmm1, %xmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_maskz_and_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %xmm0, %xmm1, %xmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -593,13 +593,13 @@ entry:
define <4 x double> @test_mm256_mask_xor_pd(<4 x double> %__W, i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; KNL-LABEL: test_mm256_mask_xor_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxorq %ymm2, %ymm1, %ymm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_mask_xor_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %ymm2, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -616,13 +616,13 @@ entry:
define <4 x double> @test_mm256_maskz_xor_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; KNL-LABEL: test_mm256_maskz_xor_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxorq %ymm1, %ymm0, %ymm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_maskz_xor_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %ymm1, %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -639,13 +639,13 @@ entry:
define <2 x double> @test_mm_mask_xor_pd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; KNL-LABEL: test_mm_mask_xor_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxorq %xmm2, %xmm1, %xmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_mask_xor_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -662,13 +662,13 @@ entry:
define <2 x double> @test_mm_maskz_xor_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; KNL-LABEL: test_mm_maskz_xor_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxorq %xmm1, %xmm0, %xmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_maskz_xor_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %xmm1, %xmm0, %xmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -685,13 +685,13 @@ entry:
define <8 x float> @test_mm256_mask_xor_ps(<8 x float> %__W, i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; KNL-LABEL: test_mm256_mask_xor_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord %ymm2, %ymm1, %ymm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_mask_xor_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %ymm2, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -707,13 +707,13 @@ entry:
define <8 x float> @test_mm256_maskz_xor_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; KNL-LABEL: test_mm256_maskz_xor_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord %ymm1, %ymm0, %ymm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_maskz_xor_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %ymm1, %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -729,13 +729,13 @@ entry:
define <4 x float> @test_mm_mask_xor_ps(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; KNL-LABEL: test_mm_mask_xor_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord %xmm2, %xmm1, %xmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_mask_xor_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -752,13 +752,13 @@ entry:
define <4 x float> @test_mm_maskz_xor_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; KNL-LABEL: test_mm_maskz_xor_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord %xmm1, %xmm0, %xmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_maskz_xor_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %xmm1, %xmm0, %xmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -775,13 +775,13 @@ entry:
define <4 x double> @test_mm256_mask_or_pd(<4 x double> %__W, i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; KNL-LABEL: test_mm256_mask_or_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vporq %ymm1, %ymm2, %ymm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_mask_or_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %ymm1, %ymm2, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -798,13 +798,13 @@ entry:
define <4 x double> @test_mm256_maskz_or_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B) {
; KNL-LABEL: test_mm256_maskz_or_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vporq %ymm0, %ymm1, %ymm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_maskz_or_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %ymm0, %ymm1, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -821,13 +821,13 @@ entry:
define <2 x double> @test_mm_mask_or_pd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; KNL-LABEL: test_mm_mask_or_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vporq %xmm1, %xmm2, %xmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_mask_or_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %xmm1, %xmm2, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -844,13 +844,13 @@ entry:
define <2 x double> @test_mm_maskz_or_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; KNL-LABEL: test_mm_maskz_or_pd:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vporq %xmm0, %xmm1, %xmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_maskz_or_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %xmm0, %xmm1, %xmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -867,13 +867,13 @@ entry:
define <8 x float> @test_mm256_mask_or_ps(<8 x float> %__W, i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; KNL-LABEL: test_mm256_mask_or_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpord %ymm1, %ymm2, %ymm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_mask_or_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %ymm1, %ymm2, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -889,13 +889,13 @@ entry:
define <8 x float> @test_mm256_maskz_or_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B) {
; KNL-LABEL: test_mm256_maskz_or_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpord %ymm0, %ymm1, %ymm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm256_maskz_or_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %ymm0, %ymm1, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -911,13 +911,13 @@ entry:
define <4 x float> @test_mm_mask_or_ps(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; KNL-LABEL: test_mm_mask_or_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpord %xmm1, %xmm2, %xmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_mask_or_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %xmm1, %xmm2, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -934,13 +934,13 @@ entry:
define <4 x float> @test_mm_maskz_or_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; KNL-LABEL: test_mm_maskz_or_ps:
-; KNL: ## BB#0: ## %entry
+; KNL: ## %bb.0: ## %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpord %xmm0, %xmm1, %xmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: test_mm_maskz_or_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %xmm0, %xmm1, %xmm0 {%k1} {z}
; SKX-NEXT: retq
diff --git a/test/CodeGen/X86/avx512vl-mov.ll b/test/CodeGen/X86/avx512vl-mov.ll
index 870f9e0748f..f0ce312305f 100644
--- a/test/CodeGen/X86/avx512vl-mov.ll
+++ b/test/CodeGen/X86/avx512vl-mov.ll
@@ -3,7 +3,7 @@
define <8 x i32> @test_256_1(i8 * %addr) {
; CHECK-LABEL: test_256_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i32>*
@@ -13,7 +13,7 @@ define <8 x i32> @test_256_1(i8 * %addr) {
define <8 x i32> @test_256_2(i8 * %addr) {
; CHECK-LABEL: test_256_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i32>*
@@ -23,7 +23,7 @@ define <8 x i32> @test_256_2(i8 * %addr) {
define void @test_256_3(i8 * %addr, <4 x i64> %data) {
; CHECK-LABEL: test_256_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x i64>*
@@ -33,7 +33,7 @@ define void @test_256_3(i8 * %addr, <4 x i64> %data) {
define void @test_256_4(i8 * %addr, <8 x i32> %data) {
; CHECK-LABEL: test_256_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i32>*
@@ -43,7 +43,7 @@ define void @test_256_4(i8 * %addr, <8 x i32> %data) {
define void @test_256_5(i8 * %addr, <8 x i32> %data) {
; CHECK-LABEL: test_256_5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x i32>*
@@ -53,7 +53,7 @@ define void @test_256_5(i8 * %addr, <8 x i32> %data) {
define <4 x i64> @test_256_6(i8 * %addr) {
; CHECK-LABEL: test_256_6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x i64>*
@@ -63,7 +63,7 @@ define <4 x i64> @test_256_6(i8 * %addr) {
define void @test_256_7(i8 * %addr, <4 x i64> %data) {
; CHECK-LABEL: test_256_7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x i64>*
@@ -73,7 +73,7 @@ define void @test_256_7(i8 * %addr, <4 x i64> %data) {
define <4 x i64> @test_256_8(i8 * %addr) {
; CHECK-LABEL: test_256_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x i64>*
@@ -83,7 +83,7 @@ define <4 x i64> @test_256_8(i8 * %addr) {
define void @test_256_9(i8 * %addr, <4 x double> %data) {
; CHECK-LABEL: test_256_9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x double>*
@@ -93,7 +93,7 @@ define void @test_256_9(i8 * %addr, <4 x double> %data) {
define <4 x double> @test_256_10(i8 * %addr) {
; CHECK-LABEL: test_256_10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x double>*
@@ -103,7 +103,7 @@ define <4 x double> @test_256_10(i8 * %addr) {
define void @test_256_11(i8 * %addr, <8 x float> %data) {
; CHECK-LABEL: test_256_11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x float>*
@@ -113,7 +113,7 @@ define void @test_256_11(i8 * %addr, <8 x float> %data) {
define <8 x float> @test_256_12(i8 * %addr) {
; CHECK-LABEL: test_256_12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x float>*
@@ -123,7 +123,7 @@ define <8 x float> @test_256_12(i8 * %addr) {
define void @test_256_13(i8 * %addr, <4 x double> %data) {
; CHECK-LABEL: test_256_13:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x double>*
@@ -133,7 +133,7 @@ define void @test_256_13(i8 * %addr, <4 x double> %data) {
define <4 x double> @test_256_14(i8 * %addr) {
; CHECK-LABEL: test_256_14:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x double>*
@@ -143,7 +143,7 @@ define <4 x double> @test_256_14(i8 * %addr) {
define void @test_256_15(i8 * %addr, <8 x float> %data) {
; CHECK-LABEL: test_256_15:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x float>*
@@ -153,7 +153,7 @@ define void @test_256_15(i8 * %addr, <8 x float> %data) {
define <8 x float> @test_256_16(i8 * %addr) {
; CHECK-LABEL: test_256_16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <8 x float>*
@@ -163,7 +163,7 @@ define <8 x float> @test_256_16(i8 * %addr) {
define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
; CHECK-LABEL: test_256_17:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqa32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6f,0x07]
@@ -177,7 +177,7 @@ define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
; CHECK-LABEL: test_256_18:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x6f,0x07]
@@ -191,7 +191,7 @@ define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
; CHECK-LABEL: test_256_19:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqd %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqa32 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6f,0x07]
@@ -205,7 +205,7 @@ define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
; CHECK-LABEL: test_256_20:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqd %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x6f,0x07]
@@ -219,7 +219,7 @@ define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
; CHECK-LABEL: test_256_21:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqa64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x6f,0x07]
@@ -233,7 +233,7 @@ define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
; CHECK-LABEL: test_256_22:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0x6f,0x07]
@@ -247,7 +247,7 @@ define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
; CHECK-LABEL: test_256_23:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqa64 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x6f,0x07]
@@ -261,7 +261,7 @@ define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
; CHECK-LABEL: test_256_24:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfe,0xa9,0x6f,0x07]
@@ -275,7 +275,7 @@ define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
; CHECK-LABEL: test_256_25:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vcmpneq_oqps %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf1,0x74,0x28,0xc2,0xca,0x0c]
; CHECK-NEXT: vmovaps (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x28,0x07]
@@ -289,7 +289,7 @@ define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1
define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
; CHECK-LABEL: test_256_26:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vcmpneq_oqps %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf1,0x74,0x28,0xc2,0xca,0x0c]
; CHECK-NEXT: vmovups (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x10,0x07]
@@ -303,7 +303,7 @@ define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1
define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
; CHECK-LABEL: test_256_27:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vcmpneq_oqps %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc9,0x0c]
; CHECK-NEXT: vmovaps (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x28,0x07]
@@ -317,7 +317,7 @@ define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
; CHECK-LABEL: test_256_28:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vcmpneq_oqps %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc9,0x0c]
; CHECK-NEXT: vmovups (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x10,0x07]
@@ -331,7 +331,7 @@ define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
; CHECK-LABEL: test_256_29:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vmovapd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x28,0x07]
@@ -345,7 +345,7 @@ define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1
define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
; CHECK-LABEL: test_256_30:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
; CHECK-NEXT: vmovupd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x10,0x07]
@@ -359,7 +359,7 @@ define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1
define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
; CHECK-LABEL: test_256_31:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovapd (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x28,0x07]
@@ -373,7 +373,7 @@ define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
; CHECK-LABEL: test_256_32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovupd (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x10,0x07]
@@ -387,7 +387,7 @@ define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
define <4 x i32> @test_128_1(i8 * %addr) {
; CHECK-LABEL: test_128_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x i32>*
@@ -397,7 +397,7 @@ define <4 x i32> @test_128_1(i8 * %addr) {
define <4 x i32> @test_128_2(i8 * %addr) {
; CHECK-LABEL: test_128_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x i32>*
@@ -407,7 +407,7 @@ define <4 x i32> @test_128_2(i8 * %addr) {
define void @test_128_3(i8 * %addr, <2 x i64> %data) {
; CHECK-LABEL: test_128_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <2 x i64>*
@@ -417,7 +417,7 @@ define void @test_128_3(i8 * %addr, <2 x i64> %data) {
define void @test_128_4(i8 * %addr, <4 x i32> %data) {
; CHECK-LABEL: test_128_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x i32>*
@@ -427,7 +427,7 @@ define void @test_128_4(i8 * %addr, <4 x i32> %data) {
define void @test_128_5(i8 * %addr, <4 x i32> %data) {
; CHECK-LABEL: test_128_5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x i32>*
@@ -437,7 +437,7 @@ define void @test_128_5(i8 * %addr, <4 x i32> %data) {
define <2 x i64> @test_128_6(i8 * %addr) {
; CHECK-LABEL: test_128_6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <2 x i64>*
@@ -447,7 +447,7 @@ define <2 x i64> @test_128_6(i8 * %addr) {
define void @test_128_7(i8 * %addr, <2 x i64> %data) {
; CHECK-LABEL: test_128_7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <2 x i64>*
@@ -457,7 +457,7 @@ define void @test_128_7(i8 * %addr, <2 x i64> %data) {
define <2 x i64> @test_128_8(i8 * %addr) {
; CHECK-LABEL: test_128_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <2 x i64>*
@@ -467,7 +467,7 @@ define <2 x i64> @test_128_8(i8 * %addr) {
define void @test_128_9(i8 * %addr, <2 x double> %data) {
; CHECK-LABEL: test_128_9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <2 x double>*
@@ -477,7 +477,7 @@ define void @test_128_9(i8 * %addr, <2 x double> %data) {
define <2 x double> @test_128_10(i8 * %addr) {
; CHECK-LABEL: test_128_10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <2 x double>*
@@ -487,7 +487,7 @@ define <2 x double> @test_128_10(i8 * %addr) {
define void @test_128_11(i8 * %addr, <4 x float> %data) {
; CHECK-LABEL: test_128_11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x float>*
@@ -497,7 +497,7 @@ define void @test_128_11(i8 * %addr, <4 x float> %data) {
define <4 x float> @test_128_12(i8 * %addr) {
; CHECK-LABEL: test_128_12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x float>*
@@ -507,7 +507,7 @@ define <4 x float> @test_128_12(i8 * %addr) {
define void @test_128_13(i8 * %addr, <2 x double> %data) {
; CHECK-LABEL: test_128_13:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <2 x double>*
@@ -517,7 +517,7 @@ define void @test_128_13(i8 * %addr, <2 x double> %data) {
define <2 x double> @test_128_14(i8 * %addr) {
; CHECK-LABEL: test_128_14:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <2 x double>*
@@ -527,7 +527,7 @@ define <2 x double> @test_128_14(i8 * %addr) {
define void @test_128_15(i8 * %addr, <4 x float> %data) {
; CHECK-LABEL: test_128_15:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x float>*
@@ -537,7 +537,7 @@ define void @test_128_15(i8 * %addr, <4 x float> %data) {
define <4 x float> @test_128_16(i8 * %addr) {
; CHECK-LABEL: test_128_16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%vaddr = bitcast i8* %addr to <4 x float>*
@@ -547,7 +547,7 @@ define <4 x float> @test_128_16(i8 * %addr) {
define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
; CHECK-LABEL: test_128_17:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqa32 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6f,0x07]
@@ -561,7 +561,7 @@ define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
; CHECK-LABEL: test_128_18:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x6f,0x07]
@@ -575,7 +575,7 @@ define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
; CHECK-LABEL: test_128_19:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqa32 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6f,0x07]
@@ -589,7 +589,7 @@ define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
; CHECK-LABEL: test_128_20:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x6f,0x07]
@@ -603,7 +603,7 @@ define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
; CHECK-LABEL: test_128_21:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqa64 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x6f,0x07]
@@ -617,7 +617,7 @@ define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
; CHECK-LABEL: test_128_22:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0x6f,0x07]
@@ -631,7 +631,7 @@ define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
; CHECK-LABEL: test_128_23:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqa64 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x6f,0x07]
@@ -645,7 +645,7 @@ define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
; CHECK-LABEL: test_128_24:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfe,0x89,0x6f,0x07]
@@ -659,7 +659,7 @@ define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
; CHECK-LABEL: test_128_25:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vmovaps (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x28,0x07]
@@ -673,7 +673,7 @@ define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1)
define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
; CHECK-LABEL: test_128_26:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vmovups (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x10,0x07]
@@ -687,7 +687,7 @@ define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1)
define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
; CHECK-LABEL: test_128_27:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovaps (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x28,0x07]
@@ -701,7 +701,7 @@ define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
; CHECK-LABEL: test_128_28:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovups (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x10,0x07]
@@ -715,7 +715,7 @@ define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
; CHECK-LABEL: test_128_29:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vmovapd (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x28,0x07]
@@ -729,7 +729,7 @@ define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1
define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
; CHECK-LABEL: test_128_30:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
; CHECK-NEXT: vmovupd (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x10,0x07]
@@ -743,7 +743,7 @@ define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1
define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
; CHECK-LABEL: test_128_31:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovapd (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x28,0x07]
@@ -757,7 +757,7 @@ define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
define <2 x double> @test_128_32(i8 * %addr, <2 x i64> %mask1) {
; CHECK-LABEL: test_128_32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x04]
; CHECK-NEXT: vmovupd (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x10,0x07]
diff --git a/test/CodeGen/X86/avx512vl-vbroadcast.ll b/test/CodeGen/X86/avx512vl-vbroadcast.ll
index 9fc957297e2..7d24b8161e5 100644
--- a/test/CodeGen/X86/avx512vl-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512vl-vbroadcast.ll
@@ -4,7 +4,7 @@
declare void @func_f32(float)
define <8 x float> @_256_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _256_broadcast_ss_spill:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
@@ -22,7 +22,7 @@ define <8 x float> @_256_broadcast_ss_spill(float %x) {
define <4 x float> @_128_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _128_broadcast_ss_spill:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
@@ -41,7 +41,7 @@ define <4 x float> @_128_broadcast_ss_spill(float %x) {
declare void @func_f64(double)
define <4 x double> @_256_broadcast_sd_spill(double %x) {
; CHECK-LABEL: _256_broadcast_sd_spill:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vaddsd %xmm0, %xmm0, %xmm0
@@ -59,7 +59,7 @@ define <4 x double> @_256_broadcast_sd_spill(double %x) {
define <8 x float> @_inreg8xfloat(float %a) {
; CHECK-LABEL: _inreg8xfloat:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: retq
%b = insertelement <8 x float> undef, float %a, i32 0
@@ -69,7 +69,7 @@ define <8 x float> @_inreg8xfloat(float %a) {
define <8 x float> @_ss8xfloat_mask(<8 x float> %i, float %a, <8 x i32> %mask1) {
; CHECK-LABEL: _ss8xfloat_mask:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastss %xmm1, %ymm0 {%k1}
@@ -83,7 +83,7 @@ define <8 x float> @_ss8xfloat_mask(<8 x float> %i, float %a, <8 x i32> %mask1
define <8 x float> @_ss8xfloat_maskz(float %a, <8 x i32> %mask1) {
; CHECK-LABEL: _ss8xfloat_maskz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z}
@@ -97,7 +97,7 @@ define <8 x float> @_ss8xfloat_maskz(float %a, <8 x i32> %mask1) {
define <4 x float> @_inreg4xfloat(float %a) {
; CHECK-LABEL: _inreg4xfloat:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: retq
%b = insertelement <4 x float> undef, float %a, i32 0
@@ -107,7 +107,7 @@ define <4 x float> @_inreg4xfloat(float %a) {
define <4 x float> @_ss4xfloat_mask(<4 x float> %i, float %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xfloat_mask:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm1, %xmm0 {%k1}
@@ -121,7 +121,7 @@ define <4 x float> @_ss4xfloat_mask(<4 x float> %i, float %a, <4 x i32> %mask1
define <4 x float> @_ss4xfloat_maskz(float %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xfloat_maskz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z}
@@ -135,7 +135,7 @@ define <4 x float> @_ss4xfloat_maskz(float %a, <4 x i32> %mask1) {
define <4 x double> @_inreg4xdouble(double %a) {
; CHECK-LABEL: _inreg4xdouble:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: retq
%b = insertelement <4 x double> undef, double %a, i32 0
@@ -145,7 +145,7 @@ define <4 x double> @_inreg4xdouble(double %a) {
define <4 x double> @_ss4xdouble_mask(<4 x double> %i, double %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xdouble_mask:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1}
@@ -159,7 +159,7 @@ define <4 x double> @_ss4xdouble_mask(<4 x double> %i, double %a, <4 x i32> %m
define <4 x double> @_ss4xdouble_maskz(double %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xdouble_maskz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z}
@@ -173,7 +173,7 @@ define <4 x double> @_ss4xdouble_maskz(double %a, <4 x i32> %mask1) {
define <2 x double> @test_v2f64_broadcast_fold(<2 x double> *%a0, <2 x double> %a1) {
; CHECK-LABEL: test_v2f64_broadcast_fold:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = load <2 x double>, <2 x double> *%a0, align 16
@@ -184,7 +184,7 @@ define <2 x double> @test_v2f64_broadcast_fold(<2 x double> *%a0, <2 x double> %
define <2 x double> @test_v2f64_broadcast_fold_mask(<2 x double> *%a0, <2 x double> %a1, <2 x i64> %mask1, <2 x double> %a2) {
; CHECK-LABEL: test_v2f64_broadcast_fold_mask:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqq %xmm3, %xmm1, %k1
; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm2 {%k1}
diff --git a/test/CodeGen/X86/avx512vl-vec-cmp.ll b/test/CodeGen/X86/avx512vl-vec-cmp.ll
index e42b8d9a1bd..036ab037b3b 100644
--- a/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -4,13 +4,13 @@
define <4 x i64> @test256_1(<4 x i64> %x, <4 x i64> %y) nounwind {
; VLX-LABEL: test256_1:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
; VLX-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_1:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm2
; NoVLX-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: retq
@@ -21,13 +21,13 @@ define <4 x i64> @test256_1(<4 x i64> %x, <4 x i64> %y) nounwind {
define <4 x i64> @test256_2(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind {
; VLX-LABEL: test256_2:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; VLX-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_2:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vblendvpd %ymm0, %ymm2, %ymm1, %ymm0
; NoVLX-NEXT: retq
@@ -38,13 +38,13 @@ define <4 x i64> @test256_2(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind
define <8 x i32> @test256_3(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1) nounwind {
; VLX-LABEL: test256_3:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k1
; VLX-NEXT: vpblendmd %ymm2, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_3:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
@@ -59,13 +59,13 @@ define <8 x i32> @test256_3(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1) nounwind
define <4 x i64> @test256_4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind {
; VLX-LABEL: test256_4:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpnleuq %ymm1, %ymm0, %k1
; VLX-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_4:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm3, %ymm1, %ymm4
; NoVLX-NEXT: vpxor %ymm3, %ymm0, %ymm0
@@ -79,13 +79,13 @@ define <4 x i64> @test256_4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind
define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
; VLX-LABEL: test256_5:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_5:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -101,13 +101,13 @@ define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwin
define <8 x i32> @test256_5b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
; VLX-LABEL: test256_5b:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_5b:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -123,13 +123,13 @@ define <8 x i32> @test256_5b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test256_6:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_6:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -145,13 +145,13 @@ define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
define <8 x i32> @test256_6b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test256_6b:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_6b:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -167,13 +167,13 @@ define <8 x i32> @test256_6b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test256_7:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_7:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -189,13 +189,13 @@ define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
define <8 x i32> @test256_7b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test256_7b:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_7b:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -211,13 +211,13 @@ define <8 x i32> @test256_7b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test256_8:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleud (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_8:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -233,13 +233,13 @@ define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test256_8b:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleud (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_8b:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -255,14 +255,14 @@ define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32> %y1) nounwind {
; VLX-LABEL: test256_9:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; VLX-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 {%k1}
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_9:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
@@ -281,14 +281,14 @@ define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32>
define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) nounwind {
; VLX-LABEL: test256_10:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleq %ymm1, %ymm0, %k1
; VLX-NEXT: vpcmpleq %ymm2, %ymm3, %k1 {%k1}
; VLX-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_10:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm3
; NoVLX-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4
; NoVLX-NEXT: vpxor %ymm4, %ymm3, %ymm3
@@ -305,14 +305,14 @@ define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64
define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4 x i64> %y1) nounwind {
; VLX-LABEL: test256_11:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpgtq %ymm2, %ymm1, %k1
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k1 {%k1}
; VLX-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_11:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm3
; NoVLX-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
; NoVLX-NEXT: vpand %ymm2, %ymm3, %ymm2
@@ -328,14 +328,14 @@ define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4
define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8 x i32> %y1) nounwind {
; VLX-LABEL: test256_12:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled %ymm1, %ymm2, %k1
; VLX-NEXT: vpcmpleud (%rdi), %ymm0, %k1 {%k1}
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_12:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
@@ -355,13 +355,13 @@ define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8
define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind {
; VLX-LABEL: test256_13:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k1
; VLX-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_13:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm2
; NoVLX-NEXT: vpcmpeqq %ymm2, %ymm0, %ymm2
; NoVLX-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -376,13 +376,13 @@ define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind
define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind {
; VLX-LABEL: test256_14:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled (%rdi){1to8}, %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_14:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm2
@@ -400,14 +400,14 @@ define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind
define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32> %y1) nounwind {
; VLX-LABEL: test256_15:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled %ymm1, %ymm2, %k1
; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k1 {%k1}
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_15:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
@@ -429,14 +429,14 @@ define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32
define <4 x i64> @test256_16(<4 x i64> %x, i64* %yb.ptr, <4 x i64> %x1, <4 x i64> %y1) nounwind {
; VLX-LABEL: test256_16:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleq %ymm1, %ymm2, %k1
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k1 {%k1}
; VLX-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_16:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm2
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm3
; NoVLX-NEXT: vpcmpgtq %ymm3, %ymm0, %ymm3
@@ -455,13 +455,13 @@ define <4 x i64> @test256_16(<4 x i64> %x, i64* %yb.ptr, <4 x i64> %x1, <4 x i64
define <8 x i32> @test256_17(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
; VLX-LABEL: test256_17:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_17:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -477,13 +477,13 @@ define <8 x i32> @test256_17(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
define <8 x i32> @test256_18(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
; VLX-LABEL: test256_18:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_18:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -499,13 +499,13 @@ define <8 x i32> @test256_18(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
define <8 x i32> @test256_19(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
; VLX-LABEL: test256_19:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpnltud (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_19:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -521,13 +521,13 @@ define <8 x i32> @test256_19(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
define <8 x i32> @test256_20(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
; VLX-LABEL: test256_20:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleud (%rdi), %ymm0, %k1
; VLX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test256_20:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
@@ -543,13 +543,13 @@ define <8 x i32> @test256_20(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
define <2 x i64> @test128_1(<2 x i64> %x, <2 x i64> %y) nounwind {
; VLX-LABEL: test128_1:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
; VLX-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_1:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm2
; NoVLX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: retq
@@ -560,13 +560,13 @@ define <2 x i64> @test128_1(<2 x i64> %x, <2 x i64> %y) nounwind {
define <2 x i64> @test128_2(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind {
; VLX-LABEL: test128_2:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; VLX-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_2:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
; NoVLX-NEXT: retq
@@ -577,13 +577,13 @@ define <2 x i64> @test128_2(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind
define <4 x i32> @test128_3(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1) nounwind {
; VLX-LABEL: test128_3:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k1
; VLX-NEXT: vpblendmd %xmm2, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_3:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; NoVLX-NEXT: vpxor %xmm3, %xmm0, %xmm0
@@ -596,13 +596,13 @@ define <4 x i32> @test128_3(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1) nounwind
define <2 x i64> @test128_4(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind {
; VLX-LABEL: test128_4:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
; VLX-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_4:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm3, %xmm1, %xmm4
; NoVLX-NEXT: vpxor %xmm3, %xmm0, %xmm0
@@ -616,13 +616,13 @@ define <2 x i64> @test128_4(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind
define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwind {
; VLX-LABEL: test128_5:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_5:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: retq
@@ -634,13 +634,13 @@ define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwin
define <4 x i32> @test128_5b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwind {
; VLX-LABEL: test128_5b:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_5b:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: retq
@@ -652,13 +652,13 @@ define <4 x i32> @test128_5b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwi
define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_6:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_6:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: retq
@@ -670,13 +670,13 @@ define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
define <4 x i32> @test128_6b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_6b:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_6b:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: retq
@@ -688,13 +688,13 @@ define <4 x i32> @test128_6b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_7:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_7:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -708,13 +708,13 @@ define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
define <4 x i32> @test128_7b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_7b:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_7b:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -728,13 +728,13 @@ define <4 x i32> @test128_7b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_8:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleud (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_8:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpminud (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
@@ -747,13 +747,13 @@ define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
define <4 x i32> @test128_8b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_8b:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleud (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_8b:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpmaxud %xmm0, %xmm2, %xmm3
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
@@ -767,14 +767,14 @@ define <4 x i32> @test128_8b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32> %y1) nounwind {
; VLX-LABEL: test128_9:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; VLX-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 {%k1}
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_9:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm3
; NoVLX-NEXT: vpand %xmm2, %xmm3, %xmm2
@@ -789,14 +789,14 @@ define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32>
define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) nounwind {
; VLX-LABEL: test128_10:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleq %xmm1, %xmm0, %k1
; VLX-NEXT: vpcmpleq %xmm2, %xmm3, %k1 {%k1}
; VLX-NEXT: vpblendmq %xmm0, %xmm2, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_10:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm3
; NoVLX-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; NoVLX-NEXT: vpxor %xmm4, %xmm3, %xmm3
@@ -813,14 +813,14 @@ define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64
define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2 x i64> %y1) nounwind {
; VLX-LABEL: test128_11:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpgtq %xmm2, %xmm1, %k1
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k1 {%k1}
; VLX-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_11:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm3
; NoVLX-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm2
; NoVLX-NEXT: vpand %xmm2, %xmm3, %xmm2
@@ -836,14 +836,14 @@ define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2
define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4 x i32> %y1) nounwind {
; VLX-LABEL: test128_12:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled %xmm1, %xmm2, %k1
; VLX-NEXT: vpcmpleud (%rdi), %xmm0, %k1 {%k1}
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_12:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm2
; NoVLX-NEXT: vpminud (%rdi), %xmm0, %xmm3
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm3
@@ -860,13 +860,13 @@ define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4
define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind {
; VLX-LABEL: test128_13:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k1
; VLX-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_13:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm2
; NoVLX-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm2
; NoVLX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -881,13 +881,13 @@ define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind
define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind {
; VLX-LABEL: test128_14:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled (%rdi){1to4}, %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_14:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm2
; NoVLX-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm2
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
@@ -904,14 +904,14 @@ define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind
define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32> %y1) nounwind {
; VLX-LABEL: test128_15:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpled %xmm1, %xmm2, %k1
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k1 {%k1}
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_15:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm2
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm3
; NoVLX-NEXT: vpcmpgtd %xmm3, %xmm0, %xmm3
@@ -930,14 +930,14 @@ define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32
define <2 x i64> @test128_16(<2 x i64> %x, i64* %yb.ptr, <2 x i64> %x1, <2 x i64> %y1) nounwind {
; VLX-LABEL: test128_16:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleq %xmm1, %xmm2, %k1
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k1 {%k1}
; VLX-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_16:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm2
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm3
; NoVLX-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm3
@@ -956,13 +956,13 @@ define <2 x i64> @test128_16(<2 x i64> %x, i64* %yb.ptr, <2 x i64> %x1, <2 x i64
define <4 x i32> @test128_17(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_17:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_17:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -976,13 +976,13 @@ define <4 x i32> @test128_17(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
define <4 x i32> @test128_18(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_18:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_18:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; NoVLX-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -996,13 +996,13 @@ define <4 x i32> @test128_18(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
define <4 x i32> @test128_19(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_19:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpnltud (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_19:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vpmaxud (%rdi), %xmm0, %xmm2
; NoVLX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
; NoVLX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
@@ -1015,13 +1015,13 @@ define <4 x i32> @test128_19(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
define <4 x i32> @test128_20(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
; VLX-LABEL: test128_20:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpcmpleud (%rdi), %xmm0, %k1
; VLX-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; VLX-NEXT: retq
;
; NoVLX-LABEL: test128_20:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpmaxud %xmm0, %xmm2, %xmm3
; NoVLX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
diff --git a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
index e4b626f52a8..53e8a5c0644 100644
--- a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
@@ -4,13 +4,13 @@
define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqb_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqb_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -122,13 +122,13 @@ entry:
define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqb_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqb_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -241,14 +241,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -363,14 +363,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -487,13 +487,13 @@ entry:
define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqb_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqb_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -610,13 +610,13 @@ entry:
define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqb_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqb_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -734,14 +734,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -861,14 +861,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -990,14 +990,14 @@ entry:
define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqb_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqb %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqb_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1037,14 +1037,14 @@ entry:
define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqb_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqb (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqb_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1085,7 +1085,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -1093,7 +1093,7 @@ define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1144,7 +1144,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqb (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -1152,7 +1152,7 @@ define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask_mem(i32 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1205,14 +1205,14 @@ entry:
define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -1232,14 +1232,14 @@ entry:
define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -1260,7 +1260,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -1268,7 +1268,7 @@ define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -1291,7 +1291,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -1299,7 +1299,7 @@ define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqw (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -1324,13 +1324,13 @@ entry:
define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1397,13 +1397,13 @@ entry:
define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1471,14 +1471,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1548,14 +1548,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1627,13 +1627,13 @@ entry:
define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1705,13 +1705,13 @@ entry:
define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1784,14 +1784,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1866,14 +1866,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -1950,14 +1950,14 @@ entry:
define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -2069,14 +2069,14 @@ entry:
define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -2189,7 +2189,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -2197,7 +2197,7 @@ define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -2312,7 +2312,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -2320,7 +2320,7 @@ define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -2437,14 +2437,14 @@ entry:
define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -2561,14 +2561,14 @@ entry:
define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -2686,7 +2686,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -2694,7 +2694,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -2814,7 +2814,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -2822,7 +2822,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -2944,14 +2944,14 @@ entry:
define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -3293,14 +3293,14 @@ entry:
define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqw_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -3558,7 +3558,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -3566,7 +3566,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -3919,7 +3919,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -3927,7 +3927,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask_mem(i32 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -4197,14 +4197,14 @@ entry:
define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -4253,14 +4253,14 @@ entry:
define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -4310,7 +4310,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -4318,7 +4318,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -4388,7 +4388,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -4396,7 +4396,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqd (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -4468,14 +4468,14 @@ entry:
define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -4527,7 +4527,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -4535,7 +4535,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -4609,14 +4609,14 @@ entry:
define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -4664,14 +4664,14 @@ entry:
define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -4720,7 +4720,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -4728,7 +4728,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -4797,7 +4797,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -4805,7 +4805,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqd (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -4876,14 +4876,14 @@ entry:
define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -4934,7 +4934,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -4942,7 +4942,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -5015,13 +5015,13 @@ entry:
define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5056,13 +5056,13 @@ entry:
define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5098,14 +5098,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5161,14 +5161,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5226,13 +5226,13 @@ entry:
define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5270,14 +5270,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5337,13 +5337,13 @@ entry:
define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5384,13 +5384,13 @@ entry:
define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5432,14 +5432,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5501,14 +5501,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5572,13 +5572,13 @@ entry:
define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5622,14 +5622,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5695,7 +5695,7 @@ entry:
define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -5703,7 +5703,7 @@ define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
@@ -5724,7 +5724,7 @@ entry:
define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -5732,7 +5732,7 @@ define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
@@ -5754,7 +5754,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -5763,7 +5763,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
@@ -5787,7 +5787,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -5796,7 +5796,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -5822,7 +5822,7 @@ entry:
define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -5830,7 +5830,7 @@ define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
@@ -5853,7 +5853,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -5862,7 +5862,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -5889,14 +5889,14 @@ entry:
define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -5962,14 +5962,14 @@ entry:
define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6036,7 +6036,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -6044,7 +6044,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6113,7 +6113,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -6121,7 +6121,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6192,14 +6192,14 @@ entry:
define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6267,7 +6267,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -6275,7 +6275,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6347,14 +6347,14 @@ entry:
define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6425,14 +6425,14 @@ entry:
define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6504,7 +6504,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -6512,7 +6512,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6586,7 +6586,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -6594,7 +6594,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6670,14 +6670,14 @@ entry:
define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6750,7 +6750,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -6758,7 +6758,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6835,14 +6835,14 @@ entry:
define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -6951,14 +6951,14 @@ entry:
define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -7068,7 +7068,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -7076,7 +7076,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -7188,7 +7188,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -7196,7 +7196,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -7310,14 +7310,14 @@ entry:
define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -7428,7 +7428,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -7436,7 +7436,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -7551,14 +7551,14 @@ entry:
define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -7672,14 +7672,14 @@ entry:
define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -7794,7 +7794,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -7802,7 +7802,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -7919,7 +7919,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -7927,7 +7927,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -8046,14 +8046,14 @@ entry:
define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -8169,7 +8169,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -8177,7 +8177,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -8297,14 +8297,14 @@ entry:
define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
@@ -8325,14 +8325,14 @@ entry:
define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
@@ -8354,7 +8354,7 @@ entry:
define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -8362,7 +8362,7 @@ define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -8396,7 +8396,7 @@ entry:
define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -8404,7 +8404,7 @@ define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -8440,14 +8440,14 @@ entry:
define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
@@ -8471,7 +8471,7 @@ entry:
define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -8479,7 +8479,7 @@ define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -8517,14 +8517,14 @@ entry:
define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -8557,14 +8557,14 @@ entry:
define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -8598,7 +8598,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -8606,7 +8606,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -8652,7 +8652,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -8660,7 +8660,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -8708,14 +8708,14 @@ entry:
define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
@@ -8751,7 +8751,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -8759,7 +8759,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -8809,14 +8809,14 @@ entry:
define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -8848,14 +8848,14 @@ entry:
define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -8888,7 +8888,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -8896,7 +8896,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -8941,7 +8941,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -8949,7 +8949,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -8996,14 +8996,14 @@ entry:
define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
@@ -9038,7 +9038,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -9046,7 +9046,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -9095,13 +9095,13 @@ entry:
define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9136,13 +9136,13 @@ entry:
define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9178,14 +9178,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9233,14 +9233,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9290,13 +9290,13 @@ entry:
define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9334,14 +9334,14 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9393,13 +9393,13 @@ entry:
define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9440,13 +9440,13 @@ entry:
define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9488,14 +9488,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9549,14 +9549,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9612,13 +9612,13 @@ entry:
define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9662,14 +9662,14 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -9727,7 +9727,7 @@ entry:
define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -9735,7 +9735,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -9785,7 +9785,7 @@ entry:
define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -9793,7 +9793,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -9844,7 +9844,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -9853,7 +9853,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -9924,7 +9924,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -9933,7 +9933,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rsi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -10006,7 +10006,7 @@ entry:
define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -10014,7 +10014,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b)
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -10067,7 +10067,7 @@ entry:
define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -10076,7 +10076,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -10151,7 +10151,7 @@ entry:
define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -10159,7 +10159,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -10208,7 +10208,7 @@ entry:
define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -10216,7 +10216,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -10266,7 +10266,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -10275,7 +10275,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -10345,7 +10345,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -10354,7 +10354,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rsi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -10426,7 +10426,7 @@ entry:
define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -10434,7 +10434,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -10486,7 +10486,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -10495,7 +10495,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -10569,14 +10569,14 @@ entry:
define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -10612,14 +10612,14 @@ entry:
define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -10656,7 +10656,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -10664,7 +10664,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -10721,7 +10721,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -10729,7 +10729,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -10788,14 +10788,14 @@ entry:
define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -10834,7 +10834,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -10842,7 +10842,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -10903,14 +10903,14 @@ entry:
define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -10952,14 +10952,14 @@ entry:
define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11002,7 +11002,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -11010,7 +11010,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11073,7 +11073,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -11081,7 +11081,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11146,14 +11146,14 @@ entry:
define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11198,7 +11198,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -11206,7 +11206,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11273,7 +11273,7 @@ entry:
define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -11281,7 +11281,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -11298,7 +11298,7 @@ entry:
define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -11306,7 +11306,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -11324,7 +11324,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -11333,7 +11333,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -11353,7 +11353,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -11362,7 +11362,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -11384,7 +11384,7 @@ entry:
define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -11392,7 +11392,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -11411,7 +11411,7 @@ entry:
define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -11420,7 +11420,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -11443,14 +11443,14 @@ entry:
define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11514,14 +11514,14 @@ entry:
define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11586,7 +11586,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -11594,7 +11594,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11661,7 +11661,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -11669,7 +11669,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11738,14 +11738,14 @@ entry:
define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11811,7 +11811,7 @@ entry:
define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -11819,7 +11819,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11889,14 +11889,14 @@ entry:
define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -11965,14 +11965,14 @@ entry:
define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12042,7 +12042,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -12050,7 +12050,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12122,7 +12122,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -12130,7 +12130,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12204,14 +12204,14 @@ entry:
define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12282,7 +12282,7 @@ entry:
define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -12290,7 +12290,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12365,13 +12365,13 @@ entry:
define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12483,13 +12483,13 @@ entry:
define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12602,14 +12602,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12724,14 +12724,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12848,13 +12848,13 @@ entry:
define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -12971,13 +12971,13 @@ entry:
define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13095,14 +13095,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13222,14 +13222,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13351,14 +13351,14 @@ entry:
define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13398,14 +13398,14 @@ entry:
define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtb (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13446,7 +13446,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -13454,7 +13454,7 @@ define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask(i32 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13505,7 +13505,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtb (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -13513,7 +13513,7 @@ define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem(i32 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13566,14 +13566,14 @@ entry:
define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -13593,14 +13593,14 @@ entry:
define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -13621,7 +13621,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -13629,7 +13629,7 @@ define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -13652,7 +13652,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -13660,7 +13660,7 @@ define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtw (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -13685,13 +13685,13 @@ entry:
define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13758,13 +13758,13 @@ entry:
define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13832,14 +13832,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13909,14 +13909,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -13988,13 +13988,13 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14066,13 +14066,13 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14145,14 +14145,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14227,14 +14227,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14311,14 +14311,14 @@ entry:
define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14430,14 +14430,14 @@ entry:
define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14550,7 +14550,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -14558,7 +14558,7 @@ define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask(i16 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14673,7 +14673,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -14681,7 +14681,7 @@ define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14798,14 +14798,14 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -14922,14 +14922,14 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -15047,7 +15047,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -15055,7 +15055,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask(i16 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -15175,7 +15175,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -15183,7 +15183,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -15305,14 +15305,14 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -15654,14 +15654,14 @@ entry:
define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -15919,7 +15919,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -15927,7 +15927,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask(i32 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -16280,7 +16280,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -16288,7 +16288,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem(i32 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -16558,14 +16558,14 @@ entry:
define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -16614,14 +16614,14 @@ entry:
define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -16671,7 +16671,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -16679,7 +16679,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -16749,7 +16749,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -16757,7 +16757,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -16829,14 +16829,14 @@ entry:
define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -16888,7 +16888,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -16896,7 +16896,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -16970,14 +16970,14 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -17025,14 +17025,14 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -17081,7 +17081,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -17089,7 +17089,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -17158,7 +17158,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -17166,7 +17166,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -17237,14 +17237,14 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -17295,7 +17295,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -17303,7 +17303,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -17376,13 +17376,13 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17417,13 +17417,13 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17459,14 +17459,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17522,14 +17522,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17587,13 +17587,13 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17631,14 +17631,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17698,13 +17698,13 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17745,13 +17745,13 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17793,14 +17793,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17862,14 +17862,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17933,13 +17933,13 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -17983,14 +17983,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18056,7 +18056,7 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -18064,7 +18064,7 @@ define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
@@ -18085,7 +18085,7 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -18093,7 +18093,7 @@ define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
@@ -18115,7 +18115,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -18124,7 +18124,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
@@ -18148,7 +18148,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -18157,7 +18157,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -18183,7 +18183,7 @@ entry:
define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -18191,7 +18191,7 @@ define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
@@ -18214,7 +18214,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -18223,7 +18223,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -18250,14 +18250,14 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18323,14 +18323,14 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18397,7 +18397,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -18405,7 +18405,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18474,7 +18474,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -18482,7 +18482,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18553,14 +18553,14 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18628,7 +18628,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -18636,7 +18636,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18708,14 +18708,14 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18786,14 +18786,14 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18865,7 +18865,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -18873,7 +18873,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -18947,7 +18947,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -18955,7 +18955,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19031,14 +19031,14 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19111,7 +19111,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -19119,7 +19119,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19196,14 +19196,14 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19312,14 +19312,14 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19429,7 +19429,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -19437,7 +19437,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask(i16 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19549,7 +19549,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -19557,7 +19557,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19671,14 +19671,14 @@ entry:
define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19789,7 +19789,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -19797,7 +19797,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b(i16 zeroext %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -19912,14 +19912,14 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -20033,14 +20033,14 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -20155,7 +20155,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -20163,7 +20163,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask(i16 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -20280,7 +20280,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -20288,7 +20288,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -20407,14 +20407,14 @@ entry:
define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -20530,7 +20530,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -20538,7 +20538,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b(i16 zeroext %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -20658,14 +20658,14 @@ entry:
define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
@@ -20686,14 +20686,14 @@ entry:
define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
@@ -20715,7 +20715,7 @@ entry:
define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -20723,7 +20723,7 @@ define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -20757,7 +20757,7 @@ entry:
define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -20765,7 +20765,7 @@ define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -20801,14 +20801,14 @@ entry:
define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
@@ -20832,7 +20832,7 @@ entry:
define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -20840,7 +20840,7 @@ define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -20878,14 +20878,14 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -20918,14 +20918,14 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -20959,7 +20959,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -20967,7 +20967,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -21013,7 +21013,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -21021,7 +21021,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -21069,14 +21069,14 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
@@ -21112,7 +21112,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -21120,7 +21120,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -21170,14 +21170,14 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -21209,14 +21209,14 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -21249,7 +21249,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -21257,7 +21257,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -21302,7 +21302,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -21310,7 +21310,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rsi), %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -21357,14 +21357,14 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
@@ -21399,7 +21399,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -21407,7 +21407,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -21456,13 +21456,13 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21497,13 +21497,13 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21539,14 +21539,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21594,14 +21594,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21651,13 +21651,13 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21695,14 +21695,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21754,13 +21754,13 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21801,13 +21801,13 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21849,14 +21849,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21910,14 +21910,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -21973,13 +21973,13 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -22023,14 +22023,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -22088,7 +22088,7 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -22096,7 +22096,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -22146,7 +22146,7 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -22154,7 +22154,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -22205,7 +22205,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -22214,7 +22214,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -22285,7 +22285,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -22294,7 +22294,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rsi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -22367,7 +22367,7 @@ entry:
define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -22375,7 +22375,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -22428,7 +22428,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -22437,7 +22437,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -22512,7 +22512,7 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -22520,7 +22520,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -22569,7 +22569,7 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -22577,7 +22577,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -22627,7 +22627,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -22636,7 +22636,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -22706,7 +22706,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -22715,7 +22715,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rsi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -22787,7 +22787,7 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -22795,7 +22795,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -22847,7 +22847,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -22856,7 +22856,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -22930,14 +22930,14 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -22973,14 +22973,14 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23017,7 +23017,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -23025,7 +23025,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23082,7 +23082,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -23090,7 +23090,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23149,14 +23149,14 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23195,7 +23195,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -23203,7 +23203,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23264,14 +23264,14 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23313,14 +23313,14 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23363,7 +23363,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -23371,7 +23371,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23434,7 +23434,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -23442,7 +23442,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23507,14 +23507,14 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23559,7 +23559,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -23567,7 +23567,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23634,7 +23634,7 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -23642,7 +23642,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -23659,7 +23659,7 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -23667,7 +23667,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -23685,7 +23685,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -23694,7 +23694,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -23714,7 +23714,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -23723,7 +23723,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -23745,7 +23745,7 @@ entry:
define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -23753,7 +23753,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -23772,7 +23772,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -23781,7 +23781,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -23804,14 +23804,14 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23875,14 +23875,14 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -23947,7 +23947,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -23955,7 +23955,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24022,7 +24022,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -24030,7 +24030,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24099,14 +24099,14 @@ entry:
define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24172,7 +24172,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -24180,7 +24180,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24250,14 +24250,14 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24326,14 +24326,14 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24403,7 +24403,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -24411,7 +24411,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24483,7 +24483,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -24491,7 +24491,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24565,14 +24565,14 @@ entry:
define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24643,7 +24643,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -24651,7 +24651,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24726,13 +24726,13 @@ entry:
define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleb %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24846,13 +24846,13 @@ entry:
define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -24968,14 +24968,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25092,14 +25092,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25219,13 +25219,13 @@ entry:
define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleb %xmm0, %xmm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25344,13 +25344,13 @@ entry:
define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25471,14 +25471,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25600,14 +25600,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25732,14 +25732,14 @@ entry:
define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleb %ymm0, %ymm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25781,14 +25781,14 @@ entry:
define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltb (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25832,7 +25832,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleb %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -25840,7 +25840,7 @@ define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask(i32 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25893,7 +25893,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltb (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -25901,7 +25901,7 @@ define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem(i32 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -25957,14 +25957,14 @@ entry:
define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -25986,14 +25986,14 @@ entry:
define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -26017,7 +26017,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -26025,7 +26025,7 @@ define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -26050,7 +26050,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -26058,7 +26058,7 @@ define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -26086,13 +26086,13 @@ entry:
define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26161,13 +26161,13 @@ entry:
define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26238,14 +26238,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26317,14 +26317,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26399,13 +26399,13 @@ entry:
define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26479,13 +26479,13 @@ entry:
define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26561,14 +26561,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26645,14 +26645,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26732,14 +26732,14 @@ entry:
define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmplew %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26853,14 +26853,14 @@ entry:
define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -26976,7 +26976,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -26984,7 +26984,7 @@ define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask(i16 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -27101,7 +27101,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -27109,7 +27109,7 @@ define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -27229,14 +27229,14 @@ entry:
define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmplew %ymm0, %ymm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -27355,14 +27355,14 @@ entry:
define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -27483,7 +27483,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -27491,7 +27491,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask(i16 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -27613,7 +27613,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -27621,7 +27621,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -27746,14 +27746,14 @@ entry:
define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmplew %zmm0, %zmm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -28098,14 +28098,14 @@ entry:
define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgew_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -28368,7 +28368,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -28376,7 +28376,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask(i32 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -28732,7 +28732,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -28740,7 +28740,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask_mem(i32 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -29015,14 +29015,14 @@ entry:
define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -29073,14 +29073,14 @@ entry:
define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -29133,7 +29133,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -29141,7 +29141,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -29211,7 +29211,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -29219,7 +29219,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -29292,14 +29292,14 @@ entry:
define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -29353,7 +29353,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -29361,7 +29361,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -29435,14 +29435,14 @@ entry:
define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -29492,14 +29492,14 @@ entry:
define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -29551,7 +29551,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -29559,7 +29559,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k1
@@ -29628,7 +29628,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -29636,7 +29636,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -29708,14 +29708,14 @@ entry:
define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -29768,7 +29768,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -29776,7 +29776,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -29849,13 +29849,13 @@ entry:
define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -29892,13 +29892,13 @@ entry:
define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -29937,14 +29937,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30000,14 +30000,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30066,13 +30066,13 @@ entry:
define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30112,14 +30112,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30179,13 +30179,13 @@ entry:
define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30228,13 +30228,13 @@ entry:
define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30279,14 +30279,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30348,14 +30348,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30420,13 +30420,13 @@ entry:
define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30472,14 +30472,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30545,7 +30545,7 @@ entry:
define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -30553,7 +30553,7 @@ define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
@@ -30574,7 +30574,7 @@ entry:
define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -30582,7 +30582,7 @@ define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
@@ -30604,7 +30604,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -30613,7 +30613,7 @@ define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
@@ -30637,7 +30637,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -30646,7 +30646,7 @@ define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -30672,7 +30672,7 @@ entry:
define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -30680,7 +30680,7 @@ define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
@@ -30703,7 +30703,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -30712,7 +30712,7 @@ define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -30739,14 +30739,14 @@ entry:
define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30812,14 +30812,14 @@ entry:
define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30886,7 +30886,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -30894,7 +30894,7 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -30963,7 +30963,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -30971,7 +30971,7 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31042,14 +31042,14 @@ entry:
define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31117,7 +31117,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -31125,7 +31125,7 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31197,14 +31197,14 @@ entry:
define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31275,14 +31275,14 @@ entry:
define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31354,7 +31354,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -31362,7 +31362,7 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31436,7 +31436,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -31444,7 +31444,7 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31520,14 +31520,14 @@ entry:
define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31600,7 +31600,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -31608,7 +31608,7 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31685,14 +31685,14 @@ entry:
define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31801,14 +31801,14 @@ entry:
define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -31918,7 +31918,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -31926,7 +31926,7 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask(i16 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -32038,7 +32038,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -32046,7 +32046,7 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -32160,14 +32160,14 @@ entry:
define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -32278,7 +32278,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -32286,7 +32286,7 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b(i16 zeroext %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -32401,14 +32401,14 @@ entry:
define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -32522,14 +32522,14 @@ entry:
define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -32644,7 +32644,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -32652,7 +32652,7 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask(i16 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -32769,7 +32769,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -32777,7 +32777,7 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -32896,14 +32896,14 @@ entry:
define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -33019,7 +33019,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -33027,7 +33027,7 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b(i16 zeroext %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -33147,14 +33147,14 @@ entry:
define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -33177,14 +33177,14 @@ entry:
define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -33209,7 +33209,7 @@ entry:
define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -33217,7 +33217,7 @@ define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -33251,7 +33251,7 @@ entry:
define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -33259,7 +33259,7 @@ define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -33296,14 +33296,14 @@ entry:
define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -33329,7 +33329,7 @@ entry:
define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -33337,7 +33337,7 @@ define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -33375,14 +33375,14 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -33417,14 +33417,14 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -33461,7 +33461,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -33469,7 +33469,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -33515,7 +33515,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -33523,7 +33523,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -33572,14 +33572,14 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -33617,7 +33617,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -33625,7 +33625,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -33675,14 +33675,14 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -33716,14 +33716,14 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -33759,7 +33759,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -33767,7 +33767,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
; NoVLX-NEXT: kshiftlw $15, %k0, %k1
@@ -33812,7 +33812,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -33820,7 +33820,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -33868,14 +33868,14 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -33912,7 +33912,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -33920,7 +33920,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; NoVLX-NEXT: kmovw %edi, %k0
@@ -33969,13 +33969,13 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34012,13 +34012,13 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34057,14 +34057,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34112,14 +34112,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34170,13 +34170,13 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34216,14 +34216,14 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34275,13 +34275,13 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34324,13 +34324,13 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34375,14 +34375,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34436,14 +34436,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34500,13 +34500,13 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34552,14 +34552,14 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -34617,7 +34617,7 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -34625,7 +34625,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -34677,7 +34677,7 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -34685,7 +34685,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -34739,7 +34739,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -34748,7 +34748,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -34821,7 +34821,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -34830,7 +34830,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -34906,7 +34906,7 @@ entry:
define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -34914,7 +34914,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -34969,7 +34969,7 @@ entry:
define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -34978,7 +34978,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -35055,7 +35055,7 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -35063,7 +35063,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -35114,7 +35114,7 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -35122,7 +35122,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -35175,7 +35175,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -35184,7 +35184,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -35256,7 +35256,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -35265,7 +35265,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -35340,7 +35340,7 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -35348,7 +35348,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -35402,7 +35402,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -35411,7 +35411,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; NoVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
@@ -35487,14 +35487,14 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -35532,14 +35532,14 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -35579,7 +35579,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -35587,7 +35587,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -35646,7 +35646,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -35654,7 +35654,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -35716,14 +35716,14 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -35764,7 +35764,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -35772,7 +35772,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -35835,14 +35835,14 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -35886,14 +35886,14 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -35939,7 +35939,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -35947,7 +35947,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36012,7 +36012,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -36020,7 +36020,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36088,14 +36088,14 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36142,7 +36142,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -36150,7 +36150,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36219,7 +36219,7 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -36227,7 +36227,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -36244,7 +36244,7 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -36252,7 +36252,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -36270,7 +36270,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -36279,7 +36279,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -36299,7 +36299,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -36308,7 +36308,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -36330,7 +36330,7 @@ entry:
define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -36338,7 +36338,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -36357,7 +36357,7 @@ entry:
define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -36366,7 +36366,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -36389,14 +36389,14 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36460,14 +36460,14 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36532,7 +36532,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -36540,7 +36540,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36607,7 +36607,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -36615,7 +36615,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36684,14 +36684,14 @@ entry:
define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36757,7 +36757,7 @@ entry:
define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -36765,7 +36765,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36835,14 +36835,14 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36911,14 +36911,14 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -36988,7 +36988,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -36996,7 +36996,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37068,7 +37068,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -37076,7 +37076,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37150,14 +37150,14 @@ entry:
define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37228,7 +37228,7 @@ entry:
define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -37236,7 +37236,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37311,13 +37311,13 @@ entry:
define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultb_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltub %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultb_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37432,13 +37432,13 @@ entry:
define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultb_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltub (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultb_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37554,14 +37554,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37679,14 +37679,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37806,13 +37806,13 @@ entry:
define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultb_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltub %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultb_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -37932,13 +37932,13 @@ entry:
define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultb_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltub (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultb_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38059,14 +38059,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38189,14 +38189,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38321,14 +38321,14 @@ entry:
define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultb_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltub %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultb_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38371,14 +38371,14 @@ entry:
define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultb_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltub (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultb_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38422,7 +38422,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltub %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -38430,7 +38430,7 @@ define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask(i32 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38484,7 +38484,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltub (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -38492,7 +38492,7 @@ define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask_mem(i32 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38548,14 +38548,14 @@ entry:
define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -38578,14 +38578,14 @@ entry:
define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
@@ -38609,7 +38609,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -38617,7 +38617,7 @@ define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -38643,7 +38643,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -38651,7 +38651,7 @@ define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
@@ -38679,13 +38679,13 @@ entry:
define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38755,13 +38755,13 @@ entry:
define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38832,14 +38832,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38912,14 +38912,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -38994,13 +38994,13 @@ entry:
define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39075,13 +39075,13 @@ entry:
define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39157,14 +39157,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39242,14 +39242,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39329,14 +39329,14 @@ entry:
define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39451,14 +39451,14 @@ entry:
define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39574,7 +39574,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -39582,7 +39582,7 @@ define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask(i16 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39700,7 +39700,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -39708,7 +39708,7 @@ define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39828,14 +39828,14 @@ entry:
define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -39955,14 +39955,14 @@ entry:
define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -40083,7 +40083,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -40091,7 +40091,7 @@ define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask(i16 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -40214,7 +40214,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -40222,7 +40222,7 @@ define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -40347,14 +40347,14 @@ entry:
define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -40701,14 +40701,14 @@ entry:
define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultw_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -40971,7 +40971,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -40979,7 +40979,7 @@ define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask(i32 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -41337,7 +41337,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -41345,7 +41345,7 @@ define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask_mem(i32 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -41620,14 +41620,14 @@ entry:
define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -41679,14 +41679,14 @@ entry:
define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
@@ -41739,7 +41739,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -41747,7 +41747,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -41820,7 +41820,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -41828,7 +41828,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
@@ -41903,14 +41903,14 @@ entry:
define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -41965,7 +41965,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -41973,7 +41973,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -42050,14 +42050,14 @@ entry:
define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -42108,14 +42108,14 @@ entry:
define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
@@ -42167,7 +42167,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -42175,7 +42175,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -42247,7 +42247,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -42255,7 +42255,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
@@ -42329,14 +42329,14 @@ entry:
define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -42390,7 +42390,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -42398,7 +42398,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -42474,13 +42474,13 @@ entry:
define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42518,13 +42518,13 @@ entry:
define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42563,14 +42563,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42629,14 +42629,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42697,13 +42697,13 @@ entry:
define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42744,14 +42744,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42814,13 +42814,13 @@ entry:
define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42864,13 +42864,13 @@ entry:
define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42915,14 +42915,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -42987,14 +42987,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43061,13 +43061,13 @@ entry:
define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43114,14 +43114,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43190,7 +43190,7 @@ entry:
define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -43198,7 +43198,7 @@ define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
@@ -43219,7 +43219,7 @@ entry:
define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -43227,7 +43227,7 @@ define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
@@ -43249,7 +43249,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -43258,7 +43258,7 @@ define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
@@ -43282,7 +43282,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -43291,7 +43291,7 @@ define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -43317,7 +43317,7 @@ entry:
define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -43325,7 +43325,7 @@ define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
@@ -43348,7 +43348,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -43357,7 +43357,7 @@ define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -43384,14 +43384,14 @@ entry:
define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43457,14 +43457,14 @@ entry:
define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43531,7 +43531,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -43539,7 +43539,7 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43608,7 +43608,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -43616,7 +43616,7 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43687,14 +43687,14 @@ entry:
define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43762,7 +43762,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -43770,7 +43770,7 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43842,14 +43842,14 @@ entry:
define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43920,14 +43920,14 @@ entry:
define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -43999,7 +43999,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -44007,7 +44007,7 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44081,7 +44081,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -44089,7 +44089,7 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44165,14 +44165,14 @@ entry:
define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44245,7 +44245,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -44253,7 +44253,7 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44330,14 +44330,14 @@ entry:
define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44446,14 +44446,14 @@ entry:
define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44563,7 +44563,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -44571,7 +44571,7 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask(i16 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44683,7 +44683,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -44691,7 +44691,7 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44805,14 +44805,14 @@ entry:
define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -44923,7 +44923,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -44931,7 +44931,7 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b(i16 zeroext %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -45046,14 +45046,14 @@ entry:
define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -45167,14 +45167,14 @@ entry:
define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -45289,7 +45289,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -45297,7 +45297,7 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask(i16 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -45414,7 +45414,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -45422,7 +45422,7 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -45541,14 +45541,14 @@ entry:
define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -45664,7 +45664,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -45672,7 +45672,7 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b(i16 zeroext %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -45792,14 +45792,14 @@ entry:
define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -45823,14 +45823,14 @@ entry:
define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
@@ -45855,7 +45855,7 @@ entry:
define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -45863,7 +45863,7 @@ define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -45900,7 +45900,7 @@ entry:
define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -45908,7 +45908,7 @@ define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
@@ -45947,14 +45947,14 @@ entry:
define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -45981,7 +45981,7 @@ entry:
define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
@@ -45989,7 +45989,7 @@ define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -46030,14 +46030,14 @@ entry:
define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -46073,14 +46073,14 @@ entry:
define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
@@ -46117,7 +46117,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -46125,7 +46125,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -46174,7 +46174,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -46182,7 +46182,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
@@ -46233,14 +46233,14 @@ entry:
define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -46279,7 +46279,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -46287,7 +46287,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -46340,14 +46340,14 @@ entry:
define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -46382,14 +46382,14 @@ entry:
define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rdi), %xmm1, %xmm1
@@ -46425,7 +46425,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -46433,7 +46433,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; NoVLX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -46481,7 +46481,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -46489,7 +46489,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpxor (%rsi), %xmm1, %xmm1
@@ -46539,14 +46539,14 @@ entry:
define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -46584,7 +46584,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -46592,7 +46592,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -46644,13 +46644,13 @@ entry:
define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -46688,13 +46688,13 @@ entry:
define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -46733,14 +46733,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -46791,14 +46791,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -46851,13 +46851,13 @@ entry:
define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -46898,14 +46898,14 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -46960,13 +46960,13 @@ entry:
define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -47010,13 +47010,13 @@ entry:
define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -47061,14 +47061,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -47125,14 +47125,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -47191,13 +47191,13 @@ entry:
define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -47244,14 +47244,14 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -47312,7 +47312,7 @@ entry:
define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -47320,7 +47320,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
@@ -47373,7 +47373,7 @@ entry:
define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -47381,7 +47381,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
@@ -47435,7 +47435,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -47444,7 +47444,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
@@ -47518,7 +47518,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -47527,7 +47527,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpxor (%rsi), %ymm1, %ymm1
@@ -47603,7 +47603,7 @@ entry:
define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -47611,7 +47611,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -47667,7 +47667,7 @@ entry:
define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -47676,7 +47676,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -47754,7 +47754,7 @@ entry:
define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -47762,7 +47762,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
@@ -47814,7 +47814,7 @@ entry:
define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -47822,7 +47822,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpxor (%rdi), %ymm1, %ymm1
@@ -47875,7 +47875,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -47884,7 +47884,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
; NoVLX-NEXT: vpxor %ymm2, %ymm1, %ymm1
@@ -47957,7 +47957,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -47966,7 +47966,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpxor (%rsi), %ymm1, %ymm1
@@ -48041,7 +48041,7 @@ entry:
define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -48049,7 +48049,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -48104,7 +48104,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -48113,7 +48113,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; NoVLX-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -48190,14 +48190,14 @@ entry:
define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48236,14 +48236,14 @@ entry:
define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48283,7 +48283,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -48291,7 +48291,7 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48351,7 +48351,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -48359,7 +48359,7 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48421,14 +48421,14 @@ entry:
define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48470,7 +48470,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -48478,7 +48478,7 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48542,14 +48542,14 @@ entry:
define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48594,14 +48594,14 @@ entry:
define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48647,7 +48647,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -48655,7 +48655,7 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48721,7 +48721,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -48729,7 +48729,7 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48797,14 +48797,14 @@ entry:
define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48852,7 +48852,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -48860,7 +48860,7 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -48930,7 +48930,7 @@ entry:
define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -48938,7 +48938,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -48955,7 +48955,7 @@ entry:
define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -48963,7 +48963,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -48981,7 +48981,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -48990,7 +48990,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -49010,7 +49010,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -49019,7 +49019,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -49041,7 +49041,7 @@ entry:
define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -49049,7 +49049,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -49068,7 +49068,7 @@ entry:
define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -49077,7 +49077,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -49100,14 +49100,14 @@ entry:
define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49171,14 +49171,14 @@ entry:
define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49243,7 +49243,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -49251,7 +49251,7 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49318,7 +49318,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -49326,7 +49326,7 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49395,14 +49395,14 @@ entry:
define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49468,7 +49468,7 @@ entry:
define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -49476,7 +49476,7 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49546,14 +49546,14 @@ entry:
define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49622,14 +49622,14 @@ entry:
define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49699,7 +49699,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -49707,7 +49707,7 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49779,7 +49779,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -49787,7 +49787,7 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49861,14 +49861,14 @@ entry:
define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -49939,7 +49939,7 @@ entry:
define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -49947,7 +49947,7 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -50023,14 +50023,14 @@ entry:
declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i32, i16, i32)
define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50079,14 +50079,14 @@ entry:
define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50136,14 +50136,14 @@ entry:
define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -50195,7 +50195,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask(i4 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
@@ -50204,7 +50204,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask(i4 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50261,7 +50261,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask_mem(i4 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
@@ -50270,7 +50270,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask_mem(i4 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50328,7 +50328,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b(i4 zeroext %__u, <2 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
@@ -50337,7 +50337,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b(i4 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50399,14 +50399,14 @@ entry:
define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50454,14 +50454,14 @@ entry:
define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50510,14 +50510,14 @@ entry:
define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -50568,7 +50568,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask(i4 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
@@ -50577,7 +50577,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask(i4 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50633,7 +50633,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask_mem(i4 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
@@ -50642,7 +50642,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask_mem(i4 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50699,7 +50699,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b(i4 zeroext %__u, <2 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
@@ -50708,7 +50708,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b(i4 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -50769,13 +50769,13 @@ entry:
define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -50810,13 +50810,13 @@ entry:
define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -50852,13 +50852,13 @@ entry:
define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -50896,7 +50896,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask(i4 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
@@ -50904,7 +50904,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask(i4 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -50947,7 +50947,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask_mem(i4 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
@@ -50955,7 +50955,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask_mem(i4 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -50999,7 +50999,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask_mem_b(i4 zeroext %__u, <2 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
@@ -51007,7 +51007,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask_mem_b(i4 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51055,13 +51055,13 @@ entry:
define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51102,13 +51102,13 @@ entry:
define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51150,13 +51150,13 @@ entry:
define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51200,7 +51200,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask(i4 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
@@ -51208,7 +51208,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask(i4 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51257,7 +51257,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask_mem(i4 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
@@ -51265,7 +51265,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask_mem(i4 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51315,7 +51315,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask_mem_b(i4 zeroext %__u, <2 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
@@ -51323,7 +51323,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask_mem_b(i4 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51377,7 +51377,7 @@ entry:
define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -51385,7 +51385,7 @@ define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
@@ -51406,7 +51406,7 @@ entry:
define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -51414,7 +51414,7 @@ define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
@@ -51436,7 +51436,7 @@ entry:
define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -51444,7 +51444,7 @@ define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, float*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
@@ -51467,7 +51467,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -51476,7 +51476,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
@@ -51500,7 +51500,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -51509,7 +51509,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -51534,7 +51534,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -51543,7 +51543,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
@@ -51571,14 +51571,14 @@ entry:
define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51644,14 +51644,14 @@ entry:
define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51718,14 +51718,14 @@ entry:
define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51793,7 +51793,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -51801,7 +51801,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51870,7 +51870,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -51878,7 +51878,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -51948,7 +51948,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -51956,7 +51956,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52029,14 +52029,14 @@ entry:
define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52107,14 +52107,14 @@ entry:
define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52186,14 +52186,14 @@ entry:
define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52266,7 +52266,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -52274,7 +52274,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52348,7 +52348,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -52356,7 +52356,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52431,7 +52431,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -52439,7 +52439,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52517,14 +52517,14 @@ entry:
define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52633,14 +52633,14 @@ entry:
define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52750,14 +52750,14 @@ entry:
define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52868,7 +52868,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v16i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -52876,7 +52876,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask(i16 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v16i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -52988,7 +52988,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v16i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -52996,7 +52996,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v16i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -53109,7 +53109,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v16i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -53117,7 +53117,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask_mem_b(i16 zeroext %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v16i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -53233,7 +53233,7 @@ entry:
define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_sae_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vzeroupper
@@ -53248,7 +53248,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_sae_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v16i1_v32i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovw %k0, %eax
@@ -53256,7 +53256,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_sae_mask(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v16i1_v32i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -53274,14 +53274,14 @@ entry:
define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -53395,14 +53395,14 @@ entry:
define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -53517,14 +53517,14 @@ entry:
define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -53640,7 +53640,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v16i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -53648,7 +53648,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask(i16 zeroext %__u, <8
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v16i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -53765,7 +53765,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v16i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -53773,7 +53773,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask_mem(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v16i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -53891,7 +53891,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, float* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v16i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to16}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -53899,7 +53899,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask_mem_b(i16 zeroext %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v16i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -54020,7 +54020,7 @@ entry:
define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqps_v16i1_v64i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzwl %ax, %eax
@@ -54028,7 +54028,7 @@ define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v16i1_v64i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzwl %ax, %eax
@@ -54044,7 +54044,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_sae_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqps_v16i1_v64i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -54053,7 +54053,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_sae_mask(i16 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v16i1_v64i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -54073,14 +54073,14 @@ entry:
declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i32, i8, i32)
define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
@@ -54101,14 +54101,14 @@ entry:
define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; NoVLX-NEXT: vpslld $31, %ymm0, %ymm0
@@ -54130,14 +54130,14 @@ entry:
define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
; VLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
@@ -54161,7 +54161,7 @@ entry:
define zeroext i4 @test_masked_vcmpoeqpd_v2i1_v4i1_mask(i2 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
@@ -54170,7 +54170,7 @@ define zeroext i4 @test_masked_vcmpoeqpd_v2i1_v4i1_mask(i2 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54198,7 +54198,7 @@ entry:
define zeroext i4 @test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem(i2 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
@@ -54207,7 +54207,7 @@ define zeroext i4 @test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem(i2 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54236,7 +54236,7 @@ entry:
define zeroext i4 @test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem_b(i2 zeroext %__u, <2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
@@ -54245,7 +54245,7 @@ define zeroext i4 @test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem_b(i2 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54278,14 +54278,14 @@ entry:
define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54318,14 +54318,14 @@ entry:
define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54359,14 +54359,14 @@ entry:
define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
@@ -54402,7 +54402,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask(i2 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
@@ -54411,7 +54411,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask(i2 zeroext %__u, <2 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54451,7 +54451,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem(i2 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
@@ -54460,7 +54460,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem(i2 zeroext %__u, <2
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54501,7 +54501,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b(i2 zeroext %__u, <2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
@@ -54510,7 +54510,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b(i2 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54555,14 +54555,14 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54594,14 +54594,14 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54634,14 +54634,14 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
; NoVLX-NEXT: vpextrb $8, %xmm0, %eax
@@ -54676,7 +54676,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask(i2 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
@@ -54685,7 +54685,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask(i2 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54724,7 +54724,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem(i2 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
@@ -54733,7 +54733,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem(i2 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54773,7 +54773,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b(i2 zeroext %__u, <2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
@@ -54782,7 +54782,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b(i2 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -54826,13 +54826,13 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -54867,13 +54867,13 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -54909,13 +54909,13 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -54953,7 +54953,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask(i2 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
@@ -54961,7 +54961,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask(i2 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55003,7 +55003,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem(i2 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
@@ -55011,7 +55011,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem(i2 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55054,7 +55054,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem_b(i2 zeroext %__u, <2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
@@ -55062,7 +55062,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem_b(i2 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55109,13 +55109,13 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55156,13 +55156,13 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55204,13 +55204,13 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55254,7 +55254,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask(i2 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
@@ -55262,7 +55262,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask(i2 zeroext %__u, <2 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55310,7 +55310,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem(i2 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
@@ -55318,7 +55318,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem(i2 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55367,7 +55367,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem_b(i2 zeroext %__u, <2 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
@@ -55375,7 +55375,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem_b(i2 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -55428,7 +55428,7 @@ entry:
define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -55436,7 +55436,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -55486,7 +55486,7 @@ entry:
define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -55494,7 +55494,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -55545,7 +55545,7 @@ entry:
define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -55553,7 +55553,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, double* %
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -55606,7 +55606,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask(i4 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
@@ -55616,7 +55616,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask(i4 zeroext %__u, <4 x i6
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -55674,7 +55674,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem(i4 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
@@ -55684,7 +55684,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem(i4 zeroext %__u, <4
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -55743,7 +55743,7 @@ entry:
define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem_b(i4 zeroext %__u, <4 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
@@ -55753,7 +55753,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem_b(i4 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -55816,7 +55816,7 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -55824,7 +55824,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -55873,7 +55873,7 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -55881,7 +55881,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
; NoVLX-NEXT: vpextrb $4, %xmm0, %eax
@@ -55931,7 +55931,7 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -55939,7 +55939,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, double*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vpmovqd %zmm0, %ymm0
@@ -55991,7 +55991,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask(i4 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
@@ -56001,7 +56001,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask(i4 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -56058,7 +56058,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem(i4 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
@@ -56068,7 +56068,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem(i4 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -56126,7 +56126,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem_b(i4 zeroext %__u, <4 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
@@ -56136,7 +56136,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem_b(i4 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; NoVLX-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; NoVLX-NEXT: kmovw %eax, %k1
@@ -56198,14 +56198,14 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56241,14 +56241,14 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56285,14 +56285,14 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56331,7 +56331,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask(i4 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
@@ -56340,7 +56340,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask(i4 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56384,7 +56384,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem(i4 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
@@ -56393,7 +56393,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem(i4 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56438,7 +56438,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem_b(i4 zeroext %__u, <4 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
@@ -56447,7 +56447,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem_b(i4 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56496,14 +56496,14 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56545,14 +56545,14 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56595,14 +56595,14 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56647,7 +56647,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask(i4 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
@@ -56656,7 +56656,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask(i4 zeroext %__u, <4 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56706,7 +56706,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem(i4 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
@@ -56715,7 +56715,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem(i4 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56766,7 +56766,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem_b(i4 zeroext %__u, <4 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
@@ -56775,7 +56775,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem_b(i4 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -56830,7 +56830,7 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -56838,7 +56838,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -56855,7 +56855,7 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -56863,7 +56863,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -56881,7 +56881,7 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -56889,7 +56889,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, double*
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -56908,7 +56908,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v16i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -56917,7 +56917,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v16i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -56937,7 +56937,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -56946,7 +56946,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -56967,7 +56967,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -56976,7 +56976,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -57000,7 +57000,7 @@ entry:
define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzbl %al, %eax
@@ -57009,7 +57009,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_sae_mask(<8 x i64> %__a, <8 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v16i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
@@ -57026,7 +57026,7 @@ entry:
define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_sae_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v16i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -57036,7 +57036,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_sae_mask(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v16i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -57056,14 +57056,14 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57127,14 +57127,14 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57199,14 +57199,14 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57272,7 +57272,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v32i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -57280,7 +57280,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v32i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57347,7 +57347,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -57355,7 +57355,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57423,7 +57423,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -57431,7 +57431,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57502,14 +57502,14 @@ entry:
define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; VLX-NEXT: kmovb %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v32i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
@@ -57525,7 +57525,7 @@ entry:
define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_sae_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v32i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovb %k0, %eax
@@ -57533,7 +57533,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_sae_mask(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v32i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -57552,14 +57552,14 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57628,14 +57628,14 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57705,14 +57705,14 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovq %k0, %rax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57783,7 +57783,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v64i1_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -57791,7 +57791,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v64i1_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57863,7 +57863,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -57871,7 +57871,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -57944,7 +57944,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, double* %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovq %k0, %rax
@@ -57952,7 +57952,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
@@ -58028,7 +58028,7 @@ entry:
define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzbl %al, %eax
@@ -58036,7 +58036,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v8i1_v64i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
@@ -58052,7 +58052,7 @@ entry:
define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_sae_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
; VLX-LABEL: test_masked_vcmpoeqpd_v8i1_v64i1_sae_mask:
-; VLX: # BB#0: # %entry
+; VLX: # %bb.0: # %entry
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
@@ -58061,7 +58061,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_sae_mask(i8 zeroext %__u, <
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v8i1_v64i1_sae_mask:
-; NoVLX: # BB#0: # %entry
+; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
@@ -58079,14 +58079,14 @@ entry:
; Test that we understand that cmpps with rounding zeros the upper bits of the mask register.
define i32 @test_cmpm_rnd_zero(<16 x float> %a, <16 x float> %b) {
; VLX-LABEL: test_cmpm_rnd_zero:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_cmpm_rnd_zero:
-; NoVLX: # BB#0:
+; NoVLX: # %bb.0:
; NoVLX-NEXT: pushq %rbp
; NoVLX-NEXT: .cfi_def_cfa_offset 16
; NoVLX-NEXT: .cfi_offset %rbp, -16
diff --git a/test/CodeGen/X86/avx512vl-vec-test-testn.ll b/test/CodeGen/X86/avx512vl-vec-test-testn.ll
index b0dd9c24359..65f9d9fc3d5 100644
--- a/test/CodeGen/X86/avx512vl-vec-test-testn.ll
+++ b/test/CodeGen/X86/avx512vl-vec-test-testn.ll
@@ -5,14 +5,14 @@
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_test_epi64_mask(<2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm_test_epi64_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_test_epi64_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: vptestmq %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -28,14 +28,14 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_test_epi32_mask(<2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm_test_epi32_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_test_epi32_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: vptestmd %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -52,7 +52,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm256_test_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm256_test_epi64_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -60,7 +60,7 @@ define zeroext i8 @TEST_mm256_test_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) lo
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm256_test_epi64_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: vptestmq %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -77,7 +77,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm256_test_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm256_test_epi32_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -85,7 +85,7 @@ define zeroext i8 @TEST_mm256_test_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) lo
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm256_test_epi32_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: vptestmd %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -102,7 +102,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_mask_test_epi64_mask(i8 %__U, <2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm_mask_test_epi64_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
@@ -110,7 +110,7 @@ define zeroext i8 @TEST_mm_mask_test_epi64_mask(i8 %__U, <2 x i64> %__A, <2 x i6
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_test_epi64_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
@@ -131,7 +131,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_mask_test_epi32_mask(i8 %__U, <2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm_mask_test_epi32_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
@@ -139,7 +139,7 @@ define zeroext i8 @TEST_mm_mask_test_epi32_mask(i8 %__U, <2 x i64> %__A, <2 x i6
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_test_epi32_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
@@ -162,7 +162,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm256_mask_test_epi64_mask(i8 %__U, <4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm256_mask_test_epi64_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
@@ -171,7 +171,7 @@ define zeroext i8 @TEST_mm256_mask_test_epi64_mask(i8 %__U, <4 x i64> %__A, <4 x
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm256_mask_test_epi64_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
@@ -193,7 +193,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm256_mask_test_epi32_mask(i8 %__U, <4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm256_mask_test_epi32_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
@@ -202,7 +202,7 @@ define zeroext i8 @TEST_mm256_mask_test_epi32_mask(i8 %__U, <4 x i64> %__A, <4 x
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm256_mask_test_epi32_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
@@ -223,14 +223,14 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_testn_epi64_mask(<2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm_testn_epi64_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_testn_epi64_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -246,14 +246,14 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_testn_epi32_mask(<2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm_testn_epi32_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_testn_epi32_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -270,7 +270,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm256_testn_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm256_testn_epi64_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -278,7 +278,7 @@ define zeroext i8 @TEST_mm256_testn_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) l
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm256_testn_epi64_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -295,7 +295,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm256_testn_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm256_testn_epi32_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -303,7 +303,7 @@ define zeroext i8 @TEST_mm256_testn_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) l
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm256_testn_epi32_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -320,7 +320,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_mask_testn_epi64_mask(i8 %__U, <2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm_mask_testn_epi64_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
@@ -328,7 +328,7 @@ define zeroext i8 @TEST_mm_mask_testn_epi64_mask(i8 %__U, <2 x i64> %__A, <2 x i
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_testn_epi64_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
@@ -349,7 +349,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm_mask_testn_epi32_mask(i8 %__U, <2 x i64> %__A, <2 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm_mask_testn_epi32_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
@@ -357,7 +357,7 @@ define zeroext i8 @TEST_mm_mask_testn_epi32_mask(i8 %__U, <2 x i64> %__A, <2 x i
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_testn_epi32_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
@@ -380,7 +380,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm256_mask_testn_epi64_mask(i8 %__U, <4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm256_mask_testn_epi64_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
@@ -389,7 +389,7 @@ define zeroext i8 @TEST_mm256_mask_testn_epi64_mask(i8 %__U, <4 x i64> %__A, <4
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm256_mask_testn_epi64_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
@@ -411,7 +411,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define zeroext i8 @TEST_mm256_mask_testn_epi32_mask(i8 %__U, <4 x i64> %__A, <4 x i64> %__B) local_unnamed_addr #0 {
; X86_64-LABEL: TEST_mm256_mask_testn_epi32_mask:
-; X86_64: # BB#0: # %entry
+; X86_64: # %bb.0: # %entry
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
@@ -420,7 +420,7 @@ define zeroext i8 @TEST_mm256_mask_testn_epi32_mask(i8 %__U, <4 x i64> %__A, <4
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm256_mask_testn_epi32_mask:
-; I386: # BB#0: # %entry
+; I386: # %bb.0: # %entry
; I386-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
diff --git a/test/CodeGen/X86/avx512vl-vpclmulqdq.ll b/test/CodeGen/X86/avx512vl-vpclmulqdq.ll
index 3db3b9ecff5..777a70db5a8 100644
--- a/test/CodeGen/X86/avx512vl-vpclmulqdq.ll
+++ b/test/CodeGen/X86/avx512vl-vpclmulqdq.ll
@@ -3,7 +3,7 @@
define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) {
; AVX512VL_VPCLMULQDQ-LABEL: test_x86_pclmulqdq:
-; AVX512VL_VPCLMULQDQ: # BB#0:
+; AVX512VL_VPCLMULQDQ: # %bb.0:
; AVX512VL_VPCLMULQDQ-NEXT: vpclmulqdq $1, %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x44,0xc1,0x01]
; AVX512VL_VPCLMULQDQ-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 1)
@@ -13,7 +13,7 @@ declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnon
define <4 x i64> @test_x86_pclmulqdq_256(<4 x i64> %a0, <4 x i64> %a1) {
; AVX512VL_VPCLMULQDQ-LABEL: test_x86_pclmulqdq_256:
-; AVX512VL_VPCLMULQDQ: # BB#0:
+; AVX512VL_VPCLMULQDQ: # %bb.0:
; AVX512VL_VPCLMULQDQ-NEXT: vpclmulqdq $16, %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x44,0xc1,0x10]
; AVX512VL_VPCLMULQDQ-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a0, <4 x i64> %a1, i8 16)
diff --git a/test/CodeGen/X86/avx512vl_vnni-intrinsics.ll b/test/CodeGen/X86/avx512vl_vnni-intrinsics.ll
index 10e82ee24ad..a098389f00c 100644
--- a/test/CodeGen/X86/avx512vl_vnni-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl_vnni-intrinsics.ll
@@ -5,7 +5,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpbusd.256(<8 x i32>, <8 x i32>, <8 x
define <8 x i32>@test_int_x86_avx512_mask_vpdpbusd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpbusd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %ymm0, %ymm3
; CHECK-NEXT: vpdpbusd (%rdi), %ymm1, %ymm3 {%k1}
@@ -29,7 +29,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpbusd.128(<4 x i32>, <4 x i32>, <4 x
define <4 x i32>@test_int_x86_avx512_mask_vpdpbusd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpbusd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vpdpbusd (%rdi), %xmm1, %xmm3 {%k1}
@@ -53,7 +53,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpbusds.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_mask_vpdpbusds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpbusds_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %ymm0, %ymm3
; CHECK-NEXT: vpdpbusds (%rdi), %ymm1, %ymm3 {%k1}
@@ -77,7 +77,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpbusds.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_mask_vpdpbusds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpbusds_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vpdpbusds (%rdi), %xmm1, %xmm3 {%k1}
@@ -101,7 +101,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpwssd.256(<8 x i32>, <8 x i32>, <8 x
define <8 x i32>@test_int_x86_avx512_mask_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpwssd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %ymm0, %ymm3
; CHECK-NEXT: vpdpwssd (%rdi), %ymm1, %ymm3 {%k1}
@@ -125,7 +125,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x
define <4 x i32>@test_int_x86_avx512_mask_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpwssd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vpdpwssd (%rdi), %xmm1, %xmm3 {%k1}
@@ -150,7 +150,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpwssds.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_mask_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpwssds_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %ymm0, %ymm3
; CHECK-NEXT: vpdpwssds (%rdi), %ymm1, %ymm3 {%k1}
@@ -174,7 +174,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpwssds.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_mask_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpwssds_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vpdpwssds (%rdi), %xmm1, %xmm3 {%k1}
diff --git a/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll
index ab4cbeb8d5e..1f0efeefd32 100644
--- a/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll
@@ -3,7 +3,7 @@
define <2 x i64> @test_mm_broadcastmb_epi64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_mm_broadcastmb_epi64:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; CHECK-NEXT: vpbroadcastmb2q %k0, %xmm0
; CHECK-NEXT: retq
@@ -21,7 +21,7 @@ entry:
define <4 x i64> @test_mm256_broadcastmb_epi64(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_mm256_broadcastmb_epi64:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; CHECK-NEXT: vpbroadcastmb2q %k0, %ymm0
; CHECK-NEXT: retq
@@ -37,7 +37,7 @@ entry:
define <2 x i64> @test_mm_broadcastmw_epi32(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mm_broadcastmw_epi32:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: vpbroadcastmw2d %k0, %xmm0
; CHECK-NEXT: vzeroupper
@@ -56,7 +56,7 @@ entry:
define <4 x i64> @test_mm256_broadcastmw_epi32(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mm256_broadcastmw_epi32:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: vpbroadcastmw2d %k0, %ymm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512vnni-intrinsics.ll b/test/CodeGen/X86/avx512vnni-intrinsics.ll
index 3cd1011563a..0ee0ca0cde4 100644
--- a/test/CodeGen/X86/avx512vnni-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vnni-intrinsics.ll
@@ -5,7 +5,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpbusd.512(<16 x i32>, <16 x i32>, <
define <16 x i32>@test_int_x86_avx512_mask_vpdpbusd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpbusd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vpdpbusd (%rdi), %zmm1, %zmm3 {%k1}
@@ -29,7 +29,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpbusds.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_vpdpbusds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpbusds_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vpdpbusds (%rdi), %zmm1, %zmm3 {%k1}
@@ -53,7 +53,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpwssd.512(<16 x i32>, <16 x i32>, <
define <16 x i32>@test_int_x86_avx512_mask_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpwssd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vpdpwssd (%rdi), %zmm1, %zmm3 {%k1}
@@ -77,7 +77,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpwssds.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpdpwssds_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vpdpwssds (%rdi), %zmm1, %zmm3 {%k1}
diff --git a/test/CodeGen/X86/avx512vpopcntdq-intrinsics.ll b/test/CodeGen/X86/avx512vpopcntdq-intrinsics.ll
index 019c5282f63..34330a19d82 100644
--- a/test/CodeGen/X86/avx512vpopcntdq-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vpopcntdq-intrinsics.ll
@@ -10,13 +10,13 @@
define <16 x i32> @test_mask_vpopcnt_d(<16 x i32> %a, i16 %mask, <16 x i32> %b) {
; X86_64-LABEL: test_mask_vpopcnt_d:
-; X86_64: # BB#0:
+; X86_64: # %bb.0:
; X86_64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X86_64-NEXT: vpopcntd %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0x55,0xc1]
; X86_64-NEXT: retq # encoding: [0xc3]
;
; X86-LABEL: test_mask_vpopcnt_d:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT: vpopcntd %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0x55,0xc1]
; X86-NEXT: retl # encoding: [0xc3]
@@ -28,13 +28,13 @@ define <16 x i32> @test_mask_vpopcnt_d(<16 x i32> %a, i16 %mask, <16 x i32> %b)
define <16 x i32> @test_maskz_vpopcnt_d(i16 %mask, <16 x i32> %a) {
; X86_64-LABEL: test_maskz_vpopcnt_d:
-; X86_64: # BB#0:
+; X86_64: # %bb.0:
; X86_64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X86_64-NEXT: vpopcntd %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0x55,0xc0]
; X86_64-NEXT: retq # encoding: [0xc3]
;
; X86-LABEL: test_maskz_vpopcnt_d:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
; X86-NEXT: vpopcntd %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0x55,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
@@ -46,14 +46,14 @@ define <16 x i32> @test_maskz_vpopcnt_d(i16 %mask, <16 x i32> %a) {
define <8 x i64> @test_mask_vpopcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; X86_64-LABEL: test_mask_vpopcnt_q:
-; X86_64: # BB#0:
+; X86_64: # %bb.0:
; X86_64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X86_64-NEXT: vpopcntq %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x55,0xc8]
; X86_64-NEXT: vmovdqa64 %zmm1, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
; X86_64-NEXT: retq # encoding: [0xc3]
;
; X86-LABEL: test_mask_vpopcnt_q:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vpopcntq %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0x55,0xc8]
@@ -67,13 +67,13 @@ define <8 x i64> @test_mask_vpopcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
define <8 x i64> @test_maskz_vpopcnt_q(<8 x i64> %a, i8 %mask) {
; X86_64-LABEL: test_maskz_vpopcnt_q:
-; X86_64: # BB#0:
+; X86_64: # %bb.0:
; X86_64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X86_64-NEXT: vpopcntq %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x55,0xc0]
; X86_64-NEXT: retq # encoding: [0xc3]
;
; X86-LABEL: test_maskz_vpopcnt_q:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vpopcntq %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0x55,0xc0]
diff --git a/test/CodeGen/X86/bc-extract.ll b/test/CodeGen/X86/bc-extract.ll
index b43c70e303a..506ba906800 100644
--- a/test/CodeGen/X86/bc-extract.ll
+++ b/test/CodeGen/X86/bc-extract.ll
@@ -4,12 +4,12 @@
define float @extractFloat1() nounwind {
; X32-LABEL: extractFloat1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: fld1
; X32-NEXT: retl
;
; X64-LABEL: extractFloat1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: retq
entry:
@@ -20,12 +20,12 @@ entry:
define float @extractFloat2() nounwind {
; X32-LABEL: extractFloat2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: fldz
; X32-NEXT: retl
;
; X64-LABEL: extractFloat2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: retq
entry:
@@ -36,12 +36,12 @@ entry:
define i32 @extractInt2() nounwind {
; X32-LABEL: extractInt2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: retl
;
; X64-LABEL: extractInt2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/bigstructret.ll b/test/CodeGen/X86/bigstructret.ll
index 6fd1c8bb5b7..d4db764c680 100644
--- a/test/CodeGen/X86/bigstructret.ll
+++ b/test/CodeGen/X86/bigstructret.ll
@@ -7,7 +7,7 @@
define fastcc %0 @ReturnBigStruct() nounwind readnone {
; X86-LABEL: ReturnBigStruct:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl $24601, 12(%ecx) # imm = 0x6019
; X86-NEXT: movl $48, 8(%ecx)
; X86-NEXT: movl $24, 4(%ecx)
@@ -16,7 +16,7 @@ define fastcc %0 @ReturnBigStruct() nounwind readnone {
; X86-NEXT: retl
;
; X64-LABEL: ReturnBigStruct:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movabsq $105660490448944, %rax # imm = 0x601900000030
; X64-NEXT: movq %rax, 8(%rdi)
; X64-NEXT: movabsq $103079215116, %rax # imm = 0x180000000C
@@ -34,7 +34,7 @@ entry:
define fastcc %1 @ReturnBigStruct2() nounwind readnone {
; X86-LABEL: ReturnBigStruct2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl $48, 4(%ecx)
; X86-NEXT: movb $1, 2(%ecx)
; X86-NEXT: movw $256, (%ecx) # imm = 0x100
@@ -42,7 +42,7 @@ define fastcc %1 @ReturnBigStruct2() nounwind readnone {
; X86-NEXT: retl
;
; X64-LABEL: ReturnBigStruct2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl $48, 4(%rdi)
; X64-NEXT: movb $1, 2(%rdi)
; X64-NEXT: movw $256, (%rdi) # imm = 0x100
diff --git a/test/CodeGen/X86/bitcast-and-setcc-128.ll b/test/CodeGen/X86/bitcast-and-setcc-128.ll
index ef062aeb3f0..d65c789a2d5 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-128.ll
@@ -8,7 +8,7 @@
define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; SSE2-SSSE3-LABEL: v8i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtw %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
@@ -18,7 +18,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -28,7 +28,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -43,7 +43,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -58,7 +58,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; SSE2-SSSE3-LABEL: v4i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
@@ -67,7 +67,7 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -76,7 +76,7 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -85,7 +85,7 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -101,7 +101,7 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
; SSE2-SSSE3-LABEL: v4f32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
; SSE2-SSSE3-NEXT: cmpltps %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andps %xmm1, %xmm3
@@ -110,7 +110,7 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d)
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vcmpltps %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandps %xmm1, %xmm0, %xmm0
@@ -119,7 +119,7 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d)
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512F-NEXT: vcmpltps %xmm2, %xmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -128,7 +128,7 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d)
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512BW-NEXT: vcmpltps %xmm2, %xmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -144,7 +144,7 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d)
define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; SSE2-SSSE3-LABEL: v16i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
@@ -153,7 +153,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -162,7 +162,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
@@ -177,7 +177,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtb %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -192,7 +192,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; SSE2-SSSE3-LABEL: v2i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: psllq $56, %xmm2
; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm4
; SSE2-SSSE3-NEXT: psrad $31, %xmm4
@@ -248,7 +248,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $56, %xmm3, %xmm3
; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX1-NEXT: vpsrad $24, %xmm3, %xmm3
@@ -277,7 +277,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $56, %xmm3, %xmm3
; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX2-NEXT: vpsrad $24, %xmm3, %xmm3
@@ -306,7 +306,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllq $56, %xmm3, %xmm3
; AVX512F-NEXT: vpsraq $56, %xmm3, %xmm3
; AVX512F-NEXT: vpsllq $56, %xmm2, %xmm2
@@ -323,7 +323,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllq $56, %xmm3, %xmm3
; AVX512BW-NEXT: vpsraq $56, %xmm3, %xmm3
; AVX512BW-NEXT: vpsllq $56, %xmm2, %xmm2
@@ -347,7 +347,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; SSE2-SSSE3-LABEL: v2i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: psllq $48, %xmm2
; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm4
; SSE2-SSSE3-NEXT: psrad $31, %xmm4
@@ -403,7 +403,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $48, %xmm3, %xmm3
; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX1-NEXT: vpsrad $16, %xmm3, %xmm3
@@ -432,7 +432,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $48, %xmm3, %xmm3
; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX2-NEXT: vpsrad $16, %xmm3, %xmm3
@@ -461,7 +461,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllq $48, %xmm3, %xmm3
; AVX512F-NEXT: vpsraq $48, %xmm3, %xmm3
; AVX512F-NEXT: vpsllq $48, %xmm2, %xmm2
@@ -478,7 +478,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllq $48, %xmm3, %xmm3
; AVX512BW-NEXT: vpsraq $48, %xmm3, %xmm3
; AVX512BW-NEXT: vpsllq $48, %xmm2, %xmm2
@@ -502,7 +502,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; SSE2-SSSE3-LABEL: v2i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: psllq $32, %xmm2
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
@@ -550,7 +550,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
@@ -575,7 +575,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX2-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
@@ -600,7 +600,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX512F-NEXT: vpsraq $32, %xmm3, %xmm3
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
@@ -617,7 +617,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX512BW-NEXT: vpsraq $32, %xmm3, %xmm3
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
@@ -641,7 +641,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
; SSE2-SSSE3-LABEL: v2i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm1
; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm0
@@ -669,7 +669,7 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -678,7 +678,7 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -687,7 +687,7 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -703,7 +703,7 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
; SSE2-SSSE3-LABEL: v2f64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
; SSE2-SSSE3-NEXT: cmpltpd %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andpd %xmm1, %xmm3
@@ -712,7 +712,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vcmpltpd %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandpd %xmm1, %xmm0, %xmm0
@@ -721,7 +721,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512F-NEXT: vcmpltpd %xmm2, %xmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -730,7 +730,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double>
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512BW-NEXT: vcmpltpd %xmm2, %xmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -746,7 +746,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double>
define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; SSE2-SSSE3-LABEL: v4i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pslld $24, %xmm3
; SSE2-SSSE3-NEXT: psrad $24, %xmm3
; SSE2-SSSE3-NEXT: pslld $24, %xmm2
@@ -763,7 +763,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpslld $24, %xmm3, %xmm3
; AVX12-NEXT: vpsrad $24, %xmm3, %xmm3
; AVX12-NEXT: vpslld $24, %xmm2, %xmm2
@@ -780,7 +780,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpslld $24, %xmm3, %xmm3
; AVX512F-NEXT: vpsrad $24, %xmm3, %xmm3
; AVX512F-NEXT: vpslld $24, %xmm2, %xmm2
@@ -797,7 +797,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpslld $24, %xmm3, %xmm3
; AVX512BW-NEXT: vpsrad $24, %xmm3, %xmm3
; AVX512BW-NEXT: vpslld $24, %xmm2, %xmm2
@@ -821,7 +821,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; SSE2-SSSE3-LABEL: v4i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pslld $16, %xmm3
; SSE2-SSSE3-NEXT: psrad $16, %xmm3
; SSE2-SSSE3-NEXT: pslld $16, %xmm2
@@ -838,7 +838,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpslld $16, %xmm3, %xmm3
; AVX12-NEXT: vpsrad $16, %xmm3, %xmm3
; AVX12-NEXT: vpslld $16, %xmm2, %xmm2
@@ -855,7 +855,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpslld $16, %xmm3, %xmm3
; AVX512F-NEXT: vpsrad $16, %xmm3, %xmm3
; AVX512F-NEXT: vpslld $16, %xmm2, %xmm2
@@ -872,7 +872,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpslld $16, %xmm3, %xmm3
; AVX512BW-NEXT: vpsrad $16, %xmm3, %xmm3
; AVX512BW-NEXT: vpslld $16, %xmm2, %xmm2
@@ -896,7 +896,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; SSE2-SSSE3-LABEL: v8i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: psllw $8, %xmm3
; SSE2-SSSE3-NEXT: psraw $8, %xmm3
; SSE2-SSSE3-NEXT: psllw $8, %xmm2
@@ -914,7 +914,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpsllw $8, %xmm3, %xmm3
; AVX12-NEXT: vpsraw $8, %xmm3, %xmm3
; AVX12-NEXT: vpsllw $8, %xmm2, %xmm2
@@ -932,7 +932,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllw $8, %xmm3, %xmm3
; AVX512F-NEXT: vpsraw $8, %xmm3, %xmm3
; AVX512F-NEXT: vpsllw $8, %xmm2, %xmm2
@@ -955,7 +955,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $8, %xmm3, %xmm3
; AVX512BW-NEXT: vpsraw $8, %xmm3, %xmm3
; AVX512BW-NEXT: vpsllw $8, %xmm2, %xmm2
diff --git a/test/CodeGen/X86/bitcast-and-setcc-256.ll b/test/CodeGen/X86/bitcast-and-setcc-256.ll
index 3fb66e400ce..9cb5750d2cb 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -8,7 +8,7 @@
define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; SSE2-SSSE3-LABEL: v4i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-SSSE3-NEXT: pxor %xmm8, %xmm3
; SSE2-SSSE3-NEXT: pxor %xmm8, %xmm1
@@ -58,7 +58,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -76,7 +76,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -90,7 +90,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512F-NEXT: vpcmpgtq %ymm3, %ymm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -100,7 +100,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtq %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -117,7 +117,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double> %d) {
; SSE2-SSSE3-LABEL: v4f64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm1, %xmm3
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
@@ -130,7 +130,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX12-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -144,7 +144,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512F-NEXT: vcmpltpd %ymm2, %ymm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -154,7 +154,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512BW-NEXT: vcmpltpd %ymm2, %ymm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -171,7 +171,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; SSE2-SSSE3-LABEL: v16i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtw %xmm3, %xmm1
; SSE2-SSSE3-NEXT: pcmpgtw %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm1, %xmm0
@@ -184,7 +184,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
@@ -202,7 +202,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -216,7 +216,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
@@ -231,7 +231,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtw %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -247,7 +247,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE2-SSSE3-LABEL: v8i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm1
; SSE2-SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0
@@ -261,7 +261,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
@@ -280,7 +280,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -295,7 +295,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v8i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512F-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -304,7 +304,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -320,7 +320,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
; SSE2-SSSE3-LABEL: v8f32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltps %xmm1, %xmm3
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
@@ -334,7 +334,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX12-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -349,7 +349,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512F-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -358,7 +358,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512BW-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -374,7 +374,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; SSE2-SSSE3-LABEL: v32i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm1
; SSE2-SSSE3-NEXT: pcmpgtb %xmm6, %xmm4
@@ -388,7 +388,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtb %xmm4, %xmm5, %xmm4
@@ -407,7 +407,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -416,7 +416,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -443,7 +443,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtb %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
diff --git a/test/CodeGen/X86/bitcast-and-setcc-512.ll b/test/CodeGen/X86/bitcast-and-setcc-512.ll
index ef128913b1e..79ef2cc13a8 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-512.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -7,7 +7,7 @@
define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; SSE-LABEL: v8i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
@@ -45,7 +45,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm8
@@ -81,7 +81,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -109,7 +109,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v8i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -118,7 +118,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -134,7 +134,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d) {
; SSE-LABEL: v8f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10
@@ -172,7 +172,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; SSE-NEXT: retq
;
; AVX12-LABEL: v8f64:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX12-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -200,7 +200,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -209,7 +209,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -225,7 +225,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; SSE-LABEL: v32i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
@@ -251,7 +251,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
; AVX1-NEXT: vpcmpgtw %xmm8, %xmm9, %xmm8
@@ -282,7 +282,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
@@ -303,7 +303,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -598,7 +598,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtw %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -613,7 +613,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; SSE-LABEL: v16i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
@@ -638,7 +638,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm8
@@ -668,7 +668,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -690,7 +690,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v16i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -699,7 +699,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -715,7 +715,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x float> %d) {
; SSE-LABEL: v16f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
@@ -740,7 +740,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; SSE-NEXT: retq
;
; AVX12-LABEL: v16f32:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX12-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -762,7 +762,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512F-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
@@ -771,7 +771,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v16f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -787,7 +787,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
; SSE-LABEL: v64i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
@@ -1009,7 +1009,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: .cfi_offset %rbp, -16
@@ -1243,7 +1243,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: .cfi_offset %rbp, -16
@@ -1461,7 +1461,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -1503,7 +1503,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtb %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovq %k0, %rax
diff --git a/test/CodeGen/X86/bitcast-i256.ll b/test/CodeGen/X86/bitcast-i256.ll
index 83677357cf7..a29292e4ba1 100644
--- a/test/CodeGen/X86/bitcast-i256.ll
+++ b/test/CodeGen/X86/bitcast-i256.ll
@@ -4,14 +4,14 @@
define i256 @foo(<8 x i32> %a) {
; FAST-LABEL: foo:
-; FAST: # BB#0:
+; FAST: # %bb.0:
; FAST-NEXT: vmovups %ymm0, (%rdi)
; FAST-NEXT: movq %rdi, %rax
; FAST-NEXT: vzeroupper
; FAST-NEXT: retq
;
; SLOW-LABEL: foo:
-; SLOW: # BB#0:
+; SLOW: # %bb.0:
; SLOW-NEXT: vextractf128 $1, %ymm0, 16(%rdi)
; SLOW-NEXT: vmovups %xmm0, (%rdi)
; SLOW-NEXT: movq %rdi, %rax
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
index 988cbe1e6df..92a4ebc8051 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -11,7 +11,7 @@
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
@@ -23,7 +23,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i2_2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -33,7 +33,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i2_2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
@@ -43,7 +43,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i2_2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: andb $3, %dil
; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -59,7 +59,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
define <4 x i32> @ext_i4_4i32(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -68,7 +68,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i4_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -77,7 +77,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i4_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -86,7 +86,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i4_4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: andb $15, %dil
; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -103,7 +103,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
define <8 x i16> @ext_i8_8i16(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -113,7 +113,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -123,7 +123,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -132,7 +132,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: retq
@@ -143,7 +143,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
define <16 x i8> @ext_i16_16i8(i16 %a0) {
; SSE2-LABEL: ext_i16_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
@@ -154,7 +154,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: ext_i16_16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd %edi, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
@@ -163,7 +163,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
@@ -172,7 +172,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
@@ -181,7 +181,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: retq
@@ -196,7 +196,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
@@ -214,7 +214,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i4_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -231,7 +231,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i4_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
@@ -241,7 +241,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i4_4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: andb $15, %dil
; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -256,7 +256,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
define <8 x i32> @ext_i8_8i32(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
@@ -269,7 +269,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -285,7 +285,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
@@ -294,7 +294,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -306,7 +306,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
define <16 x i16> @ext_i16_16i16(i16 %a0) {
; SSE2-SSSE3-LABEL: ext_i16_16i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
@@ -320,7 +320,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -337,7 +337,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
@@ -346,7 +346,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k0
; AVX512-NEXT: vpmovm2w %k0, %ymm0
; AVX512-NEXT: retq
@@ -357,7 +357,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
define <32 x i8> @ext_i32_32i8(i32 %a0) {
; SSE2-SSSE3-LABEL: ext_i32_32i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm1
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
@@ -372,7 +372,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i32_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
@@ -392,7 +392,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i32_32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
@@ -406,7 +406,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i32_32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k0
; AVX512-NEXT: vpmovm2b %k0, %ymm0
; AVX512-NEXT: retq
@@ -421,7 +421,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
@@ -451,7 +451,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -475,7 +475,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
@@ -488,7 +488,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k1
; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: retq
@@ -499,7 +499,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
define <16 x i32> @ext_i16_16i32(i16 %a0) {
; SSE2-SSSE3-LABEL: ext_i16_16i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -520,7 +520,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
@@ -543,7 +543,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8,16,32,64,128]
@@ -555,7 +555,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: retq
@@ -566,7 +566,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
define <32 x i16> @ext_i32_32i16(i32 %a0) {
; SSE2-SSSE3-LABEL: ext_i32_32i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm2
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,0,0,0,4,5,6,7]
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
@@ -587,7 +587,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i32_32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm1
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -615,7 +615,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i32_32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
@@ -629,7 +629,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i32_32i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k0
; AVX512-NEXT: vpmovm2w %k0, %zmm0
; AVX512-NEXT: retq
@@ -640,7 +640,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
define <64 x i8> @ext_i64_64i8(i64 %a0) {
; SSE2-SSSE3-LABEL: ext_i64_64i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movq %rdi, %xmm3
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,1,1,4,5,6,7]
@@ -663,7 +663,7 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i64_64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
@@ -696,7 +696,7 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i64_64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
@@ -717,7 +717,7 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i64_64i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq %rdi, %k0
; AVX512-NEXT: vpmovm2b %k0, %zmm0
; AVX512-NEXT: retq
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
index cab849d4987..e0dd7f253e8 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
@@ -12,7 +12,7 @@
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
@@ -25,7 +25,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i2_2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -36,7 +36,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i2_2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
@@ -47,7 +47,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i2_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: andb $3, %dil
; AVX512F-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512F-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -58,7 +58,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i2_2i64:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: andb $3, %dil
; AVX512VLBW-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -74,7 +74,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
define <4 x i32> @ext_i4_4i32(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -84,7 +84,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i4_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -94,7 +94,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i4_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -104,7 +104,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i4_4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: andb $15, %dil
; AVX512F-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512F-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -116,7 +116,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i4_4i32:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: andb $15, %dil
; AVX512VLBW-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -132,7 +132,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
define <8 x i16> @ext_i8_8i16(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -143,7 +143,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -154,7 +154,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -164,7 +164,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i8_8i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
@@ -172,7 +172,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i8_8i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovd %edi, %k1
; AVX512VLBW-NEXT: vmovdqu16 {{.*}}(%rip), %xmm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
@@ -183,7 +183,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
define <16 x i8> @ext_i16_16i8(i16 %a0) {
; SSE2-LABEL: ext_i16_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
@@ -196,7 +196,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: ext_i16_16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd %edi, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
@@ -207,7 +207,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
@@ -218,7 +218,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
@@ -229,7 +229,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i16_16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -237,7 +237,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i16_16i8:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovd %edi, %k1
; AVX512VLBW-NEXT: vmovdqu8 {{.*}}(%rip), %xmm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
@@ -252,7 +252,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
@@ -272,7 +272,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i4_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -291,7 +291,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i4_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
@@ -302,7 +302,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i4_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: andb $15, %dil
; AVX512F-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512F-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -312,7 +312,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i4_4i64:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: andb $15, %dil
; AVX512VLBW-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -327,7 +327,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
define <8 x i32> @ext_i8_8i32(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
@@ -342,7 +342,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -360,7 +360,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
@@ -370,14 +370,14 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i8_8i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i8_8i32:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovd %edi, %k1
; AVX512VLBW-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
@@ -388,7 +388,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
define <16 x i16> @ext_i16_16i16(i16 %a0) {
; SSE2-SSSE3-LABEL: ext_i16_16i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
@@ -404,7 +404,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -423,7 +423,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
@@ -433,14 +433,14 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i16_16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i16_16i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovd %edi, %k1
; AVX512VLBW-NEXT: vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
@@ -451,7 +451,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
define <32 x i8> @ext_i32_32i8(i32 %a0) {
; SSE2-SSSE3-LABEL: ext_i32_32i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm1
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
@@ -471,7 +471,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i32_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
@@ -496,7 +496,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i32_32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
@@ -512,7 +512,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i32_32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -534,7 +534,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i32_32i8:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovd %edi, %k1
; AVX512VLBW-NEXT: vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
@@ -549,7 +549,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
@@ -583,7 +583,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -611,7 +611,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
@@ -626,13 +626,13 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i8_8i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i8_8i64:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovd %edi, %k1
; AVX512VLBW-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
@@ -643,7 +643,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
define <16 x i32> @ext_i16_16i32(i16 %a0) {
; SSE2-SSSE3-LABEL: ext_i16_16i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -668,7 +668,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
@@ -695,7 +695,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8,16,32,64,128]
@@ -709,13 +709,13 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i16_16i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i16_16i32:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovd %edi, %k1
; AVX512VLBW-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
@@ -726,7 +726,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
define <32 x i16> @ext_i32_32i16(i32 %a0) {
; SSE2-SSSE3-LABEL: ext_i32_32i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm2
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,0,0,0,4,5,6,7]
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
@@ -751,7 +751,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i32_32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm1
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -783,7 +783,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i32_32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
@@ -799,7 +799,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i32_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -824,7 +824,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i32_32i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovd %edi, %k1
; AVX512VLBW-NEXT: vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
@@ -835,7 +835,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
define <64 x i8> @ext_i64_64i8(i64 %a0) {
; SSE2-SSSE3-LABEL: ext_i64_64i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movq %rdi, %xmm3
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,1,1,4,5,6,7]
@@ -867,7 +867,7 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i64_64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
@@ -909,7 +909,7 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i64_64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
@@ -935,7 +935,7 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: ext_i64_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -966,7 +966,7 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i64_64i8:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: kmovq %rdi, %k1
; AVX512VLBW-NEXT: vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512VLBW-NEXT: retq
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
index 5010d5c78c3..50893949544 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -7,7 +7,7 @@
define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
@@ -20,7 +20,7 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_i2_2i1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -31,7 +31,7 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i2_2i1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
@@ -42,7 +42,7 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i2_2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512-NEXT: kmovd %eax, %k1
@@ -56,7 +56,7 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i4_4i1:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -66,7 +66,7 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_i4_4i1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -76,7 +76,7 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i4_4i1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
@@ -86,7 +86,7 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i4_4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512-NEXT: kmovd %eax, %k1
@@ -101,7 +101,7 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i8_8i1:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -112,7 +112,7 @@ define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_i8_8i1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -123,7 +123,7 @@ define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i8_8i1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -133,7 +133,7 @@ define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i8_8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: retq
@@ -143,7 +143,7 @@ define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
; SSE2-LABEL: bitcast_i16_16i1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
@@ -156,7 +156,7 @@ define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: bitcast_i16_16i1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd %edi, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
@@ -167,7 +167,7 @@ define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_i16_16i1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
@@ -178,7 +178,7 @@ define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i16_16i1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
@@ -189,7 +189,7 @@ define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i16_16i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: retq
@@ -199,13 +199,13 @@ define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
define <32 x i1> @bitcast_i32_32i1(i32 %a0) {
; SSE2-SSSE3-LABEL: bitcast_i32_32i1:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movl %esi, (%rdi)
; SSE2-SSSE3-NEXT: movq %rdi, %rax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_i32_32i1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
@@ -230,7 +230,7 @@ define <32 x i1> @bitcast_i32_32i1(i32 %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i32_32i1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
@@ -246,7 +246,7 @@ define <32 x i1> @bitcast_i32_32i1(i32 %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i32_32i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd %edi, %k0
; AVX512-NEXT: vpmovm2b %k0, %ymm0
; AVX512-NEXT: retq
@@ -256,19 +256,19 @@ define <32 x i1> @bitcast_i32_32i1(i32 %a0) {
define <64 x i1> @bitcast_i64_64i1(i64 %a0) {
; SSE2-SSSE3-LABEL: bitcast_i64_64i1:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movq %rsi, (%rdi)
; SSE2-SSSE3-NEXT: movq %rdi, %rax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: bitcast_i64_64i1:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: movq %rsi, (%rdi)
; AVX12-NEXT: movq %rdi, %rax
; AVX12-NEXT: retq
;
; AVX512-LABEL: bitcast_i64_64i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq %rdi, %k0
; AVX512-NEXT: vpmovm2b %k0, %zmm0
; AVX512-NEXT: retq
diff --git a/test/CodeGen/X86/bitcast-int-to-vector.ll b/test/CodeGen/X86/bitcast-int-to-vector.ll
index a260ac3706f..42383e6b9fe 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector.ll
@@ -5,7 +5,7 @@
define i1 @foo(i64 %a) {
; X86-LABEL: foo:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fucompp
@@ -16,14 +16,14 @@ define i1 @foo(i64 %a) {
; X86-NEXT: retl
;
; X86-SSE-LABEL: foo:
-; X86-SSE: # BB#0:
+; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: ucomiss {{[0-9]+}}(%esp), %xmm0
; X86-SSE-NEXT: setp %al
; X86-SSE-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; X64-NEXT: ucomiss %xmm1, %xmm0
diff --git a/test/CodeGen/X86/bitcast-mmx.ll b/test/CodeGen/X86/bitcast-mmx.ll
index f0318ede531..9f612574a32 100644
--- a/test/CodeGen/X86/bitcast-mmx.ll
+++ b/test/CodeGen/X86/bitcast-mmx.ll
@@ -4,13 +4,13 @@
define i32 @t0(i64 %x) nounwind {
; X86-LABEL: t0:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pshufw $238, {{[0-9]+}}(%esp), %mm0 # mm0 = mem[2,3,2,3]
; X86-NEXT: movd %mm0, %eax
; X86-NEXT: retl
;
; X64-LABEL: t0:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %rdi, %mm0
; X64-NEXT: pshufw $238, %mm0, %mm0 # mm0 = mm0[2,3,2,3]
; X64-NEXT: movd %mm0, %eax
@@ -29,7 +29,7 @@ entry:
define i64 @t1(i64 %x, i32 %n) nounwind {
; X86-LABEL: t1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -45,7 +45,7 @@ define i64 @t1(i64 %x, i32 %n) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %esi, %mm0
; X64-NEXT: movd %rdi, %mm1
; X64-NEXT: psllq %mm0, %mm1
@@ -60,7 +60,7 @@ entry:
define i64 @t2(i64 %x, i32 %n, i32 %w) nounwind {
; X86-LABEL: t2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -77,7 +77,7 @@ define i64 @t2(i64 %x, i32 %n, i32 %w) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %esi, %mm0
; X64-NEXT: movd %edx, %mm1
; X64-NEXT: psllq %mm0, %mm1
@@ -98,7 +98,7 @@ entry:
define i64 @t3(<1 x i64>* %y, i32* %n) nounwind {
; X86-LABEL: t3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -116,7 +116,7 @@ define i64 @t3(<1 x i64>* %y, i32* %n) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: psllq %mm1, %mm0
diff --git a/test/CodeGen/X86/bitcast-setcc-128.ll b/test/CodeGen/X86/bitcast-setcc-128.ll
index d68bdfa5356..4cd31137839 100644
--- a/test/CodeGen/X86/bitcast-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-setcc-128.ll
@@ -8,7 +8,7 @@
define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-SSSE3-LABEL: v8i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
@@ -16,7 +16,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
@@ -24,7 +24,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -35,7 +35,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -47,21 +47,21 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-SSSE3-LABEL: v4i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -69,7 +69,7 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -82,21 +82,21 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
; SSE2-SSSE3-LABEL: v4f32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %xmm0, %xmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -104,7 +104,7 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %xmm0, %xmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -117,21 +117,21 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-SSSE3-LABEL: v16i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
; AVX12-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
@@ -142,7 +142,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -154,7 +154,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; SSE2-SSSE3-LABEL: v2i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: psllq $56, %xmm0
; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
@@ -185,7 +185,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $56, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $24, %xmm1, %xmm1
@@ -202,7 +202,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $56, %xmm1, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vpsrad $24, %xmm1, %xmm1
@@ -219,7 +219,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllq $56, %xmm1, %xmm1
; AVX512F-NEXT: vpsraq $56, %xmm1, %xmm1
; AVX512F-NEXT: vpsllq $56, %xmm0, %xmm0
@@ -231,7 +231,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllq $56, %xmm1, %xmm1
; AVX512BW-NEXT: vpsraq $56, %xmm1, %xmm1
; AVX512BW-NEXT: vpsllq $56, %xmm0, %xmm0
@@ -248,7 +248,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; SSE2-SSSE3-LABEL: v2i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: psllq $48, %xmm0
; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
@@ -279,7 +279,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
@@ -296,7 +296,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vpsrad $16, %xmm1, %xmm1
@@ -313,7 +313,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX512F-NEXT: vpsraq $48, %xmm1, %xmm1
; AVX512F-NEXT: vpsllq $48, %xmm0, %xmm0
@@ -325,7 +325,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX512BW-NEXT: vpsraq $48, %xmm1, %xmm1
; AVX512BW-NEXT: vpsllq $48, %xmm0, %xmm0
@@ -342,7 +342,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; SSE2-SSSE3-LABEL: v2i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: psllq $32, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-SSSE3-NEXT: psrad $31, %xmm0
@@ -369,7 +369,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -384,7 +384,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -399,7 +399,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512F-NEXT: vpsraq $32, %xmm1, %xmm1
; AVX512F-NEXT: vpsllq $32, %xmm0, %xmm0
@@ -411,7 +411,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512BW-NEXT: vpsraq $32, %xmm1, %xmm1
; AVX512BW-NEXT: vpsllq $32, %xmm0, %xmm0
@@ -428,7 +428,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-SSSE3-LABEL: v2i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1
; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0
@@ -445,14 +445,14 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -460,7 +460,7 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -473,21 +473,21 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
define i2 @v2f64(<2 x double> %a, <2 x double> %b) {
; SSE2-SSSE3-LABEL: v2f64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %xmm0, %xmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -495,7 +495,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %xmm0, %xmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -508,7 +508,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b) {
define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
; SSE2-SSSE3-LABEL: v4i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pslld $24, %xmm1
; SSE2-SSSE3-NEXT: psrad $24, %xmm1
; SSE2-SSSE3-NEXT: pslld $24, %xmm0
@@ -519,7 +519,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpslld $24, %xmm1, %xmm1
; AVX12-NEXT: vpsrad $24, %xmm1, %xmm1
; AVX12-NEXT: vpslld $24, %xmm0, %xmm0
@@ -530,7 +530,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpslld $24, %xmm1, %xmm1
; AVX512F-NEXT: vpsrad $24, %xmm1, %xmm1
; AVX512F-NEXT: vpslld $24, %xmm0, %xmm0
@@ -542,7 +542,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpslld $24, %xmm1, %xmm1
; AVX512BW-NEXT: vpsrad $24, %xmm1, %xmm1
; AVX512BW-NEXT: vpslld $24, %xmm0, %xmm0
@@ -559,7 +559,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
; SSE2-SSSE3-LABEL: v4i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pslld $16, %xmm1
; SSE2-SSSE3-NEXT: psrad $16, %xmm1
; SSE2-SSSE3-NEXT: pslld $16, %xmm0
@@ -570,7 +570,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpslld $16, %xmm1, %xmm1
; AVX12-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX12-NEXT: vpslld $16, %xmm0, %xmm0
@@ -581,7 +581,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpslld $16, %xmm1, %xmm1
; AVX512F-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX512F-NEXT: vpslld $16, %xmm0, %xmm0
@@ -593,7 +593,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpslld $16, %xmm1, %xmm1
; AVX512BW-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX512BW-NEXT: vpslld $16, %xmm0, %xmm0
@@ -610,7 +610,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; SSE2-SSSE3-LABEL: v8i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: psllw $8, %xmm1
; SSE2-SSSE3-NEXT: psraw $8, %xmm1
; SSE2-SSSE3-NEXT: psllw $8, %xmm0
@@ -622,7 +622,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpsllw $8, %xmm1, %xmm1
; AVX12-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX12-NEXT: vpsllw $8, %xmm0, %xmm0
@@ -634,7 +634,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllw $8, %xmm1, %xmm1
; AVX512F-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX512F-NEXT: vpsllw $8, %xmm0, %xmm0
@@ -649,7 +649,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $8, %xmm1, %xmm1
; AVX512BW-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX512BW-NEXT: vpsllw $8, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/bitcast-setcc-256.ll b/test/CodeGen/X86/bitcast-setcc-256.ll
index 4b2a0d116e3..a0a8a2e3ad7 100644
--- a/test/CodeGen/X86/bitcast-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-setcc-256.ll
@@ -8,7 +8,7 @@
define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-SSSE3-LABEL: v16i16:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtw %xmm3, %xmm1
; SSE2-SSSE3-NEXT: pcmpgtw %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm1, %xmm0
@@ -17,7 +17,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -29,7 +29,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -39,7 +39,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
@@ -50,7 +50,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -63,7 +63,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-SSSE3-LABEL: v8i32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm1
; SSE2-SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0
@@ -73,7 +73,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -85,7 +85,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -93,7 +93,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v8i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -101,7 +101,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -114,7 +114,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
; SSE2-SSSE3-LABEL: v8f32:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltps %xmm1, %xmm3
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
@@ -124,7 +124,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vmovmskps %ymm0, %eax
; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -132,7 +132,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -140,7 +140,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -153,7 +153,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-SSSE3-LABEL: v32i8:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %ecx
; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm1
@@ -163,7 +163,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpmovmskb %xmm2, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
@@ -176,14 +176,14 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -208,7 +208,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: vzeroupper
@@ -220,7 +220,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-SSSE3-LABEL: v4i64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm3
; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm1
@@ -248,7 +248,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -260,7 +260,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -268,7 +268,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -277,7 +277,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -291,7 +291,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
; SSE2-SSSE3-LABEL: v4f64:
-; SSE2-SSSE3: # BB#0:
+; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm1, %xmm3
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
@@ -300,7 +300,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vmovmskpd %ymm0, %eax
; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -308,7 +308,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %ymm0, %ymm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movb %al, -{{[0-9]+}}(%rsp)
@@ -317,7 +317,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %ymm0, %ymm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, -{{[0-9]+}}(%rsp)
diff --git a/test/CodeGen/X86/bitcast-setcc-512.ll b/test/CodeGen/X86/bitcast-setcc-512.ll
index 93b5ddefb9d..7779fa00e5f 100644
--- a/test/CodeGen/X86/bitcast-setcc-512.ll
+++ b/test/CodeGen/X86/bitcast-setcc-512.ll
@@ -7,7 +7,7 @@
define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: v32i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm5, %xmm1
; SSE-NEXT: pcmpgtw %xmm4, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
@@ -21,7 +21,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
@@ -40,7 +40,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -50,7 +50,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -207,7 +207,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: vzeroupper
@@ -219,7 +219,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; SSE-LABEL: v16i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -232,7 +232,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
@@ -250,7 +250,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -263,7 +263,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v16i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -271,7 +271,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -284,7 +284,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; SSE-LABEL: v16f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm3, %xmm7
; SSE-NEXT: cmpltps %xmm2, %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
@@ -297,7 +297,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -311,7 +311,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -324,7 +324,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -332,7 +332,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v16f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -345,7 +345,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: v64i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm5, %xmm1
; SSE-NEXT: pextrb $15, %xmm1, %eax
; SSE-NEXT: andb $1, %al
@@ -555,7 +555,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: .cfi_offset %rbp, -16
@@ -773,7 +773,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: .cfi_offset %rbp, -16
@@ -987,7 +987,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -1025,7 +1025,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
; AVX512BW-NEXT: vzeroupper
@@ -1037,7 +1037,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; SSE-LABEL: v8i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm7, %xmm3
; SSE-NEXT: pcmpgtq %xmm6, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -1051,7 +1051,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -1069,7 +1069,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
@@ -1080,7 +1080,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v8i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -1088,7 +1088,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -1101,7 +1101,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; SSE-LABEL: v8f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm3, %xmm7
; SSE-NEXT: cmpltpd %xmm2, %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
@@ -1115,7 +1115,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: v8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -1129,7 +1129,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
@@ -1140,7 +1140,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v8f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -1148,7 +1148,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v8f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
diff --git a/test/CodeGen/X86/bitreverse.ll b/test/CodeGen/X86/bitreverse.ll
index 8e10499eb26..ffb93b4453c 100644
--- a/test/CodeGen/X86/bitreverse.ll
+++ b/test/CodeGen/X86/bitreverse.ll
@@ -9,7 +9,7 @@ declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>) readnone
define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
; X86-LABEL: test_bitreverse_v2i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rolw $8, %ax
@@ -51,7 +51,7 @@ define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_v2i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -98,7 +98,7 @@ declare i64 @llvm.bitreverse.i64(i64) readnone
define i64 @test_bitreverse_i64(i64 %a) nounwind {
; X86-LABEL: test_bitreverse_i64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bswapl %eax
@@ -138,7 +138,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bswapq %rdi
; X64-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
; X64-NEXT: andq %rdi, %rax
@@ -168,7 +168,7 @@ declare i32 @llvm.bitreverse.i32(i32) readnone
define i32 @test_bitreverse_i32(i32 %a) nounwind {
; X86-LABEL: test_bitreverse_i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bswapl %eax
; X86-NEXT: movl %eax, %ecx
@@ -190,7 +190,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
@@ -218,7 +218,7 @@ declare i24 @llvm.bitreverse.i24(i24) readnone
define i24 @test_bitreverse_i24(i24 %a) nounwind {
; X86-LABEL: test_bitreverse_i24:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bswapl %eax
; X86-NEXT: movl %eax, %ecx
@@ -241,7 +241,7 @@ define i24 @test_bitreverse_i24(i24 %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i24:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
@@ -270,7 +270,7 @@ declare i16 @llvm.bitreverse.i16(i16) readnone
define i16 @test_bitreverse_i16(i16 %a) nounwind {
; X86-LABEL: test_bitreverse_i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rolw $8, %ax
; X86-NEXT: movl %eax, %ecx
@@ -293,7 +293,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: rolw $8, %di
; X64-NEXT: movl %edi, %eax
@@ -322,7 +322,7 @@ declare i8 @llvm.bitreverse.i8(i8) readnone
define i8 @test_bitreverse_i8(i8 %a) {
; X86-LABEL: test_bitreverse_i8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: rolb $4, %al
; X86-NEXT: movl %eax, %ecx
@@ -340,7 +340,7 @@ define i8 @test_bitreverse_i8(i8 %a) {
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rolb $4, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andb $51, %al
@@ -364,7 +364,7 @@ declare i4 @llvm.bitreverse.i4(i4) readnone
define i4 @test_bitreverse_i4(i4 %a) {
; X86-LABEL: test_bitreverse_i4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: rolb $4, %al
; X86-NEXT: movl %eax, %ecx
@@ -383,7 +383,7 @@ define i4 @test_bitreverse_i4(i4 %a) {
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rolb $4, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andb $51, %al
@@ -408,13 +408,13 @@ define i4 @test_bitreverse_i4(i4 %a) {
define <2 x i16> @fold_v2i16() {
; X86-LABEL: fold_v2i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movw $-4096, %ax # imm = 0xF000
; X86-NEXT: movw $240, %dx
; X86-NEXT: retl
;
; X64-LABEL: fold_v2i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [61440,240]
; X64-NEXT: retq
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> <i16 15, i16 3840>)
@@ -423,12 +423,12 @@ define <2 x i16> @fold_v2i16() {
define i24 @fold_i24() {
; X86-LABEL: fold_i24:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $2048, %eax # imm = 0x800
; X86-NEXT: retl
;
; X64-LABEL: fold_i24:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $2048, %eax # imm = 0x800
; X64-NEXT: retq
%b = call i24 @llvm.bitreverse.i24(i24 4096)
@@ -437,12 +437,12 @@ define i24 @fold_i24() {
define i8 @fold_i8() {
; X86-LABEL: fold_i8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb $-16, %al
; X86-NEXT: retl
;
; X64-LABEL: fold_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movb $-16, %al
; X64-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 15)
@@ -451,12 +451,12 @@ define i8 @fold_i8() {
define i4 @fold_i4() {
; X86-LABEL: fold_i4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb $1, %al
; X86-NEXT: retl
;
; X64-LABEL: fold_i4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movb $1, %al
; X64-NEXT: retq
%b = call i4 @llvm.bitreverse.i4(i4 8)
@@ -467,12 +467,12 @@ define i4 @fold_i4() {
define i8 @identity_i8(i8 %a) {
; X86-LABEL: identity_i8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: retl
;
; X64-LABEL: identity_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 %a)
@@ -482,13 +482,13 @@ define i8 @identity_i8(i8 %a) {
define <2 x i16> @identity_v2i16(<2 x i16> %a) {
; X86-LABEL: identity_v2i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; X86-NEXT: retl
;
; X64-LABEL: identity_v2i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
%c = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %b)
@@ -499,11 +499,11 @@ define <2 x i16> @identity_v2i16(<2 x i16> %a) {
define i8 @undef_i8() {
; X86-LABEL: undef_i8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: undef_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 undef)
ret i8 %b
@@ -511,11 +511,11 @@ define i8 @undef_i8() {
define <2 x i16> @undef_v2i16() {
; X86-LABEL: undef_v2i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: retl
;
; X64-LABEL: undef_v2i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> undef)
ret <2 x i16> %b
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index d4948892a31..aa2e9aac433 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -474,11 +474,11 @@ define void @fpcmp_unanalyzable_branch(i1 %cond) {
; edge in 'entry' -> 'entry.if.then_crit_edge' -> 'if.then' -> 'if.end' is
; fall-through.
; CHECK-LABEL: fpcmp_unanalyzable_branch:
-; CHECK: # BB#0: # %entry
-; CHECK: # BB#1: # %entry.if.then_crit_edge
+; CHECK: # %bb.0: # %entry
+; CHECK: # %bb.1: # %entry.if.then_crit_edge
; CHECK: .LBB10_5: # %if.then
; CHECK: .LBB10_6: # %if.end
-; CHECK: # BB#3: # %exit
+; CHECK: # %bb.3: # %exit
; CHECK: jne .LBB10_4
; CHECK-NEXT: jnp .LBB10_6
; CHECK: jmp .LBB10_5
diff --git a/test/CodeGen/X86/block-placement.mir b/test/CodeGen/X86/block-placement.mir
index 61af79d16f5..600bc13f14c 100644
--- a/test/CodeGen/X86/block-placement.mir
+++ b/test/CodeGen/X86/block-placement.mir
@@ -46,28 +46,28 @@ liveins:
- { reg: '%rdi' }
- { reg: '%esi' }
-# CHECK: %eax = FAULTING_OP 1, %bb.3.null, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
-# CHECK-NEXT: JMP_1 %bb.2.not_null
+# CHECK: %eax = FAULTING_OP 1, %bb.3, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
+# CHECK-NEXT: JMP_1 %bb.2
# CHECK: bb.3.null:
# CHECK: bb.4.right:
# CHECK: bb.2.not_null:
body: |
bb.0.entry:
- successors: %bb.1.left(0x7ffff800), %bb.3.right(0x00000800)
+ successors: %bb.1(0x7ffff800), %bb.3(0x00000800)
liveins: %esi, %rdi
frame-setup PUSH64r undef %rax, implicit-def %rsp, implicit %rsp
CFI_INSTRUCTION def_cfa_offset 16
TEST8ri %sil, 1, implicit-def %eflags, implicit killed %esi
- JE_1 %bb.3.right, implicit killed %eflags
+ JE_1 %bb.3, implicit killed %eflags
bb.1.left:
- successors: %bb.2.null(0x7ffff800), %bb.4.not_null(0x00000800)
+ successors: %bb.2(0x7ffff800), %bb.4(0x00000800)
liveins: %rdi
- %eax = FAULTING_OP 1, %bb.2.null, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
- JMP_1 %bb.4.not_null
+ %eax = FAULTING_OP 1, %bb.2, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
+ JMP_1 %bb.4
bb.4.not_null:
liveins: %rdi, %eax
diff --git a/test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll b/test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll
index 8b15a1591b6..f86df57b687 100644
--- a/test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll
+++ b/test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll
@@ -9,7 +9,7 @@
define i64 @test__andn_u64(i64 %a0, i64 %a1) {
; X64-LABEL: test__andn_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorq $-1, %rdi
; X64-NEXT: andq %rsi, %rdi
; X64-NEXT: movq %rdi, %rax
@@ -21,7 +21,7 @@ define i64 @test__andn_u64(i64 %a0, i64 %a1) {
define i64 @test__bextr_u64(i64 %a0, i64 %a1) {
; X64-LABEL: test__bextr_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bextrq %rsi, %rdi, %rax
; X64-NEXT: retq
%res = call i64 @llvm.x86.bmi.bextr.64(i64 %a0, i64 %a1)
@@ -30,7 +30,7 @@ define i64 @test__bextr_u64(i64 %a0, i64 %a1) {
define i64 @test__blsi_u64(i64 %a0) {
; X64-LABEL: test__blsi_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: subq %rdi, %rax
; X64-NEXT: andq %rdi, %rax
@@ -42,7 +42,7 @@ define i64 @test__blsi_u64(i64 %a0) {
define i64 @test__blsmsk_u64(i64 %a0) {
; X64-LABEL: test__blsmsk_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: subq $1, %rax
; X64-NEXT: xorq %rdi, %rax
@@ -54,7 +54,7 @@ define i64 @test__blsmsk_u64(i64 %a0) {
define i64 @test__blsr_u64(i64 %a0) {
; X64-LABEL: test__blsr_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: subq $1, %rax
; X64-NEXT: andq %rdi, %rax
@@ -66,7 +66,7 @@ define i64 @test__blsr_u64(i64 %a0) {
define i64 @test__tzcnt_u64(i64 %a0) {
; X64-LABEL: test__tzcnt_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $64, %ecx
; X64-NEXT: tzcntq %rdi, %rax
; X64-NEXT: cmovbq %rcx, %rax
@@ -83,7 +83,7 @@ define i64 @test__tzcnt_u64(i64 %a0) {
define i64 @test_andn_u64(i64 %a0, i64 %a1) {
; X64-LABEL: test_andn_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorq $-1, %rdi
; X64-NEXT: andq %rsi, %rdi
; X64-NEXT: movq %rdi, %rax
@@ -95,7 +95,7 @@ define i64 @test_andn_u64(i64 %a0, i64 %a1) {
define i64 @test_bextr_u64(i64 %a0, i32 %a1, i32 %a2) {
; X64-LABEL: test_bextr_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $255, %esi
; X64-NEXT: andl $255, %edx
; X64-NEXT: shll $8, %edx
@@ -114,7 +114,7 @@ define i64 @test_bextr_u64(i64 %a0, i32 %a1, i32 %a2) {
define i64 @test_blsi_u64(i64 %a0) {
; X64-LABEL: test_blsi_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: subq %rdi, %rax
; X64-NEXT: andq %rdi, %rax
@@ -126,7 +126,7 @@ define i64 @test_blsi_u64(i64 %a0) {
define i64 @test_blsmsk_u64(i64 %a0) {
; X64-LABEL: test_blsmsk_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: subq $1, %rax
; X64-NEXT: xorq %rdi, %rax
@@ -138,7 +138,7 @@ define i64 @test_blsmsk_u64(i64 %a0) {
define i64 @test_blsr_u64(i64 %a0) {
; X64-LABEL: test_blsr_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: subq $1, %rax
; X64-NEXT: andq %rdi, %rax
@@ -150,7 +150,7 @@ define i64 @test_blsr_u64(i64 %a0) {
define i64 @test_tzcnt_u64(i64 %a0) {
; X64-LABEL: test_tzcnt_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $64, %ecx
; X64-NEXT: tzcntq %rdi, %rax
; X64-NEXT: cmovbq %rcx, %rax
diff --git a/test/CodeGen/X86/bmi-intrinsics-fast-isel.ll b/test/CodeGen/X86/bmi-intrinsics-fast-isel.ll
index 2b889dd054f..3c183a59f9c 100644
--- a/test/CodeGen/X86/bmi-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/bmi-intrinsics-fast-isel.ll
@@ -10,12 +10,12 @@
define i16 @test__tzcnt_u16(i16 %a0) {
; X32-LABEL: test__tzcnt_u16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzwl %ax, %ecx
; X32-NEXT: cmpl $0, %ecx
; X32-NEXT: jne .LBB0_1
-; X32-NEXT: # BB#2:
+; X32-NEXT: # %bb.2:
; X32-NEXT: movw $16, %ax
; X32-NEXT: retl
; X32-NEXT: .LBB0_1:
@@ -23,7 +23,7 @@ define i16 @test__tzcnt_u16(i16 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__tzcnt_u16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movw $16, %cx
; X64-NEXT: movzwl %di, %edx
; X64-NEXT: tzcntw %dx, %ax
@@ -39,14 +39,14 @@ define i16 @test__tzcnt_u16(i16 %a0) {
define i32 @test__andn_u32(i32 %a0, i32 %a1) {
; X32-LABEL: test__andn_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl $-1, %eax
; X32-NEXT: andl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test__andn_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl $-1, %edi
; X64-NEXT: andl %esi, %edi
; X64-NEXT: movl %edi, %eax
@@ -58,13 +58,13 @@ define i32 @test__andn_u32(i32 %a0, i32 %a1) {
define i32 @test__bextr_u32(i32 %a0, i32 %a1) {
; X32-LABEL: test__bextr_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test__bextr_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bextrl %esi, %edi, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.bmi.bextr.32(i32 %a0, i32 %a1)
@@ -73,7 +73,7 @@ define i32 @test__bextr_u32(i32 %a0, i32 %a1) {
define i32 @test__blsi_u32(i32 %a0) {
; X32-LABEL: test__blsi_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: subl %ecx, %eax
@@ -81,7 +81,7 @@ define i32 @test__blsi_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__blsi_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: andl %edi, %eax
@@ -93,7 +93,7 @@ define i32 @test__blsi_u32(i32 %a0) {
define i32 @test__blsmsk_u32(i32 %a0) {
; X32-LABEL: test__blsmsk_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: subl $1, %eax
@@ -101,7 +101,7 @@ define i32 @test__blsmsk_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__blsmsk_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: subl $1, %eax
; X64-NEXT: xorl %edi, %eax
@@ -113,7 +113,7 @@ define i32 @test__blsmsk_u32(i32 %a0) {
define i32 @test__blsr_u32(i32 %a0) {
; X32-LABEL: test__blsr_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: subl $1, %eax
@@ -121,7 +121,7 @@ define i32 @test__blsr_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__blsr_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: subl $1, %eax
; X64-NEXT: andl %edi, %eax
@@ -133,11 +133,11 @@ define i32 @test__blsr_u32(i32 %a0) {
define i32 @test__tzcnt_u32(i32 %a0) {
; X32-LABEL: test__tzcnt_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: cmpl $0, %eax
; X32-NEXT: jne .LBB6_1
-; X32-NEXT: # BB#2:
+; X32-NEXT: # %bb.2:
; X32-NEXT: movl $32, %eax
; X32-NEXT: retl
; X32-NEXT: .LBB6_1:
@@ -145,7 +145,7 @@ define i32 @test__tzcnt_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__tzcnt_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $32, %ecx
; X64-NEXT: tzcntl %edi, %eax
; X64-NEXT: cmovbl %ecx, %eax
@@ -162,12 +162,12 @@ define i32 @test__tzcnt_u32(i32 %a0) {
define i16 @test_tzcnt_u16(i16 %a0) {
; X32-LABEL: test_tzcnt_u16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzwl %ax, %ecx
; X32-NEXT: cmpl $0, %ecx
; X32-NEXT: jne .LBB7_1
-; X32-NEXT: # BB#2:
+; X32-NEXT: # %bb.2:
; X32-NEXT: movw $16, %ax
; X32-NEXT: retl
; X32-NEXT: .LBB7_1:
@@ -175,7 +175,7 @@ define i16 @test_tzcnt_u16(i16 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test_tzcnt_u16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movw $16, %cx
; X64-NEXT: movzwl %di, %edx
; X64-NEXT: tzcntw %dx, %ax
@@ -191,14 +191,14 @@ define i16 @test_tzcnt_u16(i16 %a0) {
define i32 @test_andn_u32(i32 %a0, i32 %a1) {
; X32-LABEL: test_andn_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl $-1, %eax
; X32-NEXT: andl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_andn_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl $-1, %edi
; X64-NEXT: andl %esi, %edi
; X64-NEXT: movl %edi, %eax
@@ -210,7 +210,7 @@ define i32 @test_andn_u32(i32 %a0, i32 %a1) {
define i32 @test_bextr_u32(i32 %a0, i32 %a1, i32 %a2) {
; X32-LABEL: test_bextr_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: andl $255, %ecx
@@ -221,7 +221,7 @@ define i32 @test_bextr_u32(i32 %a0, i32 %a1, i32 %a2) {
; X32-NEXT: retl
;
; X64-LABEL: test_bextr_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $255, %esi
; X64-NEXT: andl $255, %edx
; X64-NEXT: shll $8, %edx
@@ -238,7 +238,7 @@ define i32 @test_bextr_u32(i32 %a0, i32 %a1, i32 %a2) {
define i32 @test_blsi_u32(i32 %a0) {
; X32-LABEL: test_blsi_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: subl %ecx, %eax
@@ -246,7 +246,7 @@ define i32 @test_blsi_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test_blsi_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: andl %edi, %eax
@@ -258,7 +258,7 @@ define i32 @test_blsi_u32(i32 %a0) {
define i32 @test_blsmsk_u32(i32 %a0) {
; X32-LABEL: test_blsmsk_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: subl $1, %eax
@@ -266,7 +266,7 @@ define i32 @test_blsmsk_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test_blsmsk_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: subl $1, %eax
; X64-NEXT: xorl %edi, %eax
@@ -278,7 +278,7 @@ define i32 @test_blsmsk_u32(i32 %a0) {
define i32 @test_blsr_u32(i32 %a0) {
; X32-LABEL: test_blsr_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: subl $1, %eax
@@ -286,7 +286,7 @@ define i32 @test_blsr_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test_blsr_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: subl $1, %eax
; X64-NEXT: andl %edi, %eax
@@ -298,11 +298,11 @@ define i32 @test_blsr_u32(i32 %a0) {
define i32 @test_tzcnt_u32(i32 %a0) {
; X32-LABEL: test_tzcnt_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: cmpl $0, %eax
; X32-NEXT: jne .LBB13_1
-; X32-NEXT: # BB#2:
+; X32-NEXT: # %bb.2:
; X32-NEXT: movl $32, %eax
; X32-NEXT: retl
; X32-NEXT: .LBB13_1:
@@ -310,7 +310,7 @@ define i32 @test_tzcnt_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test_tzcnt_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $32, %ecx
; X64-NEXT: tzcntl %edi, %eax
; X64-NEXT: cmovbl %ecx, %eax
diff --git a/test/CodeGen/X86/bmi-schedule.ll b/test/CodeGen/X86/bmi-schedule.ll
index d42548110ec..58ed1dc8565 100644
--- a/test/CodeGen/X86/bmi-schedule.ll
+++ b/test/CodeGen/X86/bmi-schedule.ll
@@ -9,7 +9,7 @@
define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; GENERIC-LABEL: test_andn_i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andnl %esi, %edi, %eax # sched: [1:0.33]
; GENERIC-NEXT: notl %edi # sched: [1:0.33]
; GENERIC-NEXT: andw (%rdx), %di # sched: [6:0.50]
@@ -18,7 +18,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andn_i16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: andnl %esi, %edi, %eax # sched: [1:0.50]
; HASWELL-NEXT: notl %edi # sched: [1:0.25]
; HASWELL-NEXT: andw (%rdx), %di # sched: [1:0.50]
@@ -27,7 +27,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andn_i16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: andnl %esi, %edi, %eax # sched: [1:0.50]
; BROADWELL-NEXT: notl %edi # sched: [1:0.25]
; BROADWELL-NEXT: andw (%rdx), %di # sched: [6:0.50]
@@ -36,7 +36,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andn_i16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: andnl %esi, %edi, %eax # sched: [1:0.50]
; SKYLAKE-NEXT: notl %edi # sched: [1:0.25]
; SKYLAKE-NEXT: andw (%rdx), %di # sched: [6:0.50]
@@ -45,7 +45,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andn_i16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: andnl %esi, %edi, %eax # sched: [1:0.50]
; BTVER2-NEXT: notl %edi # sched: [1:0.50]
; BTVER2-NEXT: andw (%rdx), %di # sched: [4:1.00]
@@ -54,7 +54,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andn_i16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: andnl %esi, %edi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: notl %edi # sched: [1:0.25]
; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
@@ -71,42 +71,42 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
define i32 @test_andn_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_andn_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.33]
; GENERIC-NEXT: andnl (%rdx), %edi, %eax # sched: [5:0.50]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andn_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.50]
; HASWELL-NEXT: andnl (%rdx), %edi, %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andn_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.50]
; BROADWELL-NEXT: andnl (%rdx), %edi, %eax # sched: [6:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andn_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.50]
; SKYLAKE-NEXT: andnl (%rdx), %edi, %eax # sched: [6:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andn_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: andnl (%rdx), %edi, %eax # sched: [4:1.00]
; BTVER2-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.50]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andn_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: andnl (%rdx), %edi, %eax # sched: [5:0.50]
; ZNVER1-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -121,42 +121,42 @@ define i32 @test_andn_i32(i32 %a0, i32 %a1, i32 *%a2) {
define i64 @test_andn_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_andn_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.33]
; GENERIC-NEXT: andnq (%rdx), %rdi, %rax # sched: [5:0.50]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andn_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.50]
; HASWELL-NEXT: andnq (%rdx), %rdi, %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andn_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.50]
; BROADWELL-NEXT: andnq (%rdx), %rdi, %rax # sched: [6:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andn_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.50]
; SKYLAKE-NEXT: andnq (%rdx), %rdi, %rax # sched: [6:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andn_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: andnq (%rdx), %rdi, %rax # sched: [4:1.00]
; BTVER2-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.50]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andn_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: andnq (%rdx), %rdi, %rax # sched: [5:0.50]
; ZNVER1-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -171,42 +171,42 @@ define i64 @test_andn_i64(i64 %a0, i64 %a1, i64 *%a2) {
define i32 @test_bextr_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_bextr_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: bextrl %edi, (%rdx), %ecx
; GENERIC-NEXT: bextrl %edi, %esi, %eax
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bextr_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: bextrl %edi, (%rdx), %ecx # sched: [2:0.50]
; HASWELL-NEXT: bextrl %edi, %esi, %eax # sched: [2:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bextr_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: bextrl %edi, (%rdx), %ecx # sched: [7:0.50]
; BROADWELL-NEXT: bextrl %edi, %esi, %eax # sched: [2:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bextr_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: bextrl %edi, (%rdx), %ecx # sched: [7:0.50]
; SKYLAKE-NEXT: bextrl %edi, %esi, %eax # sched: [2:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bextr_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: bextrl %edi, (%rdx), %ecx
; BTVER2-NEXT: bextrl %edi, %esi, %eax
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bextr_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: bextrl %edi, (%rdx), %ecx # sched: [5:0.50]
; ZNVER1-NEXT: bextrl %edi, %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -221,42 +221,42 @@ declare i32 @llvm.x86.bmi.bextr.32(i32, i32)
define i64 @test_bextr_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_bextr_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: bextrq %rdi, (%rdx), %rcx
; GENERIC-NEXT: bextrq %rdi, %rsi, %rax
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bextr_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [2:0.50]
; HASWELL-NEXT: bextrq %rdi, %rsi, %rax # sched: [2:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bextr_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [7:0.50]
; BROADWELL-NEXT: bextrq %rdi, %rsi, %rax # sched: [2:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bextr_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [7:0.50]
; SKYLAKE-NEXT: bextrq %rdi, %rsi, %rax # sched: [2:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bextr_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: bextrq %rdi, (%rdx), %rcx
; BTVER2-NEXT: bextrq %rdi, %rsi, %rax
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bextr_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [5:0.50]
; ZNVER1-NEXT: bextrq %rdi, %rsi, %rax # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -271,42 +271,42 @@ declare i64 @llvm.x86.bmi.bextr.64(i64, i64)
define i32 @test_blsi_i32(i32 %a0, i32 *%a1) {
; GENERIC-LABEL: test_blsi_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: blsil (%rsi), %ecx
; GENERIC-NEXT: blsil %edi, %eax
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blsi_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: blsil (%rsi), %ecx # sched: [1:0.50]
; HASWELL-NEXT: blsil %edi, %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blsi_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: blsil (%rsi), %ecx # sched: [6:0.50]
; BROADWELL-NEXT: blsil %edi, %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blsi_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: blsil (%rsi), %ecx # sched: [6:0.50]
; SKYLAKE-NEXT: blsil %edi, %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blsi_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: blsil (%rsi), %ecx
; BTVER2-NEXT: blsil %edi, %eax
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blsi_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: blsil (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: blsil %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -322,42 +322,42 @@ define i32 @test_blsi_i32(i32 %a0, i32 *%a1) {
define i64 @test_blsi_i64(i64 %a0, i64 *%a1) {
; GENERIC-LABEL: test_blsi_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: blsiq (%rsi), %rcx
; GENERIC-NEXT: blsiq %rdi, %rax
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blsi_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: blsiq (%rsi), %rcx # sched: [1:0.50]
; HASWELL-NEXT: blsiq %rdi, %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blsi_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: blsiq (%rsi), %rcx # sched: [6:0.50]
; BROADWELL-NEXT: blsiq %rdi, %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blsi_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: blsiq (%rsi), %rcx # sched: [6:0.50]
; SKYLAKE-NEXT: blsiq %rdi, %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blsi_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: blsiq (%rsi), %rcx
; BTVER2-NEXT: blsiq %rdi, %rax
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blsi_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: blsiq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: blsiq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -373,42 +373,42 @@ define i64 @test_blsi_i64(i64 %a0, i64 *%a1) {
define i32 @test_blsmsk_i32(i32 %a0, i32 *%a1) {
; GENERIC-LABEL: test_blsmsk_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: blsmskl (%rsi), %ecx
; GENERIC-NEXT: blsmskl %edi, %eax
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blsmsk_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: blsmskl (%rsi), %ecx # sched: [1:0.50]
; HASWELL-NEXT: blsmskl %edi, %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blsmsk_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: blsmskl (%rsi), %ecx # sched: [6:0.50]
; BROADWELL-NEXT: blsmskl %edi, %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blsmsk_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: blsmskl (%rsi), %ecx # sched: [6:0.50]
; SKYLAKE-NEXT: blsmskl %edi, %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blsmsk_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: blsmskl (%rsi), %ecx
; BTVER2-NEXT: blsmskl %edi, %eax
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blsmsk_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: blsmskl (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: blsmskl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -424,42 +424,42 @@ define i32 @test_blsmsk_i32(i32 %a0, i32 *%a1) {
define i64 @test_blsmsk_i64(i64 %a0, i64 *%a1) {
; GENERIC-LABEL: test_blsmsk_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: blsmskq (%rsi), %rcx
; GENERIC-NEXT: blsmskq %rdi, %rax
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blsmsk_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: blsmskq (%rsi), %rcx # sched: [1:0.50]
; HASWELL-NEXT: blsmskq %rdi, %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blsmsk_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: blsmskq (%rsi), %rcx # sched: [6:0.50]
; BROADWELL-NEXT: blsmskq %rdi, %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blsmsk_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: blsmskq (%rsi), %rcx # sched: [6:0.50]
; SKYLAKE-NEXT: blsmskq %rdi, %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blsmsk_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: blsmskq (%rsi), %rcx
; BTVER2-NEXT: blsmskq %rdi, %rax
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blsmsk_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: blsmskq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: blsmskq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -475,42 +475,42 @@ define i64 @test_blsmsk_i64(i64 %a0, i64 *%a1) {
define i32 @test_blsr_i32(i32 %a0, i32 *%a1) {
; GENERIC-LABEL: test_blsr_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: blsrl (%rsi), %ecx
; GENERIC-NEXT: blsrl %edi, %eax
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blsr_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: blsrl (%rsi), %ecx # sched: [1:0.50]
; HASWELL-NEXT: blsrl %edi, %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blsr_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: blsrl (%rsi), %ecx # sched: [6:0.50]
; BROADWELL-NEXT: blsrl %edi, %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blsr_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: blsrl (%rsi), %ecx # sched: [6:0.50]
; SKYLAKE-NEXT: blsrl %edi, %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blsr_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: blsrl (%rsi), %ecx
; BTVER2-NEXT: blsrl %edi, %eax
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blsr_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: blsrl (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: blsrl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -526,42 +526,42 @@ define i32 @test_blsr_i32(i32 %a0, i32 *%a1) {
define i64 @test_blsr_i64(i64 %a0, i64 *%a1) {
; GENERIC-LABEL: test_blsr_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: blsrq (%rsi), %rcx
; GENERIC-NEXT: blsrq %rdi, %rax
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blsr_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: blsrq (%rsi), %rcx # sched: [1:0.50]
; HASWELL-NEXT: blsrq %rdi, %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blsr_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: blsrq (%rsi), %rcx # sched: [6:0.50]
; BROADWELL-NEXT: blsrq %rdi, %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blsr_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: blsrq (%rsi), %rcx # sched: [6:0.50]
; SKYLAKE-NEXT: blsrq %rdi, %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blsr_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: blsrq (%rsi), %rcx
; BTVER2-NEXT: blsrq %rdi, %rax
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blsr_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: blsrq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: blsrq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -577,7 +577,7 @@ define i64 @test_blsr_i64(i64 %a0, i64 *%a1) {
define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-LABEL: test_cttz_i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: tzcntw (%rsi), %cx
; GENERIC-NEXT: tzcntw %di, %ax
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
@@ -585,7 +585,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cttz_i16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: tzcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -593,7 +593,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cttz_i16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -601,7 +601,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cttz_i16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -609,7 +609,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cttz_i16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: tzcntw (%rsi), %cx
; BTVER2-NEXT: tzcntw %di, %ax
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
@@ -617,7 +617,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cttz_i16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: tzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -633,42 +633,42 @@ declare i16 @llvm.cttz.i16(i16, i1)
define i32 @test_cttz_i32(i32 %a0, i32 *%a1) {
; GENERIC-LABEL: test_cttz_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: tzcntl (%rsi), %ecx
; GENERIC-NEXT: tzcntl %edi, %eax
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cttz_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: tzcntl (%rsi), %ecx # sched: [3:1.00]
; HASWELL-NEXT: tzcntl %edi, %eax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cttz_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: tzcntl (%rsi), %ecx # sched: [8:1.00]
; BROADWELL-NEXT: tzcntl %edi, %eax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cttz_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: tzcntl (%rsi), %ecx # sched: [8:1.00]
; SKYLAKE-NEXT: tzcntl %edi, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cttz_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: tzcntl (%rsi), %ecx
; BTVER2-NEXT: tzcntl %edi, %eax
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cttz_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: tzcntl (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -683,42 +683,42 @@ declare i32 @llvm.cttz.i32(i32, i1)
define i64 @test_cttz_i64(i64 %a0, i64 *%a1) {
; GENERIC-LABEL: test_cttz_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: tzcntq (%rsi), %rcx
; GENERIC-NEXT: tzcntq %rdi, %rax
; GENERIC-NEXT: orq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cttz_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: tzcntq (%rsi), %rcx # sched: [3:1.00]
; HASWELL-NEXT: tzcntq %rdi, %rax # sched: [3:1.00]
; HASWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cttz_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: tzcntq (%rsi), %rcx # sched: [8:1.00]
; BROADWELL-NEXT: tzcntq %rdi, %rax # sched: [3:1.00]
; BROADWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cttz_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: tzcntq (%rsi), %rcx # sched: [8:1.00]
; SKYLAKE-NEXT: tzcntq %rdi, %rax # sched: [3:1.00]
; SKYLAKE-NEXT: orq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cttz_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: tzcntq (%rsi), %rcx
; BTVER2-NEXT: tzcntq %rdi, %rax
; BTVER2-NEXT: orq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cttz_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: tzcntq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
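
A note on the scheduling annotations matched above: the bracketed pair is printed as [Latency:ReciprocalThroughput], so "sched: [7:1.00]" claims a 7-cycle latency with one issue per cycle, while "[100:?]" is the fallback printed when the CPU's schedule model has no real entry for the instruction. A minimal sketch of the directive shape these *-schedule.ll tests rely on, assuming the -print-schedule RUN lines in use at the time (the exact lines below are illustrative, not quoted from this commit):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell -print-schedule \
; RUN:   | FileCheck %s --check-prefix=HASWELL
;
; HASWELL-LABEL: test_blsmsk_i32:
; HASWELL:       # %bb.0:
; HASWELL-NEXT:  blsmskl %edi, %eax # sched: [1:0.50]
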
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
index b2f0309e562..66c76131ba6 100644
--- a/test/CodeGen/X86/bmi.ll
+++ b/test/CodeGen/X86/bmi.ll
@@ -9,7 +9,7 @@ declare i64 @llvm.cttz.i64(i64, i1)
define i8 @t1(i8 %x) {
; CHECK-LABEL: t1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $256, %eax # imm = 0x100
; CHECK-NEXT: tzcntl %eax, %eax
@@ -21,7 +21,7 @@ define i8 @t1(i8 %x) {
define i16 @t2(i16 %x) {
; CHECK-LABEL: t2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzcntw %di, %ax
; CHECK-NEXT: retq
%tmp = tail call i16 @llvm.cttz.i16( i16 %x, i1 false )
@@ -30,7 +30,7 @@ define i16 @t2(i16 %x) {
define i32 @t3(i32 %x) {
; CHECK-LABEL: t3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzcntl %edi, %eax
; CHECK-NEXT: retq
%tmp = tail call i32 @llvm.cttz.i32( i32 %x, i1 false )
@@ -39,7 +39,7 @@ define i32 @t3(i32 %x) {
define i32 @tzcnt32_load(i32* %x) {
; CHECK-LABEL: tzcnt32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzcntl (%rdi), %eax
; CHECK-NEXT: retq
%x1 = load i32, i32* %x
@@ -49,7 +49,7 @@ define i32 @tzcnt32_load(i32* %x) {
define i64 @t4(i64 %x) {
; CHECK-LABEL: t4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzcntq %rdi, %rax
; CHECK-NEXT: retq
%tmp = tail call i64 @llvm.cttz.i64( i64 %x, i1 false )
@@ -58,7 +58,7 @@ define i64 @t4(i64 %x) {
define i8 @t5(i8 %x) {
; CHECK-LABEL: t5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: tzcntl %eax, %eax
; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -69,7 +69,7 @@ define i8 @t5(i8 %x) {
define i16 @t6(i16 %x) {
; CHECK-LABEL: t6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzcntw %di, %ax
; CHECK-NEXT: retq
%tmp = tail call i16 @llvm.cttz.i16( i16 %x, i1 true )
@@ -78,7 +78,7 @@ define i16 @t6(i16 %x) {
define i32 @t7(i32 %x) {
; CHECK-LABEL: t7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzcntl %edi, %eax
; CHECK-NEXT: retq
%tmp = tail call i32 @llvm.cttz.i32( i32 %x, i1 true )
@@ -87,7 +87,7 @@ define i32 @t7(i32 %x) {
define i64 @t8(i64 %x) {
; CHECK-LABEL: t8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzcntq %rdi, %rax
; CHECK-NEXT: retq
%tmp = tail call i64 @llvm.cttz.i64( i64 %x, i1 true )
@@ -96,7 +96,7 @@ define i64 @t8(i64 %x) {
define i32 @andn32(i32 %x, i32 %y) {
; CHECK-LABEL: andn32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnl %esi, %edi, %eax
; CHECK-NEXT: retq
%tmp1 = xor i32 %x, -1
@@ -106,7 +106,7 @@ define i32 @andn32(i32 %x, i32 %y) {
define i32 @andn32_load(i32 %x, i32* %y) {
; CHECK-LABEL: andn32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnl (%rsi), %edi, %eax
; CHECK-NEXT: retq
%y1 = load i32, i32* %y
@@ -117,7 +117,7 @@ define i32 @andn32_load(i32 %x, i32* %y) {
define i64 @andn64(i64 %x, i64 %y) {
; CHECK-LABEL: andn64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnq %rsi, %rdi, %rax
; CHECK-NEXT: retq
%tmp1 = xor i64 %x, -1
@@ -128,7 +128,7 @@ define i64 @andn64(i64 %x, i64 %y) {
; Don't choose a 'test' if an 'andn' can be used.
define i1 @andn_cmp(i32 %x, i32 %y) {
; CHECK-LABEL: andn_cmp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnl %esi, %edi, %eax
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
@@ -141,7 +141,7 @@ define i1 @andn_cmp(i32 %x, i32 %y) {
; Recognize a disguised andn in the following 4 tests.
define i1 @and_cmp1(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnl %esi, %edi, %eax
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
@@ -152,7 +152,7 @@ define i1 @and_cmp1(i32 %x, i32 %y) {
define i1 @and_cmp2(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnl %esi, %edi, %eax
; CHECK-NEXT: setne %al
; CHECK-NEXT: retq
@@ -163,7 +163,7 @@ define i1 @and_cmp2(i32 %x, i32 %y) {
define i1 @and_cmp3(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnl %esi, %edi, %eax
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
@@ -174,7 +174,7 @@ define i1 @and_cmp3(i32 %x, i32 %y) {
define i1 @and_cmp4(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnl %esi, %edi, %eax
; CHECK-NEXT: setne %al
; CHECK-NEXT: retq
@@ -187,7 +187,7 @@ define i1 @and_cmp4(i32 %x, i32 %y) {
; even though the BMI instruction doesn't have an immediate form.
define i1 @and_cmp_const(i32 %x) {
; CHECK-LABEL: and_cmp_const:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $43, %eax
; CHECK-NEXT: andnl %eax, %edi, %eax
; CHECK-NEXT: sete %al
@@ -200,7 +200,7 @@ define i1 @and_cmp_const(i32 %x) {
; But don't use 'andn' if the mask is a power-of-two.
define i1 @and_cmp_const_power_of_two(i32 %x, i32 %y) {
; CHECK-LABEL: and_cmp_const_power_of_two:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
@@ -213,7 +213,7 @@ define i1 @and_cmp_const_power_of_two(i32 %x, i32 %y) {
; Don't transform to 'andn' if there's another use of the 'and'.
define i32 @and_cmp_not_one_use(i32 %x) {
; CHECK-LABEL: and_cmp_not_one_use:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $37, %edi
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $37, %edi
@@ -230,7 +230,7 @@ define i32 @and_cmp_not_one_use(i32 %x) {
; Verify that we're not transforming invalid comparison predicates.
define i1 @not_an_andn1(i32 %x, i32 %y) {
; CHECK-LABEL: not_an_andn1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl %esi, %edi
; CHECK-NEXT: cmpl %edi, %esi
; CHECK-NEXT: setg %al
@@ -242,7 +242,7 @@ define i1 @not_an_andn1(i32 %x, i32 %y) {
define i1 @not_an_andn2(i32 %x, i32 %y) {
; CHECK-LABEL: not_an_andn2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl %esi, %edi
; CHECK-NEXT: cmpl %edi, %esi
; CHECK-NEXT: setbe %al
@@ -255,7 +255,7 @@ define i1 @not_an_andn2(i32 %x, i32 %y) {
; Don't choose a 'test' if an 'andn' can be used.
define i1 @andn_cmp_swap_ops(i64 %x, i64 %y) {
; CHECK-LABEL: andn_cmp_swap_ops:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnq %rsi, %rdi, %rax
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
@@ -268,7 +268,7 @@ define i1 @andn_cmp_swap_ops(i64 %x, i64 %y) {
; Use a 'test' (not an 'and') because 'andn' only works for i32/i64.
define i1 @andn_cmp_i8(i8 %x, i8 %y) {
; CHECK-LABEL: andn_cmp_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: notb %sil
; CHECK-NEXT: testb %sil, %dil
; CHECK-NEXT: sete %al
@@ -281,7 +281,7 @@ define i1 @andn_cmp_i8(i8 %x, i8 %y) {
define i32 @bextr32(i32 %x, i32 %y) {
; CHECK-LABEL: bextr32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextrl %esi, %edi, %eax
; CHECK-NEXT: retq
%tmp = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x, i32 %y)
@@ -290,7 +290,7 @@ define i32 @bextr32(i32 %x, i32 %y) {
define i32 @bextr32_load(i32* %x, i32 %y) {
; CHECK-LABEL: bextr32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextrl %esi, (%rdi), %eax
; CHECK-NEXT: retq
%x1 = load i32, i32* %x
@@ -302,7 +302,7 @@ declare i32 @llvm.x86.bmi.bextr.32(i32, i32)
define i32 @bextr32b(i32 %x) uwtable ssp {
; CHECK-LABEL: bextr32b:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $3076, %eax # imm = 0xC04
; CHECK-NEXT: bextrl %eax, %edi, %eax
; CHECK-NEXT: retq
@@ -314,7 +314,7 @@ define i32 @bextr32b(i32 %x) uwtable ssp {
; Make sure we still use AH subreg trick to extract 15:8
define i32 @bextr32_subreg(i32 %x) uwtable ssp {
; CHECK-LABEL: bextr32_subreg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: retq
@@ -325,7 +325,7 @@ define i32 @bextr32_subreg(i32 %x) uwtable ssp {
define i32 @bextr32b_load(i32* %x) uwtable ssp {
; CHECK-LABEL: bextr32b_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $3076, %eax # imm = 0xC04
; CHECK-NEXT: bextrl %eax, (%rdi), %eax
; CHECK-NEXT: retq
@@ -338,7 +338,7 @@ define i32 @bextr32b_load(i32* %x) uwtable ssp {
; PR34042
define i32 @bextr32c(i32 %x, i16 zeroext %y) {
; CHECK-LABEL: bextr32c:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movswl %si, %eax
; CHECK-NEXT: bextrl %eax, %edi, %eax
; CHECK-NEXT: retq
@@ -349,7 +349,7 @@ define i32 @bextr32c(i32 %x, i16 zeroext %y) {
define i64 @bextr64(i64 %x, i64 %y) {
; CHECK-LABEL: bextr64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextrq %rsi, %rdi, %rax
; CHECK-NEXT: retq
%tmp = tail call i64 @llvm.x86.bmi.bextr.64(i64 %x, i64 %y)
@@ -360,7 +360,7 @@ declare i64 @llvm.x86.bmi.bextr.64(i64, i64)
define i64 @bextr64b(i64 %x) uwtable ssp {
; CHECK-LABEL: bextr64b:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $3076, %eax # imm = 0xC04
; CHECK-NEXT: bextrl %eax, %edi, %eax
; CHECK-NEXT: retq
@@ -372,7 +372,7 @@ define i64 @bextr64b(i64 %x) uwtable ssp {
; Make sure we still use the AH subreg trick to extract 15:8
define i64 @bextr64_subreg(i64 %x) uwtable ssp {
; CHECK-LABEL: bextr64_subreg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: retq
@@ -383,7 +383,7 @@ define i64 @bextr64_subreg(i64 %x) uwtable ssp {
define i64 @bextr64b_load(i64* %x) {
; CHECK-LABEL: bextr64b_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $3076, %eax # imm = 0xC04
; CHECK-NEXT: bextrl %eax, (%rdi), %eax
; CHECK-NEXT: retq
@@ -396,7 +396,7 @@ define i64 @bextr64b_load(i64* %x) {
; PR34042
define i64 @bextr64c(i64 %x, i32 %y) {
; CHECK-LABEL: bextr64c:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movslq %esi, %rax
; CHECK-NEXT: bextrq %rax, %rdi, %rax
; CHECK-NEXT: retq
@@ -407,7 +407,7 @@ define i64 @bextr64c(i64 %x, i32 %y) {
define i64 @bextr64d(i64 %a) {
; CHECK-LABEL: bextr64d:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $8450, %eax # imm = 0x2102
; CHECK-NEXT: bextrq %rax, %rdi, %rax
; CHECK-NEXT: retq
@@ -419,7 +419,7 @@ entry:
define i32 @non_bextr32(i32 %x) {
; CHECK-LABEL: non_bextr32:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrl $2, %edi
; CHECK-NEXT: andl $111, %edi
; CHECK-NEXT: movl %edi, %eax
@@ -432,7 +432,7 @@ entry:
define i64 @non_bextr64(i64 %x) {
; CHECK-LABEL: non_bextr64:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrq $2, %rdi
; CHECK-NEXT: movabsq $8589934590, %rax # imm = 0x1FFFFFFFE
; CHECK-NEXT: andq %rdi, %rax
@@ -445,7 +445,7 @@ entry:
define i32 @bzhi32b(i32 %x, i8 zeroext %index) {
; BMI1-LABEL: bzhi32b:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $1, %eax
; BMI1-NEXT: movl %esi, %ecx
; BMI1-NEXT: shll %cl, %eax
@@ -454,7 +454,7 @@ define i32 @bzhi32b(i32 %x, i8 zeroext %index) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi32b:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: bzhil %esi, %edi, %eax
; BMI2-NEXT: retq
entry:
@@ -467,7 +467,7 @@ entry:
define i32 @bzhi32b_load(i32* %w, i8 zeroext %index) {
; BMI1-LABEL: bzhi32b_load:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $1, %eax
; BMI1-NEXT: movl %esi, %ecx
; BMI1-NEXT: shll %cl, %eax
@@ -476,7 +476,7 @@ define i32 @bzhi32b_load(i32* %w, i8 zeroext %index) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi32b_load:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: bzhil %esi, (%rdi), %eax
; BMI2-NEXT: retq
entry:
@@ -490,7 +490,7 @@ entry:
define i32 @bzhi32c(i32 %x, i8 zeroext %index) {
; BMI1-LABEL: bzhi32c:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $1, %eax
; BMI1-NEXT: movl %esi, %ecx
; BMI1-NEXT: shll %cl, %eax
@@ -499,7 +499,7 @@ define i32 @bzhi32c(i32 %x, i8 zeroext %index) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi32c:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: bzhil %esi, %edi, %eax
; BMI2-NEXT: retq
entry:
@@ -512,7 +512,7 @@ entry:
define i32 @bzhi32d(i32 %a, i32 %b) {
; BMI1-LABEL: bzhi32d:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $32, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movl $-1, %eax
@@ -522,7 +522,7 @@ define i32 @bzhi32d(i32 %a, i32 %b) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi32d:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: bzhil %esi, %edi, %eax
; BMI2-NEXT: retq
entry:
@@ -534,7 +534,7 @@ entry:
define i32 @bzhi32e(i32 %a, i32 %b) {
; BMI1-LABEL: bzhi32e:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $32, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shll %cl, %edi
@@ -544,7 +544,7 @@ define i32 @bzhi32e(i32 %a, i32 %b) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi32e:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: bzhil %esi, %edi, %eax
; BMI2-NEXT: retq
entry:
@@ -556,7 +556,7 @@ entry:
define i64 @bzhi64b(i64 %x, i8 zeroext %index) {
; BMI1-LABEL: bzhi64b:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $1, %eax
; BMI1-NEXT: movl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rax
@@ -565,7 +565,7 @@ define i64 @bzhi64b(i64 %x, i8 zeroext %index) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64b:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
@@ -579,7 +579,7 @@ entry:
define i64 @bzhi64c(i64 %a, i64 %b) {
; BMI1-LABEL: bzhi64c:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movq $-1, %rax
@@ -589,7 +589,7 @@ define i64 @bzhi64c(i64 %a, i64 %b) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64c:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
@@ -601,7 +601,7 @@ entry:
define i64 @bzhi64d(i64 %a, i32 %b) {
; BMI1-LABEL: bzhi64d:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movq $-1, %rax
@@ -611,7 +611,7 @@ define i64 @bzhi64d(i64 %a, i32 %b) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64d:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
@@ -625,7 +625,7 @@ entry:
define i64 @bzhi64e(i64 %a, i64 %b) {
; BMI1-LABEL: bzhi64e:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rdi
@@ -635,7 +635,7 @@ define i64 @bzhi64e(i64 %a, i64 %b) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64e:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
@@ -647,7 +647,7 @@ entry:
define i64 @bzhi64f(i64 %a, i32 %b) {
; BMI1-LABEL: bzhi64f:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rdi
@@ -657,7 +657,7 @@ define i64 @bzhi64f(i64 %a, i32 %b) {
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64f:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
@@ -671,13 +671,13 @@ entry:
define i64 @bzhi64_constant_mask(i64 %x) {
; BMI1-LABEL: bzhi64_constant_mask:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $15872, %eax # imm = 0x3E00
; BMI1-NEXT: bextrq %rax, %rdi, %rax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64_constant_mask:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: movb $62, %al
; BMI2-NEXT: bzhiq %rax, %rdi, %rax
; BMI2-NEXT: retq
@@ -688,13 +688,13 @@ entry:
define i64 @bzhi64_constant_mask_load(i64* %x) {
; BMI1-LABEL: bzhi64_constant_mask_load:
-; BMI1: # BB#0: # %entry
+; BMI1: # %bb.0: # %entry
; BMI1-NEXT: movl $15872, %eax # imm = 0x3E00
; BMI1-NEXT: bextrq %rax, (%rdi), %rax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64_constant_mask_load:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: movb $62, %al
; BMI2-NEXT: bzhiq %rax, (%rdi), %rax
; BMI2-NEXT: retq
@@ -706,7 +706,7 @@ entry:
define i64 @bzhi64_small_constant_mask(i64 %x) {
; CHECK-LABEL: bzhi64_small_constant_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andl $2147483647, %edi # imm = 0x7FFFFFFF
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -717,7 +717,7 @@ entry:
define i32 @blsi32(i32 %x) {
; CHECK-LABEL: blsi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsil %edi, %eax
; CHECK-NEXT: retq
%tmp = sub i32 0, %x
@@ -727,7 +727,7 @@ define i32 @blsi32(i32 %x) {
define i32 @blsi32_load(i32* %x) {
; CHECK-LABEL: blsi32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsil (%rdi), %eax
; CHECK-NEXT: retq
%x1 = load i32, i32* %x
@@ -738,7 +738,7 @@ define i32 @blsi32_load(i32* %x) {
define i64 @blsi64(i64 %x) {
; CHECK-LABEL: blsi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsiq %rdi, %rax
; CHECK-NEXT: retq
%tmp = sub i64 0, %x
@@ -748,7 +748,7 @@ define i64 @blsi64(i64 %x) {
define i32 @blsmsk32(i32 %x) {
; CHECK-LABEL: blsmsk32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsmskl %edi, %eax
; CHECK-NEXT: retq
%tmp = sub i32 %x, 1
@@ -758,7 +758,7 @@ define i32 @blsmsk32(i32 %x) {
define i32 @blsmsk32_load(i32* %x) {
; CHECK-LABEL: blsmsk32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsmskl (%rdi), %eax
; CHECK-NEXT: retq
%x1 = load i32, i32* %x
@@ -769,7 +769,7 @@ define i32 @blsmsk32_load(i32* %x) {
define i64 @blsmsk64(i64 %x) {
; CHECK-LABEL: blsmsk64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsmskq %rdi, %rax
; CHECK-NEXT: retq
%tmp = sub i64 %x, 1
@@ -779,7 +779,7 @@ define i64 @blsmsk64(i64 %x) {
define i32 @blsr32(i32 %x) {
; CHECK-LABEL: blsr32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsrl %edi, %eax
; CHECK-NEXT: retq
%tmp = sub i32 %x, 1
@@ -789,7 +789,7 @@ define i32 @blsr32(i32 %x) {
define i32 @blsr32_load(i32* %x) {
; CHECK-LABEL: blsr32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsrl (%rdi), %eax
; CHECK-NEXT: retq
%x1 = load i32, i32* %x
@@ -800,7 +800,7 @@ define i32 @blsr32_load(i32* %x) {
define i64 @blsr64(i64 %x) {
; CHECK-LABEL: blsr64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsrq %rdi, %rax
; CHECK-NEXT: retq
%tmp = sub i64 %x, 1
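
These CHECK blocks are generated rather than written by hand, which is how a one-line asm-printer change fans out across every function in the file. A sketch of refreshing one such test, assuming the in-tree update_llc_test_checks.py utility (the invocation is an illustration, not part of this commit's diff):

; Illustrative regeneration step for an autogenerated llc test:
;   $ python utils/update_llc_test_checks.py test/CodeGen/X86/bmi.ll
; The script reruns each RUN line and rewrites every CHECK/CHECK-NEXT
; block, including the "# %bb.0:" basic-block comments matched above.
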
diff --git a/test/CodeGen/X86/bmi2-schedule.ll b/test/CodeGen/X86/bmi2-schedule.ll
index f645ddfefdb..1c36262cff5 100644
--- a/test/CodeGen/X86/bmi2-schedule.ll
+++ b/test/CodeGen/X86/bmi2-schedule.ll
@@ -8,42 +8,42 @@
define i32 @test_bzhi_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_bzhi_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: bzhil %edi, (%rdx), %ecx
; GENERIC-NEXT: bzhil %edi, %esi, %eax
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bzhi_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: bzhil %edi, (%rdx), %ecx # sched: [1:0.50]
; HASWELL-NEXT: bzhil %edi, %esi, %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bzhi_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: bzhil %edi, (%rdx), %ecx # sched: [6:0.50]
; BROADWELL-NEXT: bzhil %edi, %esi, %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bzhi_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: bzhil %edi, (%rdx), %ecx # sched: [6:0.50]
; SKYLAKE-NEXT: bzhil %edi, %esi, %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_bzhi_i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: bzhil %edi, (%rdx), %ecx # sched: [1:0.50]
; KNL-NEXT: bzhil %edi, %esi, %eax # sched: [1:0.50]
; KNL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_bzhi_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: bzhil %edi, (%rdx), %ecx # sched: [5:0.50]
; ZNVER1-NEXT: bzhil %edi, %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -58,42 +58,42 @@ declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)
define i64 @test_bzhi_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_bzhi_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: bzhiq %rdi, (%rdx), %rcx
; GENERIC-NEXT: bzhiq %rdi, %rsi, %rax
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bzhi_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [1:0.50]
; HASWELL-NEXT: bzhiq %rdi, %rsi, %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bzhi_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [6:0.50]
; BROADWELL-NEXT: bzhiq %rdi, %rsi, %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bzhi_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [6:0.50]
; SKYLAKE-NEXT: bzhiq %rdi, %rsi, %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_bzhi_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [1:0.50]
; KNL-NEXT: bzhiq %rdi, %rsi, %rax # sched: [1:0.50]
; KNL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_bzhi_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [5:0.50]
; ZNVER1-NEXT: bzhiq %rdi, %rsi, %rax # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -110,7 +110,7 @@ declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)
define i64 @test_mulx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_mulx_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movq %rdx, %rax # sched: [1:0.33]
; GENERIC-NEXT: movq %rdi, %rdx # sched: [1:0.33]
; GENERIC-NEXT: mulxq %rsi, %rsi, %rcx # sched: [3:1.00]
@@ -119,7 +119,7 @@ define i64 @test_mulx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulx_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movq %rdx, %rax # sched: [1:0.25]
; HASWELL-NEXT: movq %rdi, %rdx # sched: [1:0.25]
; HASWELL-NEXT: mulxq %rsi, %rsi, %rcx # sched: [4:1.00]
@@ -128,7 +128,7 @@ define i64 @test_mulx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mulx_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movq %rdx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: movq %rdi, %rdx # sched: [1:0.25]
; BROADWELL-NEXT: mulxq %rsi, %rsi, %rcx # sched: [4:1.00]
@@ -137,7 +137,7 @@ define i64 @test_mulx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mulx_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movq %rdx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: movq %rdi, %rdx # sched: [1:0.25]
; SKYLAKE-NEXT: mulxq %rsi, %rsi, %rcx # sched: [4:1.00]
@@ -146,7 +146,7 @@ define i64 @test_mulx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_mulx_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: movq %rdx, %rax # sched: [1:0.25]
; KNL-NEXT: movq %rdi, %rdx # sched: [1:0.25]
; KNL-NEXT: mulxq %rsi, %rsi, %rcx # sched: [4:1.00]
@@ -155,7 +155,7 @@ define i64 @test_mulx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_mulx_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movq %rdx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: movq %rdi, %rdx # sched: [1:0.25]
; ZNVER1-NEXT: mulxq %rsi, %rsi, %rcx # sched: [3:1.00]
@@ -178,42 +178,42 @@ define i64 @test_mulx_i64(i64 %a0, i64 %a1, i64 *%a2) {
define i32 @test_pdep_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_pdep_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pdepl (%rdx), %edi, %ecx
; GENERIC-NEXT: pdepl %esi, %edi, %eax
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pdep_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pdepl (%rdx), %edi, %ecx # sched: [3:1.00]
; HASWELL-NEXT: pdepl %esi, %edi, %eax # sched: [3:1.00]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pdep_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pdepl (%rdx), %edi, %ecx # sched: [8:1.00]
; BROADWELL-NEXT: pdepl %esi, %edi, %eax # sched: [3:1.00]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pdep_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pdepl (%rdx), %edi, %ecx # sched: [8:1.00]
; SKYLAKE-NEXT: pdepl %esi, %edi, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_pdep_i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: pdepl (%rdx), %edi, %ecx # sched: [3:1.00]
; KNL-NEXT: pdepl %esi, %edi, %eax # sched: [3:1.00]
; KNL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pdep_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx # sched: [100:?]
; ZNVER1-NEXT: pdepl %esi, %edi, %eax # sched: [100:?]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -228,42 +228,42 @@ declare i32 @llvm.x86.bmi.pdep.32(i32, i32)
define i64 @test_pdep_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_pdep_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pdepq (%rdx), %rdi, %rcx
; GENERIC-NEXT: pdepq %rsi, %rdi, %rax
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pdep_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [3:1.00]
; HASWELL-NEXT: pdepq %rsi, %rdi, %rax # sched: [3:1.00]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pdep_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [8:1.00]
; BROADWELL-NEXT: pdepq %rsi, %rdi, %rax # sched: [3:1.00]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pdep_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [8:1.00]
; SKYLAKE-NEXT: pdepq %rsi, %rdi, %rax # sched: [3:1.00]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_pdep_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [3:1.00]
; KNL-NEXT: pdepq %rsi, %rdi, %rax # sched: [3:1.00]
; KNL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pdep_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [100:?]
; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax # sched: [100:?]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -278,42 +278,42 @@ declare i64 @llvm.x86.bmi.pdep.64(i64, i64)
define i32 @test_pext_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_pext_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pextl (%rdx), %edi, %ecx
; GENERIC-NEXT: pextl %esi, %edi, %eax
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pext_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pextl (%rdx), %edi, %ecx # sched: [3:1.00]
; HASWELL-NEXT: pextl %esi, %edi, %eax # sched: [3:1.00]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pext_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pextl (%rdx), %edi, %ecx # sched: [8:1.00]
; BROADWELL-NEXT: pextl %esi, %edi, %eax # sched: [3:1.00]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pext_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pextl (%rdx), %edi, %ecx # sched: [8:1.00]
; SKYLAKE-NEXT: pextl %esi, %edi, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_pext_i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: pextl (%rdx), %edi, %ecx # sched: [3:1.00]
; KNL-NEXT: pextl %esi, %edi, %eax # sched: [3:1.00]
; KNL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pext_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx # sched: [100:?]
; ZNVER1-NEXT: pextl %esi, %edi, %eax # sched: [100:?]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -328,42 +328,42 @@ declare i32 @llvm.x86.bmi.pext.32(i32, i32)
define i64 @test_pext_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_pext_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pextq (%rdx), %rdi, %rcx
; GENERIC-NEXT: pextq %rsi, %rdi, %rax
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pext_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pextq (%rdx), %rdi, %rcx # sched: [3:1.00]
; HASWELL-NEXT: pextq %rsi, %rdi, %rax # sched: [3:1.00]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pext_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pextq (%rdx), %rdi, %rcx # sched: [8:1.00]
; BROADWELL-NEXT: pextq %rsi, %rdi, %rax # sched: [3:1.00]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pext_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pextq (%rdx), %rdi, %rcx # sched: [8:1.00]
; SKYLAKE-NEXT: pextq %rsi, %rdi, %rax # sched: [3:1.00]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_pext_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: pextq (%rdx), %rdi, %rcx # sched: [3:1.00]
; KNL-NEXT: pextq %rsi, %rdi, %rax # sched: [3:1.00]
; KNL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pext_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx # sched: [100:?]
; ZNVER1-NEXT: pextq %rsi, %rdi, %rax # sched: [100:?]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -378,42 +378,42 @@ declare i64 @llvm.x86.bmi.pext.64(i64, i64)
define i32 @test_rorx_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_rorx_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: rorxl $5, %edi, %ecx # sched: [1:0.50]
; GENERIC-NEXT: rorxl $5, (%rdx), %eax # sched: [5:0.50]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rorx_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: rorxl $5, %edi, %ecx # sched: [1:0.50]
; HASWELL-NEXT: rorxl $5, (%rdx), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_rorx_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: rorxl $5, %edi, %ecx # sched: [1:0.50]
; BROADWELL-NEXT: rorxl $5, (%rdx), %eax # sched: [6:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_rorx_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: rorxl $5, %edi, %ecx # sched: [1:0.50]
; SKYLAKE-NEXT: rorxl $5, (%rdx), %eax # sched: [6:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_rorx_i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: rorxl $5, %edi, %ecx # sched: [1:0.50]
; KNL-NEXT: rorxl $5, (%rdx), %eax # sched: [1:0.50]
; KNL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_rorx_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: rorxl $5, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: rorxl $5, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -431,42 +431,42 @@ define i32 @test_rorx_i32(i32 %a0, i32 %a1, i32 *%a2) {
define i64 @test_rorx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_rorx_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: rorxq $5, %rdi, %rcx # sched: [1:0.50]
; GENERIC-NEXT: rorxq $5, (%rdx), %rax # sched: [5:0.50]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rorx_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: rorxq $5, %rdi, %rcx # sched: [1:0.50]
; HASWELL-NEXT: rorxq $5, (%rdx), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_rorx_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: rorxq $5, %rdi, %rcx # sched: [1:0.50]
; BROADWELL-NEXT: rorxq $5, (%rdx), %rax # sched: [6:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_rorx_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: rorxq $5, %rdi, %rcx # sched: [1:0.50]
; SKYLAKE-NEXT: rorxq $5, (%rdx), %rax # sched: [6:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_rorx_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: rorxq $5, %rdi, %rcx # sched: [1:0.50]
; KNL-NEXT: rorxq $5, (%rdx), %rax # sched: [1:0.50]
; KNL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_rorx_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: rorxq $5, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: rorxq $5, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -484,42 +484,42 @@ define i64 @test_rorx_i64(i64 %a0, i64 %a1, i64 *%a2) {
define i32 @test_sarx_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_sarx_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sarxl %esi, %edi, %ecx # sched: [1:0.50]
; GENERIC-NEXT: sarxl %esi, (%rdx), %eax # sched: [5:0.50]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sarx_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: sarxl %esi, %edi, %ecx # sched: [1:0.50]
; HASWELL-NEXT: sarxl %esi, (%rdx), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sarx_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: sarxl %esi, %edi, %ecx # sched: [1:0.50]
; BROADWELL-NEXT: sarxl %esi, (%rdx), %eax # sched: [6:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sarx_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: sarxl %esi, %edi, %ecx # sched: [1:0.50]
; SKYLAKE-NEXT: sarxl %esi, (%rdx), %eax # sched: [6:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_sarx_i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: sarxl %esi, %edi, %ecx # sched: [1:0.50]
; KNL-NEXT: sarxl %esi, (%rdx), %eax # sched: [1:0.50]
; KNL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_sarx_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sarxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: sarxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -533,42 +533,42 @@ define i32 @test_sarx_i32(i32 %a0, i32 %a1, i32 *%a2) {
define i64 @test_sarx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_sarx_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sarxq %rsi, %rdi, %rcx # sched: [1:0.50]
; GENERIC-NEXT: sarxq %rsi, (%rdx), %rax # sched: [5:0.50]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sarx_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: sarxq %rsi, %rdi, %rcx # sched: [1:0.50]
; HASWELL-NEXT: sarxq %rsi, (%rdx), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sarx_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: sarxq %rsi, %rdi, %rcx # sched: [1:0.50]
; BROADWELL-NEXT: sarxq %rsi, (%rdx), %rax # sched: [6:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sarx_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: sarxq %rsi, %rdi, %rcx # sched: [1:0.50]
; SKYLAKE-NEXT: sarxq %rsi, (%rdx), %rax # sched: [6:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_sarx_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: sarxq %rsi, %rdi, %rcx # sched: [1:0.50]
; KNL-NEXT: sarxq %rsi, (%rdx), %rax # sched: [1:0.50]
; KNL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_sarx_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sarxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: sarxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -582,42 +582,42 @@ define i64 @test_sarx_i64(i64 %a0, i64 %a1, i64 *%a2) {
define i32 @test_shlx_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_shlx_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: shlxl %esi, %edi, %ecx # sched: [1:0.50]
; GENERIC-NEXT: shlxl %esi, (%rdx), %eax # sched: [5:0.50]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shlx_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: shlxl %esi, %edi, %ecx # sched: [1:0.50]
; HASWELL-NEXT: shlxl %esi, (%rdx), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shlx_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: shlxl %esi, %edi, %ecx # sched: [1:0.50]
; BROADWELL-NEXT: shlxl %esi, (%rdx), %eax # sched: [6:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shlx_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: shlxl %esi, %edi, %ecx # sched: [1:0.50]
; SKYLAKE-NEXT: shlxl %esi, (%rdx), %eax # sched: [6:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_shlx_i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: shlxl %esi, %edi, %ecx # sched: [1:0.50]
; KNL-NEXT: shlxl %esi, (%rdx), %eax # sched: [1:0.50]
; KNL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_shlx_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: shlxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: shlxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -631,42 +631,42 @@ define i32 @test_shlx_i32(i32 %a0, i32 %a1, i32 *%a2) {
define i64 @test_shlx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_shlx_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: shlxq %rsi, %rdi, %rcx # sched: [1:0.50]
; GENERIC-NEXT: shlxq %rsi, (%rdx), %rax # sched: [5:0.50]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shlx_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: shlxq %rsi, %rdi, %rcx # sched: [1:0.50]
; HASWELL-NEXT: shlxq %rsi, (%rdx), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shlx_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: shlxq %rsi, %rdi, %rcx # sched: [1:0.50]
; BROADWELL-NEXT: shlxq %rsi, (%rdx), %rax # sched: [6:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shlx_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: shlxq %rsi, %rdi, %rcx # sched: [1:0.50]
; SKYLAKE-NEXT: shlxq %rsi, (%rdx), %rax # sched: [6:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_shlx_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: shlxq %rsi, %rdi, %rcx # sched: [1:0.50]
; KNL-NEXT: shlxq %rsi, (%rdx), %rax # sched: [1:0.50]
; KNL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_shlx_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: shlxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: shlxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -680,42 +680,42 @@ define i64 @test_shlx_i64(i64 %a0, i64 %a1, i64 *%a2) {
define i32 @test_shrx_i32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_shrx_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: shrxl %esi, %edi, %ecx # sched: [1:0.50]
; GENERIC-NEXT: shrxl %esi, (%rdx), %eax # sched: [5:0.50]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shrx_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: shrxl %esi, %edi, %ecx # sched: [1:0.50]
; HASWELL-NEXT: shrxl %esi, (%rdx), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shrx_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: shrxl %esi, %edi, %ecx # sched: [1:0.50]
; BROADWELL-NEXT: shrxl %esi, (%rdx), %eax # sched: [6:0.50]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shrx_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: shrxl %esi, %edi, %ecx # sched: [1:0.50]
; SKYLAKE-NEXT: shrxl %esi, (%rdx), %eax # sched: [6:0.50]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_shrx_i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: shrxl %esi, %edi, %ecx # sched: [1:0.50]
; KNL-NEXT: shrxl %esi, (%rdx), %eax # sched: [1:0.50]
; KNL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_shrx_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: shrxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: shrxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -729,42 +729,42 @@ define i32 @test_shrx_i32(i32 %a0, i32 %a1, i32 *%a2) {
define i64 @test_shrx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_shrx_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: shrxq %rsi, %rdi, %rcx # sched: [1:0.50]
; GENERIC-NEXT: shrxq %rsi, (%rdx), %rax # sched: [5:0.50]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shrx_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: shrxq %rsi, %rdi, %rcx # sched: [1:0.50]
; HASWELL-NEXT: shrxq %rsi, (%rdx), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shrx_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: shrxq %rsi, %rdi, %rcx # sched: [1:0.50]
; BROADWELL-NEXT: shrxq %rsi, (%rdx), %rax # sched: [6:0.50]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shrx_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: shrxq %rsi, %rdi, %rcx # sched: [1:0.50]
; SKYLAKE-NEXT: shrxq %rsi, (%rdx), %rax # sched: [6:0.50]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_shrx_i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: shrxq %rsi, %rdi, %rcx # sched: [1:0.50]
; KNL-NEXT: shrxq %rsi, (%rdx), %rax # sched: [1:0.50]
; KNL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; KNL-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_shrx_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: shrxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: shrxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
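
For the pdep/pext checks above, a worked example of the bit semantics may help: pdep scatters the low-order bits of its source into the set-bit positions of its mask, and pext is the inverse gather (the values below are illustrative):

;   mask                = 0b11010   ; set bits at positions 1, 3, 4
;   pdep(0b011,   mask) = 0b01010   ; b0 -> bit 1, b1 -> bit 3, b2 -> bit 4
;   pext(0b01010, mask) = 0b011     ; gathers the same positions back out
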
diff --git a/test/CodeGen/X86/bmi2.ll b/test/CodeGen/X86/bmi2.ll
index 99a51a8183c..226bf6531fd 100644
--- a/test/CodeGen/X86/bmi2.ll
+++ b/test/CodeGen/X86/bmi2.ll
@@ -3,7 +3,7 @@
define i32 @bzhi32(i32 %x, i32 %y) {
; CHECK-LABEL: bzhi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bzhil %esi, %edi, %eax
; CHECK-NEXT: retq
%tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x, i32 %y)
@@ -12,7 +12,7 @@ define i32 @bzhi32(i32 %x, i32 %y) {
define i32 @bzhi32_load(i32* %x, i32 %y) {
; CHECK-LABEL: bzhi32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bzhil %esi, (%rdi), %eax
; CHECK-NEXT: retq
%x1 = load i32, i32* %x
@@ -24,7 +24,7 @@ declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)
define i64 @bzhi64(i64 %x, i64 %y) {
; CHECK-LABEL: bzhi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bzhiq %rsi, %rdi, %rax
; CHECK-NEXT: retq
%tmp = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %x, i64 %y)
@@ -35,7 +35,7 @@ declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)
define i32 @pdep32(i32 %x, i32 %y) {
; CHECK-LABEL: pdep32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pdepl %esi, %edi, %eax
; CHECK-NEXT: retq
%tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y)
@@ -44,7 +44,7 @@ define i32 @pdep32(i32 %x, i32 %y) {
define i32 @pdep32_load(i32 %x, i32* %y) {
; CHECK-LABEL: pdep32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pdepl (%rsi), %edi, %eax
; CHECK-NEXT: retq
%y1 = load i32, i32* %y
@@ -56,7 +56,7 @@ declare i32 @llvm.x86.bmi.pdep.32(i32, i32)
define i64 @pdep64(i64 %x, i64 %y) {
; CHECK-LABEL: pdep64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pdepq %rsi, %rdi, %rax
; CHECK-NEXT: retq
%tmp = tail call i64 @llvm.x86.bmi.pdep.64(i64 %x, i64 %y)
@@ -67,7 +67,7 @@ declare i64 @llvm.x86.bmi.pdep.64(i64, i64)
define i32 @pext32(i32 %x, i32 %y) {
; CHECK-LABEL: pext32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pextl %esi, %edi, %eax
; CHECK-NEXT: retq
%tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y)
@@ -76,7 +76,7 @@ define i32 @pext32(i32 %x, i32 %y) {
define i32 @pext32_load(i32 %x, i32* %y) {
; CHECK-LABEL: pext32_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pextl (%rsi), %edi, %eax
; CHECK-NEXT: retq
%y1 = load i32, i32* %y
@@ -88,7 +88,7 @@ declare i32 @llvm.x86.bmi.pext.32(i32, i32)
define i64 @pext64(i64 %x, i64 %y) {
; CHECK-LABEL: pext64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pextq %rsi, %rdi, %rax
; CHECK-NEXT: retq
%tmp = tail call i64 @llvm.x86.bmi.pext.64(i64 %x, i64 %y)
diff --git a/test/CodeGen/X86/bool-ext-inc.ll b/test/CodeGen/X86/bool-ext-inc.ll
index 7c1042878d5..d5711fdb3ca 100644
--- a/test/CodeGen/X86/bool-ext-inc.ll
+++ b/test/CodeGen/X86/bool-ext-inc.ll
@@ -5,7 +5,7 @@
define i32 @sext_inc(i1 zeroext %x) nounwind {
; CHECK-LABEL: sext_inc:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: retq
@@ -18,7 +18,7 @@ define i32 @sext_inc(i1 zeroext %x) nounwind {
define <4 x i32> @sext_inc_vec(<4 x i1> %x) nounwind {
; CHECK-LABEL: sext_inc_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -29,7 +29,7 @@ define <4 x i32> @sext_inc_vec(<4 x i1> %x) nounwind {
define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: cmpgt_sext_inc_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
@@ -42,7 +42,7 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: cmpne_sext_inc_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -54,7 +54,7 @@ define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
define <4 x i64> @cmpgt_sext_inc_vec256(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: cmpgt_sext_inc_vec256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0
@@ -67,7 +67,7 @@ define <4 x i64> @cmpgt_sext_inc_vec256(<4 x i64> %x, <4 x i64> %y) nounwind {
define i32 @bool_logic_and_math(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; CHECK-LABEL: bool_logic_and_math:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: sete %al
; CHECK-NEXT: cmpl %ecx, %edx
@@ -85,7 +85,7 @@ define i32 @bool_logic_and_math(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
define <4 x i32> @bool_logic_and_math_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind {
; CHECK-LABEL: bool_logic_and_math_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
diff --git a/test/CodeGen/X86/bool-simplify.ll b/test/CodeGen/X86/bool-simplify.ll
index 951a83c5c54..fbd39d27d89 100644
--- a/test/CodeGen/X86/bool-simplify.ll
+++ b/test/CodeGen/X86/bool-simplify.ll
@@ -3,7 +3,7 @@
define i32 @foo(<2 x i64> %c, i32 %a, i32 %b) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: ptest %xmm0, %xmm0
; CHECK-NEXT: cmovnel %esi, %edi
; CHECK-NEXT: movl %edi, %eax
@@ -16,10 +16,10 @@ define i32 @foo(<2 x i64> %c, i32 %a, i32 %b) {
define i32 @bar(<2 x i64> %c) {
; CHECK-LABEL: bar:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ptest %xmm0, %xmm0
; CHECK-NEXT: jne .LBB1_2
-; CHECK-NEXT: # BB#1: # %if-true-block
+; CHECK-NEXT: # %bb.1: # %if-true-block
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB1_2: # %endif-block
@@ -37,7 +37,7 @@ endif-block:
define i32 @bax(<2 x i64> %c) {
; CHECK-LABEL: bax:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ptest %xmm0, %xmm0
; CHECK-NEXT: sete %al
@@ -50,7 +50,7 @@ define i32 @bax(<2 x i64> %c) {
define i16 @rnd16(i16 %arg) nounwind {
; CHECK-LABEL: rnd16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: rdrandw %cx
; CHECK-NEXT: cmovbw %di, %ax
@@ -68,7 +68,7 @@ define i16 @rnd16(i16 %arg) nounwind {
define i32 @rnd32(i32 %arg) nounwind {
; CHECK-LABEL: rnd32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: rdrandl %ecx
; CHECK-NEXT: cmovbl %edi, %eax
@@ -85,7 +85,7 @@ define i32 @rnd32(i32 %arg) nounwind {
define i64 @rnd64(i64 %arg) nounwind {
; CHECK-LABEL: rnd64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: rdrandq %rcx
; CHECK-NEXT: cmovbq %rdi, %rax
@@ -102,7 +102,7 @@ define i64 @rnd64(i64 %arg) nounwind {
define i16 @seed16(i16 %arg) nounwind {
; CHECK-LABEL: seed16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: rdseedw %cx
; CHECK-NEXT: cmovbw %di, %ax
@@ -120,7 +120,7 @@ define i16 @seed16(i16 %arg) nounwind {
define i32 @seed32(i32 %arg) nounwind {
; CHECK-LABEL: seed32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: rdseedl %ecx
; CHECK-NEXT: cmovbl %edi, %eax
@@ -137,7 +137,7 @@ define i32 @seed32(i32 %arg) nounwind {
define i64 @seed64(i64 %arg) nounwind {
; CHECK-LABEL: seed64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: rdseedq %rcx
; CHECK-NEXT: cmovbq %rdi, %rax
diff --git a/test/CodeGen/X86/bool-vector.ll b/test/CodeGen/X86/bool-vector.ll
index 03f0debdf12..7a4af821de5 100644
--- a/test/CodeGen/X86/bool-vector.ll
+++ b/test/CodeGen/X86/bool-vector.ll
@@ -8,7 +8,7 @@
define i32 @PR15215_bad(<4 x i32> %input) {
; X32-LABEL: PR15215_bad:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movb {{[0-9]+}}(%esp), %dl
@@ -27,21 +27,21 @@ define i32 @PR15215_bad(<4 x i32> %input) {
; X32-NEXT: retl
;
; X32-SSE2-LABEL: PR15215_bad:
-; X32-SSE2: # BB#0: # %entry
+; X32-SSE2: # %bb.0: # %entry
; X32-SSE2-NEXT: pslld $31, %xmm0
; X32-SSE2-NEXT: psrad $31, %xmm0
; X32-SSE2-NEXT: movmskps %xmm0, %eax
; X32-SSE2-NEXT: retl
;
; X32-AVX2-LABEL: PR15215_bad:
-; X32-AVX2: # BB#0: # %entry
+; X32-AVX2: # %bb.0: # %entry
; X32-AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; X32-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; X32-AVX2-NEXT: vmovmskps %xmm0, %eax
; X32-AVX2-NEXT: retl
;
; X64-LABEL: PR15215_bad:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: addb %cl, %cl
; X64-NEXT: andb $1, %dl
; X64-NEXT: orb %cl, %dl
@@ -56,14 +56,14 @@ define i32 @PR15215_bad(<4 x i32> %input) {
; X64-NEXT: retq
;
; X64-SSE2-LABEL: PR15215_bad:
-; X64-SSE2: # BB#0: # %entry
+; X64-SSE2: # %bb.0: # %entry
; X64-SSE2-NEXT: pslld $31, %xmm0
; X64-SSE2-NEXT: psrad $31, %xmm0
; X64-SSE2-NEXT: movmskps %xmm0, %eax
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: PR15215_bad:
-; X64-AVX2: # BB#0: # %entry
+; X64-AVX2: # %bb.0: # %entry
; X64-AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovmskps %xmm0, %eax
@@ -77,7 +77,7 @@ entry:
define i32 @PR15215_good(<4 x i32> %input) {
; X32-LABEL: PR15215_good:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %esi
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %esi, -8
@@ -96,7 +96,7 @@ define i32 @PR15215_good(<4 x i32> %input) {
; X32-NEXT: retl
;
; X32-SSE2-LABEL: PR15215_good:
-; X32-SSE2: # BB#0: # %entry
+; X32-SSE2: # %bb.0: # %entry
; X32-SSE2-NEXT: pushl %esi
; X32-SSE2-NEXT: .cfi_def_cfa_offset 8
; X32-SSE2-NEXT: .cfi_offset %esi, -8
@@ -118,7 +118,7 @@ define i32 @PR15215_good(<4 x i32> %input) {
; X32-SSE2-NEXT: retl
;
; X32-AVX2-LABEL: PR15215_good:
-; X32-AVX2: # BB#0: # %entry
+; X32-AVX2: # %bb.0: # %entry
; X32-AVX2-NEXT: pushl %esi
; X32-AVX2-NEXT: .cfi_def_cfa_offset 8
; X32-AVX2-NEXT: .cfi_offset %esi, -8
@@ -137,7 +137,7 @@ define i32 @PR15215_good(<4 x i32> %input) {
; X32-AVX2-NEXT: retl
;
; X64-LABEL: PR15215_good:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; X64-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -152,7 +152,7 @@ define i32 @PR15215_good(<4 x i32> %input) {
; X64-NEXT: retq
;
; X64-SSE2-LABEL: PR15215_good:
-; X64-SSE2: # BB#0: # %entry
+; X64-SSE2: # %bb.0: # %entry
; X64-SSE2-NEXT: movd %xmm0, %eax
; X64-SSE2-NEXT: andl $1, %eax
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -170,7 +170,7 @@ define i32 @PR15215_good(<4 x i32> %input) {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: PR15215_good:
-; X64-AVX2: # BB#0: # %entry
+; X64-AVX2: # %bb.0: # %entry
; X64-AVX2-NEXT: vmovd %xmm0, %eax
; X64-AVX2-NEXT: andl $1, %eax
; X64-AVX2-NEXT: vpextrd $1, %xmm0, %ecx
diff --git a/test/CodeGen/X86/bool-zext.ll b/test/CodeGen/X86/bool-zext.ll
index e9375f73136..82b6a993ac2 100644
--- a/test/CodeGen/X86/bool-zext.ll
+++ b/test/CodeGen/X86/bool-zext.ll
@@ -5,7 +5,7 @@
; It's not necessary to zero-extend the arg because it is specified 'zeroext'.
define void @bar1(i1 zeroext %v1) nounwind ssp {
; X32-LABEL: bar1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl %eax
; X32-NEXT: calll foo1
@@ -13,7 +13,7 @@ define void @bar1(i1 zeroext %v1) nounwind ssp {
; X32-NEXT: retl
;
; X64-LABEL: bar1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: jmp foo1 # TAILCALL
%conv = zext i1 %v1 to i32
@@ -24,7 +24,7 @@ define void @bar1(i1 zeroext %v1) nounwind ssp {
; Check that on x86-64 the arguments are simply forwarded.
define void @bar2(i8 zeroext %v1) nounwind ssp {
; X32-LABEL: bar2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl %eax
; X32-NEXT: calll foo1
@@ -32,7 +32,7 @@ define void @bar2(i8 zeroext %v1) nounwind ssp {
; X32-NEXT: retl
;
; X64-LABEL: bar2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: jmp foo1 # TAILCALL
%conv = zext i8 %v1 to i32
@@ -43,12 +43,12 @@ define void @bar2(i8 zeroext %v1) nounwind ssp {
; Check that i1 return values are not zero-extended.
define zeroext i1 @bar3() nounwind ssp {
; X32-LABEL: bar3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: calll foo2
; X32-NEXT: retl
;
; X64-LABEL: bar3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo2
; X64-NEXT: popq %rcx
diff --git a/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
index 1194f96b01a..a20689dae3c 100644
--- a/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -18,28 +18,28 @@
define <16 x i8> @f16xi8_i16(<16 x i8> %a) {
; AVX-LABEL: f16xi8_i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi8_i16:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastw {{.*#+}} xmm1 = [256,256,256,256,256,256,256,256]
; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f16xi8_i16:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX-64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f16xi8_i16:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastw {{.*#+}} xmm1 = [256,256,256,256,256,256,256,256]
; ALL64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -52,28 +52,28 @@ define <16 x i8> @f16xi8_i16(<16 x i8> %a) {
define <16 x i8> @f16xi8_i32(<16 x i8> %a) {
; AVX-LABEL: f16xi8_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi8_i32:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [50462976,50462976,50462976,50462976]
; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f16xi8_i32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
; AVX-64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f16xi8_i32:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [50462976,50462976,50462976,50462976]
; ALL64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -86,28 +86,28 @@ define <16 x i8> @f16xi8_i32(<16 x i8> %a) {
define <16 x i8> @f16xi8_i64(<16 x i8> %a) {
; AVX-LABEL: f16xi8_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi8_i64:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f16xi8_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f16xi8_i64:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [506097522914230528,506097522914230528]
; ALL64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -120,7 +120,7 @@ define <16 x i8> @f16xi8_i64(<16 x i8> %a) {
define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
; AVX-LABEL: f32xi8_i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -130,14 +130,14 @@ define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f32xi8_i16:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastw {{.*#+}} ymm1 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f32xi8_i16:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -147,7 +147,7 @@ define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f32xi8_i16:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastw {{.*#+}} ymm1 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; ALL64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -160,7 +160,7 @@ define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
define <32 x i8> @f32xi8_i32(<32 x i8> %a) {
; AVX-LABEL: f32xi8_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -170,14 +170,14 @@ define <32 x i8> @f32xi8_i32(<32 x i8> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f32xi8_i32:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f32xi8_i32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -187,7 +187,7 @@ define <32 x i8> @f32xi8_i32(<32 x i8> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f32xi8_i32:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastd {{.*#+}} ymm1 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; ALL64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -200,7 +200,7 @@ define <32 x i8> @f32xi8_i32(<32 x i8> %a) {
define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
; AVX-LABEL: f32xi8_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -210,14 +210,14 @@ define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f32xi8_i64:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f32xi8_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -227,7 +227,7 @@ define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f32xi8_i64:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastq {{.*#+}} ymm1 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528]
; ALL64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -240,7 +240,7 @@ define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
; AVX-LABEL: f32xi8_i128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -250,7 +250,7 @@ define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f32xi8_i128:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
@@ -258,7 +258,7 @@ define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f32xi8_i128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -268,7 +268,7 @@ define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f32xi8_i128:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
@@ -282,7 +282,7 @@ define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
; AVX-LABEL: f64xi8_i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -298,7 +298,7 @@ define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f64xi8_i16:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vpbroadcastw {{.*#+}} ymm2 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -307,14 +307,14 @@ define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64xi8_i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm1 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f64xi8_i16:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -330,7 +330,7 @@ define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f64xi8_i16:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vpbroadcastw {{.*#+}} ymm2 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -339,7 +339,7 @@ define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f64xi8_i16:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vpbroadcastw {{.*#+}} zmm1 = [256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256]
; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -352,7 +352,7 @@ define <64 x i8> @f64xi8_i16(<64 x i8> %a) {
define <64 x i8> @f64i8_i32(<64 x i8> %a) {
; AVX-LABEL: f64i8_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -368,7 +368,7 @@ define <64 x i8> @f64i8_i32(<64 x i8> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f64i8_i32:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm2 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -377,14 +377,14 @@ define <64 x i8> @f64i8_i32(<64 x i8> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64i8_i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} zmm1 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f64i8_i32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -400,7 +400,7 @@ define <64 x i8> @f64i8_i32(<64 x i8> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f64i8_i32:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -409,7 +409,7 @@ define <64 x i8> @f64i8_i32(<64 x i8> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f64i8_i32:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vpbroadcastd {{.*#+}} zmm1 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -422,7 +422,7 @@ define <64 x i8> @f64i8_i32(<64 x i8> %a) {
define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
; AVX-LABEL: f64xi8_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -438,7 +438,7 @@ define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f64xi8_i64:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -447,14 +447,14 @@ define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64xi8_i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f64xi8_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -470,7 +470,7 @@ define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f64xi8_i64:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528]
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -479,7 +479,7 @@ define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f64xi8_i64:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528]
; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -492,7 +492,7 @@ define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
; AVX-LABEL: f64xi8_i128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -508,7 +508,7 @@ define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f64xi8_i128:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; NO-AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
@@ -518,7 +518,7 @@ define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64xi8_i128:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
@@ -526,7 +526,7 @@ define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f64xi8_i128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -542,7 +542,7 @@ define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f64xi8_i128:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; NO-AVX512BW-64-NEXT: # ymm2 = mem[0,1,0,1]
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
@@ -552,7 +552,7 @@ define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f64xi8_i128:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
@@ -566,7 +566,7 @@ define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
; AVX-LABEL: f64xi8_i256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; AVX-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -583,7 +583,7 @@ define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f64xi8_i256:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -592,7 +592,7 @@ define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f64xi8_i256:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
@@ -600,7 +600,7 @@ define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f64xi8_i256:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; AVX-64-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -617,7 +617,7 @@ define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f64xi8_i256:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; NO-AVX512BW-64-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -626,7 +626,7 @@ define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f64xi8_i256:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; AVX512BW-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-64-NEXT: vpaddb %zmm1, %zmm0, %zmm0
@@ -640,28 +640,28 @@ define <64 x i8> @f64xi8_i256(<64 x i8> %a) {
define <8 x i16> @f8xi16_i32(<8 x i16> %a) {
; AVX-LABEL: f8xi16_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xi16_i32:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65536,65536,65536,65536]
; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f8xi16_i32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f8xi16_i32:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65536,65536,65536,65536]
; ALL64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -674,28 +674,28 @@ define <8 x i16> @f8xi16_i32(<8 x i16> %a) {
define <8 x i16> @f8xi16_i64(<8 x i16> %a) {
; AVX-LABEL: f8xi16_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xi16_i64:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f8xi16_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f8xi16_i64:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [844433520132096,844433520132096]
; ALL64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -708,7 +708,7 @@ define <8 x i16> @f8xi16_i64(<8 x i16> %a) {
define <16 x i16> @f16xi16_i32(<16 x i16> %a) {
; AVX-LABEL: f16xi16_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -718,14 +718,14 @@ define <16 x i16> @f16xi16_i32(<16 x i16> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi16_i32:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65536,65536,65536,65536,65536,65536,65536,65536]
; ALL32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f16xi16_i32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -735,7 +735,7 @@ define <16 x i16> @f16xi16_i32(<16 x i16> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f16xi16_i32:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65536,65536,65536,65536,65536,65536,65536,65536]
; ALL64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -748,7 +748,7 @@ define <16 x i16> @f16xi16_i32(<16 x i16> %a) {
define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
; AVX-LABEL: f16xi16_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -758,14 +758,14 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi16_i64:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
; ALL32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f16xi16_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -775,7 +775,7 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f16xi16_i64:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastq {{.*#+}} ymm1 = [844433520132096,844433520132096,844433520132096,844433520132096]
; ALL64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -788,7 +788,7 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
; AVX-LABEL: f16xi16_i128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -798,7 +798,7 @@ define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi16_i128:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
; ALL32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
@@ -806,7 +806,7 @@ define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f16xi16_i128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -816,7 +816,7 @@ define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f16xi16_i128:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
@@ -830,7 +830,7 @@ define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
; AVX-LABEL: f32xi16_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vbroadcastss {{.*#+}} xmm3 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -846,7 +846,7 @@ define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f32xi16_i32:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm2 = [65536,65536,65536,65536,65536,65536,65536,65536]
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -855,14 +855,14 @@ define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f32xi16_i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} zmm1 = [65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536]
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f32xi16_i32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm3 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -878,7 +878,7 @@ define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f32xi16_i32:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [65536,65536,65536,65536,65536,65536,65536,65536]
; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -887,7 +887,7 @@ define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f32xi16_i32:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vpbroadcastd {{.*#+}} zmm1 = [65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536,65536]
; AVX512BW-64-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -900,7 +900,7 @@ define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
; AVX-LABEL: f32xi16_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -916,7 +916,7 @@ define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f32xi16_i64:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -925,14 +925,14 @@ define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f32xi16_i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f32xi16_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -948,7 +948,7 @@ define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f32xi16_i64:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [844433520132096,844433520132096,844433520132096,844433520132096]
; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -957,7 +957,7 @@ define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f32xi16_i64:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096]
; AVX512BW-64-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -970,7 +970,7 @@ define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
; AVX-LABEL: f32xi16_i128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7]
; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -986,7 +986,7 @@ define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f32xi16_i128:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; NO-AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm1, %ymm1
@@ -996,7 +996,7 @@ define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f32xi16_i128:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
@@ -1004,7 +1004,7 @@ define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f32xi16_i128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7]
; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -1020,7 +1020,7 @@ define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f32xi16_i128:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; NO-AVX512BW-64-NEXT: # ymm2 = mem[0,1,0,1]
; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm1, %ymm1
@@ -1030,7 +1030,7 @@ define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f32xi16_i128:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; AVX512BW-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-64-NEXT: vpaddw %zmm1, %zmm0, %zmm0
@@ -1044,7 +1044,7 @@ define <32 x i16> @f32xi16_i128(<32 x i16> %a) {
define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
; AVX-LABEL: f32xi16_i256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [8,9,10,11,12,13,14,15]
; AVX-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -1061,7 +1061,7 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
; AVX-NEXT: retl
;
; NO-AVX512BW-LABEL: f32xi16_i256:
-; NO-AVX512BW: # BB#0:
+; NO-AVX512BW: # %bb.0:
; NO-AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -1070,7 +1070,7 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
; NO-AVX512BW-NEXT: retl
;
; AVX512BW-LABEL: f32xi16_i256:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
@@ -1078,7 +1078,7 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
; AVX512BW-NEXT: retl
;
; AVX-64-LABEL: f32xi16_i256:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [8,9,10,11,12,13,14,15]
; AVX-64-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -1095,7 +1095,7 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
; AVX-64-NEXT: retq
;
; NO-AVX512BW-64-LABEL: f32xi16_i256:
-; NO-AVX512BW-64: # BB#0:
+; NO-AVX512BW-64: # %bb.0:
; NO-AVX512BW-64-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; NO-AVX512BW-64-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -1104,7 +1104,7 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
; NO-AVX512BW-64-NEXT: retq
;
; AVX512BW-64-LABEL: f32xi16_i256:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-64-NEXT: vpaddw %zmm1, %zmm0, %zmm0
@@ -1119,28 +1119,28 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
define <4 x i32> @f4xi32_i64(<4 x i32> %a) {
; AVX-LABEL: f4xi32_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f4xi32_i64:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; ALL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f4xi32_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f4xi32_i64:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4294967296,4294967296]
; ALL64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1153,7 +1153,7 @@ define <4 x i32> @f4xi32_i64(<4 x i32> %a) {
define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
; AVX-LABEL: f8xi32_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -1163,14 +1163,14 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xi32_i64:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
; ALL32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f8xi32_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX-64-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -1180,7 +1180,7 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f8xi32_i64:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967296,4294967296,4294967296,4294967296]
; ALL64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1193,7 +1193,7 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
; AVX-LABEL: f8xi32_i128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3]
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -1203,7 +1203,7 @@ define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xi32_i128:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,0,1,2,3]
; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
; ALL32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
@@ -1211,7 +1211,7 @@ define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f8xi32_i128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3]
; AVX-64-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -1221,7 +1221,7 @@ define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f8xi32_i128:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,2,3,0,1,2,3]
; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
@@ -1235,7 +1235,7 @@ define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
; AVX-LABEL: f16xi32_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
; AVX-NEXT: vpaddd %xmm3, %xmm2, %xmm2
@@ -1251,7 +1251,7 @@ define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f16xi32_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
@@ -1260,14 +1260,14 @@ define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xi32_i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm1 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f16xi32_i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
; AVX-64-NEXT: vpaddd %xmm3, %xmm2, %xmm2
@@ -1283,7 +1283,7 @@ define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f16xi32_i64:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4294967296,4294967296,4294967296,4294967296]
; AVX2-64-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vpaddd %ymm2, %ymm0, %ymm0
@@ -1292,7 +1292,7 @@ define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f16xi32_i64:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4294967296,4294967296,4294967296,4294967296,4294967296,4294967296,4294967296,4294967296]
; AVX512F-64-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -1305,7 +1305,7 @@ define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
; AVX-LABEL: f16xi32_i128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX-NEXT: vpaddd %xmm3, %xmm2, %xmm2
@@ -1321,7 +1321,7 @@ define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f16xi32_i128:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
@@ -1331,7 +1331,7 @@ define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xi32_i128:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
@@ -1339,7 +1339,7 @@ define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f16xi32_i128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX-64-NEXT: vpaddd %xmm3, %xmm2, %xmm2
@@ -1355,7 +1355,7 @@ define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f16xi32_i128:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,2,3,0,1,2,3]
; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-64-NEXT: vpaddd %ymm2, %ymm1, %ymm1
@@ -1365,7 +1365,7 @@ define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f16xi32_i128:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vpaddd %zmm1, %zmm0, %zmm0
@@ -1379,7 +1379,7 @@ define <16 x i32> @f16xi32_i128(<16 x i32> %a) {
define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
; AVX-LABEL: f4xi64_i128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,1,0,0,0,1,0]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -1390,14 +1390,14 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f4xi64_i128:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,1,0,0,0,1,0]
; ALL32-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f4xi64_i128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: movl $1, %eax
; AVX-64-NEXT: vmovq %rax, %xmm2
@@ -1409,7 +1409,7 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f4xi64_i128:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,1,0,1]
; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vpaddq %ymm1, %ymm0, %ymm0
@@ -1423,7 +1423,7 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
; AVX-LABEL: f8xi64_i128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,1,0,0,0,1,0]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm4
@@ -1439,7 +1439,7 @@ define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f8xi64_i128:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,1,0,0,0,1,0]
; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
@@ -1448,14 +1448,14 @@ define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f8xi64_i128:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0]
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f8xi64_i128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: movl $1, %eax
; AVX-64-NEXT: vmovq %rax, %xmm3
@@ -1474,7 +1474,7 @@ define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f8xi64_i128:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,1,0,1]
; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-64-NEXT: vpaddq %ymm2, %ymm1, %ymm1
@@ -1484,7 +1484,7 @@ define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f8xi64_i128:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,1,0,1,0,1,0,1]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
@@ -1498,7 +1498,7 @@ define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
; AVX-LABEL: f8xi64_i256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,1,0,2,0,3,0]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm4
@@ -1514,7 +1514,7 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f8xi64_i256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,1,0,2,0,3,0]
; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
@@ -1523,14 +1523,14 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f8xi64_i256:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,0,1,0,2,0,3,0,0,0,1,0,2,0,3,0]
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f8xi64_i256:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3]
; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
@@ -1549,7 +1549,7 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f8xi64_i256:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3]
; AVX2-64-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
@@ -1558,7 +1558,7 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f8xi64_i256:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
@@ -1572,28 +1572,28 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
define <4 x float> @f4xf32_f64(<4 x float> %a) {
; AVX-LABEL: f4xf32_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f4xf32_f64:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; ALL32-NEXT: vaddps %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vdivps %xmm0, %xmm1, %xmm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f4xf32_f64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-64-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f4xf32_f64:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
; ALL64-NEXT: vaddps %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vdivps %xmm0, %xmm1, %xmm0
@@ -1606,28 +1606,28 @@ define <4 x float> @f4xf32_f64(<4 x float> %a) {
define <8 x float> @f8xf32_f64(<8 x float> %a) {
; AVX-LABEL: f8xf32_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastsd {{.*#+}} ymm1 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vdivps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xf32_f64:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vbroadcastsd {{.*#+}} ymm1 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; ALL32-NEXT: vaddps %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vdivps %ymm0, %ymm1, %ymm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f8xf32_f64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm1 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX-64-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-64-NEXT: vdivps %ymm0, %ymm1, %ymm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f8xf32_f64:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vbroadcastsd {{.*#+}} ymm1 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
; ALL64-NEXT: vaddps %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vdivps %ymm0, %ymm1, %ymm0
@@ -1640,7 +1640,7 @@ define <8 x float> @f8xf32_f64(<8 x float> %a) {
define <8 x float> @f8xf32_f128(<8 x float> %a) {
; AVX-LABEL: f8xf32_f128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -1648,7 +1648,7 @@ define <8 x float> @f8xf32_f128(<8 x float> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xf32_f128:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
; ALL32-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -1656,7 +1656,7 @@ define <8 x float> @f8xf32_f128(<8 x float> %a) {
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f8xf32_f128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX-64-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-64-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -1664,7 +1664,7 @@ define <8 x float> @f8xf32_f128(<8 x float> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f8xf32_f128:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -1678,7 +1678,7 @@ define <8 x float> @f8xf32_f128(<8 x float> %a) {
define <16 x float> @f16xf32_f64(<16 x float> %a) {
; AVX-LABEL: f16xf32_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1687,7 +1687,7 @@ define <16 x float> @f16xf32_f64(<16 x float> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f16xf32_f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1696,14 +1696,14 @@ define <16 x float> @f16xf32_f64(<16 x float> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xf32_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastsd {{.*#+}} zmm1 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f16xf32_f64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.0078125018626451492,0.0078125018626451492,0.0078125018626451492,0.0078125018626451492]
; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1712,7 +1712,7 @@ define <16 x float> @f16xf32_f64(<16 x float> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f16xf32_f64:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
; AVX2-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1721,7 +1721,7 @@ define <16 x float> @f16xf32_f64(<16 x float> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f16xf32_f64:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vbroadcastsd {{.*#+}} zmm1 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
; AVX512F-64-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: vdivps %zmm0, %zmm1, %zmm0
@@ -1734,7 +1734,7 @@ define <16 x float> @f16xf32_f64(<16 x float> %a) {
define <16 x float> @f16xf32_f128(<16 x float> %a) {
; AVX-LABEL: f16xf32_f128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX-NEXT: # ymm2 = mem[0,1,0,1]
; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
@@ -1744,7 +1744,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f16xf32_f128:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
@@ -1754,7 +1754,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xf32_f128:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
@@ -1762,7 +1762,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f16xf32_f128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
@@ -1772,7 +1772,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f16xf32_f128:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
@@ -1782,7 +1782,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f16xf32_f128:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vaddps %zmm1, %zmm0, %zmm0
@@ -1796,7 +1796,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
define <16 x float> @f16xf32_f256(<16 x float> %a) {
; AVX-LABEL: f16xf32_f256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1805,7 +1805,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f16xf32_f256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1814,7 +1814,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f16xf32_f256:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
@@ -1822,7 +1822,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f16xf32_f256:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1831,7 +1831,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f16xf32_f256:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
; AVX2-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1840,7 +1840,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f16xf32_f256:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vaddps %zmm1, %zmm0, %zmm0
@@ -1854,7 +1854,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
define <4 x double> @f4xf64_f128(<4 x double> %a) {
; AVX-LABEL: f4xf64_f128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; AVX-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -1862,7 +1862,7 @@ define <4 x double> @f4xf64_f128(<4 x double> %a) {
; AVX-NEXT: retl
;
; ALL32-LABEL: f4xf64_f128:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
; ALL32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -1870,7 +1870,7 @@ define <4 x double> @f4xf64_f128(<4 x double> %a) {
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f4xf64_f128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; AVX-64-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -1878,7 +1878,7 @@ define <4 x double> @f4xf64_f128(<4 x double> %a) {
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f4xf64_f128:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -1892,7 +1892,7 @@ define <4 x double> @f4xf64_f128(<4 x double> %a) {
define <8 x double> @f8xf64_f128(<8 x double> %a) {
; AVX-LABEL: f8xf64_f128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; AVX-NEXT: # ymm2 = mem[0,1,0,1]
; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
@@ -1902,7 +1902,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f8xf64_f128:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
@@ -1912,7 +1912,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f8xf64_f128:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
@@ -1920,7 +1920,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f8xf64_f128:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; AVX-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
@@ -1930,7 +1930,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f8xf64_f128:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
@@ -1940,7 +1940,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f8xf64_f128:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vaddpd %zmm1, %zmm0, %zmm0
@@ -1961,7 +1961,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
define <8 x double> @f8xf64_f256(<8 x double> %a) {
; AVX-LABEL: f8xf64_f256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -1970,7 +1970,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
; AVX-NEXT: retl
;
; AVX2-LABEL: f8xf64_f256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -1979,7 +1979,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
; AVX2-NEXT: retl
;
; AVX512-LABEL: f8xf64_f256:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
@@ -1987,7 +1987,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
; AVX512-NEXT: retl
;
; AVX-64-LABEL: f8xf64_f256:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -1996,7 +1996,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
; AVX-64-NEXT: retq
;
; AVX2-64-LABEL: f8xf64_f256:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX2-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -2005,7 +2005,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
; AVX2-64-NEXT: retq
;
; AVX512F-64-LABEL: f8xf64_f256:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vaddpd %zmm1, %zmm0, %zmm0
@@ -2020,28 +2020,28 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
define <8 x i16> @f8xi16_i32_NaN(<8 x i16> %a) {
; AVX-LABEL: f8xi16_i32_NaN:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xi16_i32_NaN:
-; ALL32: # BB#0:
+; ALL32: # %bb.0:
; ALL32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4290379776,4290379776,4290379776,4290379776]
; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; AVX-64-LABEL: f8xi16_i32_NaN:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-64-NEXT: retq
;
; ALL64-LABEL: f8xi16_i32_NaN:
-; ALL64: # BB#0:
+; ALL64: # %bb.0:
; ALL64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4290379776,4290379776,4290379776,4290379776]
; ALL64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vpand %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/broadcastm-lowering.ll b/test/CodeGen/X86/broadcastm-lowering.ll
index e0e3adcaefb..af1a7bf33fa 100644
--- a/test/CodeGen/X86/broadcastm-lowering.ll
+++ b/test/CodeGen/X86/broadcastm-lowering.ll
@@ -5,7 +5,7 @@
define <2 x i64> @test_mm_epi64(<8 x i16> %a, <8 x i16> %b) {
; AVX512CD-LABEL: test_mm_epi64:
-; AVX512CD: # BB#0: # %entry
+; AVX512CD: # %bb.0: # %entry
; AVX512CD-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX512CD-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512CD-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -18,13 +18,13 @@ define <2 x i64> @test_mm_epi64(<8 x i16> %a, <8 x i16> %b) {
; AVX512CD-NEXT: retq
;
; AVX512VLCDBW-LABEL: test_mm_epi64:
-; AVX512VLCDBW: # BB#0: # %entry
+; AVX512VLCDBW: # %bb.0: # %entry
; AVX512VLCDBW-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
; AVX512VLCDBW-NEXT: vpbroadcastmb2q %k0, %xmm0
; AVX512VLCDBW-NEXT: retq
;
; X86-AVX512VLCDBW-LABEL: test_mm_epi64:
-; X86-AVX512VLCDBW: # BB#0: # %entry
+; X86-AVX512VLCDBW: # %bb.0: # %entry
; X86-AVX512VLCDBW-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
; X86-AVX512VLCDBW-NEXT: kmovd %k0, %eax
; X86-AVX512VLCDBW-NEXT: movzbl %al, %eax
@@ -42,7 +42,7 @@ entry:
define <4 x i32> @test_mm_epi32(<16 x i8> %a, <16 x i8> %b) {
; AVX512CD-LABEL: test_mm_epi32:
-; AVX512CD: # BB#0: # %entry
+; AVX512CD: # %bb.0: # %entry
; AVX512CD-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512CD-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512CD-NEXT: vpslld $31, %zmm0, %zmm0
@@ -57,13 +57,13 @@ define <4 x i32> @test_mm_epi32(<16 x i8> %a, <16 x i8> %b) {
; AVX512CD-NEXT: retq
;
; AVX512VLCDBW-LABEL: test_mm_epi32:
-; AVX512VLCDBW: # BB#0: # %entry
+; AVX512VLCDBW: # %bb.0: # %entry
; AVX512VLCDBW-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
; AVX512VLCDBW-NEXT: vpbroadcastmw2d %k0, %xmm0
; AVX512VLCDBW-NEXT: retq
;
; X86-AVX512VLCDBW-LABEL: test_mm_epi32:
-; X86-AVX512VLCDBW: # BB#0: # %entry
+; X86-AVX512VLCDBW: # %bb.0: # %entry
; X86-AVX512VLCDBW-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
; X86-AVX512VLCDBW-NEXT: vpbroadcastmw2d %k0, %xmm0
; X86-AVX512VLCDBW-NEXT: retl
@@ -78,19 +78,19 @@ entry:
define <16 x i32> @test_mm512_epi32(<16 x i32> %a, <16 x i32> %b) {
; AVX512CD-LABEL: test_mm512_epi32:
-; AVX512CD: # BB#0: # %entry
+; AVX512CD: # %bb.0: # %entry
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512CD-NEXT: vpbroadcastmw2d %k0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512VLCDBW-LABEL: test_mm512_epi32:
-; AVX512VLCDBW: # BB#0: # %entry
+; AVX512VLCDBW: # %bb.0: # %entry
; AVX512VLCDBW-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512VLCDBW-NEXT: vpbroadcastmw2d %k0, %zmm0
; AVX512VLCDBW-NEXT: retq
;
; X86-AVX512VLCDBW-LABEL: test_mm512_epi32:
-; X86-AVX512VLCDBW: # BB#0: # %entry
+; X86-AVX512VLCDBW: # %bb.0: # %entry
; X86-AVX512VLCDBW-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; X86-AVX512VLCDBW-NEXT: vpbroadcastmw2d %k0, %zmm0
; X86-AVX512VLCDBW-NEXT: retl
@@ -105,7 +105,7 @@ entry:
define <8 x i64> @test_mm512_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-LABEL: test_mm512_epi64:
-; AVX512CD: # BB#0: # %entry
+; AVX512CD: # %bb.0: # %entry
; AVX512CD-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
@@ -113,13 +113,13 @@ define <8 x i64> @test_mm512_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-NEXT: retq
;
; AVX512VLCDBW-LABEL: test_mm512_epi64:
-; AVX512VLCDBW: # BB#0: # %entry
+; AVX512VLCDBW: # %bb.0: # %entry
; AVX512VLCDBW-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; AVX512VLCDBW-NEXT: vpbroadcastmb2q %k0, %zmm0
; AVX512VLCDBW-NEXT: retq
;
; X86-AVX512VLCDBW-LABEL: test_mm512_epi64:
-; X86-AVX512VLCDBW: # BB#0: # %entry
+; X86-AVX512VLCDBW: # %bb.0: # %entry
; X86-AVX512VLCDBW-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; X86-AVX512VLCDBW-NEXT: kmovd %k0, %eax
; X86-AVX512VLCDBW-NEXT: movzbl %al, %eax
@@ -139,7 +139,7 @@ entry:
define <4 x i64> @test_mm256_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-LABEL: test_mm256_epi64:
-; AVX512CD: # BB#0: # %entry
+; AVX512CD: # %bb.0: # %entry
; AVX512CD-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
@@ -151,13 +151,13 @@ define <4 x i64> @test_mm256_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-NEXT: retq
;
; AVX512VLCDBW-LABEL: test_mm256_epi64:
-; AVX512VLCDBW: # BB#0: # %entry
+; AVX512VLCDBW: # %bb.0: # %entry
; AVX512VLCDBW-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; AVX512VLCDBW-NEXT: vpbroadcastmb2q %k0, %ymm0
; AVX512VLCDBW-NEXT: retq
;
; X86-AVX512VLCDBW-LABEL: test_mm256_epi64:
-; X86-AVX512VLCDBW: # BB#0: # %entry
+; X86-AVX512VLCDBW: # %bb.0: # %entry
; X86-AVX512VLCDBW-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; X86-AVX512VLCDBW-NEXT: kmovd %k0, %eax
; X86-AVX512VLCDBW-NEXT: movzbl %al, %eax
@@ -176,7 +176,7 @@ entry:
define <8 x i32> @test_mm256_epi32(<16 x i16> %a, <16 x i16> %b) {
; AVX512CD-LABEL: test_mm256_epi32:
-; AVX512CD: # BB#0: # %entry
+; AVX512CD: # %bb.0: # %entry
; AVX512CD-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX512CD-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512CD-NEXT: vpslld $31, %zmm0, %zmm0
@@ -191,13 +191,13 @@ define <8 x i32> @test_mm256_epi32(<16 x i16> %a, <16 x i16> %b) {
; AVX512CD-NEXT: retq
;
; AVX512VLCDBW-LABEL: test_mm256_epi32:
-; AVX512VLCDBW: # BB#0: # %entry
+; AVX512VLCDBW: # %bb.0: # %entry
; AVX512VLCDBW-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
; AVX512VLCDBW-NEXT: vpbroadcastmw2d %k0, %ymm0
; AVX512VLCDBW-NEXT: retq
;
; X86-AVX512VLCDBW-LABEL: test_mm256_epi32:
-; X86-AVX512VLCDBW: # BB#0: # %entry
+; X86-AVX512VLCDBW: # %bb.0: # %entry
; X86-AVX512VLCDBW-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
; X86-AVX512VLCDBW-NEXT: vpbroadcastmw2d %k0, %ymm0
; X86-AVX512VLCDBW-NEXT: retl
diff --git a/test/CodeGen/X86/bswap-rotate.ll b/test/CodeGen/X86/bswap-rotate.ll
index f686febe564..62798ba67e2 100644
--- a/test/CodeGen/X86/bswap-rotate.ll
+++ b/test/CodeGen/X86/bswap-rotate.ll
@@ -7,13 +7,13 @@
define i16 @combine_bswap_rotate(i16 %a0) {
; X86-LABEL: combine_bswap_rotate:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rolw $9, %ax
; X86-NEXT: retl
;
; X64-LABEL: combine_bswap_rotate:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rolw $9, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/bswap-vector.ll b/test/CodeGen/X86/bswap-vector.ll
index 7463f5f6d08..e7cb91a42f8 100644
--- a/test/CodeGen/X86/bswap-vector.ll
+++ b/test/CodeGen/X86/bswap-vector.ll
@@ -10,7 +10,7 @@ declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
define <8 x i16> @test1(<8 x i16> %v) {
; CHECK-NOSSSE3-LABEL: test1:
-; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3: # %bb.0: # %entry
; CHECK-NOSSSE3-NEXT: pxor %xmm1, %xmm1
; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -23,17 +23,17 @@ define <8 x i16> @test1(<8 x i16> %v) {
; CHECK-NOSSSE3-NEXT: retq
;
; CHECK-SSSE3-LABEL: test1:
-; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3: # %bb.0: # %entry
; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; CHECK-SSSE3-NEXT: retq
;
; CHECK-AVX-LABEL: test1:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: test1:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -43,7 +43,7 @@ entry:
define <4 x i32> @test2(<4 x i32> %v) {
; CHECK-NOSSSE3-LABEL: test2:
-; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3: # %bb.0: # %entry
; CHECK-NOSSSE3-NEXT: pxor %xmm1, %xmm1
; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -56,17 +56,17 @@ define <4 x i32> @test2(<4 x i32> %v) {
; CHECK-NOSSSE3-NEXT: retq
;
; CHECK-SSSE3-LABEL: test2:
-; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3: # %bb.0: # %entry
; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; CHECK-SSSE3-NEXT: retq
;
; CHECK-AVX-LABEL: test2:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: test2:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -76,7 +76,7 @@ entry:
define <2 x i64> @test3(<2 x i64> %v) {
; CHECK-NOSSSE3-LABEL: test3:
-; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3: # %bb.0: # %entry
; CHECK-NOSSSE3-NEXT: pxor %xmm1, %xmm1
; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -91,17 +91,17 @@ define <2 x i64> @test3(<2 x i64> %v) {
; CHECK-NOSSSE3-NEXT: retq
;
; CHECK-SSSE3-LABEL: test3:
-; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3: # %bb.0: # %entry
; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; CHECK-SSSE3-NEXT: retq
;
; CHECK-AVX-LABEL: test3:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: test3:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -115,7 +115,7 @@ declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)
define <16 x i16> @test4(<16 x i16> %v) {
; CHECK-NOSSSE3-LABEL: test4:
-; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3: # %bb.0: # %entry
; CHECK-NOSSSE3-NEXT: pxor %xmm2, %xmm2
; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm3
; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
@@ -136,19 +136,19 @@ define <16 x i16> @test4(<16 x i16> %v) {
; CHECK-NOSSSE3-NEXT: retq
;
; CHECK-SSSE3-LABEL: test4:
-; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3: # %bb.0: # %entry
; CHECK-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm0
; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm1
; CHECK-SSSE3-NEXT: retq
;
; CHECK-AVX-LABEL: test4:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: test4:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -158,7 +158,7 @@ entry:
define <8 x i32> @test5(<8 x i32> %v) {
; CHECK-NOSSSE3-LABEL: test5:
-; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3: # %bb.0: # %entry
; CHECK-NOSSSE3-NEXT: pxor %xmm2, %xmm2
; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm3
; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
@@ -179,19 +179,19 @@ define <8 x i32> @test5(<8 x i32> %v) {
; CHECK-NOSSSE3-NEXT: retq
;
; CHECK-SSSE3-LABEL: test5:
-; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3: # %bb.0: # %entry
; CHECK-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm0
; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm1
; CHECK-SSSE3-NEXT: retq
;
; CHECK-AVX-LABEL: test5:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: test5:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -201,7 +201,7 @@ entry:
define <4 x i64> @test6(<4 x i64> %v) {
; CHECK-NOSSSE3-LABEL: test6:
-; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3: # %bb.0: # %entry
; CHECK-NOSSSE3-NEXT: pxor %xmm2, %xmm2
; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm3
; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
@@ -226,19 +226,19 @@ define <4 x i64> @test6(<4 x i64> %v) {
; CHECK-NOSSSE3-NEXT: retq
;
; CHECK-SSSE3-LABEL: test6:
-; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3: # %bb.0: # %entry
; CHECK-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm0
; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm1
; CHECK-SSSE3-NEXT: retq
;
; CHECK-AVX-LABEL: test6:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: test6:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -250,7 +250,7 @@ declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
define <4 x i16> @test7(<4 x i16> %v) {
; CHECK-NOSSSE3-LABEL: test7:
-; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3: # %bb.0: # %entry
; CHECK-NOSSSE3-NEXT: pxor %xmm1, %xmm1
; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -264,17 +264,17 @@ define <4 x i16> @test7(<4 x i16> %v) {
; CHECK-NOSSSE3-NEXT: retq
;
; CHECK-SSSE3-LABEL: test7:
-; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3: # %bb.0: # %entry
; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0],zero,zero,xmm0[5,4],zero,zero,xmm0[9,8],zero,zero,xmm0[13,12],zero,zero
; CHECK-SSSE3-NEXT: retq
;
; CHECK-AVX-LABEL: test7:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0],zero,zero,xmm0[5,4],zero,zero,xmm0[9,8],zero,zero,xmm0[13,12],zero,zero
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: test7:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -288,7 +288,7 @@ entry:
define <8 x i16> @identity_v8i16(<8 x i16> %v) {
; CHECK-ALL-LABEL: identity_v8i16:
-; CHECK-ALL: # BB#0: # %entry
+; CHECK-ALL: # %bb.0: # %entry
; CHECK-ALL-NEXT: retq
entry:
%bs1 = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %v)
@@ -298,7 +298,7 @@ entry:
define <4 x i32> @identity_v4i32(<4 x i32> %v) {
; CHECK-ALL-LABEL: identity_v4i32:
-; CHECK-ALL: # BB#0: # %entry
+; CHECK-ALL: # %bb.0: # %entry
; CHECK-ALL-NEXT: retq
entry:
%bs1 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %v)
@@ -308,7 +308,7 @@ entry:
define <2 x i64> @identity_v2i64(<2 x i64> %v) {
; CHECK-ALL-LABEL: identity_v2i64:
-; CHECK-ALL: # BB#0: # %entry
+; CHECK-ALL: # %bb.0: # %entry
; CHECK-ALL-NEXT: retq
entry:
%bs1 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %v)
@@ -318,7 +318,7 @@ entry:
define <16 x i16> @identity_v16i16(<16 x i16> %v) {
; CHECK-ALL-LABEL: identity_v16i16:
-; CHECK-ALL: # BB#0: # %entry
+; CHECK-ALL: # %bb.0: # %entry
; CHECK-ALL-NEXT: retq
entry:
%bs1 = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %v)
@@ -328,7 +328,7 @@ entry:
define <8 x i32> @identity_v8i32(<8 x i32> %v) {
; CHECK-ALL-LABEL: identity_v8i32:
-; CHECK-ALL: # BB#0: # %entry
+; CHECK-ALL: # %bb.0: # %entry
; CHECK-ALL-NEXT: retq
entry:
%bs1 = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %v)
@@ -338,7 +338,7 @@ entry:
define <4 x i64> @identity_v4i64(<4 x i64> %v) {
; CHECK-ALL-LABEL: identity_v4i64:
-; CHECK-ALL: # BB#0: # %entry
+; CHECK-ALL: # %bb.0: # %entry
; CHECK-ALL-NEXT: retq
entry:
%bs1 = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %v)
@@ -348,7 +348,7 @@ entry:
define <4 x i16> @identity_v4i16(<4 x i16> %v) {
; CHECK-ALL-LABEL: identity_v4i16:
-; CHECK-ALL: # BB#0: # %entry
+; CHECK-ALL: # %bb.0: # %entry
; CHECK-ALL-NEXT: retq
entry:
%bs1 = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %v)
@@ -362,17 +362,17 @@ entry:
define <8 x i16> @fold_v8i16() {
; CHECK-SSE-LABEL: fold_v8i16:
-; CHECK-SSE: # BB#0: # %entry
+; CHECK-SSE: # %bb.0: # %entry
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,256,65535,512,65023,1024,64511,1536]
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: fold_v8i16:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,256,65535,512,65023,1024,64511,1536]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: fold_v8i16:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,256,65535,512,65023,1024,64511,1536]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -382,17 +382,17 @@ entry:
define <4 x i32> @fold_v4i32() {
; CHECK-SSE-LABEL: fold_v4i32:
-; CHECK-SSE: # BB#0: # %entry
+; CHECK-SSE: # %bb.0: # %entry
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,33554432,4261412863]
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: fold_v4i32:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,33554432,4261412863]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: fold_v4i32:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,33554432,4261412863]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -402,17 +402,17 @@ entry:
define <2 x i64> @fold_v2i64() {
; CHECK-SSE-LABEL: fold_v2i64:
-; CHECK-SSE: # BB#0: # %entry
+; CHECK-SSE: # %bb.0: # %entry
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [18374686479671623680,18446744073709551615]
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: fold_v2i64:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18374686479671623680,18446744073709551615]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: fold_v2i64:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18374686479671623680,18446744073709551615]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -422,18 +422,18 @@ entry:
define <16 x i16> @fold_v16i16() {
; CHECK-SSE-LABEL: fold_v16i16:
-; CHECK-SSE: # BB#0: # %entry
+; CHECK-SSE: # %bb.0: # %entry
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,256,65535,512,65023,1024,64511,1536]
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [63999,2048,63487,2560,62975,3072,62463,3584]
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: fold_v16i16:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,256,65535,512,65023,1024,64511,1536,63999,2048,63487,2560,62975,3072,62463,3584]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: fold_v16i16:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,256,65535,512,65023,1024,64511,1536,63999,2048,63487,2560,62975,3072,62463,3584]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -443,18 +443,18 @@ entry:
define <8 x i32> @fold_v8i32() {
; CHECK-SSE-LABEL: fold_v8i32:
-; CHECK-SSE: # BB#0: # %entry
+; CHECK-SSE: # %bb.0: # %entry
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,16777216,4294967295,33554432]
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [4261412863,67108864,4227858431,100663296]
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: fold_v8i32:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,16777216,4294967295,33554432,4261412863,67108864,4227858431,100663296]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: fold_v8i32:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,16777216,4294967295,33554432,4261412863,67108864,4227858431,100663296]
; CHECK-WIDE-AVX-NEXT: retq
entry:
@@ -464,18 +464,18 @@ entry:
define <4 x i64> @fold_v4i64() {
; CHECK-SSE-LABEL: fold_v4i64:
-; CHECK-SSE: # BB#0: # %entry
+; CHECK-SSE: # %bb.0: # %entry
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [18374686479671623680,18446744073709551615]
; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [18446462598732840960,72056494526300160]
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: fold_v4i64:
-; CHECK-AVX: # BB#0: # %entry
+; CHECK-AVX: # %bb.0: # %entry
; CHECK-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18374686479671623680,18446744073709551615,18446462598732840960,72056494526300160]
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: fold_v4i64:
-; CHECK-WIDE-AVX: # BB#0: # %entry
+; CHECK-WIDE-AVX: # %bb.0: # %entry
; CHECK-WIDE-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18374686479671623680,18446744073709551615,18446462598732840960,72056494526300160]
; CHECK-WIDE-AVX-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/bswap-wide-int.ll b/test/CodeGen/X86/bswap-wide-int.ll
index 858dbf5fd85..8d6416158e3 100644
--- a/test/CodeGen/X86/bswap-wide-int.ll
+++ b/test/CodeGen/X86/bswap-wide-int.ll
@@ -10,7 +10,7 @@ declare i256 @llvm.bswap.i256(i256)
define i64 @bswap_i64(i64 %a0) nounwind {
; X86-LABEL: bswap_i64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bswapl %eax
@@ -18,19 +18,19 @@ define i64 @bswap_i64(i64 %a0) nounwind {
; X86-NEXT: retl
;
; X86-MOVBE-LABEL: bswap_i64:
-; X86-MOVBE: # BB#0:
+; X86-MOVBE: # %bb.0:
; X86-MOVBE-NEXT: movbel {{[0-9]+}}(%esp), %eax
; X86-MOVBE-NEXT: movbel {{[0-9]+}}(%esp), %edx
; X86-MOVBE-NEXT: retl
;
; X64-LABEL: bswap_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bswapq %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
;
; X64-MOVBE-LABEL: bswap_i64:
-; X64-MOVBE: # BB#0:
+; X64-MOVBE: # %bb.0:
; X64-MOVBE-NEXT: bswapq %rdi
; X64-MOVBE-NEXT: movq %rdi, %rax
; X64-MOVBE-NEXT: retq
@@ -40,7 +40,7 @@ define i64 @bswap_i64(i64 %a0) nounwind {
define i128 @bswap_i128(i128 %a0) nounwind {
; X86-LABEL: bswap_i128:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -61,7 +61,7 @@ define i128 @bswap_i128(i128 %a0) nounwind {
; X86-NEXT: retl $4
;
; X86-MOVBE-LABEL: bswap_i128:
-; X86-MOVBE: # BB#0:
+; X86-MOVBE: # %bb.0:
; X86-MOVBE-NEXT: pushl %edi
; X86-MOVBE-NEXT: pushl %esi
; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -78,7 +78,7 @@ define i128 @bswap_i128(i128 %a0) nounwind {
; X86-MOVBE-NEXT: retl $4
;
; X64-LABEL: bswap_i128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bswapq %rsi
; X64-NEXT: bswapq %rdi
; X64-NEXT: movq %rsi, %rax
@@ -86,7 +86,7 @@ define i128 @bswap_i128(i128 %a0) nounwind {
; X64-NEXT: retq
;
; X64-MOVBE-LABEL: bswap_i128:
-; X64-MOVBE: # BB#0:
+; X64-MOVBE: # %bb.0:
; X64-MOVBE-NEXT: bswapq %rsi
; X64-MOVBE-NEXT: bswapq %rdi
; X64-MOVBE-NEXT: movq %rsi, %rax
@@ -98,7 +98,7 @@ define i128 @bswap_i128(i128 %a0) nounwind {
define i256 @bswap_i256(i256 %a0) nounwind {
; X86-LABEL: bswap_i256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: bswapl %ecx
@@ -127,7 +127,7 @@ define i256 @bswap_i256(i256 %a0) nounwind {
; X86-NEXT: retl $4
;
; X86-MOVBE-LABEL: bswap_i256:
-; X86-MOVBE: # BB#0:
+; X86-MOVBE: # %bb.0:
; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-MOVBE-NEXT: movbel %ecx, 28(%eax)
@@ -148,7 +148,7 @@ define i256 @bswap_i256(i256 %a0) nounwind {
; X86-MOVBE-NEXT: retl $4
;
; X64-LABEL: bswap_i256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bswapq %r8
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
@@ -161,7 +161,7 @@ define i256 @bswap_i256(i256 %a0) nounwind {
; X64-NEXT: retq
;
; X64-MOVBE-LABEL: bswap_i256:
-; X64-MOVBE: # BB#0:
+; X64-MOVBE: # %bb.0:
; X64-MOVBE-NEXT: movbeq %rsi, 24(%rdi)
; X64-MOVBE-NEXT: movbeq %rdx, 16(%rdi)
; X64-MOVBE-NEXT: movbeq %rcx, 8(%rdi)
diff --git a/test/CodeGen/X86/bswap_tree.ll b/test/CodeGen/X86/bswap_tree.ll
index c217879d438..acd9330458f 100644
--- a/test/CodeGen/X86/bswap_tree.ll
+++ b/test/CodeGen/X86/bswap_tree.ll
@@ -12,14 +12,14 @@
; => (rotl (bswap x), 16)
define i32 @test1(i32 %x) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: bswapl %eax
; CHECK-NEXT: roll $16, %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: test1:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: bswapl %edi
; CHECK64-NEXT: roll $16, %edi
; CHECK64-NEXT: movl %edi, %eax
@@ -45,14 +45,14 @@ define i32 @test1(i32 %x) nounwind {
; ((x >> 8) & 0x00ff0000)
define i32 @test2(i32 %x) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: bswapl %eax
; CHECK-NEXT: roll $16, %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: test2:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: bswapl %edi
; CHECK64-NEXT: roll $16, %edi
; CHECK64-NEXT: movl %edi, %eax
diff --git a/test/CodeGen/X86/bswap_tree2.ll b/test/CodeGen/X86/bswap_tree2.ll
index 1340b7662a7..f4d75f4ef1e 100644
--- a/test/CodeGen/X86/bswap_tree2.ll
+++ b/test/CodeGen/X86/bswap_tree2.ll
@@ -8,7 +8,7 @@
; (with only half of the swap tree valid).
define i32 @test1(i32 %x) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: andl $16711680, %ecx # imm = 0xFF0000
@@ -23,7 +23,7 @@
; CHECK-NEXT: retl
;
; CHECK64-LABEL: test1:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl %edi, %eax
; CHECK64-NEXT: andl $16711680, %eax # imm = 0xFF0000
; CHECK64-NEXT: movl %edi, %ecx
@@ -58,7 +58,7 @@
; ((x >> 8) & 0x00ff0000)
define i32 @test2(i32 %x) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: shrl $8, %eax
@@ -72,7 +72,7 @@ define i32 @test2(i32 %x) nounwind {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: test2:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl %edi, %eax
; CHECK64-NEXT: shrl $8, %eax
; CHECK64-NEXT: shll $8, %edi
@@ -100,7 +100,7 @@ define i32 @test2(i32 %x) nounwind {
; Invalid pattern involving a unary op
define i32 @test3(float %x) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subl $8, %esp
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp)
@@ -124,7 +124,7 @@ define i32 @test3(float %x) nounwind {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: test3:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: cvttss2si %xmm0, %ecx
; CHECK64-NEXT: movl %ecx, %edx
; CHECK64-NEXT: shll $8, %edx
diff --git a/test/CodeGen/X86/bt.ll b/test/CodeGen/X86/bt.ll
index c8050a33916..144e9e7e50c 100644
--- a/test/CodeGen/X86/bt.ll
+++ b/test/CodeGen/X86/bt.ll
@@ -23,21 +23,21 @@
define void @test2(i32 %x, i32 %n) nounwind {
; X86-LABEL: test2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB0_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB0_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB0_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -59,22 +59,22 @@ UnifiedReturnBlock:
define void @test2b(i32 %x, i32 %n) nounwind {
; X86-LABEL: test2b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB1_1
-; X86-NEXT: # BB#2: # %UnifiedReturnBlock
+; X86-NEXT: # %bb.2: # %UnifiedReturnBlock
; X86-NEXT: retl
; X86-NEXT: .LBB1_1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: retl
;
; X64-LABEL: test2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB1_1
-; X64-NEXT: # BB#2: # %UnifiedReturnBlock
+; X64-NEXT: # %bb.2: # %UnifiedReturnBlock
; X64-NEXT: retq
; X64-NEXT: .LBB1_1: # %bb
; X64-NEXT: pushq %rax
@@ -97,21 +97,21 @@ UnifiedReturnBlock:
define void @atest2(i32 %x, i32 %n) nounwind {
; X86-LABEL: atest2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB2_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB2_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: atest2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB2_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -133,22 +133,22 @@ UnifiedReturnBlock:
define void @atest2b(i32 %x, i32 %n) nounwind {
; X86-LABEL: atest2b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB3_1
-; X86-NEXT: # BB#2: # %UnifiedReturnBlock
+; X86-NEXT: # %bb.2: # %UnifiedReturnBlock
; X86-NEXT: retl
; X86-NEXT: .LBB3_1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: retl
;
; X64-LABEL: atest2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB3_1
-; X64-NEXT: # BB#2: # %UnifiedReturnBlock
+; X64-NEXT: # %bb.2: # %UnifiedReturnBlock
; X64-NEXT: retq
; X64-NEXT: .LBB3_1: # %bb
; X64-NEXT: pushq %rax
@@ -171,22 +171,22 @@ UnifiedReturnBlock:
define void @test3(i32 %x, i32 %n) nounwind {
; X86-LABEL: test3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB4_1
-; X86-NEXT: # BB#2: # %UnifiedReturnBlock
+; X86-NEXT: # %bb.2: # %UnifiedReturnBlock
; X86-NEXT: retl
; X86-NEXT: .LBB4_1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB4_1
-; X64-NEXT: # BB#2: # %UnifiedReturnBlock
+; X64-NEXT: # %bb.2: # %UnifiedReturnBlock
; X64-NEXT: retq
; X64-NEXT: .LBB4_1: # %bb
; X64-NEXT: pushq %rax
@@ -209,22 +209,22 @@ UnifiedReturnBlock:
define void @test3b(i32 %x, i32 %n) nounwind {
; X86-LABEL: test3b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB5_1
-; X86-NEXT: # BB#2: # %UnifiedReturnBlock
+; X86-NEXT: # %bb.2: # %UnifiedReturnBlock
; X86-NEXT: retl
; X86-NEXT: .LBB5_1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: retl
;
; X64-LABEL: test3b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB5_1
-; X64-NEXT: # BB#2: # %UnifiedReturnBlock
+; X64-NEXT: # %bb.2: # %UnifiedReturnBlock
; X64-NEXT: retq
; X64-NEXT: .LBB5_1: # %bb
; X64-NEXT: pushq %rax
@@ -247,21 +247,21 @@ UnifiedReturnBlock:
define void @testne2(i32 %x, i32 %n) nounwind {
; X86-LABEL: testne2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB6_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB6_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: testne2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB6_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -283,21 +283,21 @@ UnifiedReturnBlock:
define void @testne2b(i32 %x, i32 %n) nounwind {
; X86-LABEL: testne2b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB7_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB7_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: testne2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB7_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -319,21 +319,21 @@ UnifiedReturnBlock:
define void @atestne2(i32 %x, i32 %n) nounwind {
; X86-LABEL: atestne2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB8_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB8_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: atestne2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB8_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -355,21 +355,21 @@ UnifiedReturnBlock:
define void @atestne2b(i32 %x, i32 %n) nounwind {
; X86-LABEL: atestne2b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB9_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB9_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: atestne2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB9_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -391,21 +391,21 @@ UnifiedReturnBlock:
define void @testne3(i32 %x, i32 %n) nounwind {
; X86-LABEL: testne3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB10_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB10_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: testne3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB10_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -427,21 +427,21 @@ UnifiedReturnBlock:
define void @testne3b(i32 %x, i32 %n) nounwind {
; X86-LABEL: testne3b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB11_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB11_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: testne3b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB11_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -463,21 +463,21 @@ UnifiedReturnBlock:
define void @query2(i32 %x, i32 %n) nounwind {
; X86-LABEL: query2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB12_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB12_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: query2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB12_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -499,21 +499,21 @@ UnifiedReturnBlock:
define void @query2b(i32 %x, i32 %n) nounwind {
; X86-LABEL: query2b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB13_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB13_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: query2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB13_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -535,21 +535,21 @@ UnifiedReturnBlock:
define void @aquery2(i32 %x, i32 %n) nounwind {
; X86-LABEL: aquery2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB14_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB14_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: aquery2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB14_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -571,21 +571,21 @@ UnifiedReturnBlock:
define void @aquery2b(i32 %x, i32 %n) nounwind {
; X86-LABEL: aquery2b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB15_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB15_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: aquery2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB15_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -607,21 +607,21 @@ UnifiedReturnBlock:
define void @query3(i32 %x, i32 %n) nounwind {
; X86-LABEL: query3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB16_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB16_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: query3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB16_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -643,21 +643,21 @@ UnifiedReturnBlock:
define void @query3b(i32 %x, i32 %n) nounwind {
; X86-LABEL: query3b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB17_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB17_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: query3b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB17_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -679,21 +679,21 @@ UnifiedReturnBlock:
define void @query3x(i32 %x, i32 %n) nounwind {
; X86-LABEL: query3x:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB18_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB18_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: query3x:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB18_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -715,21 +715,21 @@ UnifiedReturnBlock:
define void @query3bx(i32 %x, i32 %n) nounwind {
; X86-LABEL: query3bx:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jae .LBB19_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB19_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: query3bx:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jae .LBB19_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -751,21 +751,21 @@ UnifiedReturnBlock:
define void @queryne2(i32 %x, i32 %n) nounwind {
; X86-LABEL: queryne2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB20_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB20_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: queryne2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB20_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -787,21 +787,21 @@ UnifiedReturnBlock:
define void @queryne2b(i32 %x, i32 %n) nounwind {
; X86-LABEL: queryne2b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB21_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB21_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: queryne2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB21_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -823,21 +823,21 @@ UnifiedReturnBlock:
define void @aqueryne2(i32 %x, i32 %n) nounwind {
; X86-LABEL: aqueryne2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB22_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB22_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: aqueryne2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB22_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -859,21 +859,21 @@ UnifiedReturnBlock:
define void @aqueryne2b(i32 %x, i32 %n) nounwind {
; X86-LABEL: aqueryne2b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB23_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB23_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: aqueryne2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB23_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -895,21 +895,21 @@ UnifiedReturnBlock:
define void @queryne3(i32 %x, i32 %n) nounwind {
; X86-LABEL: queryne3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB24_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB24_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: queryne3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB24_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -931,21 +931,21 @@ UnifiedReturnBlock:
define void @queryne3b(i32 %x, i32 %n) nounwind {
; X86-LABEL: queryne3b:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB25_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB25_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: queryne3b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB25_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -967,21 +967,21 @@ UnifiedReturnBlock:
define void @queryne3x(i32 %x, i32 %n) nounwind {
; X86-LABEL: queryne3x:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB26_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB26_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: queryne3x:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB26_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -1003,21 +1003,21 @@ UnifiedReturnBlock:
define void @queryne3bx(i32 %x, i32 %n) nounwind {
; X86-LABEL: queryne3bx:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %ecx, %eax
; X86-NEXT: jb .LBB27_2
-; X86-NEXT: # BB#1: # %bb
+; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: calll foo
; X86-NEXT: .LBB27_2: # %UnifiedReturnBlock
; X86-NEXT: retl
;
; X64-LABEL: queryne3bx:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %esi, %edi
; X64-NEXT: jb .LBB27_2
-; X64-NEXT: # BB#1: # %bb
+; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo
; X64-NEXT: popq %rax
@@ -1041,7 +1041,7 @@ declare void @foo()
define zeroext i1 @invert(i32 %flags, i32 %flag) nounwind {
; X86-LABEL: invert:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: notl %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -1050,7 +1050,7 @@ define zeroext i1 @invert(i32 %flags, i32 %flag) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: invert:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: notl %edi
; X64-NEXT: btl %esi, %edi
; X64-NEXT: setb %al
@@ -1064,7 +1064,7 @@ define zeroext i1 @invert(i32 %flags, i32 %flag) nounwind {
define zeroext i1 @extend(i32 %bit, i64 %bits) {
; X86-LABEL: extend:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: btl %eax, %ecx
@@ -1072,7 +1072,7 @@ define zeroext i1 @extend(i32 %bit, i64 %bits) {
; X86-NEXT: retl
;
; X64-LABEL: extend:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: btl %edi, %esi
; X64-NEXT: setb %al
; X64-NEXT: retq
@@ -1092,7 +1092,7 @@ entry:
; }
define void @demanded_i32(i32* nocapture readonly, i32* nocapture, i32) nounwind {
; X86-LABEL: demanded_i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -1103,7 +1103,7 @@ define void @demanded_i32(i32* nocapture readonly, i32* nocapture, i32) nounwind
; X86-NEXT: shll %cl, %edx
; X86-NEXT: btl %ecx, %esi
; X86-NEXT: jae .LBB30_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl %edx, (%ecx,%eax,4)
; X86-NEXT: .LBB30_2:
@@ -1111,7 +1111,7 @@ define void @demanded_i32(i32* nocapture readonly, i32* nocapture, i32) nounwind
; X86-NEXT: retl
;
; X64-LABEL: demanded_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %eax
; X64-NEXT: shrl $5, %eax
; X64-NEXT: movl (%rdi,%rax,4), %r8d
@@ -1120,7 +1120,7 @@ define void @demanded_i32(i32* nocapture readonly, i32* nocapture, i32) nounwind
; X64-NEXT: shll %cl, %edi
; X64-NEXT: btl %edx, %r8d
; X64-NEXT: jae .LBB30_2
-; X64-NEXT: # BB#1:
+; X64-NEXT: # %bb.1:
; X64-NEXT: orl %edi, (%rsi,%rax,4)
; X64-NEXT: .LBB30_2:
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/btq.ll b/test/CodeGen/X86/btq.ll
index fc015d598f9..1a17de17715 100644
--- a/test/CodeGen/X86/btq.ll
+++ b/test/CodeGen/X86/btq.ll
@@ -5,10 +5,10 @@ declare void @bar()
define void @test1(i64 %foo) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: btq $32, %rdi
; CHECK-NEXT: jb .LBB0_2
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB0_2: # %if.then
; CHECK-NEXT: jmp bar # TAILCALL
@@ -26,10 +26,10 @@ if.end:
define void @test2(i64 %foo) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl $-2147483648, %edi # imm = 0x80000000
; CHECK-NEXT: jne .LBB1_2
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB1_2: # %if.then
; CHECK-NEXT: jmp bar # TAILCALL
diff --git a/test/CodeGen/X86/build-vector-128.ll b/test/CodeGen/X86/build-vector-128.ll
index 384c0828dc6..da92fe6c3fd 100644
--- a/test/CodeGen/X86/build-vector-128.ll
+++ b/test/CodeGen/X86/build-vector-128.ll
@@ -10,22 +10,22 @@
define <2 x double> @test_buildvector_v2f64(double %a0, double %a1) {
; SSE-32-LABEL: test_buildvector_v2f64:
-; SSE-32: # BB#0:
+; SSE-32: # %bb.0:
; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: test_buildvector_v2f64:
-; SSE-64: # BB#0:
+; SSE-64: # %bb.0:
; SSE-64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-64-NEXT: retq
;
; AVX-32-LABEL: test_buildvector_v2f64:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v2f64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-64-NEXT: retq
%ins0 = insertelement <2 x double> undef, double %a0, i32 0
@@ -35,31 +35,31 @@ define <2 x double> @test_buildvector_v2f64(double %a0, double %a1) {
define <4 x float> @test_buildvector_v4f32(float %a0, float %a1, float %a2, float %a3) {
; SSE-32-LABEL: test_buildvector_v4f32:
-; SSE-32: # BB#0:
+; SSE-32: # %bb.0:
; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
; SSE-32-NEXT: retl
;
; SSE2-64-LABEL: test_buildvector_v4f32:
-; SSE2-64: # BB#0:
+; SSE2-64: # %bb.0:
; SSE2-64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-64-NEXT: retq
;
; SSE41-64-LABEL: test_buildvector_v4f32:
-; SSE41-64: # BB#0:
+; SSE41-64: # %bb.0:
; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
; SSE41-64-NEXT: retq
;
; AVX-32-LABEL: test_buildvector_v4f32:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v4f32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
@@ -73,24 +73,24 @@ define <4 x float> @test_buildvector_v4f32(float %a0, float %a1, float %a2, floa
define <2 x i64> @test_buildvector_v2i64(i64 %a0, i64 %a1) {
; SSE-32-LABEL: test_buildvector_v2i64:
-; SSE-32: # BB#0:
+; SSE-32: # %bb.0:
; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: test_buildvector_v2i64:
-; SSE-64: # BB#0:
+; SSE-64: # %bb.0:
; SSE-64-NEXT: movq %rsi, %xmm1
; SSE-64-NEXT: movq %rdi, %xmm0
; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-64-NEXT: retq
;
; AVX-32-LABEL: test_buildvector_v2i64:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v2i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovq %rsi, %xmm0
; AVX-64-NEXT: vmovq %rdi, %xmm1
; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -102,12 +102,12 @@ define <2 x i64> @test_buildvector_v2i64(i64 %a0, i64 %a1) {
define <4 x i32> @test_buildvector_v4i32(i32 %f0, i32 %f1, i32 %f2, i32 %f3) {
; SSE-32-LABEL: test_buildvector_v4i32:
-; SSE-32: # BB#0:
+; SSE-32: # %bb.0:
; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
; SSE-32-NEXT: retl
;
; SSE2-64-LABEL: test_buildvector_v4i32:
-; SSE2-64: # BB#0:
+; SSE2-64: # %bb.0:
; SSE2-64-NEXT: movd %ecx, %xmm0
; SSE2-64-NEXT: movd %edx, %xmm1
; SSE2-64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -118,7 +118,7 @@ define <4 x i32> @test_buildvector_v4i32(i32 %f0, i32 %f1, i32 %f2, i32 %f3) {
; SSE2-64-NEXT: retq
;
; SSE41-64-LABEL: test_buildvector_v4i32:
-; SSE41-64: # BB#0:
+; SSE41-64: # %bb.0:
; SSE41-64-NEXT: movd %edi, %xmm0
; SSE41-64-NEXT: pinsrd $1, %esi, %xmm0
; SSE41-64-NEXT: pinsrd $2, %edx, %xmm0
@@ -126,12 +126,12 @@ define <4 x i32> @test_buildvector_v4i32(i32 %f0, i32 %f1, i32 %f2, i32 %f3) {
; SSE41-64-NEXT: retq
;
; AVX-32-LABEL: test_buildvector_v4i32:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v4i32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovd %edi, %xmm0
; AVX-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
@@ -146,7 +146,7 @@ define <4 x i32> @test_buildvector_v4i32(i32 %f0, i32 %f1, i32 %f2, i32 %f3) {
define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) {
; SSE2-32-LABEL: test_buildvector_v8i16:
-; SSE2-32: # BB#0:
+; SSE2-32: # %bb.0:
; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -165,7 +165,7 @@ define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16
; SSE2-32-NEXT: retl
;
; SSE2-64-LABEL: test_buildvector_v8i16:
-; SSE2-64: # BB#0:
+; SSE2-64: # %bb.0:
; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -184,7 +184,7 @@ define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16
; SSE2-64-NEXT: retq
;
; SSE41-32-LABEL: test_buildvector_v8i16:
-; SSE41-32: # BB#0:
+; SSE41-32: # %bb.0:
; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-32-NEXT: pinsrw $1, {{[0-9]+}}(%esp), %xmm0
; SSE41-32-NEXT: pinsrw $2, {{[0-9]+}}(%esp), %xmm0
@@ -196,7 +196,7 @@ define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: test_buildvector_v8i16:
-; SSE41-64: # BB#0:
+; SSE41-64: # %bb.0:
; SSE41-64-NEXT: movd %edi, %xmm0
; SSE41-64-NEXT: pinsrw $1, %esi, %xmm0
; SSE41-64-NEXT: pinsrw $2, %edx, %xmm0
@@ -208,7 +208,7 @@ define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16
; SSE41-64-NEXT: retq
;
; AVX-32-LABEL: test_buildvector_v8i16:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -220,7 +220,7 @@ define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v8i16:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovd %edi, %xmm0
; AVX-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0
; AVX-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0
@@ -243,7 +243,7 @@ define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16
define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) {
; SSE2-32-LABEL: test_buildvector_v16i8:
-; SSE2-32: # BB#0:
+; SSE2-32: # %bb.0:
; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -278,7 +278,7 @@ define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; SSE2-32-NEXT: retl
;
; SSE2-64-LABEL: test_buildvector_v16i8:
-; SSE2-64: # BB#0:
+; SSE2-64: # %bb.0:
; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -313,7 +313,7 @@ define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; SSE2-64-NEXT: retq
;
; SSE41-32-LABEL: test_buildvector_v16i8:
-; SSE41-32: # BB#0:
+; SSE41-32: # %bb.0:
; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-32-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0
; SSE41-32-NEXT: pinsrb $2, {{[0-9]+}}(%esp), %xmm0
@@ -333,7 +333,7 @@ define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: test_buildvector_v16i8:
-; SSE41-64: # BB#0:
+; SSE41-64: # %bb.0:
; SSE41-64-NEXT: movd %edi, %xmm0
; SSE41-64-NEXT: pinsrb $1, %esi, %xmm0
; SSE41-64-NEXT: pinsrb $2, %edx, %xmm0
@@ -353,7 +353,7 @@ define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; SSE41-64-NEXT: retq
;
; AVX-32-LABEL: test_buildvector_v16i8:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -373,7 +373,7 @@ define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v16i8:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovd %edi, %xmm0
; AVX-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0
; AVX-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/build-vector-256.ll b/test/CodeGen/X86/build-vector-256.ll
index 4b077cc2469..f2f17710033 100644
--- a/test/CodeGen/X86/build-vector-256.ll
+++ b/test/CodeGen/X86/build-vector-256.ll
@@ -6,12 +6,12 @@
define <4 x double> @test_buildvector_v4f64(double %a0, double %a1, double %a2, double %a3) {
; AVX-32-LABEL: test_buildvector_v4f64:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v4f64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX-64-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -25,12 +25,12 @@ define <4 x double> @test_buildvector_v4f64(double %a0, double %a1, double %a2,
define <8 x float> @test_buildvector_v8f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) {
; AVX-32-LABEL: test_buildvector_v8f32:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v8f32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
@@ -52,12 +52,12 @@ define <8 x float> @test_buildvector_v8f32(float %a0, float %a1, float %a2, floa
define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
; AVX-32-LABEL: test_buildvector_v4i64:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0
; AVX-32-NEXT: retl
;
; AVX1-64-LABEL: test_buildvector_v4i64:
-; AVX1-64: # BB#0:
+; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vmovq %rcx, %xmm0
; AVX1-64-NEXT: vmovq %rdx, %xmm1
; AVX1-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -68,7 +68,7 @@ define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
; AVX1-64-NEXT: retq
;
; AVX2-64-LABEL: test_buildvector_v4i64:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vmovq %rcx, %xmm0
; AVX2-64-NEXT: vmovq %rdx, %xmm1
; AVX2-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -86,12 +86,12 @@ define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
define <8 x i32> @test_buildvector_v8i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) {
; AVX-32-LABEL: test_buildvector_v8i32:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0
; AVX-32-NEXT: retl
;
; AVX1-64-LABEL: test_buildvector_v8i32:
-; AVX1-64: # BB#0:
+; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vmovd %edi, %xmm0
; AVX1-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX1-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
@@ -104,7 +104,7 @@ define <8 x i32> @test_buildvector_v8i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32
; AVX1-64-NEXT: retq
;
; AVX2-64-LABEL: test_buildvector_v8i32:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vmovd %edi, %xmm0
; AVX2-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX2-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
@@ -128,7 +128,7 @@ define <8 x i32> @test_buildvector_v8i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32
define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) {
; AVX1-32-LABEL: test_buildvector_v16i16:
-; AVX1-32: # BB#0:
+; AVX1-32: # %bb.0:
; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -149,7 +149,7 @@ define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i
; AVX1-32-NEXT: retl
;
; AVX1-64-LABEL: test_buildvector_v16i16:
-; AVX1-64: # BB#0:
+; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vmovd %edi, %xmm0
; AVX1-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0
; AVX1-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0
@@ -170,7 +170,7 @@ define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i
; AVX1-64-NEXT: retq
;
; AVX2-32-LABEL: test_buildvector_v16i16:
-; AVX2-32: # BB#0:
+; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX2-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -191,7 +191,7 @@ define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i
; AVX2-32-NEXT: retl
;
; AVX2-64-LABEL: test_buildvector_v16i16:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vmovd %edi, %xmm0
; AVX2-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0
; AVX2-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0
@@ -231,7 +231,7 @@ define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i
define <32 x i8> @test_buildvector_v32i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) {
; AVX1-32-LABEL: test_buildvector_v32i8:
-; AVX1-32: # BB#0:
+; AVX1-32: # %bb.0:
; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX1-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -268,7 +268,7 @@ define <32 x i8> @test_buildvector_v32i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; AVX1-32-NEXT: retl
;
; AVX1-64-LABEL: test_buildvector_v32i8:
-; AVX1-64: # BB#0:
+; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vmovd %edi, %xmm0
; AVX1-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0
; AVX1-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
@@ -305,7 +305,7 @@ define <32 x i8> @test_buildvector_v32i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; AVX1-64-NEXT: retq
;
; AVX2-32-LABEL: test_buildvector_v32i8:
-; AVX2-32: # BB#0:
+; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX2-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX2-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -342,7 +342,7 @@ define <32 x i8> @test_buildvector_v32i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; AVX2-32-NEXT: retl
;
; AVX2-64-LABEL: test_buildvector_v32i8:
-; AVX2-64: # BB#0:
+; AVX2-64: # %bb.0:
; AVX2-64-NEXT: vmovd %edi, %xmm0
; AVX2-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0
; AVX2-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/build-vector-512.ll b/test/CodeGen/X86/build-vector-512.ll
index ca83da93eb7..aba8b13db96 100644
--- a/test/CodeGen/X86/build-vector-512.ll
+++ b/test/CodeGen/X86/build-vector-512.ll
@@ -6,12 +6,12 @@
define <8 x double> @test_buildvector_v8f64(double %a0, double %a1, double %a2, double %a3, double %a4, double %a5, double %a6, double %a7) {
; AVX-32-LABEL: test_buildvector_v8f64:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v8f64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX-64-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; AVX-64-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
@@ -33,12 +33,12 @@ define <8 x double> @test_buildvector_v8f64(double %a0, double %a1, double %a2,
define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15) {
; AVX-32-LABEL: test_buildvector_v16f32:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v16f32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
@@ -78,12 +78,12 @@ define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, fl
define <8 x i64> @test_buildvector_v8i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7) {
; AVX-32-LABEL: test_buildvector_v8i64:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v8i64:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovq %rcx, %xmm0
; AVX-64-NEXT: vmovq %rdx, %xmm1
; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -110,12 +110,12 @@ define <8 x i64> @test_buildvector_v8i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64
define <16 x i32> @test_buildvector_v16i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15) {
; AVX-32-LABEL: test_buildvector_v16i32:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_buildvector_v16i32:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovd %edi, %xmm0
; AVX-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
@@ -157,7 +157,7 @@ define <16 x i32> @test_buildvector_v16i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i
define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15, i16 %a16, i16 %a17, i16 %a18, i16 %a19, i16 %a20, i16 %a21, i16 %a22, i16 %a23, i16 %a24, i16 %a25, i16 %a26, i16 %a27, i16 %a28, i16 %a29, i16 %a30, i16 %a31) {
; AVX512F-32-LABEL: test_buildvector_v32i16:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -195,7 +195,7 @@ define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i
; AVX512F-32-NEXT: retl
;
; AVX512F-64-LABEL: test_buildvector_v32i16:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
@@ -233,7 +233,7 @@ define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i
; AVX512F-64-NEXT: retq
;
; AVX512BW-32-LABEL: test_buildvector_v32i16:
-; AVX512BW-32: # BB#0:
+; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -272,7 +272,7 @@ define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i
; AVX512BW-32-NEXT: retl
;
; AVX512BW-64-LABEL: test_buildvector_v32i16:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
@@ -346,7 +346,7 @@ define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i
define <64 x i8> @test_buildvector_v64i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31, i8 %a32, i8 %a33, i8 %a34, i8 %a35, i8 %a36, i8 %a37, i8 %a38, i8 %a39, i8 %a40, i8 %a41, i8 %a42, i8 %a43, i8 %a44, i8 %a45, i8 %a46, i8 %a47, i8 %a48, i8 %a49, i8 %a50, i8 %a51, i8 %a52, i8 %a53, i8 %a54, i8 %a55, i8 %a56, i8 %a57, i8 %a58, i8 %a59, i8 %a60, i8 %a61, i8 %a62, i8 %a63) {
; AVX512F-32-LABEL: test_buildvector_v64i8:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -416,7 +416,7 @@ define <64 x i8> @test_buildvector_v64i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; AVX512F-32-NEXT: retl
;
; AVX512F-64-LABEL: test_buildvector_v64i8:
-; AVX512F-64: # BB#0:
+; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
@@ -486,7 +486,7 @@ define <64 x i8> @test_buildvector_v64i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; AVX512F-64-NEXT: retq
;
; AVX512BW-32-LABEL: test_buildvector_v64i8:
-; AVX512BW-32: # BB#0:
+; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -557,7 +557,7 @@ define <64 x i8> @test_buildvector_v64i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
; AVX512BW-32-NEXT: retl
;
; AVX512BW-64-LABEL: test_buildvector_v64i8:
-; AVX512BW-64: # BB#0:
+; AVX512BW-64: # %bb.0:
; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
diff --git a/test/CodeGen/X86/buildvec-insertvec.ll b/test/CodeGen/X86/buildvec-insertvec.ll
index 5c4e2120924..88b5df04c76 100644
--- a/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/test/CodeGen/X86/buildvec-insertvec.ll
@@ -4,7 +4,7 @@
define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind {
; SSE2-LABEL: foo:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: cvttps2dq %xmm0, %xmm0
; SSE2-NEXT: movl $255, %eax
; SSE2-NEXT: movd %eax, %xmm1
@@ -17,7 +17,7 @@ define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: foo:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: cvttps2dq %xmm0, %xmm0
; SSE41-NEXT: movl $255, %eax
; SSE41-NEXT: pinsrd $3, %eax, %xmm0
@@ -36,7 +36,7 @@ define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind {
define <4 x float> @test_negative_zero_1(<4 x float> %A) {
; SSE2-LABEL: test_negative_zero_1:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT: xorps %xmm2, %xmm2
@@ -47,7 +47,7 @@ define <4 x float> @test_negative_zero_1(<4 x float> %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_negative_zero_1:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2],zero
; SSE41-NEXT: retq
entry:
@@ -64,14 +64,14 @@ entry:
define <2 x double> @test_negative_zero_2(<2 x double> %A) {
; SSE2-LABEL: test_negative_zero_2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movapd {{.*#+}} xmm1 = <u,-0>
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_negative_zero_2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; SSE41-NEXT: retq
entry:
@@ -83,14 +83,14 @@ entry:
define <4 x float> @test_buildvector_v4f32_register(float %f0, float %f1, float %f2, float %f3) {
; SSE2-LABEL: test_buildvector_v4f32_register:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v4f32_register:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
@@ -104,7 +104,7 @@ define <4 x float> @test_buildvector_v4f32_register(float %f0, float %f1, float
define <4 x float> @test_buildvector_v4f32_load(float* %p0, float* %p1, float* %p2, float* %p3) {
; SSE2-LABEL: test_buildvector_v4f32_load:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -115,7 +115,7 @@ define <4 x float> @test_buildvector_v4f32_load(float* %p0, float* %p1, float* %
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v4f32_load:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -134,7 +134,7 @@ define <4 x float> @test_buildvector_v4f32_load(float* %p0, float* %p1, float* %
define <4 x float> @test_buildvector_v4f32_partial_load(float %f0, float %f1, float %f2, float* %p3) {
; SSE2-LABEL: test_buildvector_v4f32_partial_load:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
@@ -142,7 +142,7 @@ define <4 x float> @test_buildvector_v4f32_partial_load(float %f0, float %f1, fl
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v4f32_partial_load:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
@@ -157,7 +157,7 @@ define <4 x float> @test_buildvector_v4f32_partial_load(float %f0, float %f1, fl
define <4 x i32> @test_buildvector_v4i32_register(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; SSE2-LABEL: test_buildvector_v4i32_register:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: movd %edx, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -168,7 +168,7 @@ define <4 x i32> @test_buildvector_v4i32_register(i32 %a0, i32 %a1, i32 %a2, i32
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v4i32_register:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pinsrd $1, %esi, %xmm0
; SSE41-NEXT: pinsrd $2, %edx, %xmm0
@@ -183,7 +183,7 @@ define <4 x i32> @test_buildvector_v4i32_register(i32 %a0, i32 %a1, i32 %a2, i32
define <4 x i32> @test_buildvector_v4i32_partial(i32 %a0, i32 %a3) {
; SSE2-LABEL: test_buildvector_v4i32_partial:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: movd %esi, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
@@ -191,7 +191,7 @@ define <4 x i32> @test_buildvector_v4i32_partial(i32 %a0, i32 %a3) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v4i32_partial:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pinsrd $3, %esi, %xmm0
; SSE41-NEXT: retq
@@ -204,7 +204,7 @@ define <4 x i32> @test_buildvector_v4i32_partial(i32 %a0, i32 %a3) {
define <4 x i32> @test_buildvector_v4i32_register_zero(i32 %a0, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_buildvector_v4i32_register_zero:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd %edx, %xmm0
; CHECK-NEXT: movd %esi, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -220,7 +220,7 @@ define <4 x i32> @test_buildvector_v4i32_register_zero(i32 %a0, i32 %a2, i32 %a3
define <4 x i32> @test_buildvector_v4i32_register_zero_2(i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_buildvector_v4i32_register_zero_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd %edx, %xmm0
; CHECK-NEXT: movd %esi, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -236,7 +236,7 @@ define <4 x i32> @test_buildvector_v4i32_register_zero_2(i32 %a1, i32 %a2, i32 %
define <8 x i16> @test_buildvector_v8i16_register(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) {
; SSE2-LABEL: test_buildvector_v8i16_register:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -255,7 +255,7 @@ define <8 x i16> @test_buildvector_v8i16_register(i16 %a0, i16 %a1, i16 %a2, i16
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v8i16_register:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pinsrw $1, %esi, %xmm0
; SSE41-NEXT: pinsrw $2, %edx, %xmm0
@@ -278,7 +278,7 @@ define <8 x i16> @test_buildvector_v8i16_register(i16 %a0, i16 %a1, i16 %a2, i16
define <8 x i16> @test_buildvector_v8i16_partial(i16 %a1, i16 %a3, i16 %a4, i16 %a5) {
; CHECK-LABEL: test_buildvector_v8i16_partial:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm0, %xmm0
; CHECK-NEXT: pinsrw $1, %edi, %xmm0
; CHECK-NEXT: pinsrw $3, %esi, %xmm0
@@ -298,7 +298,7 @@ define <8 x i16> @test_buildvector_v8i16_partial(i16 %a1, i16 %a3, i16 %a4, i16
define <8 x i16> @test_buildvector_v8i16_register_zero(i16 %a0, i16 %a3, i16 %a4, i16 %a5) {
; CHECK-LABEL: test_buildvector_v8i16_register_zero:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm0, %xmm0
; CHECK-NEXT: pinsrw $0, %edi, %xmm0
; CHECK-NEXT: pinsrw $3, %esi, %xmm0
@@ -318,7 +318,7 @@ define <8 x i16> @test_buildvector_v8i16_register_zero(i16 %a0, i16 %a3, i16 %a4
define <8 x i16> @test_buildvector_v8i16_register_zero_2(i16 %a1, i16 %a3, i16 %a4, i16 %a5) {
; CHECK-LABEL: test_buildvector_v8i16_register_zero_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm0, %xmm0
; CHECK-NEXT: pinsrw $1, %edi, %xmm0
; CHECK-NEXT: pinsrw $3, %esi, %xmm0
@@ -338,7 +338,7 @@ define <8 x i16> @test_buildvector_v8i16_register_zero_2(i16 %a1, i16 %a3, i16 %
define <16 x i8> @test_buildvector_v16i8_register(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) {
; SSE2-LABEL: test_buildvector_v16i8_register:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -373,7 +373,7 @@ define <16 x i8> @test_buildvector_v16i8_register(i8 %a0, i8 %a1, i8 %a2, i8 %a3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v16i8_register:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pinsrb $1, %esi, %xmm0
; SSE41-NEXT: pinsrb $2, %edx, %xmm0
@@ -412,7 +412,7 @@ define <16 x i8> @test_buildvector_v16i8_register(i8 %a0, i8 %a1, i8 %a2, i8 %a3
define <16 x i8> @test_buildvector_v16i8_partial(i8 %a2, i8 %a6, i8 %a8, i8 %a11, i8 %a12, i8 %a15) {
; SSE2-LABEL: test_buildvector_v16i8_partial:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzbl %dil, %eax
; SSE2-NEXT: pinsrw $1, %eax, %xmm0
; SSE2-NEXT: movzbl %sil, %eax
@@ -428,7 +428,7 @@ define <16 x i8> @test_buildvector_v16i8_partial(i8 %a2, i8 %a6, i8 %a8, i8 %a11
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v16i8_partial:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrb $2, %edi, %xmm0
; SSE41-NEXT: pinsrb $6, %esi, %xmm0
@@ -458,7 +458,7 @@ define <16 x i8> @test_buildvector_v16i8_partial(i8 %a2, i8 %a6, i8 %a8, i8 %a11
define <16 x i8> @test_buildvector_v16i8_register_zero(i8 %a0, i8 %a4, i8 %a6, i8 %a8, i8 %a11, i8 %a12, i8 %a15) {
; SSE2-LABEL: test_buildvector_v16i8_register_zero:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzbl %sil, %eax
; SSE2-NEXT: movzbl %dil, %esi
; SSE2-NEXT: movd %esi, %xmm0
@@ -477,7 +477,7 @@ define <16 x i8> @test_buildvector_v16i8_register_zero(i8 %a0, i8 %a4, i8 %a6, i
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v16i8_register_zero:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrb $0, %edi, %xmm0
; SSE41-NEXT: pinsrb $4, %esi, %xmm0
@@ -508,7 +508,7 @@ define <16 x i8> @test_buildvector_v16i8_register_zero(i8 %a0, i8 %a4, i8 %a6, i
define <16 x i8> @test_buildvector_v16i8_register_zero_2(i8 %a2, i8 %a3, i8 %a6, i8 %a8, i8 %a11, i8 %a12, i8 %a15) {
; SSE2-LABEL: test_buildvector_v16i8_register_zero_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shll $8, %esi
; SSE2-NEXT: movzbl %dil, %eax
; SSE2-NEXT: orl %esi, %eax
@@ -528,7 +528,7 @@ define <16 x i8> @test_buildvector_v16i8_register_zero_2(i8 %a2, i8 %a3, i8 %a6,
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_buildvector_v16i8_register_zero_2:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrb $2, %edi, %xmm0
; SSE41-NEXT: pinsrb $3, %esi, %xmm0
diff --git a/test/CodeGen/X86/bypass-slow-division-32.ll b/test/CodeGen/X86/bypass-slow-division-32.ll
index 32a1a5f7413..6677ccfbaf8 100644
--- a/test/CodeGen/X86/bypass-slow-division-32.ll
+++ b/test/CodeGen/X86/bypass-slow-division-32.ll
@@ -4,14 +4,14 @@
define i32 @Test_get_quotient(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: Test_get_quotient:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: orl %ecx, %edx
; CHECK-NEXT: testl $-256, %edx
; CHECK-NEXT: je .LBB0_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: retl
@@ -27,14 +27,14 @@ define i32 @Test_get_quotient(i32 %a, i32 %b) nounwind {
define i32 @Test_get_remainder(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: Test_get_remainder:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: orl %ecx, %edx
; CHECK-NEXT: testl $-256, %edx
; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movl %edx, %eax
@@ -51,14 +51,14 @@ define i32 @Test_get_remainder(i32 %a, i32 %b) nounwind {
define i32 @Test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: Test_get_quotient_and_remainder:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: orl %ecx, %edx
; CHECK-NEXT: testl $-256, %edx
; CHECK-NEXT: je .LBB2_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: addl %edx, %eax
@@ -79,7 +79,7 @@ define i32 @Test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind {
define i32 @Test_use_div_and_idiv(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: Test_use_div_and_idiv:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
@@ -89,7 +89,7 @@ define i32 @Test_use_div_and_idiv(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: orl %ebx, %edi
; CHECK-NEXT: testl $-256, %edi
; CHECK-NEXT: je .LBB3_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ebx
@@ -128,7 +128,7 @@ define i32 @Test_use_div_and_idiv(i32 %a, i32 %b) nounwind {
define i32 @Test_use_div_imm_imm() nounwind {
; CHECK-LABEL: Test_use_div_imm_imm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $64, %eax
; CHECK-NEXT: retl
%resultdiv = sdiv i32 256, 4
@@ -137,7 +137,7 @@ define i32 @Test_use_div_imm_imm() nounwind {
define i32 @Test_use_div_reg_imm(i32 %a) nounwind {
; CHECK-LABEL: Test_use_div_reg_imm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $1041204193, %eax # imm = 0x3E0F83E1
; CHECK-NEXT: imull {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %edx, %eax
@@ -151,7 +151,7 @@ define i32 @Test_use_div_reg_imm(i32 %a) nounwind {
define i32 @Test_use_rem_reg_imm(i32 %a) nounwind {
; CHECK-LABEL: Test_use_rem_reg_imm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl $1041204193, %edx # imm = 0x3E0F83E1
; CHECK-NEXT: movl %ecx, %eax
@@ -172,7 +172,7 @@ define i32 @Test_use_rem_reg_imm(i32 %a) nounwind {
define i32 @Test_use_divrem_reg_imm(i32 %a) nounwind {
; CHECK-LABEL: Test_use_divrem_reg_imm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl $1041204193, %edx # imm = 0x3E0F83E1
; CHECK-NEXT: movl %ecx, %eax
@@ -196,11 +196,11 @@ define i32 @Test_use_divrem_reg_imm(i32 %a) nounwind {
define i32 @Test_use_div_imm_reg(i32 %a) nounwind {
; CHECK-LABEL: Test_use_div_imm_reg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: testl $-256, %ecx
; CHECK-NEXT: je .LBB8_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: idivl %ecx
@@ -218,11 +218,11 @@ define i32 @Test_use_div_imm_reg(i32 %a) nounwind {
define i32 @Test_use_rem_imm_reg(i32 %a) nounwind {
; CHECK-LABEL: Test_use_rem_imm_reg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: testl $-256, %ecx
; CHECK-NEXT: je .LBB9_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: idivl %ecx
diff --git a/test/CodeGen/X86/bypass-slow-division-64.ll b/test/CodeGen/X86/bypass-slow-division-64.ll
index d85e7d70fcc..33789c93bcc 100644
--- a/test/CodeGen/X86/bypass-slow-division-64.ll
+++ b/test/CodeGen/X86/bypass-slow-division-64.ll
@@ -6,12 +6,12 @@
define i64 @Test_get_quotient(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: Test_get_quotient:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: shrq $32, %rax
; CHECK-NEXT: je .LBB0_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: cqto
; CHECK-NEXT: idivq %rsi
@@ -28,12 +28,12 @@ define i64 @Test_get_quotient(i64 %a, i64 %b) nounwind {
define i64 @Test_get_remainder(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: Test_get_remainder:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: shrq $32, %rax
; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: cqto
; CHECK-NEXT: idivq %rsi
@@ -52,12 +52,12 @@ define i64 @Test_get_remainder(i64 %a, i64 %b) nounwind {
define i64 @Test_get_quotient_and_remainder(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: Test_get_quotient_and_remainder:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: shrq $32, %rax
; CHECK-NEXT: je .LBB2_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: cqto
; CHECK-NEXT: idivq %rsi
diff --git a/test/CodeGen/X86/cast-vsel.ll b/test/CodeGen/X86/cast-vsel.ll
index 86cce73024f..ee63ec65391 100644
--- a/test/CodeGen/X86/cast-vsel.ll
+++ b/test/CodeGen/X86/cast-vsel.ll
@@ -10,7 +10,7 @@
define <8 x i32> @sext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %d) {
; SSE2-LABEL: sext:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: cmpltps %xmm3, %xmm1
; SSE2-NEXT: cmpltps %xmm2, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
@@ -25,7 +25,7 @@ define <8 x i32> @sext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
; SSE2-NEXT: retq
;
; SSE41-LABEL: sext:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: cmpltps %xmm3, %xmm1
; SSE41-NEXT: cmpltps %xmm2, %xmm0
; SSE41-NEXT: packssdw %xmm1, %xmm0
@@ -36,7 +36,7 @@ define <8 x i32> @sext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpmovsxwd %xmm2, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
@@ -50,7 +50,7 @@ define <8 x i32> @sext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovsxwd %xmm2, %ymm1
; AVX2-NEXT: vpmovsxwd %xmm3, %ymm2
@@ -64,7 +64,7 @@ define <8 x i32> @sext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
define <8 x i32> @zext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %d) {
; SSE2-LABEL: zext:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm6
; SSE2-NEXT: cmpltps %xmm3, %xmm1
; SSE2-NEXT: cmpltps %xmm2, %xmm6
@@ -80,7 +80,7 @@ define <8 x i32> @zext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
; SSE2-NEXT: retq
;
; SSE41-LABEL: zext:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: cmpltps %xmm3, %xmm1
; SSE41-NEXT: cmpltps %xmm2, %xmm0
; SSE41-NEXT: packssdw %xmm1, %xmm0
@@ -91,7 +91,7 @@ define <8 x i32> @zext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
@@ -105,7 +105,7 @@ define <8 x i32> @zext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
@@ -119,7 +119,7 @@ define <8 x i32> @zext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
define <4 x double> @fpext(<4 x double> %a, <4 x double> %b, <4 x float> %c, <4 x float> %d) {
; SSE2-LABEL: fpext:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: cmpltpd %xmm3, %xmm1
; SSE2-NEXT: cmpltpd %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -133,7 +133,7 @@ define <4 x double> @fpext(<4 x double> %a, <4 x double> %b, <4 x float> %c, <4
; SSE2-NEXT: retq
;
; SSE41-LABEL: fpext:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: cmpltpd %xmm3, %xmm1
; SSE41-NEXT: cmpltpd %xmm2, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -144,7 +144,7 @@ define <4 x double> @fpext(<4 x double> %a, <4 x double> %b, <4 x float> %c, <4
; SSE41-NEXT: retq
;
; AVX-LABEL: fpext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vcvtps2pd %xmm2, %ymm1
; AVX-NEXT: vcvtps2pd %xmm3, %ymm2
@@ -158,7 +158,7 @@ define <4 x double> @fpext(<4 x double> %a, <4 x double> %b, <4 x float> %c, <4
define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE2-LABEL: trunc:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
@@ -176,7 +176,7 @@ define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d)
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT: pshufb %xmm1, %xmm3
@@ -190,7 +190,7 @@ define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -206,7 +206,7 @@ define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d)
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm2
@@ -224,7 +224,7 @@ define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d)
define <4 x float> @fptrunc(<4 x float> %a, <4 x float> %b, <4 x double> %c, <4 x double> %d) {
; SSE2-LABEL: fptrunc:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: cmpltps %xmm1, %xmm0
; SSE2-NEXT: cvtpd2ps %xmm5, %xmm1
; SSE2-NEXT: cvtpd2ps %xmm4, %xmm4
@@ -238,7 +238,7 @@ define <4 x float> @fptrunc(<4 x float> %a, <4 x float> %b, <4 x double> %c, <4
; SSE2-NEXT: retq
;
; SSE41-LABEL: fptrunc:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: cmpltps %xmm1, %xmm0
; SSE41-NEXT: cvtpd2ps %xmm3, %xmm1
; SSE41-NEXT: cvtpd2ps %xmm2, %xmm2
@@ -251,7 +251,7 @@ define <4 x float> @fptrunc(<4 x float> %a, <4 x float> %b, <4 x double> %c, <4
; SSE41-NEXT: retq
;
; AVX-LABEL: fptrunc:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vcvtpd2ps %ymm2, %xmm1
; AVX-NEXT: vcvtpd2ps %ymm3, %xmm2
@@ -276,7 +276,7 @@ define <4 x float> @fptrunc(<4 x float> %a, <4 x float> %b, <4 x double> %c, <4
define void @example25() nounwind {
; SSE2-LABEL: example25:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movq $-4096, %rax # imm = 0xF000
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE2-NEXT: .p2align 4, 0x90
@@ -302,11 +302,11 @@ define void @example25() nounwind {
; SSE2-NEXT: movdqa %xmm1, dj+4096(%rax)
; SSE2-NEXT: addq $32, %rax
; SSE2-NEXT: jne .LBB5_1
-; SSE2-NEXT: # BB#2: # %for.end
+; SSE2-NEXT: # %bb.2: # %for.end
; SSE2-NEXT: retq
;
; SSE41-LABEL: example25:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movq $-4096, %rax # imm = 0xF000
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE41-NEXT: .p2align 4, 0x90
@@ -331,11 +331,11 @@ define void @example25() nounwind {
; SSE41-NEXT: movdqa %xmm1, dj+4096(%rax)
; SSE41-NEXT: addq $32, %rax
; SSE41-NEXT: jne .LBB5_1
-; SSE41-NEXT: # BB#2: # %for.end
+; SSE41-NEXT: # %bb.2: # %for.end
; SSE41-NEXT: retq
;
; AVX1-LABEL: example25:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: movq $-4096, %rax # imm = 0xF000
; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
; AVX1-NEXT: .p2align 4, 0x90
@@ -350,12 +350,12 @@ define void @example25() nounwind {
; AVX1-NEXT: vmovups %ymm1, dj+4096(%rax)
; AVX1-NEXT: addq $32, %rax
; AVX1-NEXT: jne .LBB5_1
-; AVX1-NEXT: # BB#2: # %for.end
+; AVX1-NEXT: # %bb.2: # %for.end
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: example25:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: movq $-4096, %rax # imm = 0xF000
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
; AVX2-NEXT: .p2align 4, 0x90
@@ -370,7 +370,7 @@ define void @example25() nounwind {
; AVX2-NEXT: vmovups %ymm1, dj+4096(%rax)
; AVX2-NEXT: addq $32, %rax
; AVX2-NEXT: jne .LBB5_1
-; AVX2-NEXT: # BB#2: # %for.end
+; AVX2-NEXT: # %bb.2: # %for.end
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
vector.ph:
@@ -407,7 +407,7 @@ for.end:
define void @example24(i16 signext %x, i16 signext %y) nounwind {
; SSE2-LABEL: example24:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -435,11 +435,11 @@ define void @example24(i16 signext %x, i16 signext %y) nounwind {
; SSE2-NEXT: movdqa %xmm3, dj+4096(%rax)
; SSE2-NEXT: addq $32, %rax
; SSE2-NEXT: jne .LBB6_1
-; SSE2-NEXT: # BB#2: # %for.end
+; SSE2-NEXT: # %bb.2: # %for.end
; SSE2-NEXT: retq
;
; SSE41-LABEL: example24:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
@@ -464,11 +464,11 @@ define void @example24(i16 signext %x, i16 signext %y) nounwind {
; SSE41-NEXT: movdqa %xmm0, dj+4112(%rax)
; SSE41-NEXT: addq $32, %rax
; SSE41-NEXT: jne .LBB6_1
-; SSE41-NEXT: # BB#2: # %for.end
+; SSE41-NEXT: # %bb.2: # %for.end
; SSE41-NEXT: retq
;
; AVX1-LABEL: example24:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -493,12 +493,12 @@ define void @example24(i16 signext %x, i16 signext %y) nounwind {
; AVX1-NEXT: vmovups %ymm2, dj+4096(%rax)
; AVX1-NEXT: addq $32, %rax
; AVX1-NEXT: jne .LBB6_1
-; AVX1-NEXT: # BB#2: # %for.end
+; AVX1-NEXT: # %bb.2: # %for.end
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: example24:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT: vmovd %esi, %xmm1
@@ -515,7 +515,7 @@ define void @example24(i16 signext %x, i16 signext %y) nounwind {
; AVX2-NEXT: vmovups %ymm2, dj+4096(%rax)
; AVX2-NEXT: addq $32, %rax
; AVX2-NEXT: jne .LBB6_1
-; AVX2-NEXT: # BB#2: # %for.end
+; AVX2-NEXT: # %bb.2: # %for.end
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
vector.ph:
diff --git a/test/CodeGen/X86/catchpad-weight.ll b/test/CodeGen/X86/catchpad-weight.ll
index a4b6f6f0edb..c122ad2c20a 100644
--- a/test/CodeGen/X86/catchpad-weight.ll
+++ b/test/CodeGen/X86/catchpad-weight.ll
@@ -2,7 +2,7 @@
; Check if the edge weight to the catchpad is calculated correctly.
-; CHECK: Successors according to CFG: BB#2(0x7ffff100 / 0x80000000 = 100.00%) BB#1(0x00000800 / 0x80000000 = 0.00%) BB#3(0x00000400 / 0x80000000 = 0.00%) BB#4(0x00000200 / 0x80000000 = 0.00%) BB#5(0x00000100 / 0x80000000 = 0.00%)
+; CHECK: Successors according to CFG: %bb.2(0x7ffff100 / 0x80000000 = 100.00%) %bb.1(0x00000800 / 0x80000000 = 0.00%) %bb.3(0x00000400 / 0x80000000 = 0.00%) %bb.4(0x00000200 / 0x80000000 = 0.00%) %bb.5(0x00000100 / 0x80000000 = 0.00%)
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64--windows-msvc18.0.0"
diff --git a/test/CodeGen/X86/chain_order.ll b/test/CodeGen/X86/chain_order.ll
index cc48e5b6149..b9e188f6a1b 100644
--- a/test/CodeGen/X86/chain_order.ll
+++ b/test/CodeGen/X86/chain_order.ll
@@ -4,7 +4,7 @@
; A test from pifft (after SLP-vectorization) that fails when we drop the chain on newly merged loads.
define void @cftx020(double* nocapture %a) {
; CHECK-LABEL: cftx020:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
diff --git a/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index 22ec4d392b7..2af9ec1b813 100644
--- a/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -10,24 +10,24 @@
define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
; SSE2-LABEL: _clearupper2xi64a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper2xi64a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper2xi64a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper2xi64a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
@@ -44,21 +44,21 @@ define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
define <4 x i64> @_clearupper4xi64a(<4 x i64>) nounwind {
; SSE2-LABEL: _clearupper4xi64a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [4294967295,4294967295]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi64a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi64a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
@@ -83,18 +83,18 @@ define <4 x i64> @_clearupper4xi64a(<4 x i64>) nounwind {
define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
; SSE2-LABEL: _clearupper4xi32a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi32a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi32a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: retq
@@ -119,26 +119,26 @@ define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
define <8 x i32> @_clearupper8xi32a(<8 x i32>) nounwind {
; SSE2-LABEL: _clearupper8xi32a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,65535,65535,65535]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper8xi32a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper8xi32a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper8xi32a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
@@ -179,12 +179,12 @@ define <8 x i32> @_clearupper8xi32a(<8 x i32>) nounwind {
define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper8xi16a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %0, i32 0
@@ -224,14 +224,14 @@ define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
define <16 x i16> @_clearupper16xi16a(<16 x i16>) nounwind {
; SSE-LABEL: _clearupper16xi16a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi16a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i16> %0, i32 0
@@ -303,7 +303,7 @@ define <16 x i16> @_clearupper16xi16a(<16 x i16>) nounwind {
define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
; SSE2-LABEL: _clearupper16xi8a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: movd %eax, %xmm0
@@ -352,12 +352,12 @@ define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper16xi8a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: andps {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper16xi8a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %0, i32 0
@@ -429,7 +429,7 @@ define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
define <32 x i8> @_clearupper32xi8a(<32 x i8>) nounwind {
; SSE2-LABEL: _clearupper32xi8a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
@@ -524,14 +524,14 @@ define <32 x i8> @_clearupper32xi8a(<32 x i8>) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper32xi8a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE42-NEXT: andps %xmm2, %xmm0
; SSE42-NEXT: andps %xmm2, %xmm1
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper32xi8a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%x0 = extractelement <32 x i8> %0, i32 0
@@ -667,24 +667,24 @@ define <32 x i8> @_clearupper32xi8a(<32 x i8>) nounwind {
define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
; SSE2-LABEL: _clearupper2xi64b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper2xi64b:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper2xi64b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper2xi64b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
@@ -697,21 +697,21 @@ define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind {
; SSE2-LABEL: _clearupper4xi64b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi64b:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi64b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
@@ -726,18 +726,18 @@ define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind {
define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
; SSE2-LABEL: _clearupper4xi32b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi32b:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi32b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: retq
@@ -752,26 +752,26 @@ define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
define <8 x i32> @_clearupper8xi32b(<8 x i32>) nounwind {
; SSE2-LABEL: _clearupper8xi32b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper8xi32b:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper8xi32b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper8xi32b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
@@ -790,12 +790,12 @@ define <8 x i32> @_clearupper8xi32b(<8 x i32>) nounwind {
define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper8xi16b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x8 = bitcast <8 x i16> %0 to <16 x i8>
@@ -813,14 +813,14 @@ define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
define <16 x i16> @_clearupper16xi16b(<16 x i16>) nounwind {
; SSE-LABEL: _clearupper16xi16b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi16b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -850,7 +850,7 @@ define <16 x i16> @_clearupper16xi16b(<16 x i16>) nounwind {
define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
; SSE2-LABEL: _clearupper16xi8b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -926,7 +926,7 @@ define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper16xi8b:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pushq %r14
; SSE42-NEXT: pushq %rbx
; SSE42-NEXT: movq %xmm0, %rcx
@@ -1001,7 +1001,7 @@ define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper16xi8b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: pushq %r15
; AVX-NEXT: pushq %r14
@@ -1103,7 +1103,7 @@ define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; SSE2-LABEL: _clearupper32xi8b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
@@ -1179,7 +1179,7 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper32xi8b:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pushq %r14
; SSE42-NEXT: pushq %rbx
; SSE42-NEXT: movq %xmm0, %rcx
@@ -1254,7 +1254,7 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper32xi8b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
@@ -1425,7 +1425,7 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper32xi8b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
@@ -1633,24 +1633,24 @@ define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
; SSE2-LABEL: _clearupper2xi64c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper2xi64c:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper2xi64c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper2xi64c:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
@@ -1660,21 +1660,21 @@ define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
define <4 x i64> @_clearupper4xi64c(<4 x i64>) nounwind {
; SSE2-LABEL: _clearupper4xi64c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi64c:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
@@ -1684,18 +1684,18 @@ define <4 x i64> @_clearupper4xi64c(<4 x i64>) nounwind {
define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
; SSE2-LABEL: _clearupper4xi32c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper4xi32c:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE42-NEXT: retq
;
; AVX-LABEL: _clearupper4xi32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: retq
@@ -1705,26 +1705,26 @@ define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
define <8 x i32> @_clearupper8xi32c(<8 x i32>) nounwind {
; SSE2-LABEL: _clearupper8xi32c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: _clearupper8xi32c:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE42-NEXT: retq
;
; AVX1-LABEL: _clearupper8xi32c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: _clearupper8xi32c:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
@@ -1734,12 +1734,12 @@ define <8 x i32> @_clearupper8xi32c(<8 x i32>) nounwind {
define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper8xi16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%r = and <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
@@ -1748,14 +1748,14 @@ define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
define <16 x i16> @_clearupper16xi16c(<16 x i16>) nounwind {
; SSE-LABEL: _clearupper16xi16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%r = and <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
@@ -1764,12 +1764,12 @@ define <16 x i16> @_clearupper16xi16c(<16 x i16>) nounwind {
define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%r = and <16 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
@@ -1778,14 +1778,14 @@ define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
define <32 x i8> @_clearupper32xi8c(<32 x i8>) nounwind {
; SSE-LABEL: _clearupper32xi8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper32xi8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%r = and <32 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
diff --git a/test/CodeGen/X86/clflushopt-schedule.ll b/test/CodeGen/X86/clflushopt-schedule.ll
index 3ba02f0a66e..14b4551cabc 100644
--- a/test/CodeGen/X86/clflushopt-schedule.ll
+++ b/test/CodeGen/X86/clflushopt-schedule.ll
@@ -7,27 +7,27 @@
define void @clflushopt(i8* %p) nounwind {
; GENERIC-LABEL: clflushopt:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: clflushopt (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; GLM-LABEL: clflushopt:
-; GLM: # BB#0:
+; GLM: # %bb.0:
; GLM-NEXT: clflushopt (%rdi) # sched: [3:1.00]
; GLM-NEXT: retq # sched: [4:1.00]
;
; SKYLAKE-LABEL: clflushopt:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: clflushopt (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: clflushopt:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: clflushopt (%rdi) # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: clflushopt:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: clflushopt (%rdi) # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.clflushopt(i8* %p)
diff --git a/test/CodeGen/X86/clflushopt.ll b/test/CodeGen/X86/clflushopt.ll
index 1f699a88e6c..decd4cc35ab 100644
--- a/test/CodeGen/X86/clflushopt.ll
+++ b/test/CodeGen/X86/clflushopt.ll
@@ -4,13 +4,13 @@
define void @clflushopt(i8* %p) nounwind {
; X86-LABEL: clflushopt:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: clflushopt (%eax)
; X86-NEXT: retl
;
; X64-LABEL: clflushopt:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: clflushopt (%rdi)
; X64-NEXT: retq
tail call void @llvm.x86.clflushopt(i8* %p)
diff --git a/test/CodeGen/X86/clwb.ll b/test/CodeGen/X86/clwb.ll
index fe11383481a..0bbb14917f7 100644
--- a/test/CodeGen/X86/clwb.ll
+++ b/test/CodeGen/X86/clwb.ll
@@ -3,7 +3,7 @@
define void @clwb(i8* %p) nounwind {
; CHECK-LABEL: clwb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: clwb (%eax)
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/clz.ll b/test/CodeGen/X86/clz.ll
index 4e479365fb8..5f58e79a94e 100644
--- a/test/CodeGen/X86/clz.ll
+++ b/test/CodeGen/X86/clz.ll
@@ -16,28 +16,28 @@ declare i64 @llvm.ctlz.i64(i64, i1)
define i8 @cttz_i8(i8 %x) {
; X32-LABEL: cttz_i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bsfl %eax, %eax
; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: tzcntl %eax, %eax
; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: tzcntl %eax, %eax
; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -48,22 +48,22 @@ define i8 @cttz_i8(i8 %x) {
define i16 @cttz_i16(i16 %x) {
; X32-LABEL: cttz_i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: bsfw {{[0-9]+}}(%esp), %ax
; X32-NEXT: retl
;
; X64-LABEL: cttz_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bsfw %di, %ax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i16:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: tzcntw {{[0-9]+}}(%esp), %ax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i16:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: tzcntw %di, %ax
; X64-CLZ-NEXT: retq
%tmp = call i16 @llvm.cttz.i16( i16 %x, i1 true )
@@ -72,22 +72,22 @@ define i16 @cttz_i16(i16 %x) {
define i32 @cttz_i32(i32 %x) {
; X32-LABEL: cttz_i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: bsfl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: cttz_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bsfl %edi, %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i32:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i32:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: tzcntl %edi, %eax
; X64-CLZ-NEXT: retq
%tmp = call i32 @llvm.cttz.i32( i32 %x, i1 true )
@@ -96,11 +96,11 @@ define i32 @cttz_i32(i32 %x) {
define i64 @cttz_i64(i64 %x) {
; X32-LABEL: cttz_i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testl %eax, %eax
; X32-NEXT: jne .LBB3_1
-; X32-NEXT: # BB#2:
+; X32-NEXT: # %bb.2:
; X32-NEXT: bsfl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl $32, %eax
; X32-NEXT: xorl %edx, %edx
@@ -111,16 +111,16 @@ define i64 @cttz_i64(i64 %x) {
; X32-NEXT: retl
;
; X64-LABEL: cttz_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bsfq %rdi, %rax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i64:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: testl %eax, %eax
; X32-CLZ-NEXT: jne .LBB3_1
-; X32-CLZ-NEXT: # BB#2:
+; X32-CLZ-NEXT: # %bb.2:
; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: addl $32, %eax
; X32-CLZ-NEXT: xorl %edx, %edx
@@ -131,7 +131,7 @@ define i64 @cttz_i64(i64 %x) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i64:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: tzcntq %rdi, %rax
; X64-CLZ-NEXT: retq
%tmp = call i64 @llvm.cttz.i64( i64 %x, i1 true )
@@ -140,7 +140,7 @@ define i64 @cttz_i64(i64 %x) {
define i8 @ctlz_i8(i8 %x) {
; X32-LABEL: ctlz_i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
@@ -148,7 +148,7 @@ define i8 @ctlz_i8(i8 %x) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
@@ -156,7 +156,7 @@ define i8 @ctlz_i8(i8 %x) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
@@ -164,7 +164,7 @@ define i8 @ctlz_i8(i8 %x) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
@@ -176,26 +176,26 @@ define i8 @ctlz_i8(i8 %x) {
define i16 @ctlz_i16(i16 %x) {
; X32-LABEL: ctlz_i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: bsrw {{[0-9]+}}(%esp), %ax
; X32-NEXT: xorl $15, %eax
; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bsrw %di, %ax
; X64-NEXT: xorl $15, %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i16:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: lzcntw {{[0-9]+}}(%esp), %ax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i16:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: lzcntw %di, %ax
; X64-CLZ-NEXT: retq
%tmp2 = call i16 @llvm.ctlz.i16( i16 %x, i1 true )
@@ -204,24 +204,24 @@ define i16 @ctlz_i16(i16 %x) {
define i32 @ctlz_i32(i32 %x) {
; X32-LABEL: ctlz_i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: bsrl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl $31, %eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bsrl %edi, %eax
; X64-NEXT: xorl $31, %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i32:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i32:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: lzcntl %edi, %eax
; X64-CLZ-NEXT: retq
%tmp = call i32 @llvm.ctlz.i32( i32 %x, i1 true )
@@ -230,11 +230,11 @@ define i32 @ctlz_i32(i32 %x) {
define i64 @ctlz_i64(i64 %x) {
; X32-LABEL: ctlz_i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testl %eax, %eax
; X32-NEXT: jne .LBB7_1
-; X32-NEXT: # BB#2:
+; X32-NEXT: # %bb.2:
; X32-NEXT: bsrl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl $31, %eax
; X32-NEXT: addl $32, %eax
@@ -247,17 +247,17 @@ define i64 @ctlz_i64(i64 %x) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bsrq %rdi, %rax
; X64-NEXT: xorq $63, %rax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i64:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: testl %eax, %eax
; X32-CLZ-NEXT: jne .LBB7_1
-; X32-CLZ-NEXT: # BB#2:
+; X32-CLZ-NEXT: # %bb.2:
; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: addl $32, %eax
; X32-CLZ-NEXT: xorl %edx, %edx
@@ -268,7 +268,7 @@ define i64 @ctlz_i64(i64 %x) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i64:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: lzcntq %rdi, %rax
; X64-CLZ-NEXT: retq
%tmp = call i64 @llvm.ctlz.i64( i64 %x, i1 true )
@@ -278,11 +278,11 @@ define i64 @ctlz_i64(i64 %x) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i8 @ctlz_i8_zero_test(i8 %n) {
; X32-LABEL: ctlz_i8_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: testb %al, %al
; X32-NEXT: je .LBB8_1
-; X32-NEXT: # BB#2: # %cond.false
+; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
@@ -294,10 +294,10 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testb %dil, %dil
; X64-NEXT: je .LBB8_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
@@ -309,7 +309,7 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
@@ -317,7 +317,7 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
@@ -330,11 +330,11 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i16 @ctlz_i16_zero_test(i16 %n) {
; X32-LABEL: ctlz_i16_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testw %ax, %ax
; X32-NEXT: je .LBB9_1
-; X32-NEXT: # BB#2: # %cond.false
+; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: bsrw %ax, %ax
; X32-NEXT: xorl $15, %eax
; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -345,10 +345,10 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i16_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testw %di, %di
; X64-NEXT: je .LBB9_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsrw %di, %ax
; X64-NEXT: xorl $15, %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -359,12 +359,12 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i16_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: lzcntw {{[0-9]+}}(%esp), %ax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i16_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: lzcntw %di, %ax
; X64-CLZ-NEXT: retq
%tmp1 = call i16 @llvm.ctlz.i16(i16 %n, i1 false)
@@ -374,11 +374,11 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i32 @ctlz_i32_zero_test(i32 %n) {
; X32-LABEL: ctlz_i32_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testl %eax, %eax
; X32-NEXT: je .LBB10_1
-; X32-NEXT: # BB#2: # %cond.false
+; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $31, %eax
; X32-NEXT: retl
@@ -387,10 +387,10 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i32_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testl %edi, %edi
; X64-NEXT: je .LBB10_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsrl %edi, %eax
; X64-NEXT: xorl $31, %eax
; X64-NEXT: retq
@@ -399,12 +399,12 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i32_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i32_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: lzcntl %edi, %eax
; X64-CLZ-NEXT: retq
%tmp1 = call i32 @llvm.ctlz.i32(i32 %n, i1 false)
@@ -414,17 +414,17 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i64 @ctlz_i64_zero_test(i64 %n) {
; X32-LABEL: ctlz_i64_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: bsrl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl $63, %eax
; X32-NEXT: je .LBB11_2
-; X32-NEXT: # BB#1:
+; X32-NEXT: # %bb.1:
; X32-NEXT: movl %edx, %eax
; X32-NEXT: .LBB11_2:
; X32-NEXT: testl %ecx, %ecx
; X32-NEXT: jne .LBB11_3
-; X32-NEXT: # BB#4:
+; X32-NEXT: # %bb.4:
; X32-NEXT: xorl $31, %eax
; X32-NEXT: addl $32, %eax
; X32-NEXT: xorl %edx, %edx
@@ -436,10 +436,10 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i64_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: je .LBB11_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsrq %rdi, %rax
; X64-NEXT: xorq $63, %rax
; X64-NEXT: retq
@@ -448,11 +448,11 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i64_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: testl %eax, %eax
; X32-CLZ-NEXT: jne .LBB11_1
-; X32-CLZ-NEXT: # BB#2:
+; X32-CLZ-NEXT: # %bb.2:
; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: addl $32, %eax
; X32-CLZ-NEXT: xorl %edx, %edx
@@ -463,7 +463,7 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i64_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: lzcntq %rdi, %rax
; X64-CLZ-NEXT: retq
%tmp1 = call i64 @llvm.ctlz.i64(i64 %n, i1 false)
@@ -473,11 +473,11 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i8 @cttz_i8_zero_test(i8 %n) {
; X32-LABEL: cttz_i8_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: testb %al, %al
; X32-NEXT: je .LBB12_1
-; X32-NEXT: # BB#2: # %cond.false
+; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsfl %eax, %eax
; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -488,10 +488,10 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testb %dil, %dil
; X64-NEXT: je .LBB12_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -502,7 +502,7 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: orl $256, %eax # imm = 0x100
; X32-CLZ-NEXT: tzcntl %eax, %eax
@@ -510,7 +510,7 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: orl $256, %eax # imm = 0x100
; X64-CLZ-NEXT: tzcntl %eax, %eax
@@ -523,11 +523,11 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i16 @cttz_i16_zero_test(i16 %n) {
; X32-LABEL: cttz_i16_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testw %ax, %ax
; X32-NEXT: je .LBB13_1
-; X32-NEXT: # BB#2: # %cond.false
+; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: bsfw %ax, %ax
; X32-NEXT: retl
; X32-NEXT: .LBB13_1
@@ -535,10 +535,10 @@ define i16 @cttz_i16_zero_test(i16 %n) {
; X32-NEXT: retl
;
; X64-LABEL: cttz_i16_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testw %di, %di
; X64-NEXT: je .LBB13_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsfw %di, %ax
; X64-NEXT: retq
; X64-NEXT: .LBB13_1:
@@ -546,12 +546,12 @@ define i16 @cttz_i16_zero_test(i16 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i16_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: tzcntw {{[0-9]+}}(%esp), %ax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i16_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: tzcntw %di, %ax
; X64-CLZ-NEXT: retq
%tmp1 = call i16 @llvm.cttz.i16(i16 %n, i1 false)
@@ -561,11 +561,11 @@ define i16 @cttz_i16_zero_test(i16 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i32 @cttz_i32_zero_test(i32 %n) {
; X32-LABEL: cttz_i32_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testl %eax, %eax
; X32-NEXT: je .LBB14_1
-; X32-NEXT: # BB#2: # %cond.false
+; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: bsfl %eax, %eax
; X32-NEXT: retl
; X32-NEXT: .LBB14_1
@@ -573,10 +573,10 @@ define i32 @cttz_i32_zero_test(i32 %n) {
; X32-NEXT: retl
;
; X64-LABEL: cttz_i32_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testl %edi, %edi
; X64-NEXT: je .LBB14_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsfl %edi, %eax
; X64-NEXT: retq
; X64-NEXT: .LBB14_1:
@@ -584,12 +584,12 @@ define i32 @cttz_i32_zero_test(i32 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i32_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i32_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: tzcntl %edi, %eax
; X64-CLZ-NEXT: retq
%tmp1 = call i32 @llvm.cttz.i32(i32 %n, i1 false)
@@ -599,17 +599,17 @@ define i32 @cttz_i32_zero_test(i32 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i64 @cttz_i64_zero_test(i64 %n) {
; X32-LABEL: cttz_i64_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: bsfl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl $32, %eax
; X32-NEXT: je .LBB15_2
-; X32-NEXT: # BB#1:
+; X32-NEXT: # %bb.1:
; X32-NEXT: movl %edx, %eax
; X32-NEXT: .LBB15_2:
; X32-NEXT: testl %ecx, %ecx
; X32-NEXT: jne .LBB15_3
-; X32-NEXT: # BB#4:
+; X32-NEXT: # %bb.4:
; X32-NEXT: addl $32, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
@@ -619,10 +619,10 @@ define i64 @cttz_i64_zero_test(i64 %n) {
; X32-NEXT: retl
;
; X64-LABEL: cttz_i64_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: je .LBB15_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsfq %rdi, %rax
; X64-NEXT: retq
; X64-NEXT: .LBB15_1:
@@ -630,11 +630,11 @@ define i64 @cttz_i64_zero_test(i64 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i64_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: testl %eax, %eax
; X32-CLZ-NEXT: jne .LBB15_1
-; X32-CLZ-NEXT: # BB#2:
+; X32-CLZ-NEXT: # %bb.2:
; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: addl $32, %eax
; X32-CLZ-NEXT: xorl %edx, %edx
@@ -645,7 +645,7 @@ define i64 @cttz_i64_zero_test(i64 %n) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i64_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: tzcntq %rdi, %rax
; X64-CLZ-NEXT: retq
%tmp1 = call i64 @llvm.cttz.i64(i64 %n, i1 false)
@@ -659,11 +659,11 @@ define i64 @cttz_i64_zero_test(i64 %n) {
; codegen doesn't know how to delete the movl and je.
define i32 @ctlz_i32_fold_cmov(i32 %n) {
; X32-LABEL: ctlz_i32_fold_cmov:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: orl $1, %eax
; X32-NEXT: je .LBB16_1
-; X32-NEXT: # BB#2: # %cond.false
+; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $31, %eax
; X32-NEXT: retl
@@ -672,10 +672,10 @@ define i32 @ctlz_i32_fold_cmov(i32 %n) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i32_fold_cmov:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orl $1, %edi
; X64-NEXT: je .LBB16_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsrl %edi, %eax
; X64-NEXT: xorl $31, %eax
; X64-NEXT: retq
@@ -684,14 +684,14 @@ define i32 @ctlz_i32_fold_cmov(i32 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i32_fold_cmov:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: orl $1, %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i32_fold_cmov:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: orl $1, %edi
; X64-CLZ-NEXT: lzcntl %edi, %eax
; X64-CLZ-NEXT: retq
@@ -705,23 +705,23 @@ define i32 @ctlz_i32_fold_cmov(i32 %n) {
; FIXME: We should probably select BSR instead of LZCNT in these circumstances.
define i32 @ctlz_bsr(i32 %n) {
; X32-LABEL: ctlz_bsr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: bsrl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_bsr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bsrl %edi, %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_bsr:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: xorl $31, %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_bsr:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: lzcntl %edi, %eax
; X64-CLZ-NEXT: xorl $31, %eax
; X64-CLZ-NEXT: retq
@@ -735,11 +735,11 @@ define i32 @ctlz_bsr(i32 %n) {
; codegen doesn't know how to combine the $32 and $31 into $63.
define i32 @ctlz_bsr_zero_test(i32 %n) {
; X32-LABEL: ctlz_bsr_zero_test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testl %eax, %eax
; X32-NEXT: je .LBB18_1
-; X32-NEXT: # BB#2: # %cond.false
+; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $31, %eax
; X32-NEXT: xorl $31, %eax
@@ -750,10 +750,10 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_bsr_zero_test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testl %edi, %edi
; X64-NEXT: je .LBB18_1
-; X64-NEXT: # BB#2: # %cond.false
+; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsrl %edi, %eax
; X64-NEXT: xorl $31, %eax
; X64-NEXT: xorl $31, %eax
@@ -764,13 +764,13 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_bsr_zero_test:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: xorl $31, %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_bsr_zero_test:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: lzcntl %edi, %eax
; X64-CLZ-NEXT: xorl $31, %eax
; X64-CLZ-NEXT: retq
@@ -781,7 +781,7 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
define i8 @cttz_i8_knownbits(i8 %x) {
; X32-LABEL: cttz_i8_knownbits:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: orb $2, %al
; X32-NEXT: movzbl %al, %eax
@@ -790,7 +790,7 @@ define i8 @cttz_i8_knownbits(i8 %x) {
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8_knownbits:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orb $2, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
@@ -798,7 +798,7 @@ define i8 @cttz_i8_knownbits(i8 %x) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8_knownbits:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-CLZ-NEXT: orb $2, %al
; X32-CLZ-NEXT: movzbl %al, %eax
@@ -807,7 +807,7 @@ define i8 @cttz_i8_knownbits(i8 %x) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_knownbits:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: orb $2, %dil
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: tzcntl %eax, %eax
@@ -821,7 +821,7 @@ define i8 @cttz_i8_knownbits(i8 %x) {
define i8 @ctlz_i8_knownbits(i8 %x) {
; X32-LABEL: ctlz_i8_knownbits:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: orb $64, %al
; X32-NEXT: movzbl %al, %eax
@@ -831,7 +831,7 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8_knownbits:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orb $64, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
@@ -840,7 +840,7 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8_knownbits:
-; X32-CLZ: # BB#0:
+; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-CLZ-NEXT: orb $64, %al
; X32-CLZ-NEXT: movzbl %al, %eax
@@ -850,7 +850,7 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_knownbits:
-; X64-CLZ: # BB#0:
+; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: orb $64, %dil
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
diff --git a/test/CodeGen/X86/clzero.ll b/test/CodeGen/X86/clzero.ll
index f15d4deedef..d08470dda92 100644
--- a/test/CodeGen/X86/clzero.ll
+++ b/test/CodeGen/X86/clzero.ll
@@ -4,13 +4,13 @@
define void @foo(i8* %p) #0 {
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: leaq (%rdi), %rax
; X64-NEXT: clzero
; X64-NEXT: retq
;
; X32-LABEL: foo:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: leal (%eax), %eax
; X32-NEXT: clzero
diff --git a/test/CodeGen/X86/cmov-into-branch.ll b/test/CodeGen/X86/cmov-into-branch.ll
index 4a29bb4e1db..4c1b2bcb162 100644
--- a/test/CodeGen/X86/cmov-into-branch.ll
+++ b/test/CodeGen/X86/cmov-into-branch.ll
@@ -4,7 +4,7 @@
; cmp with single-use load, should not form branch.
define i32 @test1(double %a, double* nocapture %b, i32 %x, i32 %y) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: ucomisd (%rdi), %xmm0
; CHECK-NEXT: cmovbel %edx, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -18,7 +18,7 @@ define i32 @test1(double %a, double* nocapture %b, i32 %x, i32 %y) {
; Sanity check: no load.
define i32 @test2(double %a, double %b, i32 %x, i32 %y) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: ucomisd %xmm1, %xmm0
; CHECK-NEXT: cmovbel %esi, %edi
; CHECK-NEXT: movl %edi, %eax
@@ -31,7 +31,7 @@ define i32 @test2(double %a, double %b, i32 %x, i32 %y) {
; Multiple uses of the load.
define i32 @test4(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: cmpl %edi, %eax
; CHECK-NEXT: cmovael %ecx, %edx
@@ -47,7 +47,7 @@ define i32 @test4(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
; Multiple uses of the cmp.
define i32 @test5(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %edi, (%rsi)
; CHECK-NEXT: cmoval %edi, %ecx
; CHECK-NEXT: cmovael %edx, %ecx
@@ -64,7 +64,7 @@ define i32 @test5(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
; Zero-extended select.
define void @test6(i32 %a, i32 %x, i32* %y.ptr, i64* %z.ptr) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovnsl (%rdx), %esi
@@ -82,7 +82,7 @@ entry:
; If a select is not obviously predictable, don't turn it into a branch.
define i32 @weighted_select1(i32 %a, i32 %b) {
; CHECK-LABEL: weighted_select1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovnel %edi, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -95,10 +95,10 @@ define i32 @weighted_select1(i32 %a, i32 %b) {
; If a select is obviously predictable, turn it into a branch.
define i32 @weighted_select2(i32 %a, i32 %b) {
; CHECK-LABEL: weighted_select2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jne .LBB6_2
-; CHECK-NEXT: # BB#1: # %select.false
+; CHECK-NEXT: # %bb.1: # %select.false
; CHECK-NEXT: movl %esi, %edi
; CHECK-NEXT: .LBB6_2: # %select.end
; CHECK-NEXT: movl %edi, %eax
@@ -114,10 +114,10 @@ define i32 @weighted_select2(i32 %a, i32 %b) {
; TODO: But likely true vs. likely false should affect basic block placement?
define i32 @weighted_select3(i32 %a, i32 %b) {
; CHECK-LABEL: weighted_select3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: je .LBB7_1
-; CHECK-NEXT: # BB#2: # %select.end
+; CHECK-NEXT: # %bb.2: # %select.end
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB7_1: # %select.false
@@ -132,7 +132,7 @@ define i32 @weighted_select3(i32 %a, i32 %b) {
; Weightlessness is no reason to die.
define i32 @unweighted_select(i32 %a, i32 %b) {
; CHECK-LABEL: unweighted_select:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovnel %edi, %esi
; CHECK-NEXT: movl %esi, %eax
diff --git a/test/CodeGen/X86/cmov-promotion.ll b/test/CodeGen/X86/cmov-promotion.ll
index 7d8c0f492f6..1021a5b5716 100644
--- a/test/CodeGen/X86/cmov-promotion.ll
+++ b/test/CodeGen/X86/cmov-promotion.ll
@@ -4,11 +4,11 @@
define i16 @cmov_zpromotion_8_to_16(i1 %c) {
; CMOV-LABEL: cmov_zpromotion_8_to_16:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movb $117, %al
; CMOV-NEXT: jne .LBB0_2
-; CMOV-NEXT: # BB#1:
+; CMOV-NEXT: # %bb.1:
; CMOV-NEXT: movb $-19, %al
; CMOV-NEXT: .LBB0_2:
; CMOV-NEXT: movzbl %al, %eax
@@ -16,11 +16,11 @@ define i16 @cmov_zpromotion_8_to_16(i1 %c) {
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_8_to_16:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movb $117, %al
; NO_CMOV-NEXT: jne .LBB0_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movb $-19, %al
; NO_CMOV-NEXT: .LBB0_2:
; NO_CMOV-NEXT: movzbl %al, %eax
@@ -33,22 +33,22 @@ define i16 @cmov_zpromotion_8_to_16(i1 %c) {
define i32 @cmov_zpromotion_8_to_32(i1 %c) {
; CMOV-LABEL: cmov_zpromotion_8_to_32:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movb $126, %al
; CMOV-NEXT: jne .LBB1_2
-; CMOV-NEXT: # BB#1:
+; CMOV-NEXT: # %bb.1:
; CMOV-NEXT: movb $-1, %al
; CMOV-NEXT: .LBB1_2:
; CMOV-NEXT: movzbl %al, %eax
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_8_to_32:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movb $126, %al
; NO_CMOV-NEXT: jne .LBB1_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movb $-1, %al
; NO_CMOV-NEXT: .LBB1_2:
; NO_CMOV-NEXT: movzbl %al, %eax
@@ -60,22 +60,22 @@ define i32 @cmov_zpromotion_8_to_32(i1 %c) {
define i64 @cmov_zpromotion_8_to_64(i1 %c) {
; CMOV-LABEL: cmov_zpromotion_8_to_64:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movb $126, %al
; CMOV-NEXT: jne .LBB2_2
-; CMOV-NEXT: # BB#1:
+; CMOV-NEXT: # %bb.1:
; CMOV-NEXT: movb $-1, %al
; CMOV-NEXT: .LBB2_2:
; CMOV-NEXT: movzbl %al, %eax
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_8_to_64:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movb $126, %al
; NO_CMOV-NEXT: jne .LBB2_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movb $-1, %al
; NO_CMOV-NEXT: .LBB2_2:
; NO_CMOV-NEXT: movzbl %al, %eax
@@ -88,7 +88,7 @@ define i64 @cmov_zpromotion_8_to_64(i1 %c) {
define i32 @cmov_zpromotion_16_to_32(i1 %c) {
; CMOV-LABEL: cmov_zpromotion_16_to_32:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movl $12414, %ecx # imm = 0x307E
; CMOV-NEXT: movl $65535, %eax # imm = 0xFFFF
@@ -96,11 +96,11 @@ define i32 @cmov_zpromotion_16_to_32(i1 %c) {
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_16_to_32:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movl $12414, %eax # imm = 0x307E
; NO_CMOV-NEXT: jne .LBB3_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movl $65535, %eax # imm = 0xFFFF
; NO_CMOV-NEXT: .LBB3_2:
; NO_CMOV-NEXT: retl
@@ -111,7 +111,7 @@ define i32 @cmov_zpromotion_16_to_32(i1 %c) {
define i64 @cmov_zpromotion_16_to_64(i1 %c) {
; CMOV-LABEL: cmov_zpromotion_16_to_64:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movl $12414, %ecx # imm = 0x307E
; CMOV-NEXT: movl $65535, %eax # imm = 0xFFFF
@@ -119,11 +119,11 @@ define i64 @cmov_zpromotion_16_to_64(i1 %c) {
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_16_to_64:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movl $12414, %eax # imm = 0x307E
; NO_CMOV-NEXT: jne .LBB4_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movl $65535, %eax # imm = 0xFFFF
; NO_CMOV-NEXT: .LBB4_2:
; NO_CMOV-NEXT: xorl %edx, %edx
@@ -135,7 +135,7 @@ define i64 @cmov_zpromotion_16_to_64(i1 %c) {
define i64 @cmov_zpromotion_32_to_64(i1 %c) {
; CMOV-LABEL: cmov_zpromotion_32_to_64:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movl $12414, %ecx # imm = 0x307E
; CMOV-NEXT: movl $-1, %eax
@@ -143,11 +143,11 @@ define i64 @cmov_zpromotion_32_to_64(i1 %c) {
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_32_to_64:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movl $12414, %eax # imm = 0x307E
; NO_CMOV-NEXT: jne .LBB5_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movl $-1, %eax
; NO_CMOV-NEXT: .LBB5_2:
; NO_CMOV-NEXT: xorl %edx, %edx
@@ -159,11 +159,11 @@ define i64 @cmov_zpromotion_32_to_64(i1 %c) {
define i16 @cmov_spromotion_8_to_16(i1 %c) {
; CMOV-LABEL: cmov_spromotion_8_to_16:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movb $117, %al
; CMOV-NEXT: jne .LBB6_2
-; CMOV-NEXT: # BB#1:
+; CMOV-NEXT: # %bb.1:
; CMOV-NEXT: movb $-19, %al
; CMOV-NEXT: .LBB6_2:
; CMOV-NEXT: movsbl %al, %eax
@@ -171,11 +171,11 @@ define i16 @cmov_spromotion_8_to_16(i1 %c) {
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_8_to_16:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movb $117, %al
; NO_CMOV-NEXT: jne .LBB6_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movb $-19, %al
; NO_CMOV-NEXT: .LBB6_2:
; NO_CMOV-NEXT: movsbl %al, %eax
@@ -188,22 +188,22 @@ define i16 @cmov_spromotion_8_to_16(i1 %c) {
define i32 @cmov_spromotion_8_to_32(i1 %c) {
; CMOV-LABEL: cmov_spromotion_8_to_32:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movb $126, %al
; CMOV-NEXT: jne .LBB7_2
-; CMOV-NEXT: # BB#1:
+; CMOV-NEXT: # %bb.1:
; CMOV-NEXT: movb $-1, %al
; CMOV-NEXT: .LBB7_2:
; CMOV-NEXT: movsbl %al, %eax
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_8_to_32:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movb $126, %al
; NO_CMOV-NEXT: jne .LBB7_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movb $-1, %al
; NO_CMOV-NEXT: .LBB7_2:
; NO_CMOV-NEXT: movsbl %al, %eax
@@ -215,22 +215,22 @@ define i32 @cmov_spromotion_8_to_32(i1 %c) {
define i64 @cmov_spromotion_8_to_64(i1 %c) {
; CMOV-LABEL: cmov_spromotion_8_to_64:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movb $126, %al
; CMOV-NEXT: jne .LBB8_2
-; CMOV-NEXT: # BB#1:
+; CMOV-NEXT: # %bb.1:
; CMOV-NEXT: movb $-1, %al
; CMOV-NEXT: .LBB8_2:
; CMOV-NEXT: movsbq %al, %rax
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_8_to_64:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movb $126, %al
; NO_CMOV-NEXT: jne .LBB8_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movb $-1, %al
; NO_CMOV-NEXT: .LBB8_2:
; NO_CMOV-NEXT: movsbl %al, %eax
@@ -244,7 +244,7 @@ define i64 @cmov_spromotion_8_to_64(i1 %c) {
define i32 @cmov_spromotion_16_to_32(i1 %c) {
; CMOV-LABEL: cmov_spromotion_16_to_32:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movl $12414, %ecx # imm = 0x307E
; CMOV-NEXT: movl $-1, %eax
@@ -252,11 +252,11 @@ define i32 @cmov_spromotion_16_to_32(i1 %c) {
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_16_to_32:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movl $12414, %eax # imm = 0x307E
; NO_CMOV-NEXT: jne .LBB9_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movl $-1, %eax
; NO_CMOV-NEXT: .LBB9_2:
; NO_CMOV-NEXT: retl
@@ -267,7 +267,7 @@ define i32 @cmov_spromotion_16_to_32(i1 %c) {
define i64 @cmov_spromotion_16_to_64(i1 %c) {
; CMOV-LABEL: cmov_spromotion_16_to_64:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movl $12414, %ecx # imm = 0x307E
; CMOV-NEXT: movq $-1, %rax
@@ -275,11 +275,11 @@ define i64 @cmov_spromotion_16_to_64(i1 %c) {
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_16_to_64:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movl $12414, %eax # imm = 0x307E
; NO_CMOV-NEXT: jne .LBB10_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movl $-1, %eax
; NO_CMOV-NEXT: .LBB10_2:
; NO_CMOV-NEXT: movl %eax, %edx
@@ -292,7 +292,7 @@ define i64 @cmov_spromotion_16_to_64(i1 %c) {
define i64 @cmov_spromotion_32_to_64(i1 %c) {
; CMOV-LABEL: cmov_spromotion_32_to_64:
-; CMOV: # BB#0:
+; CMOV: # %bb.0:
; CMOV-NEXT: testb $1, %dil
; CMOV-NEXT: movl $12414, %eax # imm = 0x307E
; CMOV-NEXT: movl $-1, %ecx
@@ -301,11 +301,11 @@ define i64 @cmov_spromotion_32_to_64(i1 %c) {
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_32_to_64:
-; NO_CMOV: # BB#0:
+; NO_CMOV: # %bb.0:
; NO_CMOV-NEXT: testb $1, {{[0-9]+}}(%esp)
; NO_CMOV-NEXT: movl $12414, %eax # imm = 0x307E
; NO_CMOV-NEXT: jne .LBB11_2
-; NO_CMOV-NEXT: # BB#1:
+; NO_CMOV-NEXT: # %bb.1:
; NO_CMOV-NEXT: movl $-1, %eax
; NO_CMOV-NEXT: .LBB11_2:
; NO_CMOV-NEXT: movl %eax, %edx
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index 1bb5964b48a..0495b74c962 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define i32 @test1(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: movl $12, %eax
; CHECK-NEXT: cmovael (%rcx), %eax
@@ -20,7 +20,7 @@ entry:
define i32 @test2(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: movl $12, %eax
; CHECK-NEXT: cmovbl (%rcx), %eax
@@ -43,7 +43,7 @@ declare void @bar(i64) nounwind
define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %esi, %edi
@@ -77,7 +77,7 @@ define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
define i1 @test4() nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsbl {{.*}}(%rip), %edx
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: shrb $7, %al
@@ -88,7 +88,7 @@ define i1 @test4() nounwind {
; CHECK-NEXT: movb {{.*}}(%rip), %al
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je .LBB3_2
-; CHECK-NEXT: # BB#1: # %bb.i.i.i
+; CHECK-NEXT: # %bb.1: # %bb.i.i.i
; CHECK-NEXT: movb {{.*}}(%rip), %cl
; CHECK-NEXT: .LBB3_2: # %func_4.exit.i
; CHECK-NEXT: pushq %rbx
@@ -96,15 +96,15 @@ define i1 @test4() nounwind {
; CHECK-NEXT: setne %bl
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: je .LBB3_4
-; CHECK-NEXT: # BB#3: # %func_4.exit.i
+; CHECK-NEXT: # %bb.3: # %func_4.exit.i
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: .LBB3_4: # %func_4.exit.i
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je .LBB3_7
-; CHECK-NEXT: # BB#5: # %func_4.exit.i
+; CHECK-NEXT: # %bb.5: # %func_4.exit.i
; CHECK-NEXT: testb %bl, %bl
; CHECK-NEXT: jne .LBB3_7
-; CHECK-NEXT: # BB#6: # %bb.i.i
+; CHECK-NEXT: # %bb.6: # %bb.i.i
; CHECK-NEXT: movb {{.*}}(%rip), %cl
; CHECK-NEXT: xorl %ebx, %ebx
; CHECK-NEXT: movl %eax, %ecx
@@ -160,7 +160,7 @@ declare i32 @printf(i8* nocapture, ...) nounwind
; rdar://6668608
define i32 @test5(i32* nocapture %P) nounwind readonly {
; CHECK-LABEL: test5:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $41, (%rdi)
; CHECK-NEXT: setg %al
@@ -175,7 +175,7 @@ entry:
define i32 @test6(i32* nocapture %P) nounwind readonly {
; CHECK-LABEL: test6:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $42, (%rdi)
; CHECK-NEXT: setl %al
@@ -193,10 +193,10 @@ entry:
; because it isn't worth it. Just use a branch instead.
define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne .LBB6_2
-; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: movl %edx, %esi
; CHECK-NEXT: .LBB6_2:
; CHECK-NEXT: movl %esi, %eax
@@ -207,7 +207,7 @@ define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
define i32 @smin(i32 %x) {
; CHECK-LABEL: smin:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: notl %edi
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: movl $-1, %eax
diff --git a/test/CodeGen/X86/cmovcmov.ll b/test/CodeGen/X86/cmovcmov.ll
index 22c7b3f88dd..3d0a60f1a31 100644
--- a/test/CodeGen/X86/cmovcmov.ll
+++ b/test/CodeGen/X86/cmovcmov.ll
@@ -233,13 +233,13 @@ attributes #0 = { nounwind }
; was lowered to:
;
; The first two cmovs got expanded to:
-; BB#0:
-; JL_1 BB#9
-; BB#7:
-; JG_1 BB#9
-; BB#8:
-; BB#9:
-; %12 = phi(%7, BB#8, %11, BB#0, %12, BB#7)
+; %bb.0:
+; JL_1 %bb.9
+; %bb.7:
+; JG_1 %bb.9
+; %bb.8:
+; %bb.9:
+; %12 = phi(%7, %bb.8, %11, %bb.0, %12, %bb.7)
; %13 = COPY %12
; Which was invalid as %12 is not the same value as %13
diff --git a/test/CodeGen/X86/cmp.ll b/test/CodeGen/X86/cmp.ll
index 82e133d2576..1ab8421638d 100644
--- a/test/CodeGen/X86/cmp.ll
+++ b/test/CodeGen/X86/cmp.ll
@@ -5,11 +5,11 @@
define i32 @test1(i32 %X, i32* %y) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpl $0, (%rsi) # encoding: [0x83,0x3e,0x00]
; CHECK-NEXT: je .LBB0_2 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB0_2-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
; CHECK-NEXT: retq # encoding: [0xc3]
; CHECK-NEXT: .LBB0_2: # %ReturnBlock
@@ -29,12 +29,12 @@ ReturnBlock:
define i32 @test2(i32 %X, i32* %y) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl $536870911, (%rsi) # encoding: [0xf7,0x06,0xff,0xff,0xff,0x1f]
; CHECK-NEXT: # imm = 0x1FFFFFFF
; CHECK-NEXT: je .LBB1_2 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB1_2-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
; CHECK-NEXT: retq # encoding: [0xc3]
; CHECK-NEXT: .LBB1_2: # %ReturnBlock
@@ -55,11 +55,11 @@ ReturnBlock:
define i8 @test2b(i8 %X, i8* %y) nounwind {
; CHECK-LABEL: test2b:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $31, (%rsi) # encoding: [0xf6,0x06,0x1f]
; CHECK-NEXT: je .LBB2_2 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB2_2-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movb $1, %al # encoding: [0xb0,0x01]
; CHECK-NEXT: retq # encoding: [0xc3]
; CHECK-NEXT: .LBB2_2: # %ReturnBlock
@@ -80,7 +80,7 @@ ReturnBlock:
define i64 @test3(i64 %x) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
@@ -93,7 +93,7 @@ entry:
define i64 @test4(i64 %x) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
; CHECK-NEXT: setle %al # encoding: [0x0f,0x9e,0xc0]
@@ -106,17 +106,17 @@ entry:
define i32 @test5(double %A) nounwind {
; CHECK-LABEL: test5:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: ja .LBB5_3 # encoding: [0x77,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: jb .LBB5_3 # encoding: [0x72,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %bb12
+; CHECK-NEXT: # %bb.2: # %bb12
; CHECK-NEXT: movl $32, %eax # encoding: [0xb8,0x20,0x00,0x00,0x00]
; CHECK-NEXT: retq # encoding: [0xc3]
; CHECK-NEXT: .LBB5_3: # %bb8
@@ -142,11 +142,11 @@ declare i32 @foo(...)
define i32 @test6() nounwind align 2 {
; CHECK-LABEL: test6:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpq $0, -{{[0-9]+}}(%rsp) # encoding: [0x48,0x83,0x7c,0x24,0xf8,0x00]
; CHECK-NEXT: je .LBB6_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB6_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %F
+; CHECK-NEXT: # %bb.2: # %F
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
; CHECK-NEXT: .LBB6_1: # %T
@@ -168,7 +168,7 @@ F:
define i32 @test7(i64 %res) nounwind {
; CHECK-LABEL: test7:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: shrq $32, %rdi # encoding: [0x48,0xc1,0xef,0x20]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
@@ -181,7 +181,7 @@ entry:
define i32 @test8(i64 %res) nounwind {
; CHECK-LABEL: test8:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrq $32, %rdi # encoding: [0x48,0xc1,0xef,0x20]
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: cmpq $3, %rdi # encoding: [0x48,0x83,0xff,0x03]
@@ -195,7 +195,7 @@ entry:
define i32 @test9(i64 %res) nounwind {
; CHECK-LABEL: test9:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: shrq $33, %rdi # encoding: [0x48,0xc1,0xef,0x21]
; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0]
@@ -208,7 +208,7 @@ entry:
define i32 @test10(i64 %res) nounwind {
; CHECK-LABEL: test10:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: shrq $32, %rdi # encoding: [0x48,0xc1,0xef,0x20]
; CHECK-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
@@ -221,7 +221,7 @@ entry:
define i32 @test11(i64 %l) nounwind {
; CHECK-LABEL: test11:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrq $47, %rdi # encoding: [0x48,0xc1,0xef,0x2f]
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: cmpq $1, %rdi # encoding: [0x48,0x83,0xff,0x01]
@@ -236,7 +236,7 @@ entry:
define i32 @test12() ssp uwtable {
; CHECK-LABEL: test12:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax # encoding: [0x50]
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: callq test12b # encoding: [0xe8,A,A,A,A]
@@ -244,7 +244,7 @@ define i32 @test12() ssp uwtable {
; CHECK-NEXT: testb %al, %al # encoding: [0x84,0xc0]
; CHECK-NEXT: je .LBB12_2 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB12_2-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#1: # %T
+; CHECK-NEXT: # %bb.1: # %T
; CHECK-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
; CHECK-NEXT: popq %rcx # encoding: [0x59]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -267,7 +267,7 @@ declare zeroext i1 @test12b()
define i32 @test13(i32 %mask, i32 %base, i32 %intra) {
; CHECK-LABEL: test13:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $8, %dil # encoding: [0x40,0xf6,0xc7,0x08]
; CHECK-NEXT: cmovnel %edx, %esi # encoding: [0x0f,0x45,0xf2]
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
@@ -282,7 +282,7 @@ entry:
define i32 @test14(i32 %mask, i32 %base, i32 %intra) {
; CHECK-LABEL: test14:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrl $7, %edi # encoding: [0xc1,0xef,0x07]
; CHECK-NEXT: cmovnsl %edx, %esi # encoding: [0x0f,0x49,0xf2]
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
@@ -298,7 +298,7 @@ entry:
; PR19964
define zeroext i1 @test15(i32 %bf.load, i32 %n) {
; CHECK-LABEL: test15:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrl $16, %edi # encoding: [0xc1,0xef,0x10]
; CHECK-NEXT: sete %cl # encoding: [0x0f,0x94,0xc1]
; CHECK-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
@@ -316,7 +316,7 @@ entry:
define i8 @test16(i16 signext %L) {
; CHECK-LABEL: test16:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
; CHECK-NEXT: setns %al # encoding: [0x0f,0x99,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -330,7 +330,7 @@ entry:
define i8 @test17(i32 %L) {
; CHECK-LABEL: test17:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
; CHECK-NEXT: setns %al # encoding: [0x0f,0x99,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -344,7 +344,7 @@ entry:
define i8 @test18(i64 %L) {
; CHECK-LABEL: test18:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
; CHECK-NEXT: setns %al # encoding: [0x0f,0x99,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -358,7 +358,7 @@ entry:
define zeroext i1 @test19(i32 %L) {
; CHECK-LABEL: test19:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
; CHECK-NEXT: setns %al # encoding: [0x0f,0x99,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -373,7 +373,7 @@ entry:
; This test failed due to incorrect handling of "shift + icmp" sequence
define void @test20(i32 %bf.load, i8 %x1, i8* %b_addr) {
; CHECK-LABEL: test20:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: testl $16777215, %edi # encoding: [0xf7,0xc7,0xff,0xff,0xff,0x00]
; CHECK-NEXT: # imm = 0xFFFFFF
@@ -405,7 +405,7 @@ entry:
define i32 @test21(i64 %val) {
; CHECK-LABEL: test21:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: shrq $41, %rdi # encoding: [0x48,0xc1,0xef,0x29]
; CHECK-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
@@ -421,7 +421,7 @@ entry:
; AND-to-SHR transformation is enabled for eq/ne condition codes only.
define i32 @test22(i64 %val) {
; CHECK-LABEL: test22:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
@@ -434,7 +434,7 @@ entry:
define i32 @test23(i64 %val) {
; CHECK-LABEL: test23:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: testq $-1048576, %rdi # encoding: [0x48,0xf7,0xc7,0x00,0x00,0xf0,0xff]
; CHECK-NEXT: # imm = 0xFFF00000
@@ -450,7 +450,7 @@ entry:
define i32 @test24(i64 %val) {
; CHECK-LABEL: test24:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
; CHECK-NEXT: shlq $16, %rdi # encoding: [0x48,0xc1,0xe7,0x10]
; CHECK-NEXT: setne %al # encoding: [0x0f,0x95,0xc0]
diff --git a/test/CodeGen/X86/coalesce_commute_movsd.ll b/test/CodeGen/X86/coalesce_commute_movsd.ll
index 2f4680755b2..bcd7f2fb965 100644
--- a/test/CodeGen/X86/coalesce_commute_movsd.ll
+++ b/test/CodeGen/X86/coalesce_commute_movsd.ll
@@ -8,23 +8,23 @@
define <2 x double> @insert_f64(double %a0, <2 x double> %a1) {
; SSE2-LABEL: insert_f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: insert_f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
;
; AVX512-LABEL: insert_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: retq
%1 = insertelement <2 x double> %a1, double %a0, i32 0
@@ -33,23 +33,23 @@ define <2 x double> @insert_f64(double %a0, <2 x double> %a1) {
define <4 x float> @insert_f32(float %a0, <4 x float> %a1) {
; SSE2-LABEL: insert_f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: insert_f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
;
; AVX512-LABEL: insert_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: retq
%1 = insertelement <4 x float> %a1, float %a0, i32 0
diff --git a/test/CodeGen/X86/combine-64bit-vec-binop.ll b/test/CodeGen/X86/combine-64bit-vec-binop.ll
index 2935a2095bb..e434bfc11c4 100644
--- a/test/CodeGen/X86/combine-64bit-vec-binop.ll
+++ b/test/CodeGen/X86/combine-64bit-vec-binop.ll
@@ -3,7 +3,7 @@
define double @test1_add(double %A, double %B) {
; SSE41-LABEL: test1_add:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x i32>
@@ -15,7 +15,7 @@ define double @test1_add(double %A, double %B) {
define double @test2_add(double %A, double %B) {
; SSE41-LABEL: test2_add:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: paddw %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <4 x i16>
@@ -27,7 +27,7 @@ define double @test2_add(double %A, double %B) {
define double @test3_add(double %A, double %B) {
; SSE41-LABEL: test3_add:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <8 x i8>
@@ -39,7 +39,7 @@ define double @test3_add(double %A, double %B) {
define double @test1_sub(double %A, double %B) {
; SSE41-LABEL: test1_sub:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: psubd %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x i32>
@@ -51,7 +51,7 @@ define double @test1_sub(double %A, double %B) {
define double @test2_sub(double %A, double %B) {
; SSE41-LABEL: test2_sub:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: psubw %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <4 x i16>
@@ -63,7 +63,7 @@ define double @test2_sub(double %A, double %B) {
define double @test3_sub(double %A, double %B) {
; SSE41-LABEL: test3_sub:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: psubb %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <8 x i8>
@@ -75,7 +75,7 @@ define double @test3_sub(double %A, double %B) {
define double @test1_mul(double %A, double %B) {
; SSE41-LABEL: test1_mul:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x i32>
@@ -87,7 +87,7 @@ define double @test1_mul(double %A, double %B) {
define double @test2_mul(double %A, double %B) {
; SSE41-LABEL: test2_mul:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmullw %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <4 x i16>
@@ -100,7 +100,7 @@ define double @test2_mul(double %A, double %B) {
; There is no legal ISD::MUL with type MVT::v8i16.
define double @test3_mul(double %A, double %B) {
; SSE41-LABEL: test3_mul:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: pmullw %xmm2, %xmm0
@@ -115,7 +115,7 @@ define double @test3_mul(double %A, double %B) {
define double @test1_and(double %A, double %B) {
; SSE41-LABEL: test1_and:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x i32>
@@ -127,7 +127,7 @@ define double @test1_and(double %A, double %B) {
define double @test2_and(double %A, double %B) {
; SSE41-LABEL: test2_and:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <4 x i16>
@@ -139,7 +139,7 @@ define double @test2_and(double %A, double %B) {
define double @test3_and(double %A, double %B) {
; SSE41-LABEL: test3_and:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <8 x i8>
@@ -151,7 +151,7 @@ define double @test3_and(double %A, double %B) {
define double @test1_or(double %A, double %B) {
; SSE41-LABEL: test1_or:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x i32>
@@ -163,7 +163,7 @@ define double @test1_or(double %A, double %B) {
define double @test2_or(double %A, double %B) {
; SSE41-LABEL: test2_or:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <4 x i16>
@@ -175,7 +175,7 @@ define double @test2_or(double %A, double %B) {
define double @test3_or(double %A, double %B) {
; SSE41-LABEL: test3_or:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <8 x i8>
@@ -187,7 +187,7 @@ define double @test3_or(double %A, double %B) {
define double @test1_xor(double %A, double %B) {
; SSE41-LABEL: test1_xor:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x i32>
@@ -199,7 +199,7 @@ define double @test1_xor(double %A, double %B) {
define double @test2_xor(double %A, double %B) {
; SSE41-LABEL: test2_xor:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <4 x i16>
@@ -211,7 +211,7 @@ define double @test2_xor(double %A, double %B) {
define double @test3_xor(double %A, double %B) {
; SSE41-LABEL: test3_xor:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <8 x i8>
@@ -223,7 +223,7 @@ define double @test3_xor(double %A, double %B) {
define double @test_fadd(double %A, double %B) {
; SSE41-LABEL: test_fadd:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x float>
@@ -235,7 +235,7 @@ define double @test_fadd(double %A, double %B) {
define double @test_fsub(double %A, double %B) {
; SSE41-LABEL: test_fsub:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: subps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x float>
@@ -247,7 +247,7 @@ define double @test_fsub(double %A, double %B) {
define double @test_fmul(double %A, double %B) {
; SSE41-LABEL: test_fmul:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: mulps %xmm1, %xmm0
; SSE41-NEXT: retq
%1 = bitcast double %A to <2 x float>
diff --git a/test/CodeGen/X86/combine-abs.ll b/test/CodeGen/X86/combine-abs.ll
index 2f1804021fd..3ecec6ecfa0 100644
--- a/test/CodeGen/X86/combine-abs.ll
+++ b/test/CodeGen/X86/combine-abs.ll
@@ -6,7 +6,7 @@
; fold (abs c1) -> c2
define <4 x i32> @combine_v4i32_abs_constant() {
; CHECK-LABEL: combine_v4i32_abs_constant:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} xmm0 = [0,1,3,2147483648]
; CHECK-NEXT: retq
%1 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> <i32 0, i32 -1, i32 3, i32 -2147483648>)
@@ -15,7 +15,7 @@ define <4 x i32> @combine_v4i32_abs_constant() {
define <16 x i16> @combine_v16i16_abs_constant() {
; CHECK-LABEL: combine_v16i16_abs_constant:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,1,3,3,7,7,255,255,4096,4096,32767,32767,32768,32768,0]
; CHECK-NEXT: retq
%1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> <i16 0, i16 1, i16 -1, i16 3, i16 -3, i16 7, i16 -7, i16 255, i16 -255, i16 4096, i16 -4096, i16 32767, i16 -32767, i16 -32768, i16 32768, i16 65536>)
@@ -25,7 +25,7 @@ define <16 x i16> @combine_v16i16_abs_constant() {
; fold (abs (abs x)) -> (abs x)
define i32 @combine_i32_abs_abs(i32 %a) {
; CHECK-LABEL: combine_i32_abs_abs:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: negl %eax
; CHECK-NEXT: cmovll %edi, %eax
@@ -41,7 +41,7 @@ define i32 @combine_i32_abs_abs(i32 %a) {
define <8 x i16> @combine_v8i16_abs_abs(<8 x i16> %a) {
; CHECK-LABEL: combine_v8i16_abs_abs:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpabsw %xmm0, %xmm0
; CHECK-NEXT: retq
%a1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a)
@@ -53,7 +53,7 @@ define <8 x i16> @combine_v8i16_abs_abs(<8 x i16> %a) {
define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) {
; CHECK-LABEL: combine_v32i8_abs_abs:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpabsb %ymm0, %ymm0
; CHECK-NEXT: retq
%n1 = sub <32 x i8> zeroinitializer, %a
@@ -65,7 +65,7 @@ define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) {
define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
; AVX2-LABEL: combine_v4i64_abs_abs:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
@@ -76,14 +76,14 @@ define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_v4i64_abs_abs:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpabsq %zmm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: combine_v4i64_abs_abs:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpabsq %ymm0, %ymm0
; AVX512VL-NEXT: retq
%n1 = sub <4 x i64> zeroinitializer, %a
@@ -98,17 +98,17 @@ define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
; fold (abs x) -> x iff not-negative
define <16 x i8> @combine_v16i8_abs_constant(<16 x i8> %a) {
; AVX2-LABEL: combine_v16i8_abs_constant:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_v16i8_abs_constant:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: combine_v16i8_abs_constant:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
%1 = insertelement <16 x i8> undef, i8 15, i32 0
@@ -120,7 +120,7 @@ define <16 x i8> @combine_v16i8_abs_constant(<16 x i8> %a) {
define <8 x i32> @combine_v8i32_abs_pos(<8 x i32> %a) {
; CHECK-LABEL: combine_v8i32_abs_pos:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrld $1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = lshr <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
diff --git a/test/CodeGen/X86/combine-add.ll b/test/CodeGen/X86/combine-add.ll
index a4e959c0b8f..9a9f535c608 100644
--- a/test/CodeGen/X86/combine-add.ll
+++ b/test/CodeGen/X86/combine-add.ll
@@ -5,11 +5,11 @@
; fold (add x, 0) -> x
define <4 x i32> @combine_vec_add_to_zero(<4 x i32> %a) {
; SSE-LABEL: combine_vec_add_to_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_to_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = add <4 x i32> %a, zeroinitializer
ret <4 x i32> %1
@@ -18,14 +18,14 @@ define <4 x i32> @combine_vec_add_to_zero(<4 x i32> %a) {
; fold ((c1-A)+c2) -> (c1+c2)-A
define <4 x i32> @combine_vec_add_constant_sub(<4 x i32> %a) {
; SSE-LABEL: combine_vec_add_constant_sub:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,2,4,6]
; SSE-NEXT: psubd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_constant_sub:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2,4,6]
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -37,13 +37,13 @@ define <4 x i32> @combine_vec_add_constant_sub(<4 x i32> %a) {
; fold ((0-A) + B) -> B-A
define <4 x i32> @combine_vec_add_neg0(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_add_neg0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_neg0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> zeroinitializer, %a
@@ -54,12 +54,12 @@ define <4 x i32> @combine_vec_add_neg0(<4 x i32> %a, <4 x i32> %b) {
; fold (A + (0-B)) -> A-B
define <4 x i32> @combine_vec_add_neg1(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_add_neg1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_neg1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> zeroinitializer, %b
@@ -70,12 +70,12 @@ define <4 x i32> @combine_vec_add_neg1(<4 x i32> %a, <4 x i32> %b) {
; fold (A+(B-A)) -> B
define <4 x i32> @combine_vec_add_sub0(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_add_sub0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sub0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %b, %a
@@ -86,12 +86,12 @@ define <4 x i32> @combine_vec_add_sub0(<4 x i32> %a, <4 x i32> %b) {
; fold ((B-A)+A) -> B
define <4 x i32> @combine_vec_add_sub1(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_add_sub1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sub1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %b, %a
@@ -102,13 +102,13 @@ define <4 x i32> @combine_vec_add_sub1(<4 x i32> %a, <4 x i32> %b) {
; fold (A+(B-(A+C))) to (B-C)
define <4 x i32> @combine_vec_add_sub_add0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_add_sub_add0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sub_add0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsubd %xmm2, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = add <4 x i32> %a, %c
@@ -120,13 +120,13 @@ define <4 x i32> @combine_vec_add_sub_add0(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; fold (A+(B-(C+A))) to (B-C)
define <4 x i32> @combine_vec_add_sub_add1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_add_sub_add1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sub_add1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsubd %xmm2, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = add <4 x i32> %c, %a
@@ -138,13 +138,13 @@ define <4 x i32> @combine_vec_add_sub_add1(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; fold (A+((B-A)+C)) to (B+C)
define <4 x i32> @combine_vec_add_sub_add2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_add_sub_add2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sub_add2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %b, %a
@@ -156,13 +156,13 @@ define <4 x i32> @combine_vec_add_sub_add2(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; fold (A+((B-A)-C)) to (B-C)
define <4 x i32> @combine_vec_add_sub_add3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_add_sub_add3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sub_add3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsubd %xmm2, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %b, %a
@@ -174,14 +174,14 @@ define <4 x i32> @combine_vec_add_sub_add3(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
define <4 x i32> @combine_vec_add_sub_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %d) {
; SSE-LABEL: combine_vec_add_sub_sub:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: paddd %xmm2, %xmm1
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sub_sub:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
@@ -195,14 +195,14 @@ define <4 x i32> @combine_vec_add_sub_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; fold (a+b) -> (a|b) iff a and b share no bits.
define <4 x i32> @combine_vec_add_uniquebits(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_add_uniquebits:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_uniquebits:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
@@ -218,13 +218,13 @@ define <4 x i32> @combine_vec_add_uniquebits(<4 x i32> %a, <4 x i32> %b) {
; fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
define <4 x i32> @combine_vec_add_shl_neg0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_add_shl_neg0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $5, %xmm1
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_shl_neg0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $5, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -237,13 +237,13 @@ define <4 x i32> @combine_vec_add_shl_neg0(<4 x i32> %x, <4 x i32> %y) {
; fold (add shl(0 - y, n), x) -> sub(x, shl(y, n))
define <4 x i32> @combine_vec_add_shl_neg1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_add_shl_neg1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $5, %xmm1
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_shl_neg1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $5, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -257,13 +257,13 @@ define <4 x i32> @combine_vec_add_shl_neg1(<4 x i32> %x, <4 x i32> %y) {
; and similar xforms where the inner op is either ~0 or 0.
define <4 x i32> @combine_vec_add_and_compare(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE-LABEL: combine_vec_add_and_compare:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm2, %xmm1
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_and_compare:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -277,14 +277,14 @@ define <4 x i32> @combine_vec_add_and_compare(<4 x i32> %a0, <4 x i32> %a1, <4 x
; add (sext i1), X -> sub X, (zext i1)
define <4 x i32> @combine_vec_add_sext(<4 x i1> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_add_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -297,14 +297,14 @@ define <4 x i32> @combine_vec_add_sext(<4 x i1> %a0, <4 x i32> %a1) {
; add (sext i1), X -> sub X, (zext i1)
define <4 x i32> @combine_vec_add_sextinreg(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_add_sextinreg:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sextinreg:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/combine-and.ll b/test/CodeGen/X86/combine-and.ll
index 5ce3f32cfd2..e92237f524f 100644
--- a/test/CodeGen/X86/combine-and.ll
+++ b/test/CodeGen/X86/combine-and.ll
@@ -3,7 +3,7 @@
define i32 @and_self(i32 %x) {
; CHECK-LABEL: and_self:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%and = and i32 %x, %x
@@ -12,7 +12,7 @@ define i32 @and_self(i32 %x) {
define <4 x i32> @and_self_vec(<4 x i32> %x) {
; CHECK-LABEL: and_self_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%and = and <4 x i32> %x, %x
ret <4 x i32> %and
@@ -26,7 +26,7 @@ define <4 x i32> @and_self_vec(<4 x i32> %x) {
define <4 x i32> @test1(<4 x i32> %A) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; CHECK-NEXT: retq
@@ -36,7 +36,7 @@ define <4 x i32> @test1(<4 x i32> %A) {
define <4 x i32> @test2(<4 x i32> %A) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; CHECK-NEXT: retq
@@ -46,7 +46,7 @@ define <4 x i32> @test2(<4 x i32> %A) {
define <4 x i32> @test3(<4 x i32> %A) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT: retq
@@ -56,7 +56,7 @@ define <4 x i32> @test3(<4 x i32> %A) {
define <4 x i32> @test4(<4 x i32> %A) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; CHECK-NEXT: retq
@@ -66,7 +66,7 @@ define <4 x i32> @test4(<4 x i32> %A) {
define <4 x i32> @test5(<4 x i32> %A) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT: retq
@@ -76,7 +76,7 @@ define <4 x i32> @test5(<4 x i32> %A) {
define <4 x i32> @test6(<4 x i32> %A) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT: retq
@@ -86,7 +86,7 @@ define <4 x i32> @test6(<4 x i32> %A) {
define <4 x i32> @test7(<4 x i32> %A) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
@@ -96,7 +96,7 @@ define <4 x i32> @test7(<4 x i32> %A) {
define <4 x i32> @test8(<4 x i32> %A) {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; CHECK-NEXT: retq
@@ -106,7 +106,7 @@ define <4 x i32> @test8(<4 x i32> %A) {
define <4 x i32> @test9(<4 x i32> %A) {
; CHECK-LABEL: test9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 0>
@@ -115,7 +115,7 @@ define <4 x i32> @test9(<4 x i32> %A) {
define <4 x i32> @test10(<4 x i32> %A) {
; CHECK-LABEL: test10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; CHECK-NEXT: retq
@@ -125,7 +125,7 @@ define <4 x i32> @test10(<4 x i32> %A) {
define <4 x i32> @test11(<4 x i32> %A) {
; CHECK-LABEL: test11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; CHECK-NEXT: retq
@@ -135,7 +135,7 @@ define <4 x i32> @test11(<4 x i32> %A) {
define <4 x i32> @test12(<4 x i32> %A) {
; CHECK-LABEL: test12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; CHECK-NEXT: retq
@@ -145,7 +145,7 @@ define <4 x i32> @test12(<4 x i32> %A) {
define <4 x i32> @test13(<4 x i32> %A) {
; CHECK-LABEL: test13:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT: retq
@@ -155,7 +155,7 @@ define <4 x i32> @test13(<4 x i32> %A) {
define <4 x i32> @test14(<4 x i32> %A) {
; CHECK-LABEL: test14:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
@@ -165,7 +165,7 @@ define <4 x i32> @test14(<4 x i32> %A) {
define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test15:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
@@ -176,7 +176,7 @@ define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
@@ -187,7 +187,7 @@ define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test17:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
@@ -202,7 +202,7 @@ define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
define <2 x i64> @and_or_v2i64(<2 x i64> %a0) {
; CHECK-LABEL: and_or_v2i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [8,8]
; CHECK-NEXT: retq
%1 = or <2 x i64> %a0, <i64 255, i64 255>
@@ -212,7 +212,7 @@ define <2 x i64> @and_or_v2i64(<2 x i64> %a0) {
define <4 x i32> @and_or_v4i32(<4 x i32> %a0) {
; CHECK-LABEL: and_or_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
; CHECK-NEXT: retq
%1 = or <4 x i32> %a0, <i32 15, i32 15, i32 15, i32 15>
@@ -226,7 +226,7 @@ define <4 x i32> @and_or_v4i32(<4 x i32> %a0) {
define <2 x i64> @and_or_zext_v2i32(<2 x i32> %a0) {
; CHECK-LABEL: and_or_zext_v2i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = zext <2 x i32> %a0 to <2 x i64>
@@ -237,7 +237,7 @@ define <2 x i64> @and_or_zext_v2i32(<2 x i32> %a0) {
define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
; CHECK-LABEL: and_or_zext_v4i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = zext <4 x i16> %a0 to <4 x i32>
@@ -252,7 +252,7 @@ define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
define <8 x i16> @ashr_mask1_v8i16(<8 x i16> %a0) {
; CHECK-LABEL: ashr_mask1_v8i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrlw $15, %xmm0
; CHECK-NEXT: retq
%1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
@@ -262,7 +262,7 @@ define <8 x i16> @ashr_mask1_v8i16(<8 x i16> %a0) {
define <4 x i32> @ashr_mask7_v4i32(<4 x i32> %a0) {
; CHECK-LABEL: ashr_mask7_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrad $31, %xmm0
; CHECK-NEXT: psrld $29, %xmm0
; CHECK-NEXT: retq
@@ -278,7 +278,7 @@ define <4 x i32> @ashr_mask7_v4i32(<4 x i32> %a0) {
; PR34620 - redundant PAND after vector shift of a byte vector (PSRLW)
define <16 x i8> @PR34620(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: PR34620:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrlw $1, %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
diff --git a/test/CodeGen/X86/combine-avx-intrinsics.ll b/test/CodeGen/X86/combine-avx-intrinsics.ll
index 811b1f20833..e46a1903e81 100644
--- a/test/CodeGen/X86/combine-avx-intrinsics.ll
+++ b/test/CodeGen/X86/combine-avx-intrinsics.ll
@@ -4,7 +4,7 @@
define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_blend_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a0, i32 7)
ret <4 x double> %1
@@ -12,7 +12,7 @@ define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0) {
define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0) {
; CHECK-LABEL: test_x86_avx_blend_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a0, i32 7)
ret <8 x float> %1
@@ -20,7 +20,7 @@ define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0) {
define <4 x double> @test2_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test2_x86_avx_blend_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 0)
ret <4 x double> %1
@@ -28,7 +28,7 @@ define <4 x double> @test2_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %
define <8 x float> @test2_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test2_x86_avx_blend_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 0)
ret <8 x float> %1
@@ -36,7 +36,7 @@ define <8 x float> @test2_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1)
define <4 x double> @test3_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test3_x86_avx_blend_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 -1)
@@ -45,7 +45,7 @@ define <4 x double> @test3_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %
define <8 x float> @test3_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
; CHECK-LABEL: test3_x86_avx_blend_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 -1)
diff --git a/test/CodeGen/X86/combine-avx2-intrinsics.ll b/test/CodeGen/X86/combine-avx2-intrinsics.ll
index 9a548f6b7f0..672820d86c6 100644
--- a/test/CodeGen/X86/combine-avx2-intrinsics.ll
+++ b/test/CodeGen/X86/combine-avx2-intrinsics.ll
@@ -6,7 +6,7 @@
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pblendw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a0, i32 7)
ret <16 x i16> %res
@@ -14,7 +14,7 @@ define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0) {
define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pblendd_128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a0, i32 7)
ret <4 x i32> %res
@@ -22,7 +22,7 @@ define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0) {
define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pblendd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a0, i32 7)
ret <8 x i32> %res
@@ -30,7 +30,7 @@ define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0) {
define <16 x i16> @test2_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test2_x86_avx2_pblendw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 0)
ret <16 x i16> %res
@@ -38,7 +38,7 @@ define <16 x i16> @test2_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
define <4 x i32> @test2_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test2_x86_avx2_pblendd_128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 0)
ret <4 x i32> %res
@@ -46,7 +46,7 @@ define <4 x i32> @test2_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i32> @test2_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test2_x86_avx2_pblendd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 0)
ret <8 x i32> %res
@@ -54,7 +54,7 @@ define <8 x i32> @test2_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
define <16 x i16> @test3_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test3_x86_avx2_pblendw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 -1)
@@ -63,7 +63,7 @@ define <16 x i16> @test3_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
define <4 x i32> @test3_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test3_x86_avx2_pblendd_128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 -1)
@@ -72,7 +72,7 @@ define <4 x i32> @test3_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i32> @test3_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test3_x86_avx2_pblendd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 -1)
diff --git a/test/CodeGen/X86/combine-fcopysign.ll b/test/CodeGen/X86/combine-fcopysign.ll
index be8674741e4..6298192226e 100644
--- a/test/CodeGen/X86/combine-fcopysign.ll
+++ b/test/CodeGen/X86/combine-fcopysign.ll
@@ -9,7 +9,7 @@
; copysign(x, c1) -> fabs(x) iff ispos(c1)
define <4 x float> @combine_vec_fcopysign_pos_constant0(<4 x float> %x) {
; SSE-LABEL: combine_vec_fcopysign_pos_constant0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [2.000000e+00,2.000000e+00,2.000000e+00,2.000000e+00]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
@@ -17,7 +17,7 @@ define <4 x float> @combine_vec_fcopysign_pos_constant0(<4 x float> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_pos_constant0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vandps %xmm1, %xmm2, %xmm1
@@ -31,7 +31,7 @@ define <4 x float> @combine_vec_fcopysign_pos_constant0(<4 x float> %x) {
define <4 x float> @combine_vec_fcopysign_pos_constant1(<4 x float> %x) {
; SSE-LABEL: combine_vec_fcopysign_pos_constant1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [0.000000e+00,2.000000e+00,4.000000e+00,8.000000e+00]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
@@ -39,7 +39,7 @@ define <4 x float> @combine_vec_fcopysign_pos_constant1(<4 x float> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_pos_constant1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
@@ -52,12 +52,12 @@ define <4 x float> @combine_vec_fcopysign_pos_constant1(<4 x float> %x) {
define <4 x float> @combine_vec_fcopysign_fabs_sgn(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fabs_sgn:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fabs_sgn:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -69,7 +69,7 @@ define <4 x float> @combine_vec_fcopysign_fabs_sgn(<4 x float> %x, <4 x float> %
; copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
define <4 x float> @combine_vec_fcopysign_neg_constant0(<4 x float> %x) {
; SSE-LABEL: combine_vec_fcopysign_neg_constant0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [-2.000000e+00,-2.000000e+00,-2.000000e+00,-2.000000e+00]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
@@ -77,7 +77,7 @@ define <4 x float> @combine_vec_fcopysign_neg_constant0(<4 x float> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_neg_constant0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vandps %xmm1, %xmm2, %xmm1
@@ -91,7 +91,7 @@ define <4 x float> @combine_vec_fcopysign_neg_constant0(<4 x float> %x) {
define <4 x float> @combine_vec_fcopysign_neg_constant1(<4 x float> %x) {
; SSE-LABEL: combine_vec_fcopysign_neg_constant1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [-0.000000e+00,-2.000000e+00,-4.000000e+00,-8.000000e+00]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
@@ -99,7 +99,7 @@ define <4 x float> @combine_vec_fcopysign_neg_constant1(<4 x float> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_neg_constant1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
@@ -112,13 +112,13 @@ define <4 x float> @combine_vec_fcopysign_neg_constant1(<4 x float> %x) {
define <4 x float> @combine_vec_fcopysign_fneg_fabs_sgn(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fneg_fabs_sgn:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: orps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fneg_fabs_sgn:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
@@ -133,14 +133,14 @@ define <4 x float> @combine_vec_fcopysign_fneg_fabs_sgn(<4 x float> %x, <4 x flo
; copysign(fabs(x), y) -> copysign(x, y)
define <4 x float> @combine_vec_fcopysign_fabs_mag(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fabs_mag:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fabs_mag:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
@@ -155,14 +155,14 @@ define <4 x float> @combine_vec_fcopysign_fabs_mag(<4 x float> %x, <4 x float> %
; copysign(fneg(x), y) -> copysign(x, y)
define <4 x float> @combine_vec_fcopysign_fneg_mag(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fneg_mag:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fneg_mag:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
@@ -177,14 +177,14 @@ define <4 x float> @combine_vec_fcopysign_fneg_mag(<4 x float> %x, <4 x float> %
; copysign(copysign(x,z), y) -> copysign(x, y)
define <4 x float> @combine_vec_fcopysign_fcopysign_mag(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; SSE-LABEL: combine_vec_fcopysign_fcopysign_mag:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fcopysign_mag:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
@@ -199,14 +199,14 @@ define <4 x float> @combine_vec_fcopysign_fcopysign_mag(<4 x float> %x, <4 x flo
; copysign(x, copysign(y,z)) -> copysign(x, z)
define <4 x float> @combine_vec_fcopysign_fcopysign_sgn(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; SSE-LABEL: combine_vec_fcopysign_fcopysign_sgn:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm2
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: orps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fcopysign_sgn:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vandps %xmm1, %xmm2, %xmm1
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
@@ -221,7 +221,7 @@ define <4 x float> @combine_vec_fcopysign_fcopysign_sgn(<4 x float> %x, <4 x flo
; copysign(x, fp_extend(y)) -> copysign(x, y)
define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fpext_sgn:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: cvtss2sd %xmm2, %xmm4
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
@@ -257,7 +257,7 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fpext_sgn:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastsd {{.*}}(%rip), %ymm2
; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX-NEXT: vcvtps2pd %xmm1, %ymm1
@@ -273,7 +273,7 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
; copysign(x, fp_round(y)) -> copysign(x, y)
define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x double> %y) {
; SSE-LABEL: combine_vec_fcopysign_fptrunc_sgn:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm5
; SSE-NEXT: andps %xmm5, %xmm0
@@ -307,7 +307,7 @@ define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x doubl
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fptrunc_sgn:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vcvtpd2ps %ymm1, %xmm1
diff --git a/test/CodeGen/X86/combine-mul.ll b/test/CodeGen/X86/combine-mul.ll
index 2580a821240..21733acac1f 100644
--- a/test/CodeGen/X86/combine-mul.ll
+++ b/test/CodeGen/X86/combine-mul.ll
@@ -5,11 +5,11 @@
; fold (mul undef, x) -> 0
define <4 x i32> @combine_vec_mul_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_undef0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_undef0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = mul <4 x i32> undef, %x
ret <4 x i32> %1
@@ -18,11 +18,11 @@ define <4 x i32> @combine_vec_mul_undef0(<4 x i32> %x) {
; fold (mul x, undef) -> 0
define <4 x i32> @combine_vec_mul_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_undef1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_undef1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, undef
ret <4 x i32> %1
@@ -31,12 +31,12 @@ define <4 x i32> @combine_vec_mul_undef1(<4 x i32> %x) {
; fold (mul x, 0) -> 0
define <4 x i32> @combine_vec_mul_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, zeroinitializer
@@ -46,11 +46,11 @@ define <4 x i32> @combine_vec_mul_zero(<4 x i32> %x) {
; fold (mul x, 1) -> x
define <4 x i32> @combine_vec_mul_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_one:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_one:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %1
@@ -59,14 +59,14 @@ define <4 x i32> @combine_vec_mul_one(<4 x i32> %x) {
; fold (mul x, -1) -> 0-x
define <4 x i32> @combine_vec_mul_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negone:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: psubd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_negone:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -77,12 +77,12 @@ define <4 x i32> @combine_vec_mul_negone(<4 x i32> %x) {
; fold (mul x, (1 << c)) -> x << c
define <4 x i32> @combine_vec_mul_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_pow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_pow2a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -91,12 +91,12 @@ define <4 x i32> @combine_vec_mul_pow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_mul_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_pow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_pow2b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, <i32 1, i32 2, i32 4, i32 16>
@@ -105,7 +105,7 @@ define <4 x i32> @combine_vec_mul_pow2b(<4 x i32> %x) {
define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
; SSE-LABEL: combine_vec_mul_pow2c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $1, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
@@ -117,7 +117,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_pow2c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%1 = mul <4 x i64> %x, <i64 1, i64 2, i64 4, i64 16>
@@ -127,7 +127,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
; fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negpow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: psubd %xmm0, %xmm1
@@ -135,7 +135,7 @@ define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_negpow2a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
@@ -146,12 +146,12 @@ define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_mul_negpow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negpow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_negpow2b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, <i32 -1, i32 -2, i32 -4, i32 -16>
@@ -160,7 +160,7 @@ define <4 x i32> @combine_vec_mul_negpow2b(<4 x i32> %x) {
define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) {
; SSE-LABEL: combine_vec_mul_negpow2c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
@@ -184,7 +184,7 @@ define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_negpow2c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm2
@@ -202,12 +202,12 @@ define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) {
; (mul (shl X, c1), c2) -> (mul X, c2 << c1)
define <4 x i32> @combine_vec_mul_shl_const(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_shl_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_shl_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
@@ -218,13 +218,13 @@ define <4 x i32> @combine_vec_mul_shl_const(<4 x i32> %x) {
; (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one use.
define <4 x i32> @combine_vec_mul_shl_oneuse0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_oneuse0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld %xmm1, %xmm0
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_shl_oneuse0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -235,13 +235,13 @@ define <4 x i32> @combine_vec_mul_shl_oneuse0(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_oneuse1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld %xmm1, %xmm0
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_shl_oneuse1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -252,14 +252,14 @@ define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_multiuse0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: pmulld %xmm0, %xmm1
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_shl_multiuse0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -272,14 +272,14 @@ define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_multiuse1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: pmulld %xmm0, %xmm1
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_shl_multiuse1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpmulld %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -294,13 +294,13 @@ define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_mul_add(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_add:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_mul_add:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/combine-multiplies.ll b/test/CodeGen/X86/combine-multiplies.ll
index ab30b9b489e..98fc16ca226 100644
--- a/test/CodeGen/X86/combine-multiplies.ll
+++ b/test/CodeGen/X86/combine-multiplies.ll
@@ -33,7 +33,7 @@
; Function Attrs: nounwind
define void @testCombineMultiplies([100 x i32]* nocapture %a, i32 %lll) nounwind {
; CHECK-LABEL: testCombineMultiplies:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -74,7 +74,7 @@ entry:
; Output looks something like this:
;
; testCombineMultiplies_splat: # @testCombineMultiplies_splat
-; # BB#0: # %entry
+; # %bb.0: # %entry
; movdqa .LCPI1_0, %xmm1 # xmm1 = [11,11,11,11]
; paddd %xmm0, %xmm1
; movdqa .LCPI1_1, %xmm2 # xmm2 = [22,22,22,22]
@@ -104,7 +104,7 @@ entry:
; Function Attrs: nounwind
define void @testCombineMultiplies_splat(<4 x i32> %v1) nounwind {
; CHECK-LABEL: testCombineMultiplies_splat:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [11,11,11,11]
; CHECK-NEXT: paddd %xmm0, %xmm1
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [22,22,22,22]
@@ -138,7 +138,7 @@ entry:
; Function Attrs: nounwind
define void @testCombineMultiplies_non_splat(<4 x i32> %v1) nounwind {
; CHECK-LABEL: testCombineMultiplies_non_splat:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [11,22,33,44]
; CHECK-NEXT: paddd %xmm0, %xmm1
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [22,33,44,55]
diff --git a/test/CodeGen/X86/combine-or.ll b/test/CodeGen/X86/combine-or.ll
index b99c05288b3..1601c67dce2 100644
--- a/test/CodeGen/X86/combine-or.ll
+++ b/test/CodeGen/X86/combine-or.ll
@@ -3,7 +3,7 @@
define i32 @or_self(i32 %x) {
; CHECK-LABEL: or_self:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%or = or i32 %x, %x
@@ -12,7 +12,7 @@ define i32 @or_self(i32 %x) {
define <4 x i32> @or_self_vec(<4 x i32> %x) {
; CHECK-LABEL: or_self_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%or = or <4 x i32> %x, %x
ret <4 x i32> %or
@@ -23,7 +23,7 @@ define <4 x i32> @or_self_vec(<4 x i32> %x) {
define <2 x i64> @test1(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
@@ -35,7 +35,7 @@ define <2 x i64> @test1(<2 x i64> %a, <2 x i64> %b) {
define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -47,7 +47,7 @@ define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test3(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 1>
@@ -59,7 +59,7 @@ define <2 x i64> @test3(<2 x i64> %a, <2 x i64> %b) {
define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
@@ -71,7 +71,7 @@ define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test5(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 1, i32 2, i32 3>
@@ -83,7 +83,7 @@ define <4 x i32> @test5(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
@@ -95,7 +95,7 @@ define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; CHECK-NEXT: retq
%and1 = and <4 x i32> %a, <i32 -1, i32 -1, i32 0, i32 0>
@@ -107,7 +107,7 @@ define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test8(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; CHECK-NEXT: retq
%and1 = and <2 x i64> %a, <i64 -1, i64 0>
@@ -119,7 +119,7 @@ define <2 x i64> @test8(<2 x i64> %a, <2 x i64> %b) {
define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%and1 = and <4 x i32> %a, <i32 0, i32 0, i32 -1, i32 -1>
@@ -131,7 +131,7 @@ define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test10(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%and1 = and <2 x i64> %a, <i64 0, i64 -1>
@@ -143,7 +143,7 @@ define <2 x i64> @test10(<2 x i64> %a, <2 x i64> %b) {
define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; CHECK-NEXT: retq
%and1 = and <4 x i32> %a, <i32 -1, i32 0, i32 0, i32 0>
@@ -155,7 +155,7 @@ define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test12(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; CHECK-NEXT: retq
%and1 = and <4 x i32> %a, <i32 0, i32 -1, i32 -1, i32 -1>
@@ -169,7 +169,7 @@ define <4 x i32> @test12(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test13(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test13:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[2,3]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 1, i32 1, i32 4, i32 4>
@@ -181,7 +181,7 @@ define <4 x i32> @test13(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test14(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test14:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
%shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
@@ -193,7 +193,7 @@ define <2 x i64> @test14(<2 x i64> %a, <2 x i64> %b) {
define <4 x i32> @test15(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test15:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,1],xmm0[2,1]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -206,7 +206,7 @@ define <4 x i32> @test15(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test16(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -222,7 +222,7 @@ define <2 x i64> @test16(<2 x i64> %a, <2 x i64> %b) {
define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test17:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psllq $32, %xmm0
; CHECK-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
; CHECK-NEXT: por %xmm1, %xmm0
@@ -236,7 +236,7 @@ define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test18:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
@@ -252,7 +252,7 @@ define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test19:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,2,3]
; CHECK-NEXT: pxor %xmm3, %xmm3
; CHECK-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
@@ -269,7 +269,7 @@ define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test20(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test20:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: retq
@@ -282,7 +282,7 @@ define <2 x i64> @test20(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test21:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; CHECK-NEXT: retq
@@ -298,7 +298,7 @@ define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
define <2 x double> @test22(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test22:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: retq
%bc1 = bitcast <2 x double> %a0 to <2 x i64>
@@ -313,7 +313,7 @@ define <2 x double> @test22(<2 x double> %a0, <2 x double> %a1) {
define <4 x float> @test23(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test23:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; CHECK-NEXT: retq
%bc1 = bitcast <4 x float> %a0 to <4 x i32>
@@ -328,7 +328,7 @@ define <4 x float> @test23(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test24(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test24:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: retq
%bc1 = bitcast <4 x float> %a0 to <2 x i64>
@@ -343,7 +343,7 @@ define <4 x float> @test24(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test25(<4 x float> %a0) {
; CHECK-LABEL: test25:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blendps {{.*#+}} xmm0 = mem[0],xmm0[1,2],mem[3]
; CHECK-NEXT: retq
%bc1 = bitcast <4 x float> %a0 to <4 x i32>
@@ -361,7 +361,7 @@ define <4 x float> @test25(<4 x float> %a0) {
; handle legal vector value types.
define <4 x i8> @test_crash(<4 x i8> %a, <4 x i8> %b) {
; CHECK-LABEL: test_crash:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -374,7 +374,7 @@ define <4 x i8> @test_crash(<4 x i8> %a, <4 x i8> %b) {
define <4 x i32> @test2b(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test2b:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32><i32 0, i32 0, i32 6, i32 7>
@@ -385,7 +385,7 @@ define <4 x i32> @test2b(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test2c(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test2c:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32><i32 0, i32 0, i32 6, i32 7>
@@ -397,7 +397,7 @@ define <4 x i32> @test2c(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test2d(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test2d:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -410,7 +410,7 @@ define <4 x i32> @test2d(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test2e(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test2e:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>, <4 x i32><i32 undef, i32 4, i32 2, i32 3>
@@ -421,7 +421,7 @@ define <4 x i32> @test2e(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test2f(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test2f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -435,7 +435,7 @@ define <4 x i32> @test2f(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @or_and_v2i64(<2 x i64> %a0) {
; CHECK-LABEL: or_and_v2i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: orps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
@@ -448,7 +448,7 @@ define <2 x i64> @or_and_v2i64(<2 x i64> %a0) {
define <4 x i32> @or_and_v4i32(<4 x i32> %a0) {
; CHECK-LABEL: or_and_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
; CHECK-NEXT: retq
%1 = and <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
@@ -460,7 +460,7 @@ define <4 x i32> @or_and_v4i32(<4 x i32> %a0) {
define <2 x i64> @or_zext_v2i32(<2 x i32> %a0) {
; CHECK-LABEL: or_zext_v2i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295]
; CHECK-NEXT: retq
%1 = zext <2 x i32> %a0 to <2 x i64>
@@ -470,7 +470,7 @@ define <2 x i64> @or_zext_v2i32(<2 x i32> %a0) {
define <4 x i32> @or_zext_v4i16(<4 x i16> %a0) {
; CHECK-LABEL: or_zext_v4i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [65535,65535,65535,65535]
; CHECK-NEXT: retq
%1 = zext <4 x i16> %a0 to <4 x i32>
diff --git a/test/CodeGen/X86/combine-pmuldq.ll b/test/CodeGen/X86/combine-pmuldq.ll
index 9c193a2a199..53ab87a386b 100644
--- a/test/CodeGen/X86/combine-pmuldq.ll
+++ b/test/CodeGen/X86/combine-pmuldq.ll
@@ -5,7 +5,7 @@
; TODO - shuffle+sext are superfluous
define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_sext_pmuldq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pmovsxdq %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -14,7 +14,7 @@ define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_sext_pmuldq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -32,7 +32,7 @@ define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
; TODO - shuffle+zext are superfluous
define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zext_pmuludq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -41,7 +41,7 @@ define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_zext_pmuludq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -59,7 +59,7 @@ define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; TODO - blends are superfluous
define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
@@ -67,7 +67,7 @@ define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_zero_pmuludq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -84,7 +84,7 @@ define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; TODO - blends are superfluous
define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq_256:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
@@ -95,7 +95,7 @@ define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1)
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_zero_pmuludq_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
diff --git a/test/CodeGen/X86/combine-rotates.ll b/test/CodeGen/X86/combine-rotates.ll
index 0d74c937af3..e75973af05e 100644
--- a/test/CodeGen/X86/combine-rotates.ll
+++ b/test/CodeGen/X86/combine-rotates.ll
@@ -5,12 +5,12 @@
; fold (rot (rot x, c1), c2) -> rot x, c1+c2
define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
; XOP-LABEL: combine_vec_rot_rot:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: combine_vec_rot_rot:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprolvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
@@ -24,12 +24,12 @@ define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
; XOP-LABEL: combine_vec_rot_rot_splat:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotd $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: combine_vec_rot_rot_splat:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprold $7, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
@@ -43,11 +43,11 @@ define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
define <4 x i32> @combine_vec_rot_rot_splat_zero(<4 x i32> %x) {
; XOP-LABEL: combine_vec_rot_rot_splat_zero:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: retq
;
; AVX512-LABEL: combine_vec_rot_rot_splat_zero:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
%2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
diff --git a/test/CodeGen/X86/combine-sdiv.ll b/test/CodeGen/X86/combine-sdiv.ll
index ddb1786e37d..8fb30a2594b 100644
--- a/test/CodeGen/X86/combine-sdiv.ll
+++ b/test/CodeGen/X86/combine-sdiv.ll
@@ -6,11 +6,11 @@
; fold (sdiv undef, x) -> 0
define <4 x i32> @combine_vec_sdiv_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_undef0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_undef0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = sdiv <4 x i32> undef, %x
ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_sdiv_undef0(<4 x i32> %x) {
; fold (sdiv x, undef) -> undef
define <4 x i32> @combine_vec_sdiv_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_undef1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_undef1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = sdiv <4 x i32> %x, undef
ret <4 x i32> %1
@@ -32,11 +32,11 @@ define <4 x i32> @combine_vec_sdiv_undef1(<4 x i32> %x) {
; fold (sdiv x, 1) -> x
define <4 x i32> @combine_vec_sdiv_by_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_one:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_one:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %1
@@ -45,14 +45,14 @@ define <4 x i32> @combine_vec_sdiv_by_one(<4 x i32> %x) {
; fold (sdiv x, -1) -> 0 - x
define <4 x i32> @combine_vec_sdiv_by_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_negone:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: psubd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_negone:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -63,13 +63,13 @@ define <4 x i32> @combine_vec_sdiv_by_negone(<4 x i32> %x) {
; fold (sdiv x, y) -> (udiv x, y) iff x and y are positive
define <4 x i32> @combine_vec_sdiv_by_pos0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: psrld $2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pos0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -80,7 +80,7 @@ define <4 x i32> @combine_vec_sdiv_by_pos0(<4 x i32> %x) {
define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
@@ -94,7 +94,7 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pos1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $2, %xmm0, %xmm2
@@ -105,7 +105,7 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pos1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -117,7 +117,7 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; fold (sdiv x, (1 << c)) -> x >>u c
define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $30, %xmm1
@@ -127,7 +127,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX-NEXT: vpsrld $30, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -139,7 +139,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: sarl $31, %ecx
@@ -164,7 +164,7 @@ define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: sarl $31, %ecx
diff --git a/test/CodeGen/X86/combine-sext-in-reg.ll b/test/CodeGen/X86/combine-sext-in-reg.ll
index 3e60f3bf95e..686945a7bcd 100644
--- a/test/CodeGen/X86/combine-sext-in-reg.ll
+++ b/test/CodeGen/X86/combine-sext-in-reg.ll
@@ -5,7 +5,7 @@
; fold sextinreg(zext) -> sext
define <4 x i64> @sextinreg_zext_v16i8_4i64(<16 x i8> %a0) {
; SSE-LABEL: sextinreg_zext_v16i8_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmovsxbq %xmm0, %xmm2
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pmovsxbq %xmm0, %xmm1
@@ -13,7 +13,7 @@ define <4 x i64> @sextinreg_zext_v16i8_4i64(<16 x i8> %a0) {
; SSE-NEXT: retq
;
; AVX-LABEL: sextinreg_zext_v16i8_4i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbq %xmm0, %ymm0
; AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -26,7 +26,7 @@ define <4 x i64> @sextinreg_zext_v16i8_4i64(<16 x i8> %a0) {
; fold sextinreg(zext(sext)) -> sext
define <4 x i64> @sextinreg_zext_sext_v16i8_4i64(<16 x i8> %a0) {
; SSE-LABEL: sextinreg_zext_sext_v16i8_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmovsxbq %xmm0, %xmm2
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pmovsxbq %xmm0, %xmm1
@@ -34,7 +34,7 @@ define <4 x i64> @sextinreg_zext_sext_v16i8_4i64(<16 x i8> %a0) {
; SSE-NEXT: retq
;
; AVX-LABEL: sextinreg_zext_sext_v16i8_4i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbq %xmm0, %ymm0
; AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/test/CodeGen/X86/combine-shl.ll b/test/CodeGen/X86/combine-shl.ll
index 0d130dc0ee8..6effd2ad044 100644
--- a/test/CodeGen/X86/combine-shl.ll
+++ b/test/CodeGen/X86/combine-shl.ll
@@ -5,12 +5,12 @@
; fold (shl 0, x) -> 0
define <4 x i32> @combine_vec_shl_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> zeroinitializer, %x
@@ -20,11 +20,11 @@ define <4 x i32> @combine_vec_shl_zero(<4 x i32> %x) {
; fold (shl x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_shl_outofrange0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_outofrange0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_outofrange0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
ret <4 x i32> %1
@@ -32,11 +32,11 @@ define <4 x i32> @combine_vec_shl_outofrange0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_outofrange1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_outofrange1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
ret <4 x i32> %1
@@ -44,11 +44,11 @@ define <4 x i32> @combine_vec_shl_outofrange1(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_outofrange2(<4 x i32> %a0) {
; SSE-LABEL: combine_vec_shl_outofrange2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_outofrange2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = and <4 x i32> %a0, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%2 = shl <4 x i32> %1, <i32 33, i32 33, i32 33, i32 33>
@@ -58,11 +58,11 @@ define <4 x i32> @combine_vec_shl_outofrange2(<4 x i32> %a0) {
; fold (shl x, 0) -> x
define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_by_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_by_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, zeroinitializer
ret <4 x i32> %1
@@ -71,12 +71,12 @@ define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) {
; if (shl x, c) is known to be zero, return 0
define <4 x i32> @combine_vec_shl_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_known_zero0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_known_zero0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = and <4 x i32> %x, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
@@ -86,13 +86,13 @@ define <4 x i32> @combine_vec_shl_known_zero0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_known_zero1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_known_zero1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -104,7 +104,7 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_shl_trunc_and:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: pslld $23, %xmm1
@@ -114,7 +114,7 @@ define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_trunc_and:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
@@ -130,12 +130,12 @@ define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
define <4 x i32> @combine_vec_shl_shl0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shl0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $6, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -145,12 +145,12 @@ define <4 x i32> @combine_vec_shl_shl0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shl1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
@@ -161,12 +161,12 @@ define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
; fold (shl (shl x, c1), c2) -> 0
define <4 x i32> @combine_vec_shl_shlr_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shlr_zero0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shlr_zero0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
@@ -176,12 +176,12 @@ define <4 x i32> @combine_vec_shl_shlr_zero0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_shl_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shl_zero1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
@@ -192,7 +192,7 @@ define <4 x i32> @combine_vec_shl_shl_zero1(<4 x i32> %x) {
; fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_ext_shl0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -201,7 +201,7 @@ define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ext_shl0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vpslld $20, %ymm0, %ymm0
; AVX-NEXT: retq
@@ -213,7 +213,7 @@ define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_ext_shl1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: pmovsxwd %xmm1, %xmm1
@@ -223,7 +223,7 @@ define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ext_shl1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
@@ -237,7 +237,7 @@ define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
; fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_zext_lshr0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -246,7 +246,7 @@ define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: retq
@@ -258,7 +258,7 @@ define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_zext_lshr1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,6],xmm1[7]
@@ -279,7 +279,7 @@ define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,3,4,5,6,7,8]
; AVX-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -297,12 +297,12 @@ define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
define <4 x i32> @combine_vec_shl_ge_ashr_extact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ge_ashr_extact0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_extact0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr exact <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
@@ -312,7 +312,7 @@ define <4 x i32> @combine_vec_shl_ge_ashr_extact0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_ge_ashr_extact1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ge_ashr_extact1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $8, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -327,7 +327,7 @@ define <4 x i32> @combine_vec_shl_ge_ashr_extact1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_extact1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -339,12 +339,12 @@ define <4 x i32> @combine_vec_shl_ge_ashr_extact1(<4 x i32> %x) {
; fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2
define <4 x i32> @combine_vec_shl_lt_ashr_extact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_lt_ashr_extact0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrad $2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_extact0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr exact <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
@@ -354,7 +354,7 @@ define <4 x i32> @combine_vec_shl_lt_ashr_extact0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_lt_ashr_extact1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_lt_ashr_extact1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $8, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -369,7 +369,7 @@ define <4 x i32> @combine_vec_shl_lt_ashr_extact1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_extact1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -381,13 +381,13 @@ define <4 x i32> @combine_vec_shl_lt_ashr_extact1(<4 x i32> %x) {
; fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) if C2 > C1
define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_gt_lshr0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $2, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -399,7 +399,7 @@ define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_gt_lshr1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $8, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -414,7 +414,7 @@ define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -426,13 +426,13 @@ define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
; fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) if C1 >= C2
define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_le_lshr0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $2, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_le_lshr0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1073741816,1073741816,1073741816,1073741816]
; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -444,7 +444,7 @@ define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_le_lshr1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $8, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -459,7 +459,7 @@ define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_le_lshr1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -471,12 +471,12 @@ define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
; fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ashr0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -487,12 +487,12 @@ define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ashr1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
@@ -503,13 +503,13 @@ define <4 x i32> @combine_vec_shl_ashr1(<4 x i32> %x) {
; fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_add0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $2, %xmm0
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_add0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -521,13 +521,13 @@ define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_add1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_add1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -539,13 +539,13 @@ define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
; fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_or0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $2, %xmm0
; SSE-NEXT: por {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_or0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -557,13 +557,13 @@ define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_or1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: por {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_or1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -575,12 +575,12 @@ define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
; fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_mul0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_mul0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -591,12 +591,12 @@ define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_mul1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_mul1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
diff --git a/test/CodeGen/X86/combine-sra.ll b/test/CodeGen/X86/combine-sra.ll
index fb16faa30a9..436f48e14b0 100644
--- a/test/CodeGen/X86/combine-sra.ll
+++ b/test/CodeGen/X86/combine-sra.ll
@@ -5,12 +5,12 @@
; fold (sra 0, x) -> 0
define <4 x i32> @combine_vec_ashr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> zeroinitializer, %x
@@ -20,12 +20,12 @@ define <4 x i32> @combine_vec_ashr_zero(<4 x i32> %x) {
; fold (sra -1, x) -> -1
define <4 x i32> @combine_vec_ashr_allones(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_allones:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_allones:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
@@ -35,11 +35,11 @@ define <4 x i32> @combine_vec_ashr_allones(<4 x i32> %x) {
; fold (sra x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_ashr_outofrange0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_outofrange0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_outofrange0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
ret <4 x i32> %1
@@ -47,11 +47,11 @@ define <4 x i32> @combine_vec_ashr_outofrange0(<4 x i32> %x) {
define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_outofrange1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_outofrange1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
ret <4 x i32> %1
@@ -60,11 +60,11 @@ define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
; fold (sra x, 0) -> x
define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_by_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_by_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, zeroinitializer
ret <4 x i32> %1
@@ -73,12 +73,12 @@ define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrad $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrad $6, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -88,7 +88,7 @@ define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $10, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -102,7 +102,7 @@ define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
@@ -112,12 +112,12 @@ define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
@@ -127,7 +127,7 @@ define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $27, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -146,7 +146,7 @@ define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -158,7 +158,7 @@ define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_ashr_trunc_and:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
@@ -181,7 +181,7 @@ define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_trunc_and:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
@@ -198,7 +198,7 @@ define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_lshr:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $32, %xmm1
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -214,7 +214,7 @@ define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_trunc_lshr:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -231,7 +231,7 @@ define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_ashr:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
@@ -248,7 +248,7 @@ define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_trunc_ashr:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
@@ -263,7 +263,7 @@ define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
; If the sign bit is known to be zero, switch this to a SRL.
define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -285,7 +285,7 @@ define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_positive:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -296,12 +296,12 @@ define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive_splat:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_positive_splat:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = and <4 x i32> %x, <i32 1023, i32 1023, i32 1023, i32 1023>
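
(Illustrative aside, not part of the patch: the sra-of-sra fold above simply adds the two shift amounts. A minimal sketch with a hypothetical function name; llc for x86-64 should emit the single psrad $6 checked in combine_vec_ashr_ashr0.)

define <4 x i32> @ashr_ashr_sketch(<4 x i32> %x) {
  ; ashr by 2 then by 4 combines into one ashr by 2 + 4 = 6.
  %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = ashr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}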
diff --git a/test/CodeGen/X86/combine-srem.ll b/test/CodeGen/X86/combine-srem.ll
index 6c1956ac77c..336c6b8ac03 100644
--- a/test/CodeGen/X86/combine-srem.ll
+++ b/test/CodeGen/X86/combine-srem.ll
@@ -6,11 +6,11 @@
; fold (srem undef, x) -> 0
define <4 x i32> @combine_vec_srem_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_undef0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_srem_undef0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = srem <4 x i32> undef, %x
ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_srem_undef0(<4 x i32> %x) {
; fold (srem x, undef) -> undef
define <4 x i32> @combine_vec_srem_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_undef1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_srem_undef1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = srem <4 x i32> %x, undef
ret <4 x i32> %1
@@ -32,17 +32,17 @@ define <4 x i32> @combine_vec_srem_undef1(<4 x i32> %x) {
; fold (srem x, y) -> (urem x, y) iff x and y are positive
define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pos0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_srem_by_pos0:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_srem_by_pos0:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -53,12 +53,12 @@ define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
define <4 x i32> @combine_vec_srem_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pos1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_srem_by_pos1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
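
(Illustrative aside, not part of the patch: the srem-to-urem fold above relies on both operands being provably non-negative. A minimal sketch under that assumption, with a hypothetical function name and a divisor of 16 chosen for illustration:)

define <4 x i32> @srem_by_pos_sketch(<4 x i32> %x) {
  ; The mask with 255 makes %1 non-negative, so srem becomes urem,
  ; and urem by the power of two 16 is itself just a mask: the whole
  ; function folds to x & 15, a single andps.
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = srem <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %2
}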
diff --git a/test/CodeGen/X86/combine-srl.ll b/test/CodeGen/X86/combine-srl.ll
index 9be85422731..58452855055 100644
--- a/test/CodeGen/X86/combine-srl.ll
+++ b/test/CodeGen/X86/combine-srl.ll
@@ -5,12 +5,12 @@
; fold (srl 0, x) -> 0
define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = lshr <4 x i32> zeroinitializer, %x
@@ -20,11 +20,11 @@ define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; fold (srl x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_outofrange0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_outofrange0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
ret <4 x i32> %1
@@ -32,11 +32,11 @@ define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_outofrange1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_outofrange1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
ret <4 x i32> %1
@@ -45,11 +45,11 @@ define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
; fold (srl x, 0) -> x
define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_by_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_by_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = lshr <4 x i32> %x, zeroinitializer
ret <4 x i32> %1
@@ -58,12 +58,12 @@ define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
; if (srl x, c) is known to be zero, return 0
define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_known_zero0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
@@ -73,7 +73,7 @@ define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $11, %xmm1
@@ -88,7 +88,7 @@ define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_known_zero1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
@@ -101,12 +101,12 @@ define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $6, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -116,7 +116,7 @@ define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $10, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -130,7 +130,7 @@ define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
@@ -141,12 +141,12 @@ define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; fold (srl (srl x, c1), c2) -> 0
define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
@@ -156,12 +156,12 @@ define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
@@ -172,14 +172,14 @@ define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
; fold (srl (trunc (srl x, c1)), c2) -> (trunc (srl x, (add c1, c2)))
define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $48, %xmm1
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: packusdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $48, %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
@@ -193,7 +193,7 @@ define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $35, %xmm2
; SSE-NEXT: psrlq $34, %xmm1
@@ -216,7 +216,7 @@ define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -232,12 +232,12 @@ define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; fold (srl (trunc (srl x, c1)), c2) -> 0
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = lshr <4 x i64> %x, <i64 48, i64 48, i64 48, i64 48>
@@ -248,7 +248,7 @@ define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $51, %xmm2
; SSE-NEXT: psrlq $50, %xmm1
@@ -271,7 +271,7 @@ define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
@@ -287,12 +287,12 @@ define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; fold (srl (shl x, c), c) -> (and x, cst2)
define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [1073741823,1073741823,1073741823,1073741823]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -303,12 +303,12 @@ define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>
@@ -319,12 +319,12 @@ define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
; fold (srl (sra X, Y), 31) -> (srl X, 31)
define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_lshr_ashr_sign:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_ashr_sign:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, %y
@@ -335,14 +335,14 @@ define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
; fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $4, %xmm0, %xmm0
@@ -357,7 +357,7 @@ define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE-NEXT: movdqa %xmm0, %xmm1
@@ -389,7 +389,7 @@ define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
@@ -424,7 +424,7 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
; fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_lshr_trunc_and:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
@@ -447,7 +447,7 @@ define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_and:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
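
(Illustrative aside, not part of the patch: the srl-of-shl fold above turns a left shift undone by a right shift into a mask. A minimal sketch with a hypothetical name; the constant 1073741823 is 0xffffffff >> 2, matching the vandps broadcast in combine_vec_lshr_shl_mask0.)

define <4 x i32> @shl_lshr_mask_sketch(<4 x i32> %x) {
  ; shl by 2 then lshr by 2 only clears the top two bits,
  ; so this folds to x & 1073741823 (0x3fffffff).
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}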
diff --git a/test/CodeGen/X86/combine-sse41-intrinsics.ll b/test/CodeGen/X86/combine-sse41-intrinsics.ll
index 0c8e7b317ec..698e5bc423c 100644
--- a/test/CodeGen/X86/combine-sse41-intrinsics.ll
+++ b/test/CodeGen/X86/combine-sse41-intrinsics.ll
@@ -4,7 +4,7 @@
define <2 x double> @test_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_sse41_blend_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 0)
ret <2 x double> %1
@@ -12,7 +12,7 @@ define <2 x double> @test_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1)
define <4 x float> @test_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse41_blend_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 0)
ret <4 x float> %1
@@ -20,7 +20,7 @@ define <4 x float> @test_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
define <8 x i16> @test_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_x86_sse41_pblend_w:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 0)
ret <8 x i16> %1
@@ -28,7 +28,7 @@ define <8 x i16> @test_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
define <2 x double> @test2_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test2_x86_sse41_blend_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 -1)
@@ -37,7 +37,7 @@ define <2 x double> @test2_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1
define <4 x float> @test2_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test2_x86_sse41_blend_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 -1)
@@ -46,7 +46,7 @@ define <4 x float> @test2_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
define <8 x i16> @test2_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test2_x86_sse41_pblend_w:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 -1)
@@ -55,7 +55,7 @@ define <8 x i16> @test2_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
define <2 x double> @test3_x86_sse41_blend_pd(<2 x double> %a0) {
; CHECK-LABEL: test3_x86_sse41_blend_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a0, i32 7)
ret <2 x double> %1
@@ -63,7 +63,7 @@ define <2 x double> @test3_x86_sse41_blend_pd(<2 x double> %a0) {
define <4 x float> @test3_x86_sse41_blend_ps(<4 x float> %a0) {
; CHECK-LABEL: test3_x86_sse41_blend_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a0, i32 7)
ret <4 x float> %1
@@ -71,7 +71,7 @@ define <4 x float> @test3_x86_sse41_blend_ps(<4 x float> %a0) {
define <8 x i16> @test3_x86_sse41_pblend_w(<8 x i16> %a0) {
; CHECK-LABEL: test3_x86_sse41_pblend_w:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a0, i32 7)
ret <8 x i16> %1
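
(Illustrative aside, not part of the patch: the blend folds above all come from constant immediates, mask 0 keeps the first operand, an all-ones mask keeps the second, and blending a value with itself folds away regardless of mask. A minimal sketch of the all-ones case, mirroring the i32 immediate form this test file uses; the function name is hypothetical.)

define <8 x i16> @pblendw_allones_sketch(<8 x i16> %a0, <8 x i16> %a1) {
  ; An all-ones mask selects every lane from the second operand, so
  ; the intrinsic folds to a register copy, movaps %xmm1, %xmm0.
  %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 -1)
  ret <8 x i16> %1
}
declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i32)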
diff --git a/test/CodeGen/X86/combine-sub.ll b/test/CodeGen/X86/combine-sub.ll
index e062440b42b..df5aba0f26c 100644
--- a/test/CodeGen/X86/combine-sub.ll
+++ b/test/CodeGen/X86/combine-sub.ll
@@ -5,11 +5,11 @@
; fold (sub x, 0) -> x
define <4 x i32> @combine_vec_sub_zero(<4 x i32> %a) {
; SSE-LABEL: combine_vec_sub_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = sub <4 x i32> %a, zeroinitializer
ret <4 x i32> %1
@@ -18,12 +18,12 @@ define <4 x i32> @combine_vec_sub_zero(<4 x i32> %a) {
; fold (sub x, x) -> 0
define <4 x i32> @combine_vec_sub_self(<4 x i32> %a) {
; SSE-LABEL: combine_vec_sub_self:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_self:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %a, %a
@@ -33,12 +33,12 @@ define <4 x i32> @combine_vec_sub_self(<4 x i32> %a) {
; fold (sub x, c) -> (add x, -c)
define <4 x i32> @combine_vec_sub_constant(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sub_constant:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_constant:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
@@ -48,13 +48,13 @@ define <4 x i32> @combine_vec_sub_constant(<4 x i32> %x) {
; Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
define <4 x i32> @combine_vec_sub_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sub_negone:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_negone:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -65,12 +65,12 @@ define <4 x i32> @combine_vec_sub_negone(<4 x i32> %x) {
; fold A-(A-B) -> B
define <4 x i32> @combine_vec_sub_sub(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_sub_sub:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_sub:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %a, %b
@@ -81,12 +81,12 @@ define <4 x i32> @combine_vec_sub_sub(<4 x i32> %a, <4 x i32> %b) {
; fold (A+B)-A -> B
define <4 x i32> @combine_vec_sub_add0(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_sub_add0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_add0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = add <4 x i32> %a, %b
@@ -97,11 +97,11 @@ define <4 x i32> @combine_vec_sub_add0(<4 x i32> %a, <4 x i32> %b) {
; fold (A+B)-B -> A
define <4 x i32> @combine_vec_sub_add1(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_vec_sub_add1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_add1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = add <4 x i32> %a, %b
%2 = sub <4 x i32> %1, %b
@@ -111,14 +111,14 @@ define <4 x i32> @combine_vec_sub_add1(<4 x i32> %a, <4 x i32> %b) {
; fold C2-(A+C1) -> (C2-C1)-A
define <4 x i32> @combine_vec_sub_constant_add(<4 x i32> %a) {
; SSE-LABEL: combine_vec_sub_constant_add:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [3,1,4294967295,4294967293]
; SSE-NEXT: psubd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_constant_add:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [3,1,4294967295,4294967293]
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -130,12 +130,12 @@ define <4 x i32> @combine_vec_sub_constant_add(<4 x i32> %a) {
; fold ((A+(B+C))-B) -> A+C
define <4 x i32> @combine_vec_sub_add_add(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_sub_add_add:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_add_add:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = add <4 x i32> %b, %c
@@ -147,12 +147,12 @@ define <4 x i32> @combine_vec_sub_add_add(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; fold ((A+(B-C))-B) -> A-C
define <4 x i32> @combine_vec_sub_add_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_sub_add_sub:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_add_sub:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %b, %c
@@ -164,12 +164,12 @@ define <4 x i32> @combine_vec_sub_add_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; fold ((A-(B-C))-C) -> A-B
define <4 x i32> @combine_vec_sub_sub_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_vec_sub_sub_sub:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_sub_sub:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = sub <4 x i32> %b, %c
@@ -181,11 +181,11 @@ define <4 x i32> @combine_vec_sub_sub_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; fold undef-A -> undef
define <4 x i32> @combine_vec_sub_undef0(<4 x i32> %a) {
; SSE-LABEL: combine_vec_sub_undef0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_undef0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = sub <4 x i32> undef, %a
ret <4 x i32> %1
@@ -194,11 +194,11 @@ define <4 x i32> @combine_vec_sub_undef0(<4 x i32> %a) {
; fold A-undef -> undef
define <4 x i32> @combine_vec_sub_undef1(<4 x i32> %a) {
; SSE-LABEL: combine_vec_sub_undef1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_undef1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = sub <4 x i32> %a, undef
ret <4 x i32> %1
@@ -207,14 +207,14 @@ define <4 x i32> @combine_vec_sub_undef1(<4 x i32> %a) {
; sub X, (sext Y i1) -> add X, (and Y 1)
define <4 x i32> @combine_vec_add_sext(<4 x i32> %x, <4 x i1> %y) {
; SSE-LABEL: combine_vec_add_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm1, %xmm1
; AVX-NEXT: vpsrad $31, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
@@ -227,14 +227,14 @@ define <4 x i32> @combine_vec_add_sext(<4 x i32> %x, <4 x i1> %y) {
; sub X, (sextinreg Y i1) -> add X, (and Y 1)
define <4 x i32> @combine_vec_sub_sextinreg(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_sub_sextinreg:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sub_sextinreg:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm1, %xmm1
; AVX-NEXT: vpsrad $31, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
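
(Illustrative aside, not part of the patch: several of the sub folds above cancel algebraically. A minimal sketch of the A - (A - B) -> B case, hypothetical name; codegen is the plain movaps %xmm1, %xmm0 checked in combine_vec_sub_sub.)

define <4 x i32> @sub_sub_sketch(<4 x i32> %a, <4 x i32> %b) {
  ; a - (a - b) == b, so no arithmetic survives; only a register
  ; copy of %b into the return register remains.
  %1 = sub <4 x i32> %a, %b
  %2 = sub <4 x i32> %a, %1
  ret <4 x i32> %2
}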
diff --git a/test/CodeGen/X86/combine-testm-and.ll b/test/CodeGen/X86/combine-testm-and.ll
index b10a4b5ed29..9c03bce7b6d 100644
--- a/test/CodeGen/X86/combine-testm-and.ll
+++ b/test/CodeGen/X86/combine-testm-and.ll
@@ -3,7 +3,7 @@
define i32 @combineTESTM_AND_1(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: combineTESTM_AND_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovb %k0, %eax
; CHECK-NEXT: vzeroupper
@@ -16,7 +16,7 @@ define i32 @combineTESTM_AND_1(<8 x i64> %a, <8 x i64> %b) {
define i32 @combineTESTM_AND_2(<8 x i64> %a, <8 x i64> %b , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovb %k0, %eax
@@ -30,7 +30,7 @@ define i32 @combineTESTM_AND_2(<8 x i64> %a, <8 x i64> %b , i8 %mask) {
define i32 @combineTESTM_AND_mask_3(<8 x i64> %a, <8 x i64>* %bptr , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_mask_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vptestmq (%rdi), %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovb %k0, %eax
@@ -45,7 +45,7 @@ define i32 @combineTESTM_AND_mask_3(<8 x i64> %a, <8 x i64>* %bptr , i8 %mask) {
define i32 @combineTESTM_AND_mask_4(<8 x i64> %a, <8 x i64>* %bptr , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_mask_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vptestmq (%rdi), %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovb %k0, %eax
diff --git a/test/CodeGen/X86/combine-udiv.ll b/test/CodeGen/X86/combine-udiv.ll
index b6ae2fa6d15..d8e1ac216c9 100644
--- a/test/CodeGen/X86/combine-udiv.ll
+++ b/test/CodeGen/X86/combine-udiv.ll
@@ -6,11 +6,11 @@
; fold (udiv undef, x) -> 0
define <4 x i32> @combine_vec_udiv_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_udiv_undef0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_udiv_undef0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = udiv <4 x i32> undef, %x
ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_udiv_undef0(<4 x i32> %x) {
; fold (udiv x, undef) -> undef
define <4 x i32> @combine_vec_udiv_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_udiv_undef1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_udiv_undef1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = udiv <4 x i32> %x, undef
ret <4 x i32> %1
@@ -32,12 +32,12 @@ define <4 x i32> @combine_vec_udiv_undef1(<4 x i32> %x) {
; fold (udiv x, (1 << c)) -> x >>u c
define <4 x i32> @combine_vec_udiv_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_udiv_by_pow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_udiv_by_pow2a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = udiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
@@ -46,7 +46,7 @@ define <4 x i32> @combine_vec_udiv_by_pow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_udiv_by_pow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $3, %xmm1
@@ -59,7 +59,7 @@ define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_udiv_by_pow2b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $2, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -69,7 +69,7 @@ define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_udiv_by_pow2b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = udiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
@@ -78,7 +78,7 @@ define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_udiv_by_pow2c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: movdqa %xmm0, %xmm3
@@ -99,7 +99,7 @@ define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_udiv_by_pow2c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
@@ -115,7 +115,7 @@ define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_udiv_by_pow2c:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
@@ -126,7 +126,7 @@ define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_udiv_by_shl_pow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -148,7 +148,7 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_udiv_by_shl_pow2a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
@@ -165,7 +165,7 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_udiv_by_shl_pow2a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2]
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
@@ -177,7 +177,7 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_udiv_by_shl_pow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -199,7 +199,7 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_udiv_by_shl_pow2b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
@@ -216,7 +216,7 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_udiv_by_shl_pow2b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
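
(Illustrative aside, not part of the patch: unsigned division by a power of two is a logical right shift, which is why combine_vec_udiv_by_pow2a checks a bare psrld $2. Minimal sketch, hypothetical name:)

define <4 x i32> @udiv_pow2_sketch(<4 x i32> %x) {
  ; udiv by 4 == lshr by 2 for unsigned values; one psrld $2.
  %1 = udiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}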
diff --git a/test/CodeGen/X86/combine-urem.ll b/test/CodeGen/X86/combine-urem.ll
index 4c7716bbaeb..2530136c054 100644
--- a/test/CodeGen/X86/combine-urem.ll
+++ b/test/CodeGen/X86/combine-urem.ll
@@ -6,11 +6,11 @@
; fold (urem undef, x) -> 0
define <4 x i32> @combine_vec_urem_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_undef0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_urem_undef0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = urem <4 x i32> undef, %x
ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_urem_undef0(<4 x i32> %x) {
; fold (urem x, undef) -> undef
define <4 x i32> @combine_vec_urem_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_undef1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_urem_undef1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = urem <4 x i32> %x, undef
ret <4 x i32> %1
@@ -32,17 +32,17 @@ define <4 x i32> @combine_vec_urem_undef1(<4 x i32> %x) {
; fold (urem x, pow2) -> (and x, (pow2-1))
define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_pow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_pow2a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_pow2a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [3,3,3,3]
; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -52,12 +52,12 @@ define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_pow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_urem_by_pow2b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = urem <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
@@ -66,7 +66,7 @@ define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_pow2c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -76,7 +76,7 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_pow2c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
@@ -86,7 +86,7 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_pow2c:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -100,7 +100,7 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_pow2d:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
@@ -125,7 +125,7 @@ define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_pow2d:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT: vpsrld %xmm2, %xmm3, %xmm2
@@ -145,7 +145,7 @@ define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_pow2d:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpsrlvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -160,7 +160,7 @@ define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
; fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_shl_pow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -171,7 +171,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_shl_pow2a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
@@ -182,7 +182,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_shl_pow2a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [4,4,4,4]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -196,7 +196,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_shl_pow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -207,7 +207,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_shl_pow2b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
@@ -218,7 +218,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_shl_pow2b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
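
(Illustrative aside, not part of the patch: the unsigned-remainder counterpart is a mask, x urem 4 == x & 3, which is why combine_vec_urem_by_pow2a checks a single andps against <3,3,3,3>. Minimal sketch, hypothetical name:)

define <4 x i32> @urem_pow2_sketch(<4 x i32> %x) {
  ; The remainder of division by 4 is the low two bits: x & 3.
  %1 = urem <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}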
diff --git a/test/CodeGen/X86/commute-3dnow.ll b/test/CodeGen/X86/commute-3dnow.ll
index b7a01efe2d3..bf7892af44f 100644
--- a/test/CodeGen/X86/commute-3dnow.ll
+++ b/test/CodeGen/X86/commute-3dnow.ll
@@ -4,7 +4,7 @@
define void @commute_m_pfadd(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pfadd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -15,7 +15,7 @@ define void @commute_m_pfadd(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pfadd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: pfadd (%rsi), %mm0
; X64-NEXT: pfadd (%rdx), %mm0
@@ -33,7 +33,7 @@ declare x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx, x86_mmx)
define void @commute_m_pfsub(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pfsub:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -44,7 +44,7 @@ define void @commute_m_pfsub(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pfsub:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: pfsub (%rsi), %mm0
; X64-NEXT: pfsubr (%rdx), %mm0
@@ -62,7 +62,7 @@ declare x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx, x86_mmx)
define void @commute_m_pfsubr(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pfsubr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -73,7 +73,7 @@ define void @commute_m_pfsubr(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pfsubr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: pfsubr (%rsi), %mm0
; X64-NEXT: pfsub (%rdx), %mm0
@@ -91,7 +91,7 @@ declare x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx, x86_mmx)
define void @commute_m_pfmul(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pfmul:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -102,7 +102,7 @@ define void @commute_m_pfmul(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pfmul:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: pfmul (%rsi), %mm0
; X64-NEXT: pfmul (%rdx), %mm0
@@ -121,7 +121,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx, x86_mmx)
; PFMAX can't commute without fast-math.
define void @commute_m_pfmax(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pfmax:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -133,7 +133,7 @@ define void @commute_m_pfmax(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pfmax:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movq (%rdx), %mm1
; X64-NEXT: pfmax (%rsi), %mm0
@@ -153,7 +153,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx, x86_mmx)
; PFMIN can't commute without fast-math.
define void @commute_m_pfmin(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pfmin:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -165,7 +165,7 @@ define void @commute_m_pfmin(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pfmin:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movq (%rdx), %mm1
; X64-NEXT: pfmin (%rsi), %mm0
@@ -184,7 +184,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx, x86_mmx)
define void @commute_m_pfcmpeq(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pfcmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -195,7 +195,7 @@ define void @commute_m_pfcmpeq(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwin
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pfcmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: pfcmpeq (%rsi), %mm0
; X64-NEXT: pfcmpeq (%rdx), %mm0
@@ -213,7 +213,7 @@ declare x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx, x86_mmx)
define void @commute_m_pavgusb(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pavgusb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -224,7 +224,7 @@ define void @commute_m_pavgusb(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwin
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pavgusb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: pavgusb (%rsi), %mm0
; X64-NEXT: pavgusb (%rdx), %mm0
@@ -242,7 +242,7 @@ declare x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx, x86_mmx)
define void @commute_m_pmulhrw(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
; X32-LABEL: commute_m_pmulhrw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -253,7 +253,7 @@ define void @commute_m_pmulhrw(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwin
; X32-NEXT: retl
;
; X64-LABEL: commute_m_pmulhrw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: pmulhrw (%rsi), %mm0
; X64-NEXT: pmulhrw (%rdx), %mm0
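
(Illustrative aside, not part of the patch: the commute-* tests check that the backend may swap the operands of a commutative instruction so the memory operand lands in the foldable source slot. A minimal sketch for pfadd, with a hypothetical name and an operand order chosen so folding requires the swap; pfmax and pfmin are excluded from this, as the comments above note, because they do not commute without fast-math.)

define void @pfadd_commute_sketch(x86_mmx* %a0, x86_mmx* %a1) nounwind {
  ; %2 is the loaded value sitting in the non-foldable first-operand
  ; slot; because pfadd commutes, it still folds: pfadd (%rsi), %mm0.
  %1 = load x86_mmx, x86_mmx* %a0
  %2 = load x86_mmx, x86_mmx* %a1
  %3 = call x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx %2, x86_mmx %1)
  store x86_mmx %3, x86_mmx* %a0
  ret void
}
declare x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx, x86_mmx)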
diff --git a/test/CodeGen/X86/commute-blend-avx2.ll b/test/CodeGen/X86/commute-blend-avx2.ll
index 9b907a86800..b3c8cefab5b 100644
--- a/test/CodeGen/X86/commute-blend-avx2.ll
+++ b/test/CodeGen/X86/commute-blend-avx2.ll
@@ -3,7 +3,7 @@
define <8 x i16> @commute_fold_vpblendw_128(<8 x i16> %a, <8 x i16>* %b) #0 {
; CHECK-LABEL: commute_fold_vpblendw_128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3],xmm0[4],mem[5,6,7]
; CHECK-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %b
@@ -14,7 +14,7 @@ declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind rea
define <16 x i16> @commute_fold_vpblendw_256(<16 x i16> %a, <16 x i16>* %b) #0 {
; CHECK-LABEL: commute_fold_vpblendw_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],mem[1,2,3],ymm0[4],mem[5,6,7],ymm0[8],mem[9,10,11],ymm0[12],mem[13,14,15]
; CHECK-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %b
@@ -25,7 +25,7 @@ declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind r
define <4 x i32> @commute_fold_vpblendd_128(<4 x i32> %a, <4 x i32>* %b) #0 {
; CHECK-LABEL: commute_fold_vpblendd_128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
; CHECK-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %b
@@ -36,7 +36,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind
define <8 x i32> @commute_fold_vpblendd_256(<8 x i32> %a, <8 x i32>* %b) #0 {
; CHECK-LABEL: commute_fold_vpblendd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6],ymm0[7]
; CHECK-NEXT: retq
%1 = load <8 x i32>, <8 x i32>* %b
@@ -47,7 +47,7 @@ declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind
define <4 x float> @commute_fold_vblendps_128(<4 x float> %a, <4 x float>* %b) #0 {
; CHECK-LABEL: commute_fold_vblendps_128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3]
; CHECK-NEXT: retq
%1 = load <4 x float>, <4 x float>* %b
@@ -58,7 +58,7 @@ declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwi
define <8 x float> @commute_fold_vblendps_256(<8 x float> %a, <8 x float>* %b) #0 {
; CHECK-LABEL: commute_fold_vblendps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3,4,5,6,7]
; CHECK-NEXT: retq
%1 = load <8 x float>, <8 x float>* %b
@@ -69,7 +69,7 @@ declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i8) nou
define <2 x double> @commute_fold_vblendpd_128(<2 x double> %a, <2 x double>* %b) #0 {
; CHECK-LABEL: commute_fold_vblendpd_128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; CHECK-NEXT: retq
%1 = load <2 x double>, <2 x double>* %b
@@ -80,7 +80,7 @@ declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nou
define <4 x double> @commute_fold_vblendpd_256(<4 x double> %a, <4 x double>* %b) #0 {
; CHECK-LABEL: commute_fold_vblendpd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],mem[3]
; CHECK-NEXT: retq
%1 = load <4 x double>, <4 x double>* %b
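
(Illustrative aside, not part of the patch: blends are not commutative as-is, but swapping their operands while complementing the lane mask gives the same result, which is how these loads get folded. Minimal sketch for the 128-bit integer blend, hypothetical name; mask 1 commutes to mask 14, producing the xmm0[0],mem[1,2,3] pattern checked above.)

define <4 x i32> @vpblendd_commute_sketch(<4 x i32> %a, <4 x i32>* %b) {
  ; The load sits in the non-foldable first slot; commuting with the
  ; complemented mask (0b0001 -> 0b1110) lets it fold from memory.
  %1 = load <4 x i32>, <4 x i32>* %b
  %2 = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %1, <4 x i32> %a, i8 1)
  ret <4 x i32> %2
}
declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8)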
diff --git a/test/CodeGen/X86/commute-blend-sse41.ll b/test/CodeGen/X86/commute-blend-sse41.ll
index 14a685b179a..d296c10fdae 100644
--- a/test/CodeGen/X86/commute-blend-sse41.ll
+++ b/test/CodeGen/X86/commute-blend-sse41.ll
@@ -3,7 +3,7 @@
define <8 x i16> @commute_fold_pblendw(<8 x i16> %a, <8 x i16>* %b) #0 {
; CHECK-LABEL: commute_fold_pblendw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3],xmm0[4],mem[5,6,7]
; CHECK-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %b
@@ -14,7 +14,7 @@ declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind rea
define <4 x float> @commute_fold_blendps(<4 x float> %a, <4 x float>* %b) #0 {
; CHECK-LABEL: commute_fold_blendps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3]
; CHECK-NEXT: retq
%1 = load <4 x float>, <4 x float>* %b
@@ -25,7 +25,7 @@ declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwi
define <2 x double> @commute_fold_blendpd(<2 x double> %a, <2 x double>* %b) #0 {
; CHECK-LABEL: commute_fold_blendpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; CHECK-NEXT: retq
%1 = load <2 x double>, <2 x double>* %b
diff --git a/test/CodeGen/X86/commute-clmul.ll b/test/CodeGen/X86/commute-clmul.ll
index 1c2337cef2f..e8c61befc39 100644
--- a/test/CodeGen/X86/commute-clmul.ll
+++ b/test/CodeGen/X86/commute-clmul.ll
@@ -7,12 +7,12 @@ declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnon
define <2 x i64> @commute_lq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
; SSE-LABEL: commute_lq_lq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pclmulqdq $0, (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_lq_lq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
@@ -22,12 +22,12 @@ define <2 x i64> @commute_lq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
define <2 x i64> @commute_lq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
; SSE-LABEL: commute_lq_hq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pclmulqdq $1, (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_lq_hq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpclmulqdq $1, (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
@@ -37,12 +37,12 @@ define <2 x i64> @commute_lq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
define <2 x i64> @commute_hq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
; SSE-LABEL: commute_hq_lq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pclmulqdq $16, (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_hq_lq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpclmulqdq $16, (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
@@ -52,12 +52,12 @@ define <2 x i64> @commute_hq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
define <2 x i64> @commute_hq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
; SSE-LABEL: commute_hq_hq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pclmulqdq $17, (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_hq_hq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpclmulqdq $17, (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
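
(Illustrative aside, not part of the patch: pclmulqdq selects one qword from each source, bit 0 of the immediate for the first source and bit 4 for the second, so commuting the sources just exchanges those two bits. Minimal sketch, hypothetical name; imm 1 becomes imm 16 after the swap, matching the pclmulqdq $16, (%rdi), %xmm0 checked in commute_hq_lq.)

define <2 x i64> @clmul_commute_sketch(<2 x i64>* %a0, <2 x i64> %a1) {
  ; The loaded value is the first source with imm 1 (its high qword);
  ; swapped into the foldable slot, the same product is imm 16.
  %1 = load <2 x i64>, <2 x i64>* %a0
  %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 1)
  ret <2 x i64> %2
}
declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8)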
diff --git a/test/CodeGen/X86/commute-fcmp.ll b/test/CodeGen/X86/commute-fcmp.ll
index 30a504236da..f5b70304d70 100644
--- a/test/CodeGen/X86/commute-fcmp.ll
+++ b/test/CodeGen/X86/commute-fcmp.ll
@@ -10,17 +10,17 @@
define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) {
; SSE-LABEL: commute_cmpps_eq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpeqps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_eq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_eq:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqps (%rdi), %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -33,17 +33,17 @@ define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) {
define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) {
; SSE-LABEL: commute_cmpps_ne:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpneqps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_ne:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpneqps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_ne:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpneqps (%rdi), %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -56,17 +56,17 @@ define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) {
define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) {
; SSE-LABEL: commute_cmpps_ord:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpordps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_ord:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpordps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_ord:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpordps (%rdi), %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -79,17 +79,17 @@ define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) {
define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) {
; SSE-LABEL: commute_cmpps_uno:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpunordps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_uno:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpunordps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_uno:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpunordps (%rdi), %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -102,7 +102,7 @@ define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) {
define <4 x i32> @commute_cmpps_ueq(<4 x float>* %a0, <4 x float> %a1) {
; SSE-LABEL: commute_cmpps_ueq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: cmpeqps %xmm0, %xmm2
@@ -111,13 +111,13 @@ define <4 x i32> @commute_cmpps_ueq(<4 x float>* %a0, <4 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_ueq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vcmpeq_uqps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_ueq:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %xmm1
; AVX512-NEXT: vcmpeq_uqps %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -131,7 +131,7 @@ define <4 x i32> @commute_cmpps_ueq(<4 x float>* %a0, <4 x float> %a1) {
define <4 x i32> @commute_cmpps_one(<4 x float>* %a0, <4 x float> %a1) {
; SSE-LABEL: commute_cmpps_one:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: cmpneqps %xmm0, %xmm2
@@ -140,13 +140,13 @@ define <4 x i32> @commute_cmpps_one(<4 x float>* %a0, <4 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_one:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vcmpneq_oqps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_one:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %xmm1
; AVX512-NEXT: vcmpneq_oqps %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -160,20 +160,20 @@ define <4 x i32> @commute_cmpps_one(<4 x float>* %a0, <4 x float> %a1) {
define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) {
; SSE-LABEL: commute_cmpps_lt:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm1
; SSE-NEXT: cmpltps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_lt:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_lt:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %xmm1
; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -187,20 +187,20 @@ define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) {
define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) {
; SSE-LABEL: commute_cmpps_le:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm1
; SSE-NEXT: cmpleps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_le:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_le:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %xmm1
; AVX512-NEXT: vcmpleps %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -214,18 +214,18 @@ define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) {
define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-LABEL: commute_cmpps_eq_ymm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpeqps (%rdi), %xmm0
; SSE-NEXT: cmpeqps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_eq_ymm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_eq_ymm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqps (%rdi), %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -238,18 +238,18 @@ define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) {
define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-LABEL: commute_cmpps_ne_ymm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpneqps (%rdi), %xmm0
; SSE-NEXT: cmpneqps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_ne_ymm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpneqps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_ne_ymm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpneqps (%rdi), %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -262,18 +262,18 @@ define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) {
define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-LABEL: commute_cmpps_ord_ymm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpordps (%rdi), %xmm0
; SSE-NEXT: cmpordps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_ord_ymm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpordps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_ord_ymm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpordps (%rdi), %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -286,18 +286,18 @@ define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) {
define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-LABEL: commute_cmpps_uno_ymm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpunordps (%rdi), %xmm0
; SSE-NEXT: cmpunordps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_uno_ymm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpunordps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_uno_ymm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpunordps (%rdi), %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -310,7 +310,7 @@ define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) {
define <8 x i32> @commute_cmpps_ueq_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-LABEL: commute_cmpps_ueq_ymm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm2
; SSE-NEXT: movaps 16(%rdi), %xmm3
; SSE-NEXT: movaps %xmm2, %xmm4
@@ -324,13 +324,13 @@ define <8 x i32> @commute_cmpps_ueq_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_ueq_ymm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vcmpeq_uqps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_ueq_ymm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %ymm1
; AVX512-NEXT: vcmpeq_uqps %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -344,7 +344,7 @@ define <8 x i32> @commute_cmpps_ueq_ymm(<8 x float>* %a0, <8 x float> %a1) {
define <8 x i32> @commute_cmpps_one_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-LABEL: commute_cmpps_one_ymm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm2
; SSE-NEXT: movaps 16(%rdi), %xmm3
; SSE-NEXT: movaps %xmm2, %xmm4
@@ -358,13 +358,13 @@ define <8 x i32> @commute_cmpps_one_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_one_ymm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vcmpneq_oqps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_one_ymm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %ymm1
; AVX512-NEXT: vcmpneq_oqps %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -378,7 +378,7 @@ define <8 x i32> @commute_cmpps_one_ymm(<8 x float>* %a0, <8 x float> %a1) {
define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-LABEL: commute_cmpps_lt_ymm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm2
; SSE-NEXT: movaps 16(%rdi), %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
@@ -388,13 +388,13 @@ define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_lt_ymm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_lt_ymm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %ymm1
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -408,7 +408,7 @@ define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) {
define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-LABEL: commute_cmpps_le_ymm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm2
; SSE-NEXT: movaps 16(%rdi), %xmm3
; SSE-NEXT: cmpleps %xmm0, %xmm2
@@ -418,13 +418,13 @@ define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmpps_le_ymm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmpps_le_ymm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %ymm1
; AVX512-NEXT: vcmpleps %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -443,17 +443,17 @@ define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) {
define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) {
; SSE-LABEL: commute_cmppd_eq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpeqpd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_eq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_eq:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqpd (%rdi), %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -466,17 +466,17 @@ define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) {
define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) {
; SSE-LABEL: commute_cmppd_ne:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpneqpd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_ne:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpneqpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_ne:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpneqpd (%rdi), %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -489,17 +489,17 @@ define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) {
define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) {
; SSE-LABEL: commute_cmppd_ord:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpordpd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_ord:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpordpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_ord:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpordpd (%rdi), %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -512,7 +512,7 @@ define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) {
define <2 x i64> @commute_cmppd_ueq(<2 x double>* %a0, <2 x double> %a1) {
; SSE-LABEL: commute_cmppd_ueq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: cmpeqpd %xmm0, %xmm2
@@ -521,13 +521,13 @@ define <2 x i64> @commute_cmppd_ueq(<2 x double>* %a0, <2 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_ueq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vcmpeq_uqpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_ueq:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd (%rdi), %xmm1
; AVX512-NEXT: vcmpeq_uqpd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -541,7 +541,7 @@ define <2 x i64> @commute_cmppd_ueq(<2 x double>* %a0, <2 x double> %a1) {
define <2 x i64> @commute_cmppd_one(<2 x double>* %a0, <2 x double> %a1) {
; SSE-LABEL: commute_cmppd_one:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: cmpneqpd %xmm0, %xmm2
@@ -550,13 +550,13 @@ define <2 x i64> @commute_cmppd_one(<2 x double>* %a0, <2 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_one:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vcmpneq_oqpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_one:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd (%rdi), %xmm1
; AVX512-NEXT: vcmpneq_oqpd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -570,17 +570,17 @@ define <2 x i64> @commute_cmppd_one(<2 x double>* %a0, <2 x double> %a1) {
define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) {
; SSE-LABEL: commute_cmppd_uno:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpunordpd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_uno:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpunordpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_uno:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpunordpd (%rdi), %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -593,20 +593,20 @@ define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) {
define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) {
; SSE-LABEL: commute_cmppd_lt:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm1
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_lt:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_lt:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd (%rdi), %xmm1
; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -620,20 +620,20 @@ define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) {
define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) {
; SSE-LABEL: commute_cmppd_le:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm1
; SSE-NEXT: cmplepd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_le:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_le:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd (%rdi), %xmm1
; AVX512-NEXT: vcmplepd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -647,18 +647,18 @@ define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) {
define <4 x i64> @commute_cmppd_eq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-LABEL: commute_cmppd_eq_ymmm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpeqpd (%rdi), %xmm0
; SSE-NEXT: cmpeqpd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_eq_ymmm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_eq_ymmm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqpd (%rdi), %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -671,18 +671,18 @@ define <4 x i64> @commute_cmppd_eq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
define <4 x i64> @commute_cmppd_ne_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-LABEL: commute_cmppd_ne_ymmm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpneqpd (%rdi), %xmm0
; SSE-NEXT: cmpneqpd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_ne_ymmm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpneqpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_ne_ymmm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpneqpd (%rdi), %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -695,18 +695,18 @@ define <4 x i64> @commute_cmppd_ne_ymmm(<4 x double>* %a0, <4 x double> %a1) {
define <4 x i64> @commute_cmppd_ord_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-LABEL: commute_cmppd_ord_ymmm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpordpd (%rdi), %xmm0
; SSE-NEXT: cmpordpd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_ord_ymmm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpordpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_ord_ymmm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpordpd (%rdi), %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -719,18 +719,18 @@ define <4 x i64> @commute_cmppd_ord_ymmm(<4 x double>* %a0, <4 x double> %a1) {
define <4 x i64> @commute_cmppd_uno_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-LABEL: commute_cmppd_uno_ymmm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpunordpd (%rdi), %xmm0
; SSE-NEXT: cmpunordpd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_uno_ymmm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpunordpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_uno_ymmm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpunordpd (%rdi), %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -743,7 +743,7 @@ define <4 x i64> @commute_cmppd_uno_ymmm(<4 x double>* %a0, <4 x double> %a1) {
define <4 x i64> @commute_cmppd_ueq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-LABEL: commute_cmppd_ueq_ymmm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm2
; SSE-NEXT: movapd 16(%rdi), %xmm3
; SSE-NEXT: movapd %xmm2, %xmm4
@@ -757,13 +757,13 @@ define <4 x i64> @commute_cmppd_ueq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_ueq_ymmm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %ymm1
; AVX-NEXT: vcmpeq_uqpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_ueq_ymmm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd (%rdi), %ymm1
; AVX512-NEXT: vcmpeq_uqpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -777,7 +777,7 @@ define <4 x i64> @commute_cmppd_ueq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
define <4 x i64> @commute_cmppd_one_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-LABEL: commute_cmppd_one_ymmm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm2
; SSE-NEXT: movapd 16(%rdi), %xmm3
; SSE-NEXT: movapd %xmm2, %xmm4
@@ -791,13 +791,13 @@ define <4 x i64> @commute_cmppd_one_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_one_ymmm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %ymm1
; AVX-NEXT: vcmpneq_oqpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_one_ymmm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd (%rdi), %ymm1
; AVX512-NEXT: vcmpneq_oqpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -811,7 +811,7 @@ define <4 x i64> @commute_cmppd_one_ymmm(<4 x double>* %a0, <4 x double> %a1) {
define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-LABEL: commute_cmppd_lt_ymmm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm2
; SSE-NEXT: movapd 16(%rdi), %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
@@ -821,13 +821,13 @@ define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_lt_ymmm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %ymm1
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_lt_ymmm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd (%rdi), %ymm1
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -841,7 +841,7 @@ define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) {
define <4 x i64> @commute_cmppd_le_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-LABEL: commute_cmppd_le_ymmm:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm2
; SSE-NEXT: movapd 16(%rdi), %xmm3
; SSE-NEXT: cmplepd %xmm0, %xmm2
@@ -851,13 +851,13 @@ define <4 x i64> @commute_cmppd_le_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: commute_cmppd_le_ymmm:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %ymm1
; AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: commute_cmppd_le_ymmm:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd (%rdi), %ymm1
; AVX512-NEXT: vcmplepd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/commute-vpclmulqdq-avx.ll b/test/CodeGen/X86/commute-vpclmulqdq-avx.ll
index ec75316bac4..0d9ea5450a0 100644
--- a/test/CodeGen/X86/commute-vpclmulqdq-avx.ll
+++ b/test/CodeGen/X86/commute-vpclmulqdq-avx.ll
@@ -6,7 +6,7 @@ declare <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64>, <4 x i64>, i8) nounwind rea
define <4 x i64> @commute_v1(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_v1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $0, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vxorps %ymm0, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -18,7 +18,7 @@ define <4 x i64> @commute_v1(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @commute_v2(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_v2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $16, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vxorps %ymm0, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -30,7 +30,7 @@ define <4 x i64> @commute_v2(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @commute_v3(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_v3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $17, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vxorps %ymm0, %ymm0, %ymm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/commute-vpclmulqdq-avx512.ll b/test/CodeGen/X86/commute-vpclmulqdq-avx512.ll
index c9366056a4f..400f27baca6 100644
--- a/test/CodeGen/X86/commute-vpclmulqdq-avx512.ll
+++ b/test/CodeGen/X86/commute-vpclmulqdq-avx512.ll
@@ -8,7 +8,7 @@ declare <8 x i64> @llvm.x86.pclmulqdq.512(<8 x i64>, <8 x i64>, i8) nounwind rea
define <2 x i64> @commute_xmm_v1(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: commute_xmm_v1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -20,7 +20,7 @@ define <2 x i64> @commute_xmm_v1(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @commute_xmm_v2(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: commute_xmm_v2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $16, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -32,7 +32,7 @@ define <2 x i64> @commute_xmm_v2(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @commute_xmm_v3(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: commute_xmm_v3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $17, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -44,7 +44,7 @@ define <2 x i64> @commute_xmm_v3(<2 x i64> %a0, <2 x i64> %a1) {
define <4 x i64> @commute_ymm_v1(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_ymm_v1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $0, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpxor %ymm0, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -56,7 +56,7 @@ define <4 x i64> @commute_ymm_v1(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @commute_ymm_v2(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_ymm_v2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $16, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpxor %ymm0, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -68,7 +68,7 @@ define <4 x i64> @commute_ymm_v2(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @commute_ymm_v3(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: commute_ymm_v3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $17, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpxor %ymm0, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -80,7 +80,7 @@ define <4 x i64> @commute_ymm_v3(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i64> @commute_zmm_v1(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: commute_zmm_v1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $0, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpxorq %zmm0, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -92,7 +92,7 @@ define <8 x i64> @commute_zmm_v1(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @commute_zmm_v2(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: commute_zmm_v2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $16, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpxorq %zmm0, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -104,7 +104,7 @@ define <8 x i64> @commute_zmm_v2(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i64> @commute_zmm_v3(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: commute_zmm_v3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpclmulqdq $17, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpxorq %zmm0, %zmm0, %zmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/commute-xop.ll b/test/CodeGen/X86/commute-xop.ll
index 4043155ba8d..3dfb24db7fb 100644
--- a/test/CodeGen/X86/commute-xop.ll
+++ b/test/CodeGen/X86/commute-xop.ll
@@ -4,13 +4,13 @@
define <16 x i8> @commute_fold_vpcomb(<16 x i8>* %a0, <16 x i8> %a1) {
; X32-LABEL: commute_fold_vpcomb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcomgtb (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpcomb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomgtb (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a0
@@ -21,13 +21,13 @@ declare <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8>, <16 x i8>, i8) nounwind readno
define <4 x i32> @commute_fold_vpcomd(<4 x i32>* %a0, <4 x i32> %a1) {
; X32-LABEL: commute_fold_vpcomd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcomged (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpcomd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomged (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
@@ -38,13 +38,13 @@ declare <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32>, <4 x i32>, i8) nounwind readno
define <2 x i64> @commute_fold_vpcomq(<2 x i64>* %a0, <2 x i64> %a1) {
; X32-LABEL: commute_fold_vpcomq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcomltq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpcomq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
@@ -55,13 +55,13 @@ declare <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64>, <2 x i64>, i8) nounwind readno
define <16 x i8> @commute_fold_vpcomub(<16 x i8>* %a0, <16 x i8> %a1) {
; X32-LABEL: commute_fold_vpcomub:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcomleub (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpcomub:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomleub (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a0
@@ -72,13 +72,13 @@ declare <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8>, <16 x i8>, i8) nounwind readn
define <4 x i32> @commute_fold_vpcomud(<4 x i32>* %a0, <4 x i32> %a1) {
; X32-LABEL: commute_fold_vpcomud:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcomequd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpcomud:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomequd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
@@ -89,13 +89,13 @@ declare <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32>, <4 x i32>, i8) nounwind readn
define <2 x i64> @commute_fold_vpcomuq(<2 x i64>* %a0, <2 x i64> %a1) {
; X32-LABEL: commute_fold_vpcomuq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcomnequq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpcomuq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomnequq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
@@ -106,13 +106,13 @@ declare <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64>, <2 x i64>, i8) nounwind readn
define <8 x i16> @commute_fold_vpcomuw(<8 x i16>* %a0, <8 x i16> %a1) {
; X32-LABEL: commute_fold_vpcomuw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcomfalseuw (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpcomuw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomfalseuw (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
@@ -123,13 +123,13 @@ declare <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16>, <8 x i16>, i8) nounwind readn
define <8 x i16> @commute_fold_vpcomw(<8 x i16>* %a0, <8 x i16> %a1) {
; X32-LABEL: commute_fold_vpcomw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcomtruew (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpcomw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomtruew (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
@@ -140,13 +140,13 @@ declare <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16>, <8 x i16>, i8) nounwind readno
define <4 x i32> @commute_fold_vpmacsdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
; X32-LABEL: commute_fold_vpmacsdd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacsdd %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacsdd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsdd %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
@@ -157,13 +157,13 @@ declare <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwi
define <2 x i64> @commute_fold_vpmacsdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
; X32-LABEL: commute_fold_vpmacsdqh:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacsdqh %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacsdqh:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsdqh %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
@@ -174,13 +174,13 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounw
define <2 x i64> @commute_fold_vpmacsdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
; X32-LABEL: commute_fold_vpmacsdql:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacsdql %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacsdql:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsdql %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
@@ -191,13 +191,13 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32>, <4 x i32>, <2 x i64>) nounw
define <4 x i32> @commute_fold_vpmacssdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
; X32-LABEL: commute_fold_vpmacssdd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacssdd %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacssdd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacssdd %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
@@ -208,13 +208,13 @@ declare <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32>, <4 x i32>, <4 x i32>) nounw
define <2 x i64> @commute_fold_vpmacssdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
; X32-LABEL: commute_fold_vpmacssdqh:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacssdqh %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacssdqh:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacssdqh %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
@@ -225,13 +225,13 @@ declare <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32>, <4 x i32>, <2 x i64>) noun
define <2 x i64> @commute_fold_vpmacssdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
; X32-LABEL: commute_fold_vpmacssdql:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacssdql %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacssdql:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacssdql %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
@@ -242,13 +242,13 @@ declare <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32>, <4 x i32>, <2 x i64>) noun
define <4 x i32> @commute_fold_vpmacsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
; X32-LABEL: commute_fold_vpmacsswd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacsswd %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacsswd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsswd %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
@@ -259,13 +259,13 @@ declare <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounw
define <8 x i16> @commute_fold_vpmacssww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
; X32-LABEL: commute_fold_vpmacssww:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacssww %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacssww:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacssww %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
@@ -276,13 +276,13 @@ declare <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16>, <8 x i16>, <8 x i16>) nounw
define <4 x i32> @commute_fold_vpmacswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
; X32-LABEL: commute_fold_vpmacswd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacswd %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacswd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacswd %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
@@ -293,13 +293,13 @@ declare <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwi
define <8 x i16> @commute_fold_vpmacsww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
; X32-LABEL: commute_fold_vpmacsww:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmacsww %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmacsww:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsww %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
@@ -310,13 +310,13 @@ declare <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16>, <8 x i16>, <8 x i16>) nounwi
define <4 x i32> @commute_fold_vpmadcsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
; X32-LABEL: commute_fold_vpmadcsswd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmadcsswd %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmadcsswd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmadcsswd %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
@@ -327,13 +327,13 @@ declare <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16>, <8 x i16>, <4 x i32>) noun
define <4 x i32> @commute_fold_vpmadcswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
; X32-LABEL: commute_fold_vpmadcswd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmadcswd %xmm1, (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: commute_fold_vpmadcswd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmadcswd %xmm1, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
diff --git a/test/CodeGen/X86/complex-fastmath.ll b/test/CodeGen/X86/complex-fastmath.ll
index d31707260a0..9c02ac6667f 100644
--- a/test/CodeGen/X86/complex-fastmath.ll
+++ b/test/CodeGen/X86/complex-fastmath.ll
@@ -11,7 +11,7 @@
define <2 x float> @complex_square_f32(<2 x float>) #0 {
; SSE-LABEL: complex_square_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: addss %xmm2, %xmm2
@@ -23,7 +23,7 @@ define <2 x float> @complex_square_f32(<2 x float>) #0 {
; SSE-NEXT: retq
;
; AVX1-LABEL: complex_square_f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm2
; AVX1-NEXT: vmulss %xmm2, %xmm1, %xmm2
@@ -34,7 +34,7 @@ define <2 x float> @complex_square_f32(<2 x float>) #0 {
; AVX1-NEXT: retq
;
; FMA-LABEL: complex_square_f32:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; FMA-NEXT: vaddss %xmm0, %xmm0, %xmm2
; FMA-NEXT: vmulss %xmm2, %xmm1, %xmm2
@@ -56,7 +56,7 @@ define <2 x float> @complex_square_f32(<2 x float>) #0 {
define <2 x double> @complex_square_f64(<2 x double>) #0 {
; SSE-LABEL: complex_square_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movaps %xmm0, %xmm2
@@ -69,7 +69,7 @@ define <2 x double> @complex_square_f64(<2 x double>) #0 {
; SSE-NEXT: retq
;
; AVX1-LABEL: complex_square_f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-NEXT: vaddsd %xmm0, %xmm0, %xmm2
; AVX1-NEXT: vmulsd %xmm2, %xmm1, %xmm2
@@ -80,7 +80,7 @@ define <2 x double> @complex_square_f64(<2 x double>) #0 {
; AVX1-NEXT: retq
;
; FMA-LABEL: complex_square_f64:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; FMA-NEXT: vaddsd %xmm0, %xmm0, %xmm2
; FMA-NEXT: vmulsd %xmm2, %xmm1, %xmm2
@@ -106,7 +106,7 @@ define <2 x double> @complex_square_f64(<2 x double>) #0 {
define <2 x float> @complex_mul_f32(<2 x float>, <2 x float>) #0 {
; SSE-LABEL: complex_mul_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE-NEXT: movaps %xmm3, %xmm4
@@ -120,7 +120,7 @@ define <2 x float> @complex_mul_f32(<2 x float>, <2 x float>) #0 {
; SSE-NEXT: retq
;
; AVX1-LABEL: complex_mul_f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX1-NEXT: vmulss %xmm0, %xmm3, %xmm4
@@ -133,7 +133,7 @@ define <2 x float> @complex_mul_f32(<2 x float>, <2 x float>) #0 {
; AVX1-NEXT: retq
;
; FMA-LABEL: complex_mul_f32:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; FMA-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; FMA-NEXT: vmulss %xmm2, %xmm1, %xmm4
@@ -159,7 +159,7 @@ define <2 x float> @complex_mul_f32(<2 x float>, <2 x float>) #0 {
define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
; SSE-LABEL: complex_mul_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movaps %xmm1, %xmm3
@@ -175,7 +175,7 @@ define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
; SSE-NEXT: retq
;
; AVX1-LABEL: complex_mul_f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX1-NEXT: vmulsd %xmm0, %xmm3, %xmm4
@@ -188,7 +188,7 @@ define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
; AVX1-NEXT: retq
;
; FMA-LABEL: complex_mul_f64:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; FMA-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; FMA-NEXT: vmulsd %xmm2, %xmm1, %xmm4
diff --git a/test/CodeGen/X86/compress_expand.ll b/test/CodeGen/X86/compress_expand.ll
index 7456d68b8d9..4e90eccfc62 100644
--- a/test/CodeGen/X86/compress_expand.ll
+++ b/test/CodeGen/X86/compress_expand.ll
@@ -9,14 +9,14 @@ target triple = "x86_64-unknown-linux-gnu"
define <16 x float> @test1(float* %base) {
; SKX-LABEL: test1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
; SKX-NEXT: retq
;
; KNL-LABEL: test1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
@@ -27,14 +27,14 @@ define <16 x float> @test1(float* %base) {
define <16 x float> @test2(float* %base, <16 x float> %src0) {
; SKX-LABEL: test2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movw $30719, %ax # imm = 0x77FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: movw $30719, %ax # imm = 0x77FF
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
@@ -45,14 +45,14 @@ define <16 x float> @test2(float* %base, <16 x float> %src0) {
define <8 x double> @test3(double* %base, <8 x double> %src0, <8 x i1> %mask) {
; SKX-LABEL: test3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test3:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -64,14 +64,14 @@ define <8 x double> @test3(double* %base, <8 x double> %src0, <8 x i1> %mask) {
define <4 x float> @test4(float* %base, <4 x float> %src0) {
; SKX-LABEL: test4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movb $7, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test4:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: movw $7, %ax
; KNL-NEXT: kmovw %eax, %k1
@@ -84,14 +84,14 @@ define <4 x float> @test4(float* %base, <4 x float> %src0) {
define <2 x i64> @test5(i64* %base, <2 x i64> %src0) {
; SKX-LABEL: test5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movb $2, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandq (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test5:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: movb $2, %al
; KNL-NEXT: kmovw %eax, %k1
@@ -109,7 +109,7 @@ declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
define void @test6(float* %base, <16 x float> %V) {
; SKX-LABEL: test6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vcompressps %zmm0, (%rdi) {%k1}
@@ -117,7 +117,7 @@ define void @test6(float* %base, <16 x float> %V) {
; SKX-NEXT: retq
;
; KNL-LABEL: test6:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1}
@@ -128,7 +128,7 @@ define void @test6(float* %base, <16 x float> %V) {
define void @test7(float* %base, <8 x float> %V, <8 x i1> %mask) {
; SKX-LABEL: test7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vcompressps %ymm0, (%rdi) {%k1}
@@ -136,7 +136,7 @@ define void @test7(float* %base, <8 x float> %V, <8 x i1> %mask) {
; SKX-NEXT: retq
;
; KNL-LABEL: test7:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
@@ -149,7 +149,7 @@ define void @test7(float* %base, <8 x float> %V, <8 x i1> %mask) {
define void @test8(double* %base, <8 x double> %V, <8 x i1> %mask) {
; SKX-LABEL: test8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vcompresspd %zmm0, (%rdi) {%k1}
@@ -157,7 +157,7 @@ define void @test8(double* %base, <8 x double> %V, <8 x i1> %mask) {
; SKX-NEXT: retq
;
; KNL-LABEL: test8:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -169,7 +169,7 @@ define void @test8(double* %base, <8 x double> %V, <8 x i1> %mask) {
define void @test9(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
; SKX-LABEL: test9:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpcompressq %zmm0, (%rdi) {%k1}
@@ -177,7 +177,7 @@ define void @test9(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
; SKX-NEXT: retq
;
; KNL-LABEL: test9:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -189,7 +189,7 @@ define void @test9(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
define void @test10(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
; SKX-LABEL: test10:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vpcompressq %ymm0, (%rdi) {%k1}
@@ -197,7 +197,7 @@ define void @test10(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
; SKX-NEXT: retq
;
; KNL-LABEL: test10:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
@@ -213,14 +213,14 @@ define void @test10(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
define void @test11(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
; SKX-LABEL: test11:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX-NEXT: vptestmq %xmm1, %xmm1, %k1
; SKX-NEXT: vpcompressq %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test11:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL-NEXT: vpsraq $63, %zmm1, %zmm1
@@ -235,14 +235,14 @@ define void @test11(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
define void @test12(float* %base, <4 x float> %V, <4 x i1> %mask) {
; SKX-LABEL: test12:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test12:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
@@ -257,7 +257,7 @@ define void @test12(float* %base, <4 x float> %V, <4 x i1> %mask) {
define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
; SKX-LABEL: test13:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
@@ -265,7 +265,7 @@ define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger)
; SKX-NEXT: retq
;
; KNL-LABEL: test13:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -284,7 +284,7 @@ define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger)
define void @test14(float* %base, <2 x float> %V, <2 x i32> %trigger) {
; SKX-LABEL: test14:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
@@ -292,7 +292,7 @@ define void @test14(float* %base, <2 x float> %V, <2 x i32> %trigger) {
; SKX-NEXT: retq
;
; KNL-LABEL: test14:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -310,7 +310,7 @@ define void @test14(float* %base, <2 x float> %V, <2 x i32> %trigger) {
define <32 x float> @test15(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
; ALL-LABEL: test15:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; ALL-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; ALL-NEXT: vpcmpeqd %zmm4, %zmm2, %k2
@@ -326,7 +326,7 @@ define <32 x float> @test15(float* %base, <32 x float> %src0, <32 x i32> %trigge
define <16 x double> @test16(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
; SKX-LABEL: test16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; SKX-NEXT: vpcmpeqd %ymm4, %ymm3, %k1
@@ -338,7 +338,7 @@ define <16 x double> @test16(double* %base, <16 x double> %src0, <16 x i32> %tri
; SKX-NEXT: retq
;
; KNL-LABEL: test16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; KNL-NEXT: vextracti64x4 $1, %zmm2, %ymm4
; KNL-NEXT: vpcmpeqd %zmm3, %zmm4, %k1
@@ -356,7 +356,7 @@ define <16 x double> @test16(double* %base, <16 x double> %src0, <16 x i32> %tri
define void @test17(float* %base, <32 x float> %V, <32 x i32> %trigger) {
; SKX-LABEL: test17:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k2
@@ -368,7 +368,7 @@ define void @test17(float* %base, <32 x float> %V, <32 x i32> %trigger) {
; SKX-NEXT: retq
;
; KNL-LABEL: test17:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; KNL-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
; KNL-NEXT: vpcmpeqd %zmm4, %zmm2, %k2
@@ -384,7 +384,7 @@ define void @test17(float* %base, <32 x float> %V, <32 x i32> %trigger) {
define void @test18(double* %base, <16 x double> %V, <16 x i1> %mask) {
; SKX-LABEL: test18:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
; SKX-NEXT: kshiftrw $8, %k1, %k2
@@ -396,7 +396,7 @@ define void @test18(double* %base, <16 x double> %V, <16 x i1> %mask) {
; SKX-NEXT: retq
;
; KNL-LABEL: test18:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1
diff --git a/test/CodeGen/X86/computeKnownBits_urem.ll b/test/CodeGen/X86/computeKnownBits_urem.ll
index f09370dc2fb..4701ee5e085 100644
--- a/test/CodeGen/X86/computeKnownBits_urem.ll
+++ b/test/CodeGen/X86/computeKnownBits_urem.ll
@@ -4,7 +4,7 @@
define i32 @main() nounwind {
; X86-LABEL: main:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %eax
; X86-NEXT: movl $1, (%esp)
; X86-NEXT: movl $1, %eax
@@ -12,7 +12,7 @@ define i32 @main() nounwind {
; X86-NEXT: retl
;
; X64-LABEL: main:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl $1, -{{[0-9]+}}(%rsp)
; X64-NEXT: movl $1, %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/conditional-indecrement.ll b/test/CodeGen/X86/conditional-indecrement.ll
index f9e18f62697..6a681445bf8 100644
--- a/test/CodeGen/X86/conditional-indecrement.ll
+++ b/test/CodeGen/X86/conditional-indecrement.ll
@@ -3,7 +3,7 @@
define i32 @test1(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: sbbl $-1, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -16,7 +16,7 @@ define i32 @test1(i32 %a, i32 %b) nounwind readnone {
define i32 @test1_commute(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test1_commute:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: sbbl $-1, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -29,7 +29,7 @@ define i32 @test1_commute(i32 %a, i32 %b) nounwind readnone {
define i32 @test2(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: adcl $0, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -42,7 +42,7 @@ define i32 @test2(i32 %a, i32 %b) nounwind readnone {
define i32 @test3(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: adcl $0, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -55,7 +55,7 @@ define i32 @test3(i32 %a, i32 %b) nounwind readnone {
define i32 @test4(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: sbbl $-1, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -68,7 +68,7 @@ define i32 @test4(i32 %a, i32 %b) nounwind readnone {
define i32 @test5(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: adcl $-1, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -81,7 +81,7 @@ define i32 @test5(i32 %a, i32 %b) nounwind readnone {
define i32 @test6(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: sbbl $0, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -94,7 +94,7 @@ define i32 @test6(i32 %a, i32 %b) nounwind readnone {
define i32 @test7(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: sbbl $0, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -107,7 +107,7 @@ define i32 @test7(i32 %a, i32 %b) nounwind readnone {
define i32 @test8(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: adcl $-1, %esi
; CHECK-NEXT: movl %esi, %eax
diff --git a/test/CodeGen/X86/conditional-tailcall-samedest.mir b/test/CodeGen/X86/conditional-tailcall-samedest.mir
index a632ba99743..f975e6b65d4 100644
--- a/test/CodeGen/X86/conditional-tailcall-samedest.mir
+++ b/test/CodeGen/X86/conditional-tailcall-samedest.mir
@@ -8,7 +8,7 @@
# CHECK: body: |
# CHECK: bb.0.entry:
-# CHECK: successors: %bb.1.sw.bb(0x40000000)
+# CHECK: successors: %bb.1(0x40000000)
# CHECK: liveins: %edi
# CHECK: CMP32ri8 killed %edi, 2, implicit-def %eflags
# CHECK: TCRETURNdi64cc @mergeable_conditional_tailcall
@@ -101,27 +101,27 @@ stack:
constants:
body: |
bb.0.entry:
- successors: %bb.2.sw.bb(0x40000000), %bb.1.entry(0x40000000)
+ successors: %bb.2(0x40000000), %bb.1(0x40000000)
liveins: %edi
CMP32ri8 killed %edi, 2, implicit-def %eflags
- JB_1 %bb.2.sw.bb, implicit %eflags
- JMP_1 %bb.1.entry
+ JB_1 %bb.2, implicit %eflags
+ JMP_1 %bb.1
bb.1.entry:
- successors: %bb.4.sw.bb2(0x40000000), %bb.5.sw.epilog(0x40000000)
+ successors: %bb.4(0x40000000), %bb.5(0x40000000)
liveins: %eflags
- JE_1 %bb.4.sw.bb2, implicit killed %eflags
- JMP_1 %bb.5.sw.epilog
+ JE_1 %bb.4, implicit killed %eflags
+ JMP_1 %bb.5
bb.2.sw.bb:
- successors: %bb.3.init.check.i(0x00000800), %bb.6.return(0x7ffff800)
+ successors: %bb.3(0x00000800), %bb.6(0x7ffff800)
%al = ACQUIRE_MOV8rm %rip, 1, %noreg, @static_local_guard, %noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
TEST8rr killed %al, %al, implicit-def %eflags
- JNE_1 %bb.6.return, implicit killed %eflags
- JMP_1 %bb.3.init.check.i
+ JNE_1 %bb.6, implicit killed %eflags
+ JMP_1 %bb.3
bb.3.init.check.i:
dead %edi = MOV32ri64 @static_local_guard, implicit-def %rdi
diff --git a/test/CodeGen/X86/constant-combines.ll b/test/CodeGen/X86/constant-combines.ll
index bcf07093a3c..85741685beb 100644
--- a/test/CodeGen/X86/constant-combines.ll
+++ b/test/CodeGen/X86/constant-combines.ll
@@ -14,7 +14,7 @@ define void @PR22524({ float, float }* %arg) {
; being useful.
;
; CHECK-LABEL: PR22524:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: xorps %xmm1, %xmm1
diff --git a/test/CodeGen/X86/copysign-constant-magnitude.ll b/test/CodeGen/X86/copysign-constant-magnitude.ll
index 8af045914cf..61cb6d0960d 100644
--- a/test/CodeGen/X86/copysign-constant-magnitude.ll
+++ b/test/CodeGen/X86/copysign-constant-magnitude.ll
@@ -11,7 +11,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define double @mag_pos0_double(double %x) nounwind {
; CHECK-LABEL: mag_pos0_double:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: andps [[SIGNMASK1]](%rip), %xmm0
; CHECK-NEXT: retq
;
@@ -24,7 +24,7 @@ define double @mag_pos0_double(double %x) nounwind {
define double @mag_neg0_double(double %x) nounwind {
; CHECK-LABEL: mag_neg0_double:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movsd [[SIGNMASK2]](%rip), %xmm1
; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0,0]
; CHECK-NEXT: andps %xmm1, %xmm0
@@ -42,7 +42,7 @@ define double @mag_neg0_double(double %x) nounwind {
define double @mag_pos1_double(double %x) nounwind {
; CHECK-LABEL: mag_pos1_double:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: andps [[SIGNMASK3]](%rip), %xmm0
; CHECK-NEXT: movsd [[ONE3]](%rip), %xmm1
; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0,0]
@@ -62,7 +62,7 @@ define double @mag_pos1_double(double %x) nounwind {
define double @mag_neg1_double(double %x) nounwind {
; CHECK-LABEL: mag_neg1_double:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: andps [[SIGNMASK4]](%rip), %xmm0
; CHECK-NEXT: orps [[ONE4]](%rip), %xmm0
; CHECK-NEXT: retq
@@ -77,7 +77,7 @@ define double @mag_neg1_double(double %x) nounwind {
define float @mag_pos0_float(float %x) nounwind {
; CHECK-LABEL: mag_pos0_float:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: andps [[SIGNMASK5]](%rip), %xmm0
; CHECK-NEXT: retq
;
@@ -90,7 +90,7 @@ define float @mag_pos0_float(float %x) nounwind {
define float @mag_neg0_float(float %x) nounwind {
; CHECK-LABEL: mag_neg0_float:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movss [[SIGNMASK6]](%rip), %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; CHECK-NEXT: andps %xmm1, %xmm0
@@ -108,7 +108,7 @@ define float @mag_neg0_float(float %x) nounwind {
define float @mag_pos1_float(float %x) nounwind {
; CHECK-LABEL: mag_pos1_float:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: andps [[SIGNMASK7]](%rip), %xmm0
; CHECK-NEXT: movss [[ONE7]](%rip), %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
@@ -130,7 +130,7 @@ define float @mag_pos1_float(float %x) nounwind {
define float @mag_neg1_float(float %x) nounwind {
; CHECK-LABEL: mag_neg1_float:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: andps [[SIGNMASK8]](%rip), %xmm0
; CHECK-NEXT: orps [[ONE8]](%rip), %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/critical-edge-split-2.ll b/test/CodeGen/X86/critical-edge-split-2.ll
index f503d5fc790..39c713ef71b 100644
--- a/test/CodeGen/X86/critical-edge-split-2.ll
+++ b/test/CodeGen/X86/critical-edge-split-2.ll
@@ -10,11 +10,11 @@
; PR8642
define i16 @test1(i1 zeroext %C, i8** nocapture %argv) nounwind ssp {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movw $1, %ax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jne .LBB0_2
-; CHECK-NEXT: # BB#1: # %cond.false.i
+; CHECK-NEXT: # %bb.1: # %cond.false.i
; CHECK-NEXT: movl $g_4, %eax
; CHECK-NEXT: movl $g_2+4, %ecx
; CHECK-NEXT: xorl %esi, %esi
diff --git a/test/CodeGen/X86/ctpop-combine.ll b/test/CodeGen/X86/ctpop-combine.ll
index 955b8a14410..d09f46c164e 100644
--- a/test/CodeGen/X86/ctpop-combine.ll
+++ b/test/CodeGen/X86/ctpop-combine.ll
@@ -6,7 +6,7 @@ declare i64 @llvm.ctpop.i64(i64) nounwind readnone
define i32 @test1(i64 %x) nounwind readnone {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq -1(%rdi), %rcx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testq %rcx, %rdi
@@ -22,7 +22,7 @@ define i32 @test1(i64 %x) nounwind readnone {
define i32 @test2(i64 %x) nounwind readnone {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq -1(%rdi), %rcx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testq %rcx, %rdi
@@ -36,7 +36,7 @@ define i32 @test2(i64 %x) nounwind readnone {
define i32 @test3(i64 %x) nounwind readnone {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: popcntq %rdi, %rcx
; CHECK-NEXT: andb $63, %cl
; CHECK-NEXT: xorl %eax, %eax
@@ -52,7 +52,7 @@ define i32 @test3(i64 %x) nounwind readnone {
define i8 @test4(i8 %x) nounwind readnone {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $127, %edi
; CHECK-NEXT: popcntl %edi, %eax
; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
diff --git a/test/CodeGen/X86/cvtv2f32.ll b/test/CodeGen/X86/cvtv2f32.ll
index 297692f6bd6..556c858759f 100644
--- a/test/CodeGen/X86/cvtv2f32.ll
+++ b/test/CodeGen/X86/cvtv2f32.ll
@@ -7,7 +7,7 @@
define <2 x float> @uitofp_2i32_buildvector(i32 %x, i32 %y, <2 x float> %v) {
; X32-LABEL: uitofp_2i32_buildvector:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: movdqa {{.*#+}} xmm2 = [1258291200,1258291200,1258291200,1258291200]
; X32-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
@@ -19,7 +19,7 @@ define <2 x float> @uitofp_2i32_buildvector(i32 %x, i32 %y, <2 x float> %v) {
; X32-NEXT: retl
;
; X64-LABEL: uitofp_2i32_buildvector:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: pinsrd $1, %esi, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [1258291200,1258291200,1258291200,1258291200]
@@ -40,7 +40,7 @@ define <2 x float> @uitofp_2i32_buildvector(i32 %x, i32 %y, <2 x float> %v) {
define <2 x float> @uitofp_2i32_legalized(<2 x i32> %in, <2 x float> %v) {
; X32-LABEL: uitofp_2i32_legalized:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pxor %xmm2, %xmm2
; X32-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; X32-NEXT: movdqa {{.*#+}} xmm0 = [4.503600e+15,4.503600e+15]
@@ -51,7 +51,7 @@ define <2 x float> @uitofp_2i32_legalized(<2 x i32> %in, <2 x float> %v) {
; X32-NEXT: retl
;
; X64-LABEL: uitofp_2i32_legalized:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pxor %xmm2, %xmm2
; X64-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; X64-NEXT: movdqa {{.*#+}} xmm0 = [4.503600e+15,4.503600e+15]
diff --git a/test/CodeGen/X86/dag-fmf-cse.ll b/test/CodeGen/X86/dag-fmf-cse.ll
index c12c49d0f40..021459eb4bd 100644
--- a/test/CodeGen/X86/dag-fmf-cse.ll
+++ b/test/CodeGen/X86/dag-fmf-cse.ll
@@ -8,7 +8,7 @@
define float @fmf_should_not_break_cse(float %a, float %b) {
; CHECK-LABEL: fmf_should_not_break_cse:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmulss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/dag-merge-fast-accesses.ll b/test/CodeGen/X86/dag-merge-fast-accesses.ll
index e5dfccb278c..662f74f9754 100644
--- a/test/CodeGen/X86/dag-merge-fast-accesses.ll
+++ b/test/CodeGen/X86/dag-merge-fast-accesses.ll
@@ -7,13 +7,13 @@
define void @merge_const_vec_store(i64* %ptr) {
; FAST-LABEL: merge_const_vec_store:
-; FAST: # BB#0:
+; FAST: # %bb.0:
; FAST-NEXT: xorps %xmm0, %xmm0
; FAST-NEXT: movups %xmm0, (%rdi)
; FAST-NEXT: retq
;
; SLOW-LABEL: merge_const_vec_store:
-; SLOW: # BB#0:
+; SLOW: # %bb.0:
; SLOW-NEXT: movq $0, (%rdi)
; SLOW-NEXT: movq $0, 8(%rdi)
; SLOW-NEXT: retq
@@ -29,12 +29,12 @@ define void @merge_const_vec_store(i64* %ptr) {
define void @merge_vec_element_store(<4 x double> %v, double* %ptr) {
; FAST-LABEL: merge_vec_element_store:
-; FAST: # BB#0:
+; FAST: # %bb.0:
; FAST-NEXT: movups %xmm0, (%rdi)
; FAST-NEXT: retq
;
; SLOW-LABEL: merge_vec_element_store:
-; SLOW: # BB#0:
+; SLOW: # %bb.0:
; SLOW-NEXT: movlpd %xmm0, (%rdi)
; SLOW-NEXT: movhpd %xmm0, 8(%rdi)
; SLOW-NEXT: retq
@@ -53,13 +53,13 @@ define void @merge_vec_element_store(<4 x double> %v, double* %ptr) {
define void @merge_vec_load_and_stores(i64 *%ptr) {
; FAST-LABEL: merge_vec_load_and_stores:
-; FAST: # BB#0:
+; FAST: # %bb.0:
; FAST-NEXT: movups (%rdi), %xmm0
; FAST-NEXT: movups %xmm0, 40(%rdi)
; FAST-NEXT: retq
;
; SLOW-LABEL: merge_vec_load_and_stores:
-; SLOW: # BB#0:
+; SLOW: # %bb.0:
; SLOW-NEXT: movq (%rdi), %rax
; SLOW-NEXT: movq 8(%rdi), %rcx
; SLOW-NEXT: movq %rax, 40(%rdi)
diff --git a/test/CodeGen/X86/dagcombine-buildvector.ll b/test/CodeGen/X86/dagcombine-buildvector.ll
index 30b4040582a..59f042f7f26 100644
--- a/test/CodeGen/X86/dagcombine-buildvector.ll
+++ b/test/CodeGen/X86/dagcombine-buildvector.ll
@@ -6,7 +6,7 @@
define void @test(<2 x double>* %dst, <4 x double> %src) nounwind {
; CHECK-LABEL: test:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: movaps %xmm0, (%eax)
@@ -19,7 +19,7 @@ entry:
define void @test2(<4 x i16>* %src, <4 x i32>* %dest) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
diff --git a/test/CodeGen/X86/dagcombine-cse.ll b/test/CodeGen/X86/dagcombine-cse.ll
index c617a8c6cae..043dbcabcbf 100644
--- a/test/CodeGen/X86/dagcombine-cse.ll
+++ b/test/CodeGen/X86/dagcombine-cse.ll
@@ -4,7 +4,7 @@
define i32 @t(i8* %ref_frame_ptr, i32 %ref_frame_stride, i32 %idxX, i32 %idxY) nounwind {
; X32-LABEL: t:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: imull {{[0-9]+}}(%esp), %ecx
@@ -18,7 +18,7 @@ define i32 @t(i8* %ref_frame_ptr, i32 %ref_frame_stride, i32 %idxX, i32 %idxY) n
; X32-NEXT: retl
;
; X64-LABEL: t:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: ## kill: %edx<def> %edx<kill> %rdx<def>
; X64-NEXT: ## kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: imull %ecx, %esi
diff --git a/test/CodeGen/X86/debugloc-no-line-0.ll b/test/CodeGen/X86/debugloc-no-line-0.ll
index 65dfe577d2f..27b72caf360 100644
--- a/test/CodeGen/X86/debugloc-no-line-0.ll
+++ b/test/CodeGen/X86/debugloc-no-line-0.ll
@@ -4,7 +4,7 @@
; annotation, and that the annotation is identical to the one on e.g.,
; the jmp to bb4.
;
-; CHECK: JMP{{.*}}%bb.4.entry, debug-location ![[JUMPLOC:[0-9]+]]
+; CHECK: JMP{{.*}}%bb.4, debug-location ![[JUMPLOC:[0-9]+]]
; CHECK: bb.4.entry:
; CHECK: successors:
; CHECK: JE{{.*}}debug-location ![[JUMPLOC]]
diff --git a/test/CodeGen/X86/div-rem-simplify.ll b/test/CodeGen/X86/div-rem-simplify.ll
index 04cf439dc15..af43df00755 100644
--- a/test/CodeGen/X86/div-rem-simplify.ll
+++ b/test/CodeGen/X86/div-rem-simplify.ll
@@ -5,7 +5,7 @@
define i32 @srem0(i32 %x) {
; CHECK-LABEL: srem0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%rem = srem i32 %x, 0
ret i32 %rem
@@ -13,7 +13,7 @@ define i32 @srem0(i32 %x) {
define i32 @urem0(i32 %x) {
; CHECK-LABEL: urem0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%rem = urem i32 %x, 0
ret i32 %rem
@@ -21,7 +21,7 @@ define i32 @urem0(i32 %x) {
define i32 @sdiv0(i32 %x) {
; CHECK-LABEL: sdiv0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%div = sdiv i32 %x, 0
ret i32 %div
@@ -29,7 +29,7 @@ define i32 @sdiv0(i32 %x) {
define i32 @udiv0(i32 %x) {
; CHECK-LABEL: udiv0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%div = udiv i32 %x, 0
ret i32 %div
@@ -39,7 +39,7 @@ define i32 @udiv0(i32 %x) {
define <4 x i32> @srem_vec0(<4 x i32> %x) {
; CHECK-LABEL: srem_vec0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%rem = srem <4 x i32> %x, zeroinitializer
ret <4 x i32> %rem
@@ -47,7 +47,7 @@ define <4 x i32> @srem_vec0(<4 x i32> %x) {
define <4 x i32> @urem_vec0(<4 x i32> %x) {
; CHECK-LABEL: urem_vec0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%rem = urem <4 x i32> %x, zeroinitializer
ret <4 x i32> %rem
@@ -55,7 +55,7 @@ define <4 x i32> @urem_vec0(<4 x i32> %x) {
define <4 x i32> @sdiv_vec0(<4 x i32> %x) {
; CHECK-LABEL: sdiv_vec0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%div = sdiv <4 x i32> %x, zeroinitializer
ret <4 x i32> %div
@@ -63,7 +63,7 @@ define <4 x i32> @sdiv_vec0(<4 x i32> %x) {
define <4 x i32> @udiv_vec0(<4 x i32> %x) {
; CHECK-LABEL: udiv_vec0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%div = udiv <4 x i32> %x, zeroinitializer
ret <4 x i32> %div
@@ -74,7 +74,7 @@ define <4 x i32> @udiv_vec0(<4 x i32> %x) {
define i32 @sel_urem0(i1 %cond) {
; CHECK-LABEL: sel_urem0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 23, i32 234
%rem = urem i32 %sel, 0
@@ -83,7 +83,7 @@ define i32 @sel_urem0(i1 %cond) {
define i32 @sel_srem0(i1 %cond) {
; CHECK-LABEL: sel_srem0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 23, i32 234
%rem = srem i32 %sel, 0
@@ -92,7 +92,7 @@ define i32 @sel_srem0(i1 %cond) {
define i32 @sel_udiv0(i1 %cond) {
; CHECK-LABEL: sel_udiv0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 23, i32 234
%div = udiv i32 %sel, 0
@@ -101,7 +101,7 @@ define i32 @sel_udiv0(i1 %cond) {
define i32 @sel_sdiv0(i1 %cond) {
; CHECK-LABEL: sel_sdiv0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 23, i32 234
%div = sdiv i32 %sel, 0
@@ -113,7 +113,7 @@ define i32 @sel_sdiv0(i1 %cond) {
define <4 x i32> @sel_urem0_vec(i1 %cond) {
; CHECK-LABEL: sel_urem0_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
%rem = urem <4 x i32> %sel, zeroinitializer
@@ -122,7 +122,7 @@ define <4 x i32> @sel_urem0_vec(i1 %cond) {
define <4 x i32> @sel_srem0_vec(i1 %cond) {
; CHECK-LABEL: sel_srem0_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
%rem = srem <4 x i32> %sel, zeroinitializer
@@ -131,7 +131,7 @@ define <4 x i32> @sel_srem0_vec(i1 %cond) {
define <4 x i32> @sel_udiv0_vec(i1 %cond) {
; CHECK-LABEL: sel_udiv0_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
%div = udiv <4 x i32> %sel, zeroinitializer
@@ -140,7 +140,7 @@ define <4 x i32> @sel_udiv0_vec(i1 %cond) {
define <4 x i32> @sel_sdiv0_vec(i1 %cond) {
; CHECK-LABEL: sel_sdiv0_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
%div = sdiv <4 x i32> %sel, zeroinitializer
@@ -151,7 +151,7 @@ define <4 x i32> @sel_sdiv0_vec(i1 %cond) {
define <4 x i32> @sdiv0elt_vec(<4 x i32> %x) {
; CHECK-LABEL: sdiv0elt_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%zero = and <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
%some_ones = or <4 x i32> %zero, <i32 0, i32 -1, i32 0, i32 3>
@@ -161,7 +161,7 @@ define <4 x i32> @sdiv0elt_vec(<4 x i32> %x) {
define <4 x i32> @udiv0elt_vec(<4 x i32> %x) {
; CHECK-LABEL: udiv0elt_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%div = udiv <4 x i32> <i32 11, i32 12, i32 13, i32 14>, <i32 0, i32 3, i32 4, i32 0>
ret <4 x i32> %div
@@ -169,7 +169,7 @@ define <4 x i32> @udiv0elt_vec(<4 x i32> %x) {
define <4 x i32> @urem0elt_vec(<4 x i32> %x) {
; CHECK-LABEL: urem0elt_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%zero = and <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
%some_ones = or <4 x i32> %zero, <i32 0, i32 0, i32 0, i32 3>
@@ -179,7 +179,7 @@ define <4 x i32> @urem0elt_vec(<4 x i32> %x) {
define <4 x i32> @srem0elt_vec(<4 x i32> %x) {
; CHECK-LABEL: srem0elt_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%rem = srem <4 x i32> <i32 -11, i32 -12, i32 -13, i32 -14>, <i32 -3, i32 -3, i32 0, i32 2>
ret <4 x i32> %rem
diff --git a/test/CodeGen/X86/divide-by-constant.ll b/test/CodeGen/X86/divide-by-constant.ll
index cb95da1b87e..3121f6b2cb0 100644
--- a/test/CodeGen/X86/divide-by-constant.ll
+++ b/test/CodeGen/X86/divide-by-constant.ll
@@ -4,7 +4,7 @@
define zeroext i16 @test1(i16 zeroext %x) nounwind {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $63551, %eax, %eax # imm = 0xF83F
; X32-NEXT: shrl $21, %eax
@@ -12,7 +12,7 @@ define zeroext i16 @test1(i16 zeroext %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: imull $63551, %edi, %eax # imm = 0xF83F
; X64-NEXT: shrl $21, %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -24,7 +24,7 @@ entry:
define zeroext i16 @test2(i8 signext %x, i16 zeroext %c) nounwind readnone ssp noredzone {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $43691, %eax, %eax # imm = 0xAAAB
; X32-NEXT: shrl $17, %eax
@@ -32,7 +32,7 @@ define zeroext i16 @test2(i8 signext %x, i16 zeroext %c) nounwind readnone ssp n
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: imull $43691, %esi, %eax # imm = 0xAAAB
; X64-NEXT: shrl $17, %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -45,7 +45,7 @@ entry:
define zeroext i8 @test3(i8 zeroext %x, i8 zeroext %c) nounwind readnone ssp noredzone {
; X32-LABEL: test3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $171, %eax, %eax
; X32-NEXT: shrl $9, %eax
@@ -54,7 +54,7 @@ define zeroext i8 @test3(i8 zeroext %x, i8 zeroext %c) nounwind readnone ssp nor
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: imull $171, %esi, %eax
; X64-NEXT: shrl $9, %eax
; X64-NEXT: movzwl %ax, %eax
@@ -67,7 +67,7 @@ entry:
define signext i16 @test4(i16 signext %x) nounwind {
; X32-LABEL: test4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movswl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $1986, %eax, %eax # imm = 0x7C2
; X32-NEXT: movl %eax, %ecx
@@ -78,7 +78,7 @@ define signext i16 @test4(i16 signext %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: imull $1986, %edi, %eax # imm = 0x7C2
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shrl $31, %ecx
@@ -93,7 +93,7 @@ entry:
define i32 @test5(i32 %A) nounwind {
; X32-LABEL: test5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $365384439, %eax # imm = 0x15C752F7
; X32-NEXT: mull {{[0-9]+}}(%esp)
; X32-NEXT: shrl $27, %edx
@@ -101,7 +101,7 @@ define i32 @test5(i32 %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: imulq $365384439, %rax, %rax # imm = 0x15C752F7
; X64-NEXT: shrq $59, %rax
@@ -113,7 +113,7 @@ define i32 @test5(i32 %A) nounwind {
define signext i16 @test6(i16 signext %x) nounwind {
; X32-LABEL: test6:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movswl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $26215, %eax, %eax # imm = 0x6667
; X32-NEXT: movl %eax, %ecx
@@ -124,7 +124,7 @@ define signext i16 @test6(i16 signext %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test6:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: imull $26215, %edi, %eax # imm = 0x6667
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shrl $31, %ecx
@@ -139,7 +139,7 @@ entry:
define i32 @test7(i32 %x) nounwind {
; X32-LABEL: test7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl $2, %eax
; X32-NEXT: movl $613566757, %ecx # imm = 0x24924925
@@ -148,7 +148,7 @@ define i32 @test7(i32 %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shrl $2, %edi
; X64-NEXT: imulq $613566757, %rdi, %rax # imm = 0x24924925
@@ -162,7 +162,7 @@ define i32 @test7(i32 %x) nounwind {
; PR13326
define i8 @test8(i8 %x) nounwind {
; X32-LABEL: test8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: shrb %al
; X32-NEXT: movzbl %al, %eax
@@ -173,7 +173,7 @@ define i8 @test8(i8 %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shrb %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: imull $211, %eax, %eax
@@ -187,7 +187,7 @@ define i8 @test8(i8 %x) nounwind {
define i8 @test9(i8 %x) nounwind {
; X32-LABEL: test9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: shrb $2, %al
; X32-NEXT: movzbl %al, %eax
@@ -198,7 +198,7 @@ define i8 @test9(i8 %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shrb $2, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: imull $71, %eax, %eax
@@ -212,7 +212,7 @@ define i8 @test9(i8 %x) nounwind {
define i32 @testsize1(i32 %x) minsize nounwind {
; X32-LABEL: testsize1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $32
; X32-NEXT: popl %ecx
@@ -221,7 +221,7 @@ define i32 @testsize1(i32 %x) minsize nounwind {
; X32-NEXT: retl
;
; X64-LABEL: testsize1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pushq $32
; X64-NEXT: popq %rcx
; X64-NEXT: movl %edi, %eax
@@ -235,7 +235,7 @@ entry:
define i32 @testsize2(i32 %x) minsize nounwind {
; X32-LABEL: testsize2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $33
; X32-NEXT: popl %ecx
@@ -244,7 +244,7 @@ define i32 @testsize2(i32 %x) minsize nounwind {
; X32-NEXT: retl
;
; X64-LABEL: testsize2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pushq $33
; X64-NEXT: popq %rcx
; X64-NEXT: movl %edi, %eax
@@ -258,13 +258,13 @@ entry:
define i32 @testsize3(i32 %x) minsize nounwind {
; X32-LABEL: testsize3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl $5, %eax
; X32-NEXT: retl
;
; X64-LABEL: testsize3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: shrl $5, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -275,7 +275,7 @@ entry:
define i32 @testsize4(i32 %x) minsize nounwind {
; X32-LABEL: testsize4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $33
; X32-NEXT: popl %ecx
@@ -284,7 +284,7 @@ define i32 @testsize4(i32 %x) minsize nounwind {
; X32-NEXT: retl
;
; X64-LABEL: testsize4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pushq $33
; X64-NEXT: popq %rcx
; X64-NEXT: xorl %edx, %edx
@@ -298,7 +298,7 @@ entry:
define i64 @PR23590(i64 %x) nounwind {
; X32-LABEL: PR23590:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: subl $12, %esp
; X32-NEXT: pushl $0
; X32-NEXT: pushl $12345 # imm = 0x3039
@@ -315,7 +315,7 @@ define i64 @PR23590(i64 %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: PR23590:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdi, %rcx
; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
; X64-NEXT: movq %rcx, %rax
diff --git a/test/CodeGen/X86/divrem.ll b/test/CodeGen/X86/divrem.ll
index cbd6f7adae7..a43d3e51745 100644
--- a/test/CodeGen/X86/divrem.ll
+++ b/test/CodeGen/X86/divrem.ll
@@ -4,7 +4,7 @@
define void @si64(i64 %x, i64 %y, i64* %p, i64* %q) nounwind {
; X32-LABEL: si64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
@@ -38,7 +38,7 @@ define void @si64(i64 %x, i64 %y, i64* %p, i64* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: cqto
@@ -55,7 +55,7 @@ define void @si64(i64 %x, i64 %y, i64* %p, i64* %q) nounwind {
define void @si32(i32 %x, i32 %y, i32* %p, i32* %q) nounwind {
; X32-LABEL: si32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -68,7 +68,7 @@ define void @si32(i32 %x, i32 %y, i32* %p, i32* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: si32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cltd
@@ -85,7 +85,7 @@ define void @si32(i32 %x, i32 %y, i32* %p, i32* %q) nounwind {
define void @si16(i16 %x, i16 %y, i16* %p, i16* %q) nounwind {
; X32-LABEL: si16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -98,7 +98,7 @@ define void @si16(i16 %x, i16 %y, i16* %p, i16* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: si16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cwtd
@@ -115,7 +115,7 @@ define void @si16(i16 %x, i16 %y, i16* %p, i16* %q) nounwind {
define void @si8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
; X32-LABEL: si8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -129,7 +129,7 @@ define void @si8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: si8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
@@ -146,7 +146,7 @@ define void @si8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
define void @ui64(i64 %x, i64 %y, i64* %p, i64* %q) nounwind {
; X32-LABEL: ui64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
@@ -180,7 +180,7 @@ define void @ui64(i64 %x, i64 %y, i64* %p, i64* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ui64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: movq %rdi, %rax
@@ -197,7 +197,7 @@ define void @ui64(i64 %x, i64 %y, i64* %p, i64* %q) nounwind {
define void @ui32(i32 %x, i32 %y, i32* %p, i32* %q) nounwind {
; X32-LABEL: ui32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -210,7 +210,7 @@ define void @ui32(i32 %x, i32 %y, i32* %p, i32* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ui32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: movl %edi, %eax
@@ -227,7 +227,7 @@ define void @ui32(i32 %x, i32 %y, i32* %p, i32* %q) nounwind {
define void @ui16(i16 %x, i16 %y, i16* %p, i16* %q) nounwind {
; X32-LABEL: ui16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -240,7 +240,7 @@ define void @ui16(i16 %x, i16 %y, i16* %p, i16* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ui16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: movl %edi, %eax
@@ -257,7 +257,7 @@ define void @ui16(i16 %x, i16 %y, i16* %p, i16* %q) nounwind {
define void @ui8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
; X32-LABEL: ui8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -271,7 +271,7 @@ define void @ui8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ui8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
diff --git a/test/CodeGen/X86/divrem8_ext.ll b/test/CodeGen/X86/divrem8_ext.ll
index 70a5ca83da2..eaa22c1a77c 100644
--- a/test/CodeGen/X86/divrem8_ext.ll
+++ b/test/CodeGen/X86/divrem8_ext.ll
@@ -4,7 +4,7 @@
define zeroext i8 @test_udivrem_zext_ah(i8 %x, i8 %y) {
; X32-LABEL: test_udivrem_zext_ah:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
@@ -14,7 +14,7 @@ define zeroext i8 @test_udivrem_zext_ah(i8 %x, i8 %y) {
; X32-NEXT: retl
;
; X64-LABEL: test_udivrem_zext_ah:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
@@ -30,7 +30,7 @@ define zeroext i8 @test_udivrem_zext_ah(i8 %x, i8 %y) {
define zeroext i8 @test_urem_zext_ah(i8 %x, i8 %y) {
; X32-LABEL: test_urem_zext_ah:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
@@ -39,7 +39,7 @@ define zeroext i8 @test_urem_zext_ah(i8 %x, i8 %y) {
; X32-NEXT: retl
;
; X64-LABEL: test_urem_zext_ah:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
@@ -52,7 +52,7 @@ define zeroext i8 @test_urem_zext_ah(i8 %x, i8 %y) {
define i8 @test_urem_noext_ah(i8 %x, i8 %y) {
; X32-LABEL: test_urem_noext_ah:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
@@ -63,7 +63,7 @@ define i8 @test_urem_noext_ah(i8 %x, i8 %y) {
; X32-NEXT: retl
;
; X64-LABEL: test_urem_noext_ah:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
@@ -78,7 +78,7 @@ define i8 @test_urem_noext_ah(i8 %x, i8 %y) {
define i64 @test_urem_zext64_ah(i8 %x, i8 %y) {
; X32-LABEL: test_urem_zext64_ah:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
@@ -87,7 +87,7 @@ define i64 @test_urem_zext64_ah(i8 %x, i8 %y) {
; X32-NEXT: retl
;
; X64-LABEL: test_urem_zext64_ah:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
@@ -100,7 +100,7 @@ define i64 @test_urem_zext64_ah(i8 %x, i8 %y) {
define signext i8 @test_sdivrem_sext_ah(i8 %x, i8 %y) {
; X32-LABEL: test_sdivrem_sext_ah:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
@@ -110,7 +110,7 @@ define signext i8 @test_sdivrem_sext_ah(i8 %x, i8 %y) {
; X32-NEXT: retl
;
; X64-LABEL: test_sdivrem_sext_ah:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
@@ -126,7 +126,7 @@ define signext i8 @test_sdivrem_sext_ah(i8 %x, i8 %y) {
define signext i8 @test_srem_sext_ah(i8 %x, i8 %y) {
; X32-LABEL: test_srem_sext_ah:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
@@ -135,7 +135,7 @@ define signext i8 @test_srem_sext_ah(i8 %x, i8 %y) {
; X32-NEXT: retl
;
; X64-LABEL: test_srem_sext_ah:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
@@ -148,7 +148,7 @@ define signext i8 @test_srem_sext_ah(i8 %x, i8 %y) {
define i8 @test_srem_noext_ah(i8 %x, i8 %y) {
; X32-LABEL: test_srem_noext_ah:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: cbtw
@@ -159,7 +159,7 @@ define i8 @test_srem_noext_ah(i8 %x, i8 %y) {
; X32-NEXT: retl
;
; X64-LABEL: test_srem_noext_ah:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
@@ -174,7 +174,7 @@ define i8 @test_srem_noext_ah(i8 %x, i8 %y) {
define i64 @test_srem_sext64_ah(i8 %x, i8 %y) {
; X32-LABEL: test_srem_sext64_ah:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
@@ -184,7 +184,7 @@ define i64 @test_srem_sext64_ah(i8 %x, i8 %y) {
; X32-NEXT: retl
;
; X64-LABEL: test_srem_sext64_ah:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
@@ -198,7 +198,7 @@ define i64 @test_srem_sext64_ah(i8 %x, i8 %y) {
define i64 @pr25754(i8 %a, i8 %c) {
; X32-LABEL: pr25754:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
@@ -209,7 +209,7 @@ define i64 @pr25754(i8 %a, i8 %c) {
; X32-NEXT: retl
;
; X64-LABEL: pr25754:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
diff --git a/test/CodeGen/X86/domain-reassignment.mir b/test/CodeGen/X86/domain-reassignment.mir
index 98ac2fd495f..3cb4b5dd139 100644
--- a/test/CodeGen/X86/domain-reassignment.mir
+++ b/test/CodeGen/X86/domain-reassignment.mir
@@ -110,7 +110,7 @@ stack:
constants:
body: |
bb.0.entry:
- successors: %bb.1.if(0x40000000), %bb.2.else(0x40000000)
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
liveins: %edi, %rsi, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
%10 = COPY %xmm5
@@ -123,11 +123,11 @@ body: |
%3 = COPY %edi
%11 = COPY %3.sub_8bit
TEST8ri killed %11, 1, implicit-def %eflags
- JE_1 %bb.2.else, implicit %eflags
- JMP_1 %bb.1.if
+ JE_1 %bb.2, implicit %eflags
+ JMP_1 %bb.1
bb.1.if:
- successors: %bb.3.exit(0x80000000)
+ successors: %bb.3(0x80000000)
%14 = VCMPSSZrr %7, %8, 0
@@ -137,10 +137,10 @@ body: |
%15 = COPY %14
%0 = COPY %15.sub_8bit
- JMP_1 %bb.3.exit
+ JMP_1 %bb.3
bb.2.else:
- successors: %bb.3.exit(0x80000000)
+ successors: %bb.3(0x80000000)
%12 = VCMPSSZrr %9, %10, 0
; check that cross domain copies are replaced with same domain copies.
@@ -153,11 +153,11 @@ body: |
bb.3.exit:
; check PHI, IMPLICIT_DEF, and INSERT_SUBREG replacers.
- ; CHECK: %2:vk8 = PHI %1, %bb.2.else, %0, %bb.1.if
+ ; CHECK: %2:vk8 = PHI %1, %bb.2, %0, %bb.1
; CHECK: %16:vk32 = COPY %2
; CHECK: %18:vk1wm = COPY %16
- %2 = PHI %1, %bb.2.else, %0, %bb.1.if
+ %2 = PHI %1, %bb.2, %0, %bb.1
%17 = IMPLICIT_DEF
%16 = INSERT_SUBREG %17, %2, 1
%18 = COPY %16
diff --git a/test/CodeGen/X86/exedeps-movq.ll b/test/CodeGen/X86/exedeps-movq.ll
index c1c60981edf..cc56be672db 100644
--- a/test/CodeGen/X86/exedeps-movq.ll
+++ b/test/CodeGen/X86/exedeps-movq.ll
@@ -12,13 +12,13 @@
define void @store_floats(<4 x float> %x, i64* %p) {
; SSE-LABEL: store_floats:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addps %xmm0, %xmm0
; SSE-NEXT: movlps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: store_floats:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovlps %xmm0, (%rdi)
; AVX-NEXT: retq
@@ -31,13 +31,13 @@ define void @store_floats(<4 x float> %x, i64* %p) {
define void @store_double(<2 x double> %x, i64* %p) {
; SSE-LABEL: store_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm0, %xmm0
; SSE-NEXT: movlpd %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: store_double:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovlpd %xmm0, (%rdi)
; AVX-NEXT: retq
@@ -50,13 +50,13 @@ define void @store_double(<2 x double> %x, i64* %p) {
define void @store_int(<4 x i32> %x, <2 x float>* %p) {
; SSE-LABEL: store_int:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: movq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: store_int:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, (%rdi)
; AVX-NEXT: retq
@@ -69,13 +69,13 @@ define void @store_int(<4 x i32> %x, <2 x float>* %p) {
define void @store_h_double(<2 x double> %x, i64* %p) {
; SSE-LABEL: store_h_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm0, %xmm0
; SSE-NEXT: movhpd %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: store_h_double:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovhpd %xmm0, (%rdi)
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/exedepsfix-broadcast.ll b/test/CodeGen/X86/exedepsfix-broadcast.ll
index e67bb0f9b7a..2fcbdd39f4a 100644
--- a/test/CodeGen/X86/exedepsfix-broadcast.ll
+++ b/test/CodeGen/X86/exedepsfix-broadcast.ll
@@ -6,7 +6,7 @@
define <4 x float> @ExeDepsFix_broadcastss(<4 x float> %arg, <4 x float> %arg2) {
; CHECK-LABEL: ExeDepsFix_broadcastss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; CHECK-NEXT: vandps %xmm2, %xmm0, %xmm0
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0
@@ -21,7 +21,7 @@ define <4 x float> @ExeDepsFix_broadcastss(<4 x float> %arg, <4 x float> %arg2)
define <8 x float> @ExeDepsFix_broadcastss256(<8 x float> %arg, <8 x float> %arg2) {
; CHECK-LABEL: ExeDepsFix_broadcastss256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %ymm2
; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0
@@ -36,7 +36,7 @@ define <8 x float> @ExeDepsFix_broadcastss256(<8 x float> %arg, <8 x float> %arg
define <4 x float> @ExeDepsFix_broadcastss_inreg(<4 x float> %arg, <4 x float> %arg2, i32 %broadcastvalue) {
; CHECK-LABEL: ExeDepsFix_broadcastss_inreg:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovd %edi, %xmm2
; CHECK-NEXT: vpbroadcastd %xmm2, %xmm2
; CHECK-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -54,7 +54,7 @@ define <4 x float> @ExeDepsFix_broadcastss_inreg(<4 x float> %arg, <4 x float> %
define <8 x float> @ExeDepsFix_broadcastss256_inreg(<8 x float> %arg, <8 x float> %arg2, i32 %broadcastvalue) {
; CHECK-LABEL: ExeDepsFix_broadcastss256_inreg:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovd %edi, %xmm2
; CHECK-NEXT: vpbroadcastd %xmm2, %ymm2
; CHECK-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -73,7 +73,7 @@ define <8 x float> @ExeDepsFix_broadcastss256_inreg(<8 x float> %arg, <8 x float
; In that case the broadcast is directly folded into vandpd.
define <2 x double> @ExeDepsFix_broadcastsd(<2 x double> %arg, <2 x double> %arg2) {
; CHECK-LABEL: ExeDepsFix_broadcastsd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandpd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vmaxpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -87,7 +87,7 @@ define <2 x double> @ExeDepsFix_broadcastsd(<2 x double> %arg, <2 x double> %arg
define <4 x double> @ExeDepsFix_broadcastsd256(<4 x double> %arg, <4 x double> %arg2) {
; CHECK-LABEL: ExeDepsFix_broadcastsd256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastsd {{.*}}(%rip), %ymm2
; CHECK-NEXT: vandpd %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
@@ -104,7 +104,7 @@ define <4 x double> @ExeDepsFix_broadcastsd256(<4 x double> %arg, <4 x double> %
; vpand and there is nothing more you can do to match vmaxpd.
define <2 x double> @ExeDepsFix_broadcastsd_inreg(<2 x double> %arg, <2 x double> %arg2, i64 %broadcastvalue) {
; CHECK-LABEL: ExeDepsFix_broadcastsd_inreg:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovq %rdi, %xmm2
; CHECK-NEXT: vpbroadcastq %xmm2, %xmm2
; CHECK-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -122,7 +122,7 @@ define <2 x double> @ExeDepsFix_broadcastsd_inreg(<2 x double> %arg, <2 x double
define <4 x double> @ExeDepsFix_broadcastsd256_inreg(<4 x double> %arg, <4 x double> %arg2, i64 %broadcastvalue) {
; CHECK-LABEL: ExeDepsFix_broadcastsd256_inreg:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmovq %rdi, %xmm2
; CHECK-NEXT: vpbroadcastq %xmm2, %ymm2
; CHECK-NEXT: vpand %ymm2, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/extract-store.ll b/test/CodeGen/X86/extract-store.ll
index 225d2e9a107..0601c773fa5 100644
--- a/test/CodeGen/X86/extract-store.ll
+++ b/test/CodeGen/X86/extract-store.ll
@@ -10,42 +10,42 @@
define void @extract_i8_0(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X32-LABEL: extract_i8_0:
-; SSE2-X32: # BB#0:
+; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: movd %xmm0, %ecx
; SSE2-X32-NEXT: movb %cl, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_0:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: movb %al, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i8_0:
-; SSE41-X32: # BB#0:
+; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrb $0, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_0:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $0, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i8_0:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrb $0, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_0:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $0, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_i8_0:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: movd %xmm0, %eax
; SSE-F128-NEXT: movb %al, (%rdi)
; SSE-F128-NEXT: retq
@@ -56,7 +56,7 @@ define void @extract_i8_0(i8* nocapture %dst, <16 x i8> %foo) nounwind {
define void @extract_i8_3(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X32-LABEL: extract_i8_3:
-; SSE2-X32: # BB#0:
+; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: movd %xmm0, %ecx
; SSE2-X32-NEXT: shrl $24, %ecx
@@ -64,36 +64,36 @@ define void @extract_i8_3(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_3:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: shrl $24, %eax
; SSE2-X64-NEXT: movb %al, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i8_3:
-; SSE41-X32: # BB#0:
+; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrb $3, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_3:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i8_3:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrb $3, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_3:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_i8_3:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: movd %xmm0, %eax
; SSE-F128-NEXT: shrl $24, %eax
; SSE-F128-NEXT: movb %al, (%rdi)
@@ -105,42 +105,42 @@ define void @extract_i8_3(i8* nocapture %dst, <16 x i8> %foo) nounwind {
define void @extract_i8_15(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; SSE2-X32-LABEL: extract_i8_15:
-; SSE2-X32: # BB#0:
+; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: pextrw $7, %xmm0, %ecx
; SSE2-X32-NEXT: movb %ch, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i8_15:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
; SSE2-X64-NEXT: movb %ah, (%rdi) # NOREX
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i8_15:
-; SSE41-X32: # BB#0:
+; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrb $15, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i8_15:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrb $15, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i8_15:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrb $15, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i8_15:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrb $15, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_i8_15:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: pextrw $7, %xmm0, %eax
; SSE-F128-NEXT: movb %ah, (%rdi) # NOREX
; SSE-F128-NEXT: retq
@@ -151,42 +151,42 @@ define void @extract_i8_15(i8* nocapture %dst, <16 x i8> %foo) nounwind {
define void @extract_i16_0(i16* nocapture %dst, <8 x i16> %foo) nounwind {
; SSE2-X32-LABEL: extract_i16_0:
-; SSE2-X32: # BB#0:
+; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: movd %xmm0, %ecx
; SSE2-X32-NEXT: movw %cx, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i16_0:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movd %xmm0, %eax
; SSE2-X64-NEXT: movw %ax, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i16_0:
-; SSE41-X32: # BB#0:
+; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrw $0, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i16_0:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrw $0, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i16_0:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrw $0, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i16_0:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrw $0, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_i16_0:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: movd %xmm0, %eax
; SSE-F128-NEXT: movw %ax, (%rdi)
; SSE-F128-NEXT: retq
@@ -197,42 +197,42 @@ define void @extract_i16_0(i16* nocapture %dst, <8 x i16> %foo) nounwind {
define void @extract_i16_7(i16* nocapture %dst, <8 x i16> %foo) nounwind {
; SSE2-X32-LABEL: extract_i16_7:
-; SSE2-X32: # BB#0:
+; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: pextrw $7, %xmm0, %ecx
; SSE2-X32-NEXT: movw %cx, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i16_7:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
; SSE2-X64-NEXT: movw %ax, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i16_7:
-; SSE41-X32: # BB#0:
+; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: pextrw $7, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i16_7:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrw $7, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i16_7:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpextrw $7, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i16_7:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrw $7, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_i16_7:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: pextrw $7, %xmm0, %eax
; SSE-F128-NEXT: movw %ax, (%rdi)
; SSE-F128-NEXT: retq
@@ -243,24 +243,24 @@ define void @extract_i16_7(i16* nocapture %dst, <8 x i16> %foo) nounwind {
define void @extract_i32_0(i32* nocapture %dst, <4 x i32> %foo) nounwind {
; SSE-X32-LABEL: extract_i32_0:
-; SSE-X32: # BB#0:
+; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movss %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_i32_0:
-; SSE-X64: # BB#0:
+; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movss %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i32_0:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovss %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i32_0:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <4 x i32> %foo, i32 0
@@ -270,42 +270,42 @@ define void @extract_i32_0(i32* nocapture %dst, <4 x i32> %foo) nounwind {
define void @extract_i32_3(i32* nocapture %dst, <4 x i32> %foo) nounwind {
; SSE2-X32-LABEL: extract_i32_3:
-; SSE2-X32: # BB#0:
+; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-X32-NEXT: movd %xmm0, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i32_3:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-X64-NEXT: movd %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i32_3:
-; SSE41-X32: # BB#0:
+; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: extractps $3, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_i32_3:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: extractps $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i32_3:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vextractps $3, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i32_3:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vextractps $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_i32_3:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-F128-NEXT: movd %xmm0, (%rdi)
; SSE-F128-NEXT: retq
@@ -316,24 +316,24 @@ define void @extract_i32_3(i32* nocapture %dst, <4 x i32> %foo) nounwind {
define void @extract_i64_0(i64* nocapture %dst, <2 x i64> %foo) nounwind {
; SSE-X32-LABEL: extract_i64_0:
-; SSE-X32: # BB#0:
+; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movlps %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_i64_0:
-; SSE-X64: # BB#0:
+; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movlps %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i64_0:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i64_0:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x i64> %foo, i32 0
@@ -343,37 +343,37 @@ define void @extract_i64_0(i64* nocapture %dst, <2 x i64> %foo) nounwind {
define void @extract_i64_1(i64* nocapture %dst, <2 x i64> %foo) nounwind {
; SSE-X32-LABEL: extract_i64_1:
-; SSE-X32: # BB#0:
+; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-X32-NEXT: movq %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_i64_1:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-X64-NEXT: movq %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X64-LABEL: extract_i64_1:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pextrq $1, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_i64_1:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_i64_1:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vpextrq $1, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_i64_1:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-F128-NEXT: movq %xmm0, (%rdi)
; SSE-F128-NEXT: retq
@@ -384,24 +384,24 @@ define void @extract_i64_1(i64* nocapture %dst, <2 x i64> %foo) nounwind {
define void @extract_f32_0(float* nocapture %dst, <4 x float> %foo) nounwind {
; SSE-X32-LABEL: extract_f32_0:
-; SSE-X32: # BB#0:
+; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movss %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_f32_0:
-; SSE-X64: # BB#0:
+; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movss %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f32_0:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovss %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f32_0:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <4 x float> %foo, i32 0
@@ -411,42 +411,42 @@ define void @extract_f32_0(float* nocapture %dst, <4 x float> %foo) nounwind {
define void @extract_f32_3(float* nocapture %dst, <4 x float> %foo) nounwind {
; SSE2-X32-LABEL: extract_f32_3:
-; SSE2-X32: # BB#0:
+; SSE2-X32: # %bb.0:
; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-X32-NEXT: movss %xmm0, (%eax)
; SSE2-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_f32_3:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-X64-NEXT: movss %xmm0, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_f32_3:
-; SSE41-X32: # BB#0:
+; SSE41-X32: # %bb.0:
; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE41-X32-NEXT: extractps $3, %xmm0, (%eax)
; SSE41-X32-NEXT: retl
;
; SSE41-X64-LABEL: extract_f32_3:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: extractps $3, %xmm0, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f32_3:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vextractps $3, %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f32_3:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vextractps $3, %xmm0, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_f32_3:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-F128-NEXT: movss %xmm0, (%rdi)
; SSE-F128-NEXT: retq
@@ -457,24 +457,24 @@ define void @extract_f32_3(float* nocapture %dst, <4 x float> %foo) nounwind {
define void @extract_f64_0(double* nocapture %dst, <2 x double> %foo) nounwind {
; SSE-X32-LABEL: extract_f64_0:
-; SSE-X32: # BB#0:
+; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movlps %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_f64_0:
-; SSE-X64: # BB#0:
+; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movlps %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f64_0:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f64_0:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x double> %foo, i32 0
@@ -484,24 +484,24 @@ define void @extract_f64_0(double* nocapture %dst, <2 x double> %foo) nounwind {
define void @extract_f64_1(double* nocapture %dst, <2 x double> %foo) nounwind {
; SSE-X32-LABEL: extract_f64_1:
-; SSE-X32: # BB#0:
+; SSE-X32: # %bb.0:
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-X32-NEXT: movhpd %xmm0, (%eax)
; SSE-X32-NEXT: retl
;
; SSE-X64-LABEL: extract_f64_1:
-; SSE-X64: # BB#0:
+; SSE-X64: # %bb.0:
; SSE-X64-NEXT: movhpd %xmm0, (%rdi)
; SSE-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f64_1:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovhpd %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f64_1:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: vmovhpd %xmm0, (%rdi)
; AVX-X64-NEXT: retq
%vecext = extractelement <2 x double> %foo, i32 1
@@ -511,7 +511,7 @@ define void @extract_f64_1(double* nocapture %dst, <2 x double> %foo) nounwind {
define void @extract_f128_0(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
; SSE-X32-LABEL: extract_f128_0:
-; SSE-X32: # BB#0:
+; SSE-X32: # %bb.0:
; SSE-X32-NEXT: pushl %edi
; SSE-X32-NEXT: pushl %esi
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -528,32 +528,32 @@ define void @extract_f128_0(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
; SSE-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_f128_0:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movq %rdx, 8(%rdi)
; SSE2-X64-NEXT: movq %rsi, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X64-LABEL: extract_f128_0:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: movq %rdx, 8(%rdi)
; SSE41-X64-NEXT: movq %rsi, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f128_0:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovups %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f128_0:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: movq %rdx, 8(%rdi)
; AVX-X64-NEXT: movq %rsi, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_f128_0:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: movaps %xmm0, (%rdi)
; SSE-F128-NEXT: retq
%vecext = extractelement <2 x fp128> %foo, i32 0
@@ -563,7 +563,7 @@ define void @extract_f128_0(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
define void @extract_f128_1(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
; SSE-X32-LABEL: extract_f128_1:
-; SSE-X32: # BB#0:
+; SSE-X32: # %bb.0:
; SSE-X32-NEXT: pushl %edi
; SSE-X32-NEXT: pushl %esi
; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -580,32 +580,32 @@ define void @extract_f128_1(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
; SSE-X32-NEXT: retl
;
; SSE2-X64-LABEL: extract_f128_1:
-; SSE2-X64: # BB#0:
+; SSE2-X64: # %bb.0:
; SSE2-X64-NEXT: movq %r8, 8(%rdi)
; SSE2-X64-NEXT: movq %rcx, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X64-LABEL: extract_f128_1:
-; SSE41-X64: # BB#0:
+; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: movq %r8, 8(%rdi)
; SSE41-X64-NEXT: movq %rcx, (%rdi)
; SSE41-X64-NEXT: retq
;
; AVX-X32-LABEL: extract_f128_1:
-; AVX-X32: # BB#0:
+; AVX-X32: # %bb.0:
; AVX-X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-X32-NEXT: vmovups %xmm0, (%eax)
; AVX-X32-NEXT: retl
;
; AVX-X64-LABEL: extract_f128_1:
-; AVX-X64: # BB#0:
+; AVX-X64: # %bb.0:
; AVX-X64-NEXT: movq %r8, 8(%rdi)
; AVX-X64-NEXT: movq %rcx, (%rdi)
; AVX-X64-NEXT: retq
;
; SSE-F128-LABEL: extract_f128_1:
-; SSE-F128: # BB#0:
+; SSE-F128: # %bb.0:
; SSE-F128-NEXT: movaps %xmm1, (%rdi)
; SSE-F128-NEXT: retq
%vecext = extractelement <2 x fp128> %foo, i32 1
@@ -615,11 +615,11 @@ define void @extract_f128_1(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
define void @extract_i8_undef(i8* nocapture %dst, <16 x i8> %foo) nounwind {
; X32-LABEL: extract_i8_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_i8_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 16 ; undef
store i8 %vecext, i8* %dst, align 1
@@ -628,11 +628,11 @@ define void @extract_i8_undef(i8* nocapture %dst, <16 x i8> %foo) nounwind {
define void @extract_i16_undef(i16* nocapture %dst, <8 x i16> %foo) nounwind {
; X32-LABEL: extract_i16_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_i16_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <8 x i16> %foo, i32 9 ; undef
store i16 %vecext, i16* %dst, align 1
@@ -641,11 +641,11 @@ define void @extract_i16_undef(i16* nocapture %dst, <8 x i16> %foo) nounwind {
define void @extract_i32_undef(i32* nocapture %dst, <4 x i32> %foo) nounwind {
; X32-LABEL: extract_i32_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_i32_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %foo, i32 6 ; undef
store i32 %vecext, i32* %dst, align 1
@@ -654,11 +654,11 @@ define void @extract_i32_undef(i32* nocapture %dst, <4 x i32> %foo) nounwind {
define void @extract_i64_undef(i64* nocapture %dst, <2 x i64> %foo) nounwind {
; X32-LABEL: extract_i64_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_i64_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <2 x i64> %foo, i32 2 ; undef
store i64 %vecext, i64* %dst, align 1
@@ -667,11 +667,11 @@ define void @extract_i64_undef(i64* nocapture %dst, <2 x i64> %foo) nounwind {
define void @extract_f32_undef(float* nocapture %dst, <4 x float> %foo) nounwind {
; X32-LABEL: extract_f32_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_f32_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <4 x float> %foo, i32 6 ; undef
store float %vecext, float* %dst, align 1
@@ -680,11 +680,11 @@ define void @extract_f32_undef(float* nocapture %dst, <4 x float> %foo) nounwind
define void @extract_f64_undef(double* nocapture %dst, <2 x double> %foo) nounwind {
; X32-LABEL: extract_f64_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_f64_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <2 x double> %foo, i32 2 ; undef
store double %vecext, double* %dst, align 1
@@ -693,11 +693,11 @@ define void @extract_f64_undef(double* nocapture %dst, <2 x double> %foo) nounwi
define void @extract_f128_undef(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
; X32-LABEL: extract_f128_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: extract_f128_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%vecext = extractelement <2 x fp128> %foo, i32 2 ; undef
store fp128 %vecext, fp128* %dst, align 1
diff --git a/test/CodeGen/X86/extractelement-index.ll b/test/CodeGen/X86/extractelement-index.ll
index 14762f38c42..3a8e3b356b0 100644
--- a/test/CodeGen/X86/extractelement-index.ll
+++ b/test/CodeGen/X86/extractelement-index.ll
@@ -10,20 +10,20 @@
define i8 @extractelement_v16i8_1(<16 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v16i8_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm0, %eax
; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: retq
@@ -33,20 +33,20 @@ define i8 @extractelement_v16i8_1(<16 x i8> %a) nounwind {
define i8 @extractelement_v16i8_11(<16 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v16i8_11:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $5, %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_11:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $11, %xmm0, %eax
; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_11:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrb $11, %xmm0, %eax
; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: retq
@@ -56,19 +56,19 @@ define i8 @extractelement_v16i8_11(<16 x i8> %a) nounwind {
define i8 @extractelement_v16i8_14(<16 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v16i8_14:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $7, %xmm0, %eax
; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_14:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $14, %xmm0, %eax
; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrb $14, %xmm0, %eax
; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: retq
@@ -78,20 +78,20 @@ define i8 @extractelement_v16i8_14(<16 x i8> %a) nounwind {
define i8 @extractelement_v32i8_1(<32 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v32i8_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm0, %eax
; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v32i8_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: vzeroupper
@@ -102,20 +102,20 @@ define i8 @extractelement_v32i8_1(<32 x i8> %a) nounwind {
define i8 @extractelement_v32i8_17(<32 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v32i8_17:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: shrl $8, %eax
; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_17:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm1, %eax
; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX1-LABEL: extractelement_v32i8_17:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrb $1, %xmm0, %eax
; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -123,7 +123,7 @@ define i8 @extractelement_v32i8_17(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: extractelement_v32i8_17:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrb $1, %xmm0, %eax
; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -135,13 +135,13 @@ define i8 @extractelement_v32i8_17(<32 x i8> %a) nounwind {
define i16 @extractelement_v8i16_0(<8 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v8i16_0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: retq
@@ -151,13 +151,13 @@ define i16 @extractelement_v8i16_0(<8 x i16> %a, i256 %i) nounwind {
define i16 @extractelement_v8i16_3(<8 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v8i16_3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pextrw $3, %xmm0, %eax
; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrw $3, %xmm0, %eax
; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: retq
@@ -167,13 +167,13 @@ define i16 @extractelement_v8i16_3(<8 x i16> %a, i256 %i) nounwind {
define i16 @extractelement_v16i16_0(<16 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v16i16_0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v16i16_0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: vzeroupper
@@ -184,13 +184,13 @@ define i16 @extractelement_v16i16_0(<16 x i16> %a, i256 %i) nounwind {
define i16 @extractelement_v16i16_13(<16 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v16i16_13:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pextrw $5, %xmm1, %eax
; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: extractelement_v16i16_13:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrw $5, %xmm0, %eax
; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -198,7 +198,7 @@ define i16 @extractelement_v16i16_13(<16 x i16> %a, i256 %i) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: extractelement_v16i16_13:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrw $5, %xmm0, %eax
; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -210,12 +210,12 @@ define i16 @extractelement_v16i16_13(<16 x i16> %a, i256 %i) nounwind {
define i32 @extractelement_v4i32_0(<4 x i32> %a) nounwind {
; SSE-LABEL: extractelement_v4i32_0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v4i32_0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
%b = extractelement <4 x i32> %a, i256 0
@@ -224,18 +224,18 @@ define i32 @extractelement_v4i32_0(<4 x i32> %a) nounwind {
define i32 @extractelement_v4i32_3(<4 x i32> %a) nounwind {
; SSE2-LABEL: extractelement_v4i32_3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v4i32_3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: extractps $3, %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v4i32_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractps $3, %xmm0, %eax
; AVX-NEXT: retq
%b = extractelement <4 x i32> %a, i256 3
@@ -244,19 +244,19 @@ define i32 @extractelement_v4i32_3(<4 x i32> %a) nounwind {
define i32 @extractelement_v8i32_0(<8 x i32> %a) nounwind {
; SSE-LABEL: extractelement_v8i32_0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %xmm1, %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: extractelement_v8i32_0:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: extractelement_v8i32_0:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
@@ -267,19 +267,19 @@ define i32 @extractelement_v8i32_0(<8 x i32> %a) nounwind {
define i32 @extractelement_v8i32_4(<8 x i32> %a) nounwind {
; SSE-LABEL: extractelement_v8i32_4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %xmm1, %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: extractelement_v8i32_4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: extractelement_v8i32_4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
@@ -290,18 +290,18 @@ define i32 @extractelement_v8i32_4(<8 x i32> %a) nounwind {
define i32 @extractelement_v8i32_7(<8 x i32> %a) nounwind {
; SSE2-LABEL: extractelement_v8i32_7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v8i32_7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: extractps $3, %xmm1, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v8i32_7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vextractps $3, %xmm0, %eax
; AVX-NEXT: vzeroupper
@@ -312,12 +312,12 @@ define i32 @extractelement_v8i32_7(<8 x i32> %a) nounwind {
define i64 @extractelement_v2i64_0(<2 x i64> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v2i64_0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v2i64_0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
%b = extractelement <2 x i64> %a, i256 0
@@ -326,18 +326,18 @@ define i64 @extractelement_v2i64_0(<2 x i64> %a, i256 %i) nounwind {
define i64 @extractelement_v2i64_1(<2 x i64> %a, i256 %i) nounwind {
; SSE2-LABEL: extractelement_v2i64_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v2i64_1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v2i64_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rax
; AVX-NEXT: retq
%b = extractelement <2 x i64> %a, i256 1
@@ -346,18 +346,18 @@ define i64 @extractelement_v2i64_1(<2 x i64> %a, i256 %i) nounwind {
define i64 @extractelement_v4i64_1(<4 x i64> %a, i256 %i) nounwind {
; SSE2-LABEL: extractelement_v4i64_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v4i64_1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v4i64_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -367,25 +367,25 @@ define i64 @extractelement_v4i64_1(<4 x i64> %a, i256 %i) nounwind {
define i64 @extractelement_v4i64_3(<4 x i64> %a, i256 %i) nounwind {
; SSE2-LABEL: extractelement_v4i64_3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v4i64_3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm1, %rax
; SSE41-NEXT: retq
;
; AVX1-LABEL: extractelement_v4i64_3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: extractelement_v4i64_3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vzeroupper
@@ -400,7 +400,7 @@ define i64 @extractelement_v4i64_3(<4 x i64> %a, i256 %i) nounwind {
define i8 @extractelement_v16i8_var(<16 x i8> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v16i8_var:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andl $15, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
@@ -408,7 +408,7 @@ define i8 @extractelement_v16i8_var(<16 x i8> %a, i256 %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_var:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: andl $15, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
@@ -420,7 +420,7 @@ define i8 @extractelement_v16i8_var(<16 x i8> %a, i256 %i) nounwind {
define i8 @extractelement_v32i8_var(<32 x i8> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v32i8_var:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %rbp
; SSE-NEXT: movq %rsp, %rbp
; SSE-NEXT: andq $-32, %rsp
@@ -435,7 +435,7 @@ define i8 @extractelement_v32i8_var(<32 x i8> %a, i256 %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v32i8_var:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: movq %rsp, %rbp
; AVX-NEXT: andq $-32, %rsp
@@ -454,14 +454,14 @@ define i8 @extractelement_v32i8_var(<32 x i8> %a, i256 %i) nounwind {
define i16 @extractelement_v8i16_var(<8 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v8i16_var:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andl $7, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movzwl -24(%rsp,%rdi,2), %eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_var:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movzwl -24(%rsp,%rdi,2), %eax
@@ -472,7 +472,7 @@ define i16 @extractelement_v8i16_var(<8 x i16> %a, i256 %i) nounwind {
define i16 @extractelement_v16i16_var(<16 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v16i16_var:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %rbp
; SSE-NEXT: movq %rsp, %rbp
; SSE-NEXT: andq $-32, %rsp
@@ -486,7 +486,7 @@ define i16 @extractelement_v16i16_var(<16 x i16> %a, i256 %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v16i16_var:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: movq %rsp, %rbp
; AVX-NEXT: andq $-32, %rsp
@@ -504,14 +504,14 @@ define i16 @extractelement_v16i16_var(<16 x i16> %a, i256 %i) nounwind {
define i32 @extractelement_v4i32_var(<4 x i32> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v4i32_var:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movl -24(%rsp,%rdi,4), %eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v4i32_var:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movl -24(%rsp,%rdi,4), %eax
@@ -522,7 +522,7 @@ define i32 @extractelement_v4i32_var(<4 x i32> %a, i256 %i) nounwind {
define i32 @extractelement_v8i32_var(<8 x i32> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v8i32_var:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %rbp
; SSE-NEXT: movq %rsp, %rbp
; SSE-NEXT: andq $-32, %rsp
@@ -536,7 +536,7 @@ define i32 @extractelement_v8i32_var(<8 x i32> %a, i256 %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i32_var:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: movq %rsp, %rbp
; AVX-NEXT: andq $-32, %rsp
@@ -554,14 +554,14 @@ define i32 @extractelement_v8i32_var(<8 x i32> %a, i256 %i) nounwind {
define i64 @extractelement_v2i64_var(<2 x i64> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v2i64_var:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movq -24(%rsp,%rdi,8), %rax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v2i64_var:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: andl $1, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movq -24(%rsp,%rdi,8), %rax
@@ -572,7 +572,7 @@ define i64 @extractelement_v2i64_var(<2 x i64> %a, i256 %i) nounwind {
define i64 @extractelement_v4i64_var(<4 x i64> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v4i64_var:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %rbp
; SSE-NEXT: movq %rsp, %rbp
; SSE-NEXT: andq $-32, %rsp
@@ -586,7 +586,7 @@ define i64 @extractelement_v4i64_var(<4 x i64> %a, i256 %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v4i64_var:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: movq %rsp, %rbp
; AVX-NEXT: andq $-32, %rsp
@@ -608,11 +608,11 @@ define i64 @extractelement_v4i64_var(<4 x i64> %a, i256 %i) nounwind {
define i8 @extractelement_32i8_m1(<32 x i8> %a) nounwind {
; SSE-LABEL: extractelement_32i8_m1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_32i8_m1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%b = extractelement <32 x i8> %a, i256 -1
ret i8 %b
@@ -620,11 +620,11 @@ define i8 @extractelement_32i8_m1(<32 x i8> %a) nounwind {
define i16 @extractelement_v16i16_m4(<16 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v16i16_m4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v16i16_m4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%b = extractelement <16 x i16> %a, i256 -4
ret i16 %b
@@ -632,11 +632,11 @@ define i16 @extractelement_v16i16_m4(<16 x i16> %a, i256 %i) nounwind {
define i32 @extractelement_v8i32_15(<8 x i32> %a) nounwind {
; SSE-LABEL: extractelement_v8i32_15:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i32_15:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%b = extractelement <8 x i32> %a, i64 15
ret i32 %b
@@ -644,11 +644,11 @@ define i32 @extractelement_v8i32_15(<8 x i32> %a) nounwind {
define i64 @extractelement_v4i64_4(<4 x i64> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v4i64_4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v4i64_4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%b = extractelement <4 x i64> %a, i256 4
ret i64 %b
diff --git a/test/CodeGen/X86/extractelement-legalization-store-ordering.ll b/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
index 4d0b5ccc16b..a2aa23bbb91 100644
--- a/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
+++ b/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
define void @test_extractelement_legalization_storereuse(<4 x i32> %a, i32* nocapture %x, i32* nocapture readonly %y, i32 %i) #0 {
; CHECK-LABEL: test_extractelement_legalization_storereuse:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
diff --git a/test/CodeGen/X86/extractelement-load.ll b/test/CodeGen/X86/extractelement-load.ll
index c3542bff4cc..8cde110383b 100644
--- a/test/CodeGen/X86/extractelement-load.ll
+++ b/test/CodeGen/X86/extractelement-load.ll
@@ -7,18 +7,18 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define i32 @t(<2 x i64>* %val) nounwind {
; X32-SSE2-LABEL: t:
-; X32-SSE2: # BB#0:
+; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movl 8(%eax), %eax
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t:
-; X64-SSSE3: # BB#0:
+; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: movl 8(%rdi), %eax
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: t:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl 8(%rdi), %eax
; X64-AVX-NEXT: retq
%tmp2 = load <2 x i64>, <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
@@ -31,15 +31,15 @@ define i32 @t(<2 x i64>* %val) nounwind {
; (Making sure this doesn't crash.)
define i32 @t2(<8 x i32>* %xp) {
; X32-SSE2-LABEL: t2:
-; X32-SSE2: # BB#0:
+; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t2:
-; X64-SSSE3: # BB#0:
+; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: t2:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: retq
%x = load <8 x i32>, <8 x i32>* %xp
%Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
@@ -57,17 +57,17 @@ define i32 @t2(<8 x i32>* %xp) {
define void @t3() {
; X32-SSE2-LABEL: t3:
-; X32-SSE2: # BB#0: # %bb
+; X32-SSE2: # %bb.0: # %bb
; X32-SSE2-NEXT: movupd (%eax), %xmm0
; X32-SSE2-NEXT: movhpd %xmm0, (%eax)
;
; X64-SSSE3-LABEL: t3:
-; X64-SSSE3: # BB#0: # %bb
+; X64-SSSE3: # %bb.0: # %bb
; X64-SSSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; X64-SSSE3-NEXT: movlpd %xmm0, (%rax)
;
; X64-AVX-LABEL: t3:
-; X64-AVX: # BB#0: # %bb
+; X64-AVX: # %bb.0: # %bb
; X64-AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-AVX-NEXT: vmovlpd %xmm0, (%rax)
bb:
@@ -83,7 +83,7 @@ bb:
; second shuffle operand was a post-bitcast type instead of a pre-bitcast type.
define i64 @t4(<2 x double>* %a) {
; X32-SSE2-LABEL: t4:
-; X32-SSE2: # BB#0:
+; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movapd (%eax), %xmm0
; X32-SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -94,12 +94,12 @@ define i64 @t4(<2 x double>* %a) {
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t4:
-; X64-SSSE3: # BB#0:
+; X64-SSSE3: # %bb.0:
; X64-SSSE3-NEXT: movq (%rdi), %rax
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: t4:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movq (%rdi), %rax
; X64-AVX-NEXT: retq
%b = load <2 x double>, <2 x double>* %a, align 16
diff --git a/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll b/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
index a78b72cd424..3c4fc8c9baa 100644
--- a/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
@@ -6,7 +6,7 @@
define float @test_cvtsh_ss(i16 %a0) nounwind {
; X32-LABEL: test_cvtsh_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovd %eax, %xmm0
@@ -17,7 +17,7 @@ define float @test_cvtsh_ss(i16 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_cvtsh_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl %di, %eax
; X64-NEXT: vmovd %eax, %xmm0
; X64-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -37,7 +37,7 @@ define float @test_cvtsh_ss(i16 %a0) nounwind {
define i16 @test_cvtss_sh(float %a0) nounwind {
; X32-LABEL: test_cvtss_sh:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
@@ -47,7 +47,7 @@ define i16 @test_cvtss_sh(float %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_cvtss_sh:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
@@ -65,12 +65,12 @@ define i16 @test_cvtss_sh(float %a0) nounwind {
define <4 x float> @test_mm_cvtph_ps(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_cvtph_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtph2ps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtph_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtph2ps %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -80,12 +80,12 @@ define <4 x float> @test_mm_cvtph_ps(<2 x i64> %a0) nounwind {
define <8 x float> @test_mm256_cvtph_ps(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_cvtph_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtph2ps %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvtph_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtph2ps %xmm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -95,12 +95,12 @@ define <8 x float> @test_mm256_cvtph_ps(<2 x i64> %a0) nounwind {
define <2 x i64> @test_mm_cvtps_ph(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvtps_ph:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtps_ph:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X64-NEXT: retq
%cvt = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
@@ -110,13 +110,13 @@ define <2 x i64> @test_mm_cvtps_ph(<4 x float> %a0) nounwind {
define <2 x i64> @test_mm256_cvtps_ph(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_cvtps_ph:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtps2ph $0, %ymm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cvtps_ph:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtps2ph $0, %ymm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/f16c-intrinsics.ll b/test/CodeGen/X86/f16c-intrinsics.ll
index 64f8fd0ca8d..20ea67529a9 100644
--- a/test/CodeGen/X86/f16c-intrinsics.ll
+++ b/test/CodeGen/X86/f16c-intrinsics.ll
@@ -6,22 +6,22 @@
define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
; X32-LABEL: test_x86_vcvtph2ps_128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtph2ps %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtph2ps_128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtph2ps %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_128:
-; X32-AVX512VL: # BB#0:
+; X32-AVX512VL: # %bb.0:
; X32-AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_128:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0) ; <<4 x float>> [#uses=1]
@@ -31,24 +31,24 @@ declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>) nounwind readonly
define <4 x float> @test_x86_vcvtph2ps_128_m(<8 x i16>* nocapture %a) {
; X32-LABEL: test_x86_vcvtph2ps_128_m:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-NEXT: vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtph2ps_128_m:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_128_m:
-; X32-AVX512VL: # BB#0:
+; X32-AVX512VL: # %bb.0:
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_128_m:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%load = load <8 x i16>, <8 x i16>* %a
@@ -58,22 +58,22 @@ define <4 x float> @test_x86_vcvtph2ps_128_m(<8 x i16>* nocapture %a) {
define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) {
; X32-LABEL: test_x86_vcvtph2ps_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtph2ps %xmm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtph2ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtph2ps %xmm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_256:
-; X32-AVX512VL: # BB#0:
+; X32-AVX512VL: # %bb.0:
; X32-AVX512VL-NEXT: vcvtph2ps %xmm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_256:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vcvtph2ps %xmm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0) ; <<8 x float>> [#uses=1]
@@ -83,24 +83,24 @@ declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readonly
define <8 x float> @test_x86_vcvtph2ps_256_m(<8 x i16>* nocapture %a) nounwind {
; X32-LABEL: test_x86_vcvtph2ps_256_m:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-NEXT: vcvtph2ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0x00]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtph2ps_256_m:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtph2ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0x07]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_256_m:
-; X32-AVX512VL: # BB#0:
+; X32-AVX512VL: # %bb.0:
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtph2ps (%eax), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0x00]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_256_m:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vcvtph2ps (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0x07]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%load = load <8 x i16>, <8 x i16>* %a
@@ -110,22 +110,22 @@ define <8 x float> @test_x86_vcvtph2ps_256_m(<8 x i16>* nocapture %a) nounwind {
define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) {
; X32-LABEL: test_x86_vcvtps2ph_128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128:
-; X32-AVX512VL: # BB#0:
+; X32-AVX512VL: # %bb.0:
; X32-AVX512VL-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
@@ -135,25 +135,25 @@ declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
; X32-LABEL: test_x86_vcvtps2ph_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
; X32-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_256:
-; X32-AVX512VL: # BB#0:
+; X32-AVX512VL: # %bb.0:
; X32-AVX512VL-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
; X32-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_256:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
@@ -164,24 +164,24 @@ declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readonly
define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
; X32-LABEL: test_x86_vcvtps2ph_128_scalar:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-NEXT: vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_128_scalar:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar:
-; X32-AVX512VL: # BB#0:
+; X32-AVX512VL: # %bb.0:
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%load = load i64, i64* %ptr
@@ -194,24 +194,24 @@ define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
define <4 x float> @test_x86_vcvtps2ph_128_scalar2(i64* %ptr) {
; X32-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-NEXT: vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; X32-AVX512VL: # BB#0:
+; X32-AVX512VL: # %bb.0:
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; X64-AVX512VL: # BB#0:
+; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%load = load i64, i64* %ptr
@@ -223,27 +223,27 @@ define <4 x float> @test_x86_vcvtps2ph_128_scalar2(i64* %ptr) {
define void @test_x86_vcvtps2ph_256_m(<8 x i16>* nocapture %d, <8 x float> %a) nounwind {
; X32-LABEL: test_x86_vcvtps2ph_256_m:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-NEXT: vcvtps2ph $3, %ymm0, (%eax) # encoding: [0xc4,0xe3,0x7d,0x1d,0x00,0x03]
; X32-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_256_m:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vcvtps2ph $3, %ymm0, (%rdi) # encoding: [0xc4,0xe3,0x7d,0x1d,0x07,0x03]
; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_256_m:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2ph $3, %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0x00,0x03]
; X32-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_256_m:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2ph $3, %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0x07,0x03]
; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
@@ -255,18 +255,18 @@ entry:
define void @test_x86_vcvtps2ph_128_m(<4 x i16>* nocapture %d, <4 x float> %a) nounwind {
; X32-LABEL: test_x86_vcvtps2ph_128_m:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_128_m:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x03]
; X32-AVX512VL-NEXT: vpmovzxwd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x33,0xc0]
@@ -275,7 +275,7 @@ define void @test_x86_vcvtps2ph_128_m(<4 x i16>* nocapture %d, <4 x float> %a) n
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x03]
; X64-AVX512VL-NEXT: vpmovzxwd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x33,0xc0]
; X64-AVX512VL-NEXT: # xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -290,24 +290,24 @@ entry:
define void @test_x86_vcvtps2ph_128_m2(double* nocapture %hf4x16, <4 x float> %f4x32) #0 {
; X32-LABEL: test_x86_vcvtps2ph_128_m2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_128_m2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m2:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m2:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
entry:
@@ -320,24 +320,24 @@ entry:
define void @test_x86_vcvtps2ph_128_m3(i64* nocapture %hf4x16, <4 x float> %f4x32) #0 {
; X32-LABEL: test_x86_vcvtps2ph_128_m3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-NEXT: vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X32-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_vcvtps2ph_128_m3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m3:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m3:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
entry:
diff --git a/test/CodeGen/X86/f16c-schedule.ll b/test/CodeGen/X86/f16c-schedule.ll
index 1d0236e4430..62a294cc162 100644
--- a/test/CodeGen/X86/f16c-schedule.ll
+++ b/test/CodeGen/X86/f16c-schedule.ll
@@ -9,49 +9,49 @@
define <4 x float> @test_vcvtph2ps_128(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_vcvtph2ps_128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [7:1.00]
; GENERIC-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; IVY-LABEL: test_vcvtph2ps_128:
-; IVY: # BB#0:
+; IVY: # %bb.0:
; IVY-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [7:1.00]
; IVY-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [3:1.00]
; IVY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; IVY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vcvtph2ps_128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vcvtph2ps_128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vcvtph2ps_128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [9:0.50]
; SKYLAKE-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_vcvtph2ps_128:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_vcvtph2ps_128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [100:?]
; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -66,49 +66,49 @@ declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>)
define <8 x float> @test_vcvtph2ps_256(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_vcvtph2ps_256:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [7:1.00]
; GENERIC-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; IVY-LABEL: test_vcvtph2ps_256:
-; IVY: # BB#0:
+; IVY: # %bb.0:
; IVY-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [7:1.00]
; IVY-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [3:1.00]
; IVY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; IVY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vcvtph2ps_256:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [1:1.00]
; HASWELL-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [2:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vcvtph2ps_256:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [6:1.00]
; BROADWELL-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [2:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vcvtph2ps_256:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [10:0.50]
; SKYLAKE-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [7:1.00]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_vcvtph2ps_256:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [8:2.00]
; BTVER2-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_vcvtph2ps_256:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [100:?]
; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -123,43 +123,43 @@ declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>)
define <8 x i16> @test_vcvtps2ph_128(<4 x float> %a0, <4 x float> %a1, <4 x i16> *%a2) {
; GENERIC-LABEL: test_vcvtps2ph_128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; IVY-LABEL: test_vcvtps2ph_128:
-; IVY: # BB#0:
+; IVY: # %bb.0:
; IVY-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [3:1.00]
; IVY-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [7:1.00]
; IVY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vcvtps2ph_128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vcvtps2ph_128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vcvtps2ph_128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [6:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_vcvtps2ph_128:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_vcvtps2ph_128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -173,48 +173,48 @@ declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32)
define <8 x i16> @test_vcvtps2ph_256(<8 x float> %a0, <8 x float> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_vcvtps2ph_256:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [7:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; IVY-LABEL: test_vcvtps2ph_256:
-; IVY: # BB#0:
+; IVY: # %bb.0:
; IVY-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [3:1.00]
; IVY-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [7:1.00]
; IVY-NEXT: vzeroupper
; IVY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vcvtps2ph_256:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [6:1.00]
; HASWELL-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [6:1.00]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vcvtps2ph_256:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [4:1.00]
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vcvtps2ph_256:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [8:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_vcvtps2ph_256:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [6:2.00]
; BTVER2-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [11:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_vcvtps2ph_256:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
diff --git a/test/CodeGen/X86/fadd-combines.ll b/test/CodeGen/X86/fadd-combines.ll
index 28f72f42d01..ce7ee94e0fb 100644
--- a/test/CodeGen/X86/fadd-combines.ll
+++ b/test/CodeGen/X86/fadd-combines.ll
@@ -3,7 +3,7 @@
define float @fadd_zero_f32(float %x) #0 {
; CHECK-LABEL: fadd_zero_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%y = fadd float %x, 0.0
ret float %y
@@ -11,7 +11,7 @@ define float @fadd_zero_f32(float %x) #0 {
define <4 x float> @fadd_zero_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_zero_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%y = fadd <4 x float> %x, zeroinitializer
ret <4 x float> %y
@@ -20,7 +20,7 @@ define <4 x float> @fadd_zero_4f32(<4 x float> %x) #0 {
; CHECK: float 3
define float @fadd_2const_f32(float %x) #0 {
; CHECK-LABEL: fadd_2const_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd float %x, 1.0
@@ -34,7 +34,7 @@ define float @fadd_2const_f32(float %x) #0 {
; CHECK: float 5
define <4 x float> @fadd_2const_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_2const_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -45,7 +45,7 @@ define <4 x float> @fadd_2const_4f32(<4 x float> %x) #0 {
; CHECK: float 3
define float @fadd_x_fmul_x_c_f32(float %x) #0 {
; CHECK-LABEL: fadd_x_fmul_x_c_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fmul float %x, 2.0
@@ -59,7 +59,7 @@ define float @fadd_x_fmul_x_c_f32(float %x) #0 {
; CHECK: float 5
define <4 x float> @fadd_x_fmul_x_c_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_x_fmul_x_c_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -70,7 +70,7 @@ define <4 x float> @fadd_x_fmul_x_c_4f32(<4 x float> %x) #0 {
; CHECK: float 3
define float @fadd_fmul_x_c_x_f32(float %x) #0 {
; CHECK-LABEL: fadd_fmul_x_c_x_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fmul float %x, 2.0
@@ -84,7 +84,7 @@ define float @fadd_fmul_x_c_x_f32(float %x) #0 {
; CHECK: float 5
define <4 x float> @fadd_fmul_x_c_x_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_fmul_x_c_x_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -95,7 +95,7 @@ define <4 x float> @fadd_fmul_x_c_x_4f32(<4 x float> %x) #0 {
; CHECK: float 4
define float @fadd_fadd_x_x_fmul_x_c_f32(float %x) #0 {
; CHECK-LABEL: fadd_fadd_x_x_fmul_x_c_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd float %x, %x
@@ -110,7 +110,7 @@ define float @fadd_fadd_x_x_fmul_x_c_f32(float %x) #0 {
; CHECK: float 6
define <4 x float> @fadd_fadd_x_x_fmul_x_c_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_fadd_x_x_fmul_x_c_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd <4 x float> %x, %x
@@ -122,7 +122,7 @@ define <4 x float> @fadd_fadd_x_x_fmul_x_c_4f32(<4 x float> %x) #0 {
; CHECK: float 4
define float @fadd_fmul_x_c_fadd_x_x_f32(float %x) #0 {
; CHECK-LABEL: fadd_fmul_x_c_fadd_x_x_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd float %x, %x
@@ -137,7 +137,7 @@ define float @fadd_fmul_x_c_fadd_x_x_f32(float %x) #0 {
; CHECK: float 6
define <4 x float> @fadd_fmul_x_c_fadd_x_x_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_fmul_x_c_fadd_x_x_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd <4 x float> %x, %x
@@ -149,7 +149,7 @@ define <4 x float> @fadd_fmul_x_c_fadd_x_x_4f32(<4 x float> %x) #0 {
; CHECK: float 3
define float @fadd_x_fadd_x_x_f32(float %x) #0 {
; CHECK-LABEL: fadd_x_fadd_x_x_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd float %x, %x
@@ -163,7 +163,7 @@ define float @fadd_x_fadd_x_x_f32(float %x) #0 {
; CHECK: float 3
define <4 x float> @fadd_x_fadd_x_x_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_x_fadd_x_x_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd <4 x float> %x, %x
@@ -174,7 +174,7 @@ define <4 x float> @fadd_x_fadd_x_x_4f32(<4 x float> %x) #0 {
; CHECK: float 3
define float @fadd_fadd_x_x_x_f32(float %x) #0 {
; CHECK-LABEL: fadd_fadd_x_x_x_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd float %x, %x
@@ -188,7 +188,7 @@ define float @fadd_fadd_x_x_x_f32(float %x) #0 {
; CHECK: float 3
define <4 x float> @fadd_fadd_x_x_x_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_fadd_x_x_x_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd <4 x float> %x, %x
@@ -199,7 +199,7 @@ define <4 x float> @fadd_fadd_x_x_x_4f32(<4 x float> %x) #0 {
; CHECK: float 4
define float @fadd_fadd_x_x_fadd_x_x_f32(float %x) #0 {
; CHECK-LABEL: fadd_fadd_x_x_fadd_x_x_f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd float %x, %x
@@ -213,7 +213,7 @@ define float @fadd_fadd_x_x_fadd_x_x_f32(float %x) #0 {
; CHECK: float 4
define <4 x float> @fadd_fadd_x_x_fadd_x_x_4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fadd_fadd_x_x_fadd_x_x_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%y = fadd <4 x float> %x, %x
diff --git a/test/CodeGen/X86/fast-isel-cmp.ll b/test/CodeGen/X86/fast-isel-cmp.ll
index 6ffb22e215e..991ecfd5e99 100644
--- a/test/CodeGen/X86/fast-isel-cmp.ll
+++ b/test/CodeGen/X86/fast-isel-cmp.ll
@@ -6,7 +6,7 @@
define zeroext i1 @fcmp_oeq(float %x, float %y) {
; SDAG-LABEL: fcmp_oeq:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpeqss %xmm1, %xmm0
; SDAG-NEXT: movd %xmm0, %eax
; SDAG-NEXT: andl $1, %eax
@@ -14,7 +14,7 @@ define zeroext i1 @fcmp_oeq(float %x, float %y) {
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: sete %al
; FAST_NOAVX-NEXT: setnp %cl
@@ -24,7 +24,7 @@ define zeroext i1 @fcmp_oeq(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_oeq:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: sete %al
; FAST_AVX-NEXT: setnp %cl
@@ -38,13 +38,13 @@ define zeroext i1 @fcmp_oeq(float %x, float %y) {
define zeroext i1 @fcmp_ogt(float %x, float %y) {
; SDAG-LABEL: fcmp_ogt:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: seta %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ogt:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: seta %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -52,7 +52,7 @@ define zeroext i1 @fcmp_ogt(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ogt:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: seta %al
; FAST_AVX-NEXT: andb $1, %al
@@ -64,13 +64,13 @@ define zeroext i1 @fcmp_ogt(float %x, float %y) {
define zeroext i1 @fcmp_oge(float %x, float %y) {
; SDAG-LABEL: fcmp_oge:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setae %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oge:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setae %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -78,7 +78,7 @@ define zeroext i1 @fcmp_oge(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_oge:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setae %al
; FAST_AVX-NEXT: andb $1, %al
@@ -90,13 +90,13 @@ define zeroext i1 @fcmp_oge(float %x, float %y) {
define zeroext i1 @fcmp_olt(float %x, float %y) {
; SDAG-LABEL: fcmp_olt:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm1
; SDAG-NEXT: seta %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_olt:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm1
; FAST_NOAVX-NEXT: seta %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -104,7 +104,7 @@ define zeroext i1 @fcmp_olt(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_olt:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm1
; FAST_AVX-NEXT: seta %al
; FAST_AVX-NEXT: andb $1, %al
@@ -116,13 +116,13 @@ define zeroext i1 @fcmp_olt(float %x, float %y) {
define zeroext i1 @fcmp_ole(float %x, float %y) {
; SDAG-LABEL: fcmp_ole:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm1
; SDAG-NEXT: setae %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ole:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm1
; FAST_NOAVX-NEXT: setae %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -130,7 +130,7 @@ define zeroext i1 @fcmp_ole(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ole:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm1
; FAST_AVX-NEXT: setae %al
; FAST_AVX-NEXT: andb $1, %al
@@ -142,13 +142,13 @@ define zeroext i1 @fcmp_ole(float %x, float %y) {
define zeroext i1 @fcmp_one(float %x, float %y) {
; SDAG-LABEL: fcmp_one:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setne %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_one:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setne %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -156,7 +156,7 @@ define zeroext i1 @fcmp_one(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_one:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setne %al
; FAST_AVX-NEXT: andb $1, %al
@@ -168,13 +168,13 @@ define zeroext i1 @fcmp_one(float %x, float %y) {
define zeroext i1 @fcmp_ord(float %x, float %y) {
; SDAG-LABEL: fcmp_ord:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setnp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ord:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setnp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -182,7 +182,7 @@ define zeroext i1 @fcmp_ord(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ord:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setnp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -194,13 +194,13 @@ define zeroext i1 @fcmp_ord(float %x, float %y) {
define zeroext i1 @fcmp_uno(float %x, float %y) {
; SDAG-LABEL: fcmp_uno:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_uno:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -208,7 +208,7 @@ define zeroext i1 @fcmp_uno(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_uno:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -220,13 +220,13 @@ define zeroext i1 @fcmp_uno(float %x, float %y) {
define zeroext i1 @fcmp_ueq(float %x, float %y) {
; SDAG-LABEL: fcmp_ueq:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: sete %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ueq:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: sete %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -234,7 +234,7 @@ define zeroext i1 @fcmp_ueq(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ueq:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: sete %al
; FAST_AVX-NEXT: andb $1, %al
@@ -246,13 +246,13 @@ define zeroext i1 @fcmp_ueq(float %x, float %y) {
define zeroext i1 @fcmp_ugt(float %x, float %y) {
; SDAG-LABEL: fcmp_ugt:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm1
; SDAG-NEXT: setb %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ugt:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm1
; FAST_NOAVX-NEXT: setb %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -260,7 +260,7 @@ define zeroext i1 @fcmp_ugt(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ugt:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm1
; FAST_AVX-NEXT: setb %al
; FAST_AVX-NEXT: andb $1, %al
@@ -272,13 +272,13 @@ define zeroext i1 @fcmp_ugt(float %x, float %y) {
define zeroext i1 @fcmp_uge(float %x, float %y) {
; SDAG-LABEL: fcmp_uge:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm1
; SDAG-NEXT: setbe %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_uge:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm1
; FAST_NOAVX-NEXT: setbe %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -286,7 +286,7 @@ define zeroext i1 @fcmp_uge(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_uge:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm1
; FAST_AVX-NEXT: setbe %al
; FAST_AVX-NEXT: andb $1, %al
@@ -298,13 +298,13 @@ define zeroext i1 @fcmp_uge(float %x, float %y) {
define zeroext i1 @fcmp_ult(float %x, float %y) {
; SDAG-LABEL: fcmp_ult:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setb %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ult:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setb %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -312,7 +312,7 @@ define zeroext i1 @fcmp_ult(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ult:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setb %al
; FAST_AVX-NEXT: andb $1, %al
@@ -324,13 +324,13 @@ define zeroext i1 @fcmp_ult(float %x, float %y) {
define zeroext i1 @fcmp_ule(float %x, float %y) {
; SDAG-LABEL: fcmp_ule:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setbe %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ule:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setbe %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -338,7 +338,7 @@ define zeroext i1 @fcmp_ule(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ule:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setbe %al
; FAST_AVX-NEXT: andb $1, %al
@@ -350,7 +350,7 @@ define zeroext i1 @fcmp_ule(float %x, float %y) {
define zeroext i1 @fcmp_une(float %x, float %y) {
; SDAG-LABEL: fcmp_une:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpneqss %xmm1, %xmm0
; SDAG-NEXT: movd %xmm0, %eax
; SDAG-NEXT: andl $1, %eax
@@ -358,7 +358,7 @@ define zeroext i1 @fcmp_une(float %x, float %y) {
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setne %al
; FAST_NOAVX-NEXT: setp %cl
@@ -368,7 +368,7 @@ define zeroext i1 @fcmp_une(float %x, float %y) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_une:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setne %al
; FAST_AVX-NEXT: setp %cl
@@ -382,13 +382,13 @@ define zeroext i1 @fcmp_une(float %x, float %y) {
define zeroext i1 @icmp_eq(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_eq:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: sete %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_eq:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: sete %al
; FAST-NEXT: andb $1, %al
@@ -400,13 +400,13 @@ define zeroext i1 @icmp_eq(i32 %x, i32 %y) {
define zeroext i1 @icmp_ne(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_ne:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: setne %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_ne:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: setne %al
; FAST-NEXT: andb $1, %al
@@ -418,13 +418,13 @@ define zeroext i1 @icmp_ne(i32 %x, i32 %y) {
define zeroext i1 @icmp_ugt(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_ugt:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: seta %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_ugt:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: seta %al
; FAST-NEXT: andb $1, %al
@@ -436,13 +436,13 @@ define zeroext i1 @icmp_ugt(i32 %x, i32 %y) {
define zeroext i1 @icmp_uge(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_uge:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: setae %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_uge:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: setae %al
; FAST-NEXT: andb $1, %al
@@ -454,13 +454,13 @@ define zeroext i1 @icmp_uge(i32 %x, i32 %y) {
define zeroext i1 @icmp_ult(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_ult:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: setb %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_ult:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: setb %al
; FAST-NEXT: andb $1, %al
@@ -472,13 +472,13 @@ define zeroext i1 @icmp_ult(i32 %x, i32 %y) {
define zeroext i1 @icmp_ule(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_ule:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: setbe %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_ule:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: setbe %al
; FAST-NEXT: andb $1, %al
@@ -490,13 +490,13 @@ define zeroext i1 @icmp_ule(i32 %x, i32 %y) {
define zeroext i1 @icmp_sgt(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_sgt:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: setg %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_sgt:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: setg %al
; FAST-NEXT: andb $1, %al
@@ -508,13 +508,13 @@ define zeroext i1 @icmp_sgt(i32 %x, i32 %y) {
define zeroext i1 @icmp_sge(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_sge:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: setge %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_sge:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: setge %al
; FAST-NEXT: andb $1, %al
@@ -526,13 +526,13 @@ define zeroext i1 @icmp_sge(i32 %x, i32 %y) {
define zeroext i1 @icmp_slt(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_slt:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: setl %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_slt:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: setl %al
; FAST-NEXT: andb $1, %al
@@ -544,13 +544,13 @@ define zeroext i1 @icmp_slt(i32 %x, i32 %y) {
define zeroext i1 @icmp_sle(i32 %x, i32 %y) {
; SDAG-LABEL: icmp_sle:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: setle %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_sle:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: setle %al
; FAST-NEXT: andb $1, %al
@@ -563,13 +563,13 @@ define zeroext i1 @icmp_sle(i32 %x, i32 %y) {
; Test cmp folding and condition optimization.
define zeroext i1 @fcmp_oeq2(float %x) {
; SDAG-LABEL: fcmp_oeq2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setnp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq2:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setnp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -577,7 +577,7 @@ define zeroext i1 @fcmp_oeq2(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_oeq2:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setnp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -589,7 +589,7 @@ define zeroext i1 @fcmp_oeq2(float %x) {
define zeroext i1 @fcmp_oeq3(float %x) {
; SDAG-LABEL: fcmp_oeq3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: cmpeqss %xmm0, %xmm1
; SDAG-NEXT: movd %xmm1, %eax
@@ -598,7 +598,7 @@ define zeroext i1 @fcmp_oeq3(float %x) {
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: sete %al
@@ -609,7 +609,7 @@ define zeroext i1 @fcmp_oeq3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_oeq3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: sete %al
@@ -624,12 +624,12 @@ define zeroext i1 @fcmp_oeq3(float %x) {
define zeroext i1 @fcmp_ogt2(float %x) {
; SDAG-LABEL: fcmp_ogt2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %eax, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: fcmp_ogt2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %eax, %eax
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -640,14 +640,14 @@ define zeroext i1 @fcmp_ogt2(float %x) {
define zeroext i1 @fcmp_ogt3(float %x) {
; SDAG-LABEL: fcmp_ogt3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: seta %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ogt3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: seta %al
@@ -656,7 +656,7 @@ define zeroext i1 @fcmp_ogt3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ogt3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: seta %al
@@ -669,13 +669,13 @@ define zeroext i1 @fcmp_ogt3(float %x) {
define zeroext i1 @fcmp_oge2(float %x) {
; SDAG-LABEL: fcmp_oge2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setnp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oge2:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setnp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -683,7 +683,7 @@ define zeroext i1 @fcmp_oge2(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_oge2:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setnp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -695,14 +695,14 @@ define zeroext i1 @fcmp_oge2(float %x) {
define zeroext i1 @fcmp_oge3(float %x) {
; SDAG-LABEL: fcmp_oge3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setae %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oge3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setae %al
@@ -711,7 +711,7 @@ define zeroext i1 @fcmp_oge3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_oge3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setae %al
@@ -724,12 +724,12 @@ define zeroext i1 @fcmp_oge3(float %x) {
define zeroext i1 @fcmp_olt2(float %x) {
; SDAG-LABEL: fcmp_olt2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %eax, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: fcmp_olt2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %eax, %eax
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -740,14 +740,14 @@ define zeroext i1 @fcmp_olt2(float %x) {
define zeroext i1 @fcmp_olt3(float %x) {
; SDAG-LABEL: fcmp_olt3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm0, %xmm1
; SDAG-NEXT: seta %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_olt3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm1
; FAST_NOAVX-NEXT: seta %al
@@ -756,7 +756,7 @@ define zeroext i1 @fcmp_olt3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_olt3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm1
; FAST_AVX-NEXT: seta %al
@@ -769,13 +769,13 @@ define zeroext i1 @fcmp_olt3(float %x) {
define zeroext i1 @fcmp_ole2(float %x) {
; SDAG-LABEL: fcmp_ole2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setnp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ole2:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setnp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -783,7 +783,7 @@ define zeroext i1 @fcmp_ole2(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ole2:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setnp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -795,14 +795,14 @@ define zeroext i1 @fcmp_ole2(float %x) {
define zeroext i1 @fcmp_ole3(float %x) {
; SDAG-LABEL: fcmp_ole3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm0, %xmm1
; SDAG-NEXT: setae %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ole3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm1
; FAST_NOAVX-NEXT: setae %al
@@ -811,7 +811,7 @@ define zeroext i1 @fcmp_ole3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ole3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm1
; FAST_AVX-NEXT: setae %al
@@ -824,12 +824,12 @@ define zeroext i1 @fcmp_ole3(float %x) {
define zeroext i1 @fcmp_one2(float %x) {
; SDAG-LABEL: fcmp_one2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %eax, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: fcmp_one2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %eax, %eax
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -840,14 +840,14 @@ define zeroext i1 @fcmp_one2(float %x) {
define zeroext i1 @fcmp_one3(float %x) {
; SDAG-LABEL: fcmp_one3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setne %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_one3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setne %al
@@ -856,7 +856,7 @@ define zeroext i1 @fcmp_one3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_one3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setne %al
@@ -869,13 +869,13 @@ define zeroext i1 @fcmp_one3(float %x) {
define zeroext i1 @fcmp_ord2(float %x) {
; SDAG-LABEL: fcmp_ord2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setnp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ord2:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setnp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -883,7 +883,7 @@ define zeroext i1 @fcmp_ord2(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ord2:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setnp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -895,13 +895,13 @@ define zeroext i1 @fcmp_ord2(float %x) {
define zeroext i1 @fcmp_ord3(float %x) {
; SDAG-LABEL: fcmp_ord3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setnp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ord3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setnp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -909,7 +909,7 @@ define zeroext i1 @fcmp_ord3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ord3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setnp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -921,13 +921,13 @@ define zeroext i1 @fcmp_ord3(float %x) {
define zeroext i1 @fcmp_uno2(float %x) {
; SDAG-LABEL: fcmp_uno2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_uno2:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -935,7 +935,7 @@ define zeroext i1 @fcmp_uno2(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_uno2:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -947,13 +947,13 @@ define zeroext i1 @fcmp_uno2(float %x) {
define zeroext i1 @fcmp_uno3(float %x) {
; SDAG-LABEL: fcmp_uno3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_uno3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -961,7 +961,7 @@ define zeroext i1 @fcmp_uno3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_uno3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -973,12 +973,12 @@ define zeroext i1 @fcmp_uno3(float %x) {
define zeroext i1 @fcmp_ueq2(float %x) {
; SDAG-LABEL: fcmp_ueq2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
;
; FAST-LABEL: fcmp_ueq2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -989,14 +989,14 @@ define zeroext i1 @fcmp_ueq2(float %x) {
define zeroext i1 @fcmp_ueq3(float %x) {
; SDAG-LABEL: fcmp_ueq3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: sete %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ueq3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: sete %al
@@ -1005,7 +1005,7 @@ define zeroext i1 @fcmp_ueq3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ueq3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: sete %al
@@ -1018,13 +1018,13 @@ define zeroext i1 @fcmp_ueq3(float %x) {
define zeroext i1 @fcmp_ugt2(float %x) {
; SDAG-LABEL: fcmp_ugt2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ugt2:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -1032,7 +1032,7 @@ define zeroext i1 @fcmp_ugt2(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ugt2:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -1044,14 +1044,14 @@ define zeroext i1 @fcmp_ugt2(float %x) {
define zeroext i1 @fcmp_ugt3(float %x) {
; SDAG-LABEL: fcmp_ugt3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm0, %xmm1
; SDAG-NEXT: setb %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ugt3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm1
; FAST_NOAVX-NEXT: setb %al
@@ -1060,7 +1060,7 @@ define zeroext i1 @fcmp_ugt3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ugt3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm1
; FAST_AVX-NEXT: setb %al
@@ -1073,12 +1073,12 @@ define zeroext i1 @fcmp_ugt3(float %x) {
define zeroext i1 @fcmp_uge2(float %x) {
; SDAG-LABEL: fcmp_uge2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
;
; FAST-LABEL: fcmp_uge2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1089,14 +1089,14 @@ define zeroext i1 @fcmp_uge2(float %x) {
define zeroext i1 @fcmp_uge3(float %x) {
; SDAG-LABEL: fcmp_uge3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm0, %xmm1
; SDAG-NEXT: setbe %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_uge3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm1
; FAST_NOAVX-NEXT: setbe %al
@@ -1105,7 +1105,7 @@ define zeroext i1 @fcmp_uge3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_uge3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm1
; FAST_AVX-NEXT: setbe %al
@@ -1118,13 +1118,13 @@ define zeroext i1 @fcmp_uge3(float %x) {
define zeroext i1 @fcmp_ult2(float %x) {
; SDAG-LABEL: fcmp_ult2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ult2:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -1132,7 +1132,7 @@ define zeroext i1 @fcmp_ult2(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ult2:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -1144,14 +1144,14 @@ define zeroext i1 @fcmp_ult2(float %x) {
define zeroext i1 @fcmp_ult3(float %x) {
; SDAG-LABEL: fcmp_ult3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setb %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ult3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setb %al
@@ -1160,7 +1160,7 @@ define zeroext i1 @fcmp_ult3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ult3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setb %al
@@ -1173,12 +1173,12 @@ define zeroext i1 @fcmp_ult3(float %x) {
define zeroext i1 @fcmp_ule2(float %x) {
; SDAG-LABEL: fcmp_ule2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
;
; FAST-LABEL: fcmp_ule2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1189,14 +1189,14 @@ define zeroext i1 @fcmp_ule2(float %x) {
define zeroext i1 @fcmp_ule3(float %x) {
; SDAG-LABEL: fcmp_ule3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: ucomiss %xmm1, %xmm0
; SDAG-NEXT: setbe %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_ule3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setbe %al
@@ -1205,7 +1205,7 @@ define zeroext i1 @fcmp_ule3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_ule3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setbe %al
@@ -1218,13 +1218,13 @@ define zeroext i1 @fcmp_ule3(float %x) {
define zeroext i1 @fcmp_une2(float %x) {
; SDAG-LABEL: fcmp_une2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomiss %xmm0, %xmm0
; SDAG-NEXT: setp %al
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une2:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: ucomiss %xmm0, %xmm0
; FAST_NOAVX-NEXT: setp %al
; FAST_NOAVX-NEXT: andb $1, %al
@@ -1232,7 +1232,7 @@ define zeroext i1 @fcmp_une2(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_une2:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomiss %xmm0, %xmm0
; FAST_AVX-NEXT: setp %al
; FAST_AVX-NEXT: andb $1, %al
@@ -1244,7 +1244,7 @@ define zeroext i1 @fcmp_une2(float %x) {
define zeroext i1 @fcmp_une3(float %x) {
; SDAG-LABEL: fcmp_une3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorps %xmm1, %xmm1
; SDAG-NEXT: cmpneqss %xmm0, %xmm1
; SDAG-NEXT: movd %xmm1, %eax
@@ -1253,7 +1253,7 @@ define zeroext i1 @fcmp_une3(float %x) {
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une3:
-; FAST_NOAVX: ## BB#0:
+; FAST_NOAVX: ## %bb.0:
; FAST_NOAVX-NEXT: xorps %xmm1, %xmm1
; FAST_NOAVX-NEXT: ucomiss %xmm1, %xmm0
; FAST_NOAVX-NEXT: setne %al
@@ -1264,7 +1264,7 @@ define zeroext i1 @fcmp_une3(float %x) {
; FAST_NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: fcmp_une3:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FAST_AVX-NEXT: vucomiss %xmm1, %xmm0
; FAST_AVX-NEXT: setne %al
@@ -1279,12 +1279,12 @@ define zeroext i1 @fcmp_une3(float %x) {
define zeroext i1 @icmp_eq2(i32 %x) {
; SDAG-LABEL: icmp_eq2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_eq2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1295,12 +1295,12 @@ define zeroext i1 @icmp_eq2(i32 %x) {
define zeroext i1 @icmp_ne2(i32 %x) {
; SDAG-LABEL: icmp_ne2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %eax, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_ne2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %eax, %eax
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1311,12 +1311,12 @@ define zeroext i1 @icmp_ne2(i32 %x) {
define zeroext i1 @icmp_ugt2(i32 %x) {
; SDAG-LABEL: icmp_ugt2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %eax, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_ugt2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %eax, %eax
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1327,12 +1327,12 @@ define zeroext i1 @icmp_ugt2(i32 %x) {
define zeroext i1 @icmp_uge2(i32 %x) {
; SDAG-LABEL: icmp_uge2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_uge2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1343,12 +1343,12 @@ define zeroext i1 @icmp_uge2(i32 %x) {
define zeroext i1 @icmp_ult2(i32 %x) {
; SDAG-LABEL: icmp_ult2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %eax, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_ult2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %eax, %eax
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1359,12 +1359,12 @@ define zeroext i1 @icmp_ult2(i32 %x) {
define zeroext i1 @icmp_ule2(i32 %x) {
; SDAG-LABEL: icmp_ule2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_ule2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1375,12 +1375,12 @@ define zeroext i1 @icmp_ule2(i32 %x) {
define zeroext i1 @icmp_sgt2(i32 %x) {
; SDAG-LABEL: icmp_sgt2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %eax, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_sgt2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %eax, %eax
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1391,12 +1391,12 @@ define zeroext i1 @icmp_sgt2(i32 %x) {
define zeroext i1 @icmp_sge2(i32 %x) {
; SDAG-LABEL: icmp_sge2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_sge2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1407,12 +1407,12 @@ define zeroext i1 @icmp_sge2(i32 %x) {
define zeroext i1 @icmp_slt2(i32 %x) {
; SDAG-LABEL: icmp_slt2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %eax, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_slt2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %eax, %eax
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1423,12 +1423,12 @@ define zeroext i1 @icmp_slt2(i32 %x) {
define zeroext i1 @icmp_sle2(i32 %x) {
; SDAG-LABEL: icmp_sle2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
;
; FAST-LABEL: icmp_sle2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
diff --git a/test/CodeGen/X86/fast-isel-constpool.ll b/test/CodeGen/X86/fast-isel-constpool.ll
index 374a5e3907c..4b8f387571e 100644
--- a/test/CodeGen/X86/fast-isel-constpool.ll
+++ b/test/CodeGen/X86/fast-isel-constpool.ll
@@ -9,25 +9,25 @@
; Make sure fast isel uses rip-relative addressing for the small code model.
define float @constpool_float(float %x) {
; CHECK-LABEL: constpool_float:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: addss %xmm1, %xmm0
; CHECK-NEXT: retq
;
; LARGE-LABEL: constpool_float:
-; LARGE: ## BB#0:
+; LARGE: ## %bb.0:
; LARGE-NEXT: movabsq $LCPI0_0, %rax
; LARGE-NEXT: addss (%rax), %xmm0
; LARGE-NEXT: retq
;
; AVX-LABEL: constpool_float:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; LARGE_AVX-LABEL: constpool_float:
-; LARGE_AVX: ## BB#0:
+; LARGE_AVX: ## %bb.0:
; LARGE_AVX-NEXT: movabsq $LCPI0_0, %rax
; LARGE_AVX-NEXT: vaddss (%rax), %xmm0, %xmm0
; LARGE_AVX-NEXT: retq
@@ -38,25 +38,25 @@ define float @constpool_float(float %x) {
define double @constpool_double(double %x) nounwind {
; CHECK-LABEL: constpool_double:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: retq
;
; LARGE-LABEL: constpool_double:
-; LARGE: ## BB#0:
+; LARGE: ## %bb.0:
; LARGE-NEXT: movabsq $LCPI1_0, %rax
; LARGE-NEXT: addsd (%rax), %xmm0
; LARGE-NEXT: retq
;
; AVX-LABEL: constpool_double:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; LARGE_AVX-LABEL: constpool_double:
-; LARGE_AVX: ## BB#0:
+; LARGE_AVX: ## %bb.0:
; LARGE_AVX-NEXT: movabsq $LCPI1_0, %rax
; LARGE_AVX-NEXT: vaddsd (%rax), %xmm0, %xmm0
; LARGE_AVX-NEXT: retq
diff --git a/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll b/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
index 4a3337554b6..af4a9da9c2a 100644
--- a/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
+++ b/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
@@ -24,12 +24,12 @@
define double @single_to_double_rr(float %x) {
; SSE-LABEL: single_to_double_rr:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtss2sd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: single_to_double_rr:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -39,12 +39,12 @@ entry:
define float @double_to_single_rr(double %x) {
; SSE-LABEL: double_to_single_rr:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: double_to_single_rr:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -54,13 +54,13 @@ entry:
define double @single_to_double_rm(float* %x) {
; SSE-LABEL: single_to_double_rm:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: cvtss2sd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: single_to_double_rm:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -72,12 +72,12 @@ entry:
define double @single_to_double_rm_optsize(float* %x) optsize {
; SSE-LABEL: single_to_double_rm_optsize:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtss2sd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: single_to_double_rm_optsize:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -89,13 +89,13 @@ entry:
define float @double_to_single_rm(double* %x) {
; SSE-LABEL: double_to_single_rm:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: double_to_single_rm:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -107,12 +107,12 @@ entry:
define float @double_to_single_rm_optsize(double* %x) optsize {
; SSE-LABEL: double_to_single_rm_optsize:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtsd2ss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: double_to_single_rm_optsize:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll b/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll
index 50eddab2b45..509a5cfe931 100644
--- a/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll
+++ b/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll
@@ -5,12 +5,12 @@
define double @long_to_double_rr(i64 %a) {
; SSE2-LABEL: long_to_double_rr:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2sdq %rdi, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_double_rr:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -20,13 +20,13 @@ entry:
define double @long_to_double_rm(i64* %a) {
; SSE2-LABEL: long_to_double_rm:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq (%rdi), %rax
; SSE2-NEXT: cvtsi2sdq %rax, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_double_rm:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sdq (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -37,12 +37,12 @@ entry:
define double @long_to_double_rm_optsize(i64* %a) optsize {
; SSE2-LABEL: long_to_double_rm_optsize:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_double_rm_optsize:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sdq (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -53,12 +53,12 @@ entry:
define float @long_to_float_rr(i64 %a) {
; SSE2-LABEL: long_to_float_rr:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2ssq %rdi, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_float_rr:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -68,13 +68,13 @@ entry:
define float @long_to_float_rm(i64* %a) {
; SSE2-LABEL: long_to_float_rm:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq (%rdi), %rax
; SSE2-NEXT: cvtsi2ssq %rax, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_float_rm:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ssq (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -85,12 +85,12 @@ entry:
define float @long_to_float_rm_optsize(i64* %a) optsize {
; SSE2-LABEL: long_to_float_rm_optsize:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_float_rm_optsize:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ssq (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/fast-isel-int-float-conversion.ll b/test/CodeGen/X86/fast-isel-int-float-conversion.ll
index 3e69710868b..4465d3463cc 100644
--- a/test/CodeGen/X86/fast-isel-int-float-conversion.ll
+++ b/test/CodeGen/X86/fast-isel-int-float-conversion.ll
@@ -7,17 +7,17 @@
define double @int_to_double_rr(i32 %a) {
; SSE2-LABEL: int_to_double_rr:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2sdl %edi, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: int_to_double_rr:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0
; AVX-NEXT: retq
;
; SSE2_X86-LABEL: int_to_double_rr:
-; SSE2_X86: # BB#0: # %entry
+; SSE2_X86: # %bb.0: # %entry
; SSE2_X86-NEXT: pushl %ebp
; SSE2_X86-NEXT: .cfi_def_cfa_offset 8
; SSE2_X86-NEXT: .cfi_offset %ebp, -8
@@ -34,7 +34,7 @@ define double @int_to_double_rr(i32 %a) {
; SSE2_X86-NEXT: retl
;
; AVX_X86-LABEL: int_to_double_rr:
-; AVX_X86: # BB#0: # %entry
+; AVX_X86: # %bb.0: # %entry
; AVX_X86-NEXT: pushl %ebp
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
; AVX_X86-NEXT: .cfi_offset %ebp, -8
@@ -55,18 +55,18 @@ entry:
define double @int_to_double_rm(i32* %a) {
; SSE2-LABEL: int_to_double_rm:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movl (%rdi), %eax
; SSE2-NEXT: cvtsi2sdl %eax, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: int_to_double_rm:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sdl (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; SSE2_X86-LABEL: int_to_double_rm:
-; SSE2_X86: # BB#0: # %entry
+; SSE2_X86: # %bb.0: # %entry
; SSE2_X86-NEXT: pushl %ebp
; SSE2_X86-NEXT: .cfi_def_cfa_offset 8
; SSE2_X86-NEXT: .cfi_offset %ebp, -8
@@ -83,7 +83,7 @@ define double @int_to_double_rm(i32* %a) {
; SSE2_X86-NEXT: retl
;
; AVX_X86-LABEL: int_to_double_rm:
-; AVX_X86: # BB#0: # %entry
+; AVX_X86: # %bb.0: # %entry
; AVX_X86-NEXT: pushl %ebp
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
; AVX_X86-NEXT: .cfi_offset %ebp, -8
@@ -106,17 +106,17 @@ entry:
define double @int_to_double_rm_optsize(i32* %a) optsize {
; SSE2-LABEL: int_to_double_rm_optsize:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2sdl (%rdi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: int_to_double_rm_optsize:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sdl (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; SSE2_X86-LABEL: int_to_double_rm_optsize:
-; SSE2_X86: # BB#0: # %entry
+; SSE2_X86: # %bb.0: # %entry
; SSE2_X86-NEXT: pushl %ebp
; SSE2_X86-NEXT: .cfi_def_cfa_offset 8
; SSE2_X86-NEXT: .cfi_offset %ebp, -8
@@ -133,7 +133,7 @@ define double @int_to_double_rm_optsize(i32* %a) optsize {
; SSE2_X86-NEXT: retl
;
; AVX_X86-LABEL: int_to_double_rm_optsize:
-; AVX_X86: # BB#0: # %entry
+; AVX_X86: # %bb.0: # %entry
; AVX_X86-NEXT: pushl %ebp
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
; AVX_X86-NEXT: .cfi_offset %ebp, -8
@@ -156,17 +156,17 @@ entry:
define float @int_to_float_rr(i32 %a) {
; SSE2-LABEL: int_to_float_rr:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2ssl %edi, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: int_to_float_rr:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0
; AVX-NEXT: retq
;
; SSE2_X86-LABEL: int_to_float_rr:
-; SSE2_X86: # BB#0: # %entry
+; SSE2_X86: # %bb.0: # %entry
; SSE2_X86-NEXT: pushl %eax
; SSE2_X86-NEXT: .cfi_def_cfa_offset 8
; SSE2_X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -177,7 +177,7 @@ define float @int_to_float_rr(i32 %a) {
; SSE2_X86-NEXT: retl
;
; AVX_X86-LABEL: int_to_float_rr:
-; AVX_X86: # BB#0: # %entry
+; AVX_X86: # %bb.0: # %entry
; AVX_X86-NEXT: pushl %eax
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
; AVX_X86-NEXT: vcvtsi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -192,18 +192,18 @@ entry:
define float @int_to_float_rm(i32* %a) {
; SSE2-LABEL: int_to_float_rm:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movl (%rdi), %eax
; SSE2-NEXT: cvtsi2ssl %eax, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: int_to_float_rm:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ssl (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; SSE2_X86-LABEL: int_to_float_rm:
-; SSE2_X86: # BB#0: # %entry
+; SSE2_X86: # %bb.0: # %entry
; SSE2_X86-NEXT: pushl %eax
; SSE2_X86-NEXT: .cfi_def_cfa_offset 8
; SSE2_X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -214,7 +214,7 @@ define float @int_to_float_rm(i32* %a) {
; SSE2_X86-NEXT: retl
;
; AVX_X86-LABEL: int_to_float_rm:
-; AVX_X86: # BB#0: # %entry
+; AVX_X86: # %bb.0: # %entry
; AVX_X86-NEXT: pushl %eax
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
; AVX_X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -231,17 +231,17 @@ entry:
define float @int_to_float_rm_optsize(i32* %a) optsize {
; SSE2-LABEL: int_to_float_rm_optsize:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2ssl (%rdi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: int_to_float_rm_optsize:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ssl (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; SSE2_X86-LABEL: int_to_float_rm_optsize:
-; SSE2_X86: # BB#0: # %entry
+; SSE2_X86: # %bb.0: # %entry
; SSE2_X86-NEXT: pushl %eax
; SSE2_X86-NEXT: .cfi_def_cfa_offset 8
; SSE2_X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -252,7 +252,7 @@ define float @int_to_float_rm_optsize(i32* %a) optsize {
; SSE2_X86-NEXT: retl
;
; AVX_X86-LABEL: int_to_float_rm_optsize:
-; AVX_X86: # BB#0: # %entry
+; AVX_X86: # %bb.0: # %entry
; AVX_X86-NEXT: pushl %eax
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
; AVX_X86-NEXT: movl {{[0-9]+}}(%esp), %eax
diff --git a/test/CodeGen/X86/fast-isel-load-i1.ll b/test/CodeGen/X86/fast-isel-load-i1.ll
index f515d38cbb9..814c8649ca9 100644
--- a/test/CodeGen/X86/fast-isel-load-i1.ll
+++ b/test/CodeGen/X86/fast-isel-load-i1.ll
@@ -3,10 +3,10 @@
define i1 @test_i1(i1* %b) {
; CHECK-LABEL: test_i1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $1, (%rdi)
; CHECK-NEXT: je .LBB0_2
-; CHECK-NEXT: # BB#1: # %in
+; CHECK-NEXT: # %bb.1: # %in
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB0_2: # %out
diff --git a/test/CodeGen/X86/fast-isel-nontemporal.ll b/test/CodeGen/X86/fast-isel-nontemporal.ll
index b9fbc7a743d..79e96308a29 100644
--- a/test/CodeGen/X86/fast-isel-nontemporal.ll
+++ b/test/CodeGen/X86/fast-isel-nontemporal.ll
@@ -14,7 +14,7 @@
define void @test_nti32(i32* nocapture %ptr, i32 %X) {
; ALL-LABEL: test_nti32:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movntil %esi, (%rdi)
; ALL-NEXT: retq
entry:
@@ -24,7 +24,7 @@ entry:
define void @test_nti64(i64* nocapture %ptr, i64 %X) {
; ALL-LABEL: test_nti64:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movntiq %rsi, (%rdi)
; ALL-NEXT: retq
entry:
@@ -34,27 +34,27 @@ entry:
define void @test_ntfloat(float* nocapture %ptr, float %X) {
; SSE2-LABEL: test_ntfloat:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_ntfloat:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movntss %xmm0, (%rdi)
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_ntfloat:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movss %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX-LABEL: test_ntfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_ntfloat:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovss %xmm0, (%rdi)
; AVX512-NEXT: retq
entry:
@@ -64,27 +64,27 @@ entry:
define void @test_ntdouble(double* nocapture %ptr, double %X) {
; SSE2-LABEL: test_ntdouble:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_ntdouble:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movntsd %xmm0, (%rdi)
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_ntdouble:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movsd %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX-LABEL: test_ntdouble:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_ntdouble:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovsd %xmm0, (%rdi)
; AVX512-NEXT: retq
entry:
@@ -98,7 +98,7 @@ entry:
define void @test_mmx(x86_mmx* nocapture %a0, x86_mmx* nocapture %a1) {
; ALL-LABEL: test_mmx:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movq (%rdi), %mm0
; ALL-NEXT: psrlq $3, %mm0
; ALL-NEXT: movntq %mm0, (%rsi)
@@ -117,17 +117,17 @@ declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32) nounwind readnone
define void @test_nt4xfloat(<4 x float>* nocapture %ptr, <4 x float> %X) {
; SSE-LABEL: test_nt4xfloat:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt4xfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt4xfloat:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntps %xmm0, (%rdi)
; AVX512-NEXT: retq
entry:
@@ -137,17 +137,17 @@ entry:
define void @test_nt2xdouble(<2 x double>* nocapture %ptr, <2 x double> %X) {
; SSE-LABEL: test_nt2xdouble:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntpd %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt2xdouble:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntpd %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt2xdouble:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntpd %xmm0, (%rdi)
; AVX512-NEXT: retq
entry:
@@ -157,17 +157,17 @@ entry:
define void @test_nt16xi8(<16 x i8>* nocapture %ptr, <16 x i8> %X) {
; SSE-LABEL: test_nt16xi8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt16xi8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt16xi8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %xmm0, (%rdi)
; AVX512-NEXT: retq
entry:
@@ -177,17 +177,17 @@ entry:
define void @test_nt8xi16(<8 x i16>* nocapture %ptr, <8 x i16> %X) {
; SSE-LABEL: test_nt8xi16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt8xi16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt8xi16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %xmm0, (%rdi)
; AVX512-NEXT: retq
entry:
@@ -197,17 +197,17 @@ entry:
define void @test_nt4xi32(<4 x i32>* nocapture %ptr, <4 x i32> %X) {
; SSE-LABEL: test_nt4xi32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt4xi32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt4xi32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %xmm0, (%rdi)
; AVX512-NEXT: retq
entry:
@@ -217,17 +217,17 @@ entry:
define void @test_nt2xi64(<2 x i64>* nocapture %ptr, <2 x i64> %X) {
; SSE-LABEL: test_nt2xi64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt2xi64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt2xi64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %xmm0, (%rdi)
; AVX512-NEXT: retq
entry:
@@ -241,27 +241,27 @@ entry:
define <4 x float> @test_load_nt4xfloat(<4 x float>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt4xfloat:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt4xfloat:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt4xfloat:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_load_nt4xfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_load_nt4xfloat:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -271,27 +271,27 @@ entry:
define <2 x double> @test_load_nt2xdouble(<2 x double>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt2xdouble:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movapd (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt2xdouble:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movapd (%rdi), %xmm0
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt2xdouble:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_load_nt2xdouble:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_load_nt2xdouble:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -301,17 +301,17 @@ entry:
define <16 x i8> @test_load_nt16xi8(<16 x i8>* nocapture %ptr) {
; SSE-LABEL: test_load_nt16xi8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_load_nt16xi8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_load_nt16xi8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -321,17 +321,17 @@ entry:
define <8 x i16> @test_load_nt8xi16(<8 x i16>* nocapture %ptr) {
; SSE-LABEL: test_load_nt8xi16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_load_nt8xi16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_load_nt8xi16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -341,17 +341,17 @@ entry:
define <4 x i32> @test_load_nt4xi32(<4 x i32>* nocapture %ptr) {
; SSE-LABEL: test_load_nt4xi32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_load_nt4xi32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_load_nt4xi32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -361,17 +361,17 @@ entry:
define <2 x i64> @test_load_nt2xi64(<2 x i64>* nocapture %ptr) {
; SSE-LABEL: test_load_nt2xi64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_load_nt2xi64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_load_nt2xi64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -385,19 +385,19 @@ entry:
define void @test_nt8xfloat(<8 x float>* nocapture %ptr, <8 x float> %X) {
; SSE-LABEL: test_nt8xfloat:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: movntps %xmm1, 16(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt8xfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt8xfloat:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntps %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -408,19 +408,19 @@ entry:
define void @test_nt4xdouble(<4 x double>* nocapture %ptr, <4 x double> %X) {
; SSE-LABEL: test_nt4xdouble:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntpd %xmm0, (%rdi)
; SSE-NEXT: movntpd %xmm1, 16(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt4xdouble:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntpd %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt4xdouble:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntpd %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -431,19 +431,19 @@ entry:
define void @test_nt32xi8(<32 x i8>* nocapture %ptr, <32 x i8> %X) {
; SSE-LABEL: test_nt32xi8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt32xi8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt32xi8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -454,19 +454,19 @@ entry:
define void @test_nt16xi16(<16 x i16>* nocapture %ptr, <16 x i16> %X) {
; SSE-LABEL: test_nt16xi16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt16xi16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt16xi16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -477,19 +477,19 @@ entry:
define void @test_nt8xi32(<8 x i32>* nocapture %ptr, <8 x i32> %X) {
; SSE-LABEL: test_nt8xi32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt8xi32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt8xi32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -500,19 +500,19 @@ entry:
define void @test_nt4xi64(<4 x i64>* nocapture %ptr, <4 x i64> %X) {
; SSE-LABEL: test_nt4xi64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt4xi64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt4xi64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -527,25 +527,25 @@ entry:
define <8 x float> @test_load_nt8xfloat(<8 x float>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt8xfloat:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt8xfloat:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt8xfloat:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt8xfloat:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -554,12 +554,12 @@ define <8 x float> @test_load_nt8xfloat(<8 x float>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt8xfloat:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt8xfloat:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -569,25 +569,25 @@ entry:
define <4 x double> @test_load_nt4xdouble(<4 x double>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt4xdouble:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movapd (%rdi), %xmm0
; SSE2-NEXT: movapd 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt4xdouble:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movapd (%rdi), %xmm0
; SSE4A-NEXT: movapd 16(%rdi), %xmm1
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt4xdouble:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt4xdouble:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -596,12 +596,12 @@ define <4 x double> @test_load_nt4xdouble(<4 x double>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt4xdouble:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt4xdouble:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -611,25 +611,25 @@ entry:
define <32 x i8> @test_load_nt32xi8(<32 x i8>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt32xi8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt32xi8:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt32xi8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt32xi8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -638,12 +638,12 @@ define <32 x i8> @test_load_nt32xi8(<32 x i8>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt32xi8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt32xi8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -653,25 +653,25 @@ entry:
define <16 x i16> @test_load_nt16xi16(<16 x i16>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt16xi16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt16xi16:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt16xi16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt16xi16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -680,12 +680,12 @@ define <16 x i16> @test_load_nt16xi16(<16 x i16>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt16xi16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt16xi16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -695,25 +695,25 @@ entry:
define <8 x i32> @test_load_nt8xi32(<8 x i32>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt8xi32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt8xi32:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt8xi32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt8xi32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -722,12 +722,12 @@ define <8 x i32> @test_load_nt8xi32(<8 x i32>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt8xi32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt8xi32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -737,25 +737,25 @@ entry:
define <4 x i64> @test_load_nt4xi64(<4 x i64>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt4xi64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt4xi64:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt4xi64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt4xi64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -764,12 +764,12 @@ define <4 x i64> @test_load_nt4xi64(<4 x i64>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt4xi64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt4xi64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -783,7 +783,7 @@ entry:
define void @test_nt16xfloat(<16 x float>* nocapture %ptr, <16 x float> %X) {
; SSE-LABEL: test_nt16xfloat:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: movntps %xmm1, 16(%rdi)
; SSE-NEXT: movntps %xmm2, 32(%rdi)
@@ -791,14 +791,14 @@ define void @test_nt16xfloat(<16 x float>* nocapture %ptr, <16 x float> %X) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt16xfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vmovntps %ymm1, 32(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt16xfloat:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntps %zmm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -809,7 +809,7 @@ entry:
define void @test_nt8xdouble(<8 x double>* nocapture %ptr, <8 x double> %X) {
; SSE-LABEL: test_nt8xdouble:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntpd %xmm0, (%rdi)
; SSE-NEXT: movntpd %xmm1, 16(%rdi)
; SSE-NEXT: movntpd %xmm2, 32(%rdi)
@@ -817,14 +817,14 @@ define void @test_nt8xdouble(<8 x double>* nocapture %ptr, <8 x double> %X) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt8xdouble:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntpd %ymm0, (%rdi)
; AVX-NEXT: vmovntpd %ymm1, 32(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt8xdouble:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntpd %zmm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -835,7 +835,7 @@ entry:
define void @test_nt64xi8(<64 x i8>* nocapture %ptr, <64 x i8> %X) {
; SSE-LABEL: test_nt64xi8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
; SSE-NEXT: movntdq %xmm2, 32(%rdi)
@@ -843,28 +843,28 @@ define void @test_nt64xi8(<64 x i8>* nocapture %ptr, <64 x i8> %X) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt64xi8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %ymm0, (%rdi)
; AVX-NEXT: vmovntdq %ymm1, 32(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512VL-LABEL: test_nt64xi8:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512VL-NEXT: vmovntdq %ymm1, 32(%rdi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512F-LABEL: test_nt64xi8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512F-NEXT: vmovntdq %ymm1, 32(%rdi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_nt64xi8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vmovntdq %zmm0, (%rdi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -875,7 +875,7 @@ entry:
define void @test_nt32xi16(<32 x i16>* nocapture %ptr, <32 x i16> %X) {
; SSE-LABEL: test_nt32xi16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
; SSE-NEXT: movntdq %xmm2, 32(%rdi)
@@ -883,28 +883,28 @@ define void @test_nt32xi16(<32 x i16>* nocapture %ptr, <32 x i16> %X) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt32xi16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %ymm0, (%rdi)
; AVX-NEXT: vmovntdq %ymm1, 32(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512VL-LABEL: test_nt32xi16:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512VL-NEXT: vmovntdq %ymm1, 32(%rdi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512F-LABEL: test_nt32xi16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512F-NEXT: vmovntdq %ymm1, 32(%rdi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_nt32xi16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vmovntdq %zmm0, (%rdi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -915,7 +915,7 @@ entry:
define void @test_nt16xi32(<16 x i32>* nocapture %ptr, <16 x i32> %X) {
; SSE-LABEL: test_nt16xi32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
; SSE-NEXT: movntdq %xmm2, 32(%rdi)
@@ -923,14 +923,14 @@ define void @test_nt16xi32(<16 x i32>* nocapture %ptr, <16 x i32> %X) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt16xi32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %ymm0, (%rdi)
; AVX-NEXT: vmovntdq %ymm1, 32(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt16xi32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %zmm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -941,7 +941,7 @@ entry:
define void @test_nt8xi64(<8 x i64>* nocapture %ptr, <8 x i64> %X) {
; SSE-LABEL: test_nt8xi64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
; SSE-NEXT: movntdq %xmm2, 32(%rdi)
@@ -949,14 +949,14 @@ define void @test_nt8xi64(<8 x i64>* nocapture %ptr, <8 x i64> %X) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_nt8xi64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovntdq %ymm0, (%rdi)
; AVX-NEXT: vmovntdq %ymm1, 32(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_nt8xi64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdq %zmm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -971,7 +971,7 @@ entry:
define <16 x float> @test_load_nt16xfloat(<16 x float>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt16xfloat:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -979,7 +979,7 @@ define <16 x float> @test_load_nt16xfloat(<16 x float>* nocapture %ptr) {
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt16xfloat:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: movaps 32(%rdi), %xmm2
@@ -987,7 +987,7 @@ define <16 x float> @test_load_nt16xfloat(<16 x float>* nocapture %ptr) {
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt16xfloat:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -995,7 +995,7 @@ define <16 x float> @test_load_nt16xfloat(<16 x float>* nocapture %ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt16xfloat:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -1009,13 +1009,13 @@ define <16 x float> @test_load_nt16xfloat(<16 x float>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt16xfloat:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt16xfloat:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -1025,7 +1025,7 @@ entry:
define <8 x double> @test_load_nt8xdouble(<8 x double>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt8xdouble:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movapd (%rdi), %xmm0
; SSE2-NEXT: movapd 16(%rdi), %xmm1
; SSE2-NEXT: movapd 32(%rdi), %xmm2
@@ -1033,7 +1033,7 @@ define <8 x double> @test_load_nt8xdouble(<8 x double>* nocapture %ptr) {
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt8xdouble:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movapd (%rdi), %xmm0
; SSE4A-NEXT: movapd 16(%rdi), %xmm1
; SSE4A-NEXT: movapd 32(%rdi), %xmm2
@@ -1041,7 +1041,7 @@ define <8 x double> @test_load_nt8xdouble(<8 x double>* nocapture %ptr) {
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt8xdouble:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -1049,7 +1049,7 @@ define <8 x double> @test_load_nt8xdouble(<8 x double>* nocapture %ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt8xdouble:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -1063,13 +1063,13 @@ define <8 x double> @test_load_nt8xdouble(<8 x double>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt8xdouble:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt8xdouble:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -1079,7 +1079,7 @@ entry:
define <64 x i8> @test_load_nt64xi8(<64 x i8>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt64xi8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -1087,7 +1087,7 @@ define <64 x i8> @test_load_nt64xi8(<64 x i8>* nocapture %ptr) {
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt64xi8:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: movaps 32(%rdi), %xmm2
@@ -1095,7 +1095,7 @@ define <64 x i8> @test_load_nt64xi8(<64 x i8>* nocapture %ptr) {
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt64xi8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -1103,7 +1103,7 @@ define <64 x i8> @test_load_nt64xi8(<64 x i8>* nocapture %ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt64xi8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -1117,25 +1117,25 @@ define <64 x i8> @test_load_nt64xi8(<64 x i8>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt64xi8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: test_load_nt64xi8:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
;
; AVX512F-LABEL: test_load_nt64xi8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_load_nt64xi8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -1145,7 +1145,7 @@ entry:
define <32 x i16> @test_load_nt32xi16(<32 x i16>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt32xi16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -1153,7 +1153,7 @@ define <32 x i16> @test_load_nt32xi16(<32 x i16>* nocapture %ptr) {
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt32xi16:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: movaps 32(%rdi), %xmm2
@@ -1161,7 +1161,7 @@ define <32 x i16> @test_load_nt32xi16(<32 x i16>* nocapture %ptr) {
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt32xi16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -1169,7 +1169,7 @@ define <32 x i16> @test_load_nt32xi16(<32 x i16>* nocapture %ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt32xi16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -1183,25 +1183,25 @@ define <32 x i16> @test_load_nt32xi16(<32 x i16>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt32xi16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: test_load_nt32xi16:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
;
; AVX512F-LABEL: test_load_nt32xi16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_load_nt32xi16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -1211,7 +1211,7 @@ entry:
define <16 x i32> @test_load_nt16xi32(<16 x i32>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt16xi32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -1219,7 +1219,7 @@ define <16 x i32> @test_load_nt16xi32(<16 x i32>* nocapture %ptr) {
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt16xi32:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: movaps 32(%rdi), %xmm2
@@ -1227,7 +1227,7 @@ define <16 x i32> @test_load_nt16xi32(<16 x i32>* nocapture %ptr) {
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt16xi32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -1235,7 +1235,7 @@ define <16 x i32> @test_load_nt16xi32(<16 x i32>* nocapture %ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt16xi32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -1249,13 +1249,13 @@ define <16 x i32> @test_load_nt16xi32(<16 x i32>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt16xi32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt16xi32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -1265,7 +1265,7 @@ entry:
define <8 x i64> @test_load_nt8xi64(<8 x i64>* nocapture %ptr) {
; SSE2-LABEL: test_load_nt8xi64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -1273,7 +1273,7 @@ define <8 x i64> @test_load_nt8xi64(<8 x i64>* nocapture %ptr) {
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_load_nt8xi64:
-; SSE4A: # BB#0: # %entry
+; SSE4A: # %bb.0: # %entry
; SSE4A-NEXT: movaps (%rdi), %xmm0
; SSE4A-NEXT: movaps 16(%rdi), %xmm1
; SSE4A-NEXT: movaps 32(%rdi), %xmm2
@@ -1281,7 +1281,7 @@ define <8 x i64> @test_load_nt8xi64(<8 x i64>* nocapture %ptr) {
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_load_nt8xi64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -1289,7 +1289,7 @@ define <8 x i64> @test_load_nt8xi64(<8 x i64>* nocapture %ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_load_nt8xi64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
@@ -1303,13 +1303,13 @@ define <8 x i64> @test_load_nt8xi64(<8 x i64>* nocapture %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_load_nt8xi64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_load_nt8xi64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/fast-isel-select-cmov.ll b/test/CodeGen/X86/fast-isel-select-cmov.ll
index e40e917e11e..3e9b99f4c53 100644
--- a/test/CodeGen/X86/fast-isel-select-cmov.ll
+++ b/test/CodeGen/X86/fast-isel-select-cmov.ll
@@ -7,7 +7,7 @@
define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: select_cmov_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: cmovew %dx, %si
; CHECK-NEXT: movzwl %si, %eax
@@ -18,7 +18,7 @@ define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroex
define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: select_cmp_cmov_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpw %si, %di
; CHECK-NEXT: cmovbw %di, %si
; CHECK-NEXT: movzwl %si, %eax
@@ -30,7 +30,7 @@ define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
; CHECK-LABEL: select_cmov_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: cmovel %edx, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -41,7 +41,7 @@ define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
; CHECK-LABEL: select_cmp_cmov_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: cmovbl %edi, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -53,7 +53,7 @@ define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
; CHECK-LABEL: select_cmov_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: cmoveq %rdx, %rsi
; CHECK-NEXT: movq %rsi, %rax
@@ -64,7 +64,7 @@ define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) {
; CHECK-LABEL: select_cmp_cmov_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovbq %rdi, %rsi
; CHECK-NEXT: movq %rsi, %rax
diff --git a/test/CodeGen/X86/fast-isel-select-cmov2.ll b/test/CodeGen/X86/fast-isel-select-cmov2.ll
index eb4994f3c49..3dd4d2b3433 100644
--- a/test/CodeGen/X86/fast-isel-select-cmov2.ll
+++ b/test/CodeGen/X86/fast-isel-select-cmov2.ll
@@ -8,7 +8,7 @@
define i64 @select_fcmp_false_cmov(double %a, double %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_fcmp_false_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rax
; CHECK-NEXT: retq
%1 = fcmp false double %a, %b
@@ -18,7 +18,7 @@ define i64 @select_fcmp_false_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
; SDAG-LABEL: select_fcmp_oeq_cmov:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomisd %xmm1, %xmm0
; SDAG-NEXT: cmovneq %rsi, %rdi
; SDAG-NEXT: cmovpq %rsi, %rdi
@@ -26,7 +26,7 @@ define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
; SDAG-NEXT: retq
;
; FAST-LABEL: select_fcmp_oeq_cmov:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: ucomisd %xmm1, %xmm0
; FAST-NEXT: setnp %al
; FAST-NEXT: sete %cl
@@ -36,7 +36,7 @@ define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
; FAST-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_oeq_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: setnp %al
; FAST_AVX-NEXT: sete %cl
@@ -51,14 +51,14 @@ define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_ogt_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_ogt_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm1, %xmm0
; NOAVX-NEXT: cmovbeq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_ogt_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: cmovbeq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -70,14 +70,14 @@ define i64 @select_fcmp_ogt_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_oge_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_oge_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm1, %xmm0
; NOAVX-NEXT: cmovbq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_oge_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: cmovbq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -89,14 +89,14 @@ define i64 @select_fcmp_oge_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_olt_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_olt_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm0, %xmm1
; NOAVX-NEXT: cmovbeq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_olt_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm0, %xmm1
; FAST_AVX-NEXT: cmovbeq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -108,14 +108,14 @@ define i64 @select_fcmp_olt_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_ole_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_ole_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm0, %xmm1
; NOAVX-NEXT: cmovbq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_ole_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm0, %xmm1
; FAST_AVX-NEXT: cmovbq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -127,14 +127,14 @@ define i64 @select_fcmp_ole_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_one_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_one_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm1, %xmm0
; NOAVX-NEXT: cmoveq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_one_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: cmoveq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -146,14 +146,14 @@ define i64 @select_fcmp_one_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_ord_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_ord_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm1, %xmm0
; NOAVX-NEXT: cmovpq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_ord_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: cmovpq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -165,14 +165,14 @@ define i64 @select_fcmp_ord_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_uno_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_uno_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm1, %xmm0
; NOAVX-NEXT: cmovnpq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_uno_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: cmovnpq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -184,14 +184,14 @@ define i64 @select_fcmp_uno_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_ueq_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_ueq_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm1, %xmm0
; NOAVX-NEXT: cmovneq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_ueq_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: cmovneq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -203,14 +203,14 @@ define i64 @select_fcmp_ueq_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_ugt_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_ugt_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm0, %xmm1
; NOAVX-NEXT: cmovaeq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_ugt_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm0, %xmm1
; FAST_AVX-NEXT: cmovaeq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -222,14 +222,14 @@ define i64 @select_fcmp_ugt_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_uge_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_uge_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm0, %xmm1
; NOAVX-NEXT: cmovaq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_uge_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm0, %xmm1
; FAST_AVX-NEXT: cmovaq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -241,14 +241,14 @@ define i64 @select_fcmp_uge_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_ult_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_ult_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm1, %xmm0
; NOAVX-NEXT: cmovaeq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_ult_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: cmovaeq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -260,14 +260,14 @@ define i64 @select_fcmp_ult_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_ule_cmov(double %a, double %b, i64 %c, i64 %d) {
; NOAVX-LABEL: select_fcmp_ule_cmov:
-; NOAVX: ## BB#0:
+; NOAVX: ## %bb.0:
; NOAVX-NEXT: ucomisd %xmm1, %xmm0
; NOAVX-NEXT: cmovaq %rsi, %rdi
; NOAVX-NEXT: movq %rdi, %rax
; NOAVX-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_ule_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: cmovaq %rsi, %rdi
; FAST_AVX-NEXT: movq %rdi, %rax
@@ -279,7 +279,7 @@ define i64 @select_fcmp_ule_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
; SDAG-LABEL: select_fcmp_une_cmov:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: ucomisd %xmm1, %xmm0
; SDAG-NEXT: cmovneq %rdi, %rsi
; SDAG-NEXT: cmovpq %rdi, %rsi
@@ -287,7 +287,7 @@ define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
; SDAG-NEXT: retq
;
; FAST-LABEL: select_fcmp_une_cmov:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: ucomisd %xmm1, %xmm0
; FAST-NEXT: setp %al
; FAST-NEXT: setne %cl
@@ -297,7 +297,7 @@ define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
; FAST-NEXT: retq
;
; FAST_AVX-LABEL: select_fcmp_une_cmov:
-; FAST_AVX: ## BB#0:
+; FAST_AVX: ## %bb.0:
; FAST_AVX-NEXT: vucomisd %xmm1, %xmm0
; FAST_AVX-NEXT: setp %al
; FAST_AVX-NEXT: setne %cl
@@ -312,7 +312,7 @@ define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_fcmp_true_cmov(double %a, double %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_fcmp_true_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
%1 = fcmp true double %a, %b
@@ -322,7 +322,7 @@ define i64 @select_fcmp_true_cmov(double %a, double %b, i64 %c, i64 %d) {
define i64 @select_icmp_eq_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_eq_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovneq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -334,7 +334,7 @@ define i64 @select_icmp_eq_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_ne_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_ne_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmoveq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -346,7 +346,7 @@ define i64 @select_icmp_ne_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_ugt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_ugt_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovbeq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -359,7 +359,7 @@ define i64 @select_icmp_ugt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_uge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_uge_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovbq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -371,7 +371,7 @@ define i64 @select_icmp_uge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_ult_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_ult_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovaeq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -383,7 +383,7 @@ define i64 @select_icmp_ult_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_ule_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_ule_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovaq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -395,7 +395,7 @@ define i64 @select_icmp_ule_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_sgt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_sgt_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovleq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -407,7 +407,7 @@ define i64 @select_icmp_sgt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_sge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_sge_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovlq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -419,7 +419,7 @@ define i64 @select_icmp_sge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_slt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_slt_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovgeq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
@@ -431,7 +431,7 @@ define i64 @select_icmp_slt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
define i64 @select_icmp_sle_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: select_icmp_sle_cmov:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: cmovgq %rcx, %rdx
; CHECK-NEXT: movq %rdx, %rax
diff --git a/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll b/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
index 8724b66c911..3ab040758fa 100644
--- a/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
+++ b/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
@@ -7,17 +7,17 @@
define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_one_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: ucomiss %xmm1, %xmm0
; SSE-NEXT: jne LBB0_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm3, %xmm2
; SSE-NEXT: LBB0_2:
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_one_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vcmpneq_oqss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
@@ -28,17 +28,17 @@ define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_one_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_one_f64:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: ucomisd %xmm1, %xmm0
; SSE-NEXT: jne LBB1_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm3, %xmm2
; SSE-NEXT: LBB1_2:
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_one_f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vcmpneq_oqsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
@@ -49,19 +49,19 @@ define double @select_fcmp_one_f64(double %a, double %b, double %c, double %d) {
define float @select_icmp_eq_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_eq_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: je LBB2_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB2_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_eq_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: je LBB2_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB2_2:
; AVX-NEXT: retq
@@ -72,19 +72,19 @@ define float @select_icmp_eq_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_ne_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_ne_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: jne LBB3_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB3_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_ne_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: jne LBB3_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB3_2:
; AVX-NEXT: retq
@@ -95,19 +95,19 @@ define float @select_icmp_ne_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_ugt_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_ugt_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: ja LBB4_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB4_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_ugt_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: ja LBB4_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB4_2:
; AVX-NEXT: retq
@@ -118,19 +118,19 @@ define float @select_icmp_ugt_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_uge_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_uge_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: jae LBB5_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB5_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_uge_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: jae LBB5_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB5_2:
; AVX-NEXT: retq
@@ -141,19 +141,19 @@ define float @select_icmp_uge_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_ult_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_ult_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: jb LBB6_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB6_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_ult_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: jb LBB6_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB6_2:
; AVX-NEXT: retq
@@ -164,19 +164,19 @@ define float @select_icmp_ult_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_ule_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_ule_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: jbe LBB7_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB7_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_ule_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: jbe LBB7_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB7_2:
; AVX-NEXT: retq
@@ -187,19 +187,19 @@ define float @select_icmp_ule_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_sgt_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_sgt_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: jg LBB8_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB8_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_sgt_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: jg LBB8_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB8_2:
; AVX-NEXT: retq
@@ -210,19 +210,19 @@ define float @select_icmp_sgt_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_sge_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_sge_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: jge LBB9_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB9_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_sge_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: jge LBB9_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB9_2:
; AVX-NEXT: retq
@@ -233,19 +233,19 @@ define float @select_icmp_sge_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_slt_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_slt_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: jl LBB10_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB10_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_slt_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: jl LBB10_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB10_2:
; AVX-NEXT: retq
@@ -256,19 +256,19 @@ define float @select_icmp_slt_f32(i64 %a, i64 %b, float %c, float %d) {
define float @select_icmp_sle_f32(i64 %a, i64 %b, float %c, float %d) {
; SSE-LABEL: select_icmp_sle_f32:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: jle LBB11_2
-; SSE-NEXT: ## BB#1:
+; SSE-NEXT: ## %bb.1:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: LBB11_2:
; SSE-NEXT: retq
;
; AVX-LABEL: select_icmp_sle_f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: cmpq %rsi, %rdi
; AVX-NEXT: jle LBB11_2
-; AVX-NEXT: ## BB#1:
+; AVX-NEXT: ## %bb.1:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: LBB11_2:
; AVX-NEXT: retq
@@ -279,10 +279,10 @@ define float @select_icmp_sle_f32(i64 %a, i64 %b, float %c, float %d) {
define i8 @select_icmp_sle_i8(i64 %a, i64 %b, i8 %c, i8 %d) {
; CHECK-LABEL: select_icmp_sle_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: jle LBB12_2
-; CHECK-NEXT: ## BB#1:
+; CHECK-NEXT: ## %bb.1:
; CHECK-NEXT: movl %ecx, %edx
; CHECK-NEXT: LBB12_2:
; CHECK-NEXT: movl %edx, %eax
diff --git a/test/CodeGen/X86/fast-isel-select-sse.ll b/test/CodeGen/X86/fast-isel-select-sse.ll
index 1b6bb36b77c..e91b925a38e 100644
--- a/test/CodeGen/X86/fast-isel-select-sse.ll
+++ b/test/CodeGen/X86/fast-isel-select-sse.ll
@@ -10,7 +10,7 @@
define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_oeq_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpeqss %xmm1, %xmm0
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm0
@@ -18,13 +18,13 @@ define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_oeq_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_oeq_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -36,7 +36,7 @@ define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_oeq_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpeqsd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm0
@@ -44,13 +44,13 @@ define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_oeq_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_oeq_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -62,7 +62,7 @@ define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_ogt_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltss %xmm0, %xmm1
; SSE-NEXT: andps %xmm1, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm1
@@ -71,13 +71,13 @@ define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ogt_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ogt_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltss %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -89,7 +89,7 @@ define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ogt_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltsd %xmm0, %xmm1
; SSE-NEXT: andpd %xmm1, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm1
@@ -98,13 +98,13 @@ define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ogt_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ogt_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltsd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -116,7 +116,7 @@ define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_oge_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpless %xmm0, %xmm1
; SSE-NEXT: andps %xmm1, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm1
@@ -125,13 +125,13 @@ define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_oge_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpless %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_oge_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpless %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -143,7 +143,7 @@ define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_oge_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmplesd %xmm0, %xmm1
; SSE-NEXT: andpd %xmm1, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm1
@@ -152,13 +152,13 @@ define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_oge_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_oge_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmplesd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -170,7 +170,7 @@ define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_olt_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltss %xmm1, %xmm0
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm0
@@ -178,13 +178,13 @@ define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_olt_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_olt_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -196,7 +196,7 @@ define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_olt_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltsd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm0
@@ -204,13 +204,13 @@ define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_olt_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_olt_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -222,7 +222,7 @@ define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_ole_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpless %xmm1, %xmm0
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm0
@@ -230,13 +230,13 @@ define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ole_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpless %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ole_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpless %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -248,7 +248,7 @@ define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ole_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmplesd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm0
@@ -256,13 +256,13 @@ define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ole_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmplesd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ole_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmplesd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -274,7 +274,7 @@ define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_ord_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpordss %xmm1, %xmm0
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm0
@@ -282,13 +282,13 @@ define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ord_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpordss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ord_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpordss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -300,7 +300,7 @@ define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ord_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpordsd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm0
@@ -308,13 +308,13 @@ define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ord_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpordsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ord_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpordsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -326,7 +326,7 @@ define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_uno_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpunordss %xmm1, %xmm0
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm0
@@ -334,13 +334,13 @@ define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_uno_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpunordss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_uno_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpunordss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -352,7 +352,7 @@ define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_uno_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpunordsd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm0
@@ -360,13 +360,13 @@ define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_uno_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpunordsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_uno_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -378,7 +378,7 @@ define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_ugt_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpnless %xmm1, %xmm0
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm0
@@ -386,13 +386,13 @@ define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ugt_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpnless %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ugt_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnless %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -404,7 +404,7 @@ define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ugt_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpnlesd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm0
@@ -412,13 +412,13 @@ define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ugt_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpnlesd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ugt_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnlesd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -430,7 +430,7 @@ define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_uge_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpnltss %xmm1, %xmm0
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm0
@@ -438,13 +438,13 @@ define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_uge_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpnltss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_uge_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnltss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -456,7 +456,7 @@ define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_uge_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpnltsd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm0
@@ -464,13 +464,13 @@ define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_uge_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpnltsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_uge_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnltsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -482,7 +482,7 @@ define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_ult_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpnless %xmm0, %xmm1
; SSE-NEXT: andps %xmm1, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm1
@@ -491,13 +491,13 @@ define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ult_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpnless %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ult_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnless %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -509,7 +509,7 @@ define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ult_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpnlesd %xmm0, %xmm1
; SSE-NEXT: andpd %xmm1, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm1
@@ -518,13 +518,13 @@ define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ult_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpnlesd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ult_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnlesd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -536,7 +536,7 @@ define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_ule_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpnltss %xmm0, %xmm1
; SSE-NEXT: andps %xmm1, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm1
@@ -545,13 +545,13 @@ define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ule_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpnltss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ule_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnltss %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -563,7 +563,7 @@ define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ule_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpnltsd %xmm0, %xmm1
; SSE-NEXT: andpd %xmm1, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm1
@@ -572,13 +572,13 @@ define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ule_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpnltsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ule_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnltsd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
@@ -590,7 +590,7 @@ define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
; SSE-LABEL: select_fcmp_une_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpneqss %xmm1, %xmm0
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: andnps %xmm3, %xmm0
@@ -598,13 +598,13 @@ define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_une_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpneqss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_une_f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpneqss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
@@ -616,7 +616,7 @@ define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_une_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpneqsd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: andnpd %xmm3, %xmm0
@@ -624,13 +624,13 @@ define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_une_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpneqsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_une_f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpneqsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
diff --git a/test/CodeGen/X86/fast-isel-sext-zext.ll b/test/CodeGen/X86/fast-isel-sext-zext.ll
index e467faea774..a99a46dc40f 100644
--- a/test/CodeGen/X86/fast-isel-sext-zext.ll
+++ b/test/CodeGen/X86/fast-isel-sext-zext.ll
@@ -4,7 +4,7 @@
define i8 @test1(i8 %x) nounwind {
; X32-LABEL: test1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
@@ -12,7 +12,7 @@ define i8 @test1(i8 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: negb %dil
; X64-NEXT: movl %edi, %eax
@@ -25,7 +25,7 @@ define i8 @test1(i8 %x) nounwind {
define i16 @test2(i16 %x) nounwind {
; X32-LABEL: test2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
@@ -35,7 +35,7 @@ define i16 @test2(i16 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: negb %dil
; X64-NEXT: movsbl %dil, %eax
@@ -49,7 +49,7 @@ define i16 @test2(i16 %x) nounwind {
define i32 @test3(i32 %x) nounwind {
; X32-LABEL: test3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
@@ -58,7 +58,7 @@ define i32 @test3(i32 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: negb %dil
; X64-NEXT: movsbl %dil, %eax
@@ -71,7 +71,7 @@ define i32 @test3(i32 %x) nounwind {
define i32 @test4(i32 %x) nounwind {
; X32-LABEL: test4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
@@ -80,7 +80,7 @@ define i32 @test4(i32 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: negb %dil
; X64-NEXT: movsbl %dil, %eax
@@ -93,14 +93,14 @@ define i32 @test4(i32 %x) nounwind {
define i8 @test5(i8 %x) nounwind {
; X32-LABEL: test5:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $1, %al
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test5:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -112,7 +112,7 @@ define i8 @test5(i8 %x) nounwind {
define i16 @test6(i16 %x) nounwind {
; X32-LABEL: test6:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: movzbl %al, %eax
@@ -121,7 +121,7 @@ define i16 @test6(i16 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test6:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -134,7 +134,7 @@ define i16 @test6(i16 %x) nounwind {
define i32 @test7(i32 %x) nounwind {
; X32-LABEL: test7:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: movzbl %al, %eax
@@ -142,7 +142,7 @@ define i32 @test7(i32 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test7:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: retq
@@ -154,7 +154,7 @@ define i32 @test7(i32 %x) nounwind {
define i32 @test8(i32 %x) nounwind {
; X32-LABEL: test8:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: movzbl %al, %eax
@@ -162,7 +162,7 @@ define i32 @test8(i32 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test8:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: retq
@@ -174,14 +174,14 @@ define i32 @test8(i32 %x) nounwind {
define i16 @test9(i8 %x) nounwind {
; X32-LABEL: test9:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test9:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movsbl %dil, %eax
; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
@@ -192,13 +192,13 @@ define i16 @test9(i8 %x) nounwind {
define i32 @test10(i8 %x) nounwind {
; X32-LABEL: test10:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test10:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movsbl %dil, %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -208,7 +208,7 @@ define i32 @test10(i8 %x) nounwind {
define i64 @test11(i8 %x) nounwind {
; X32-LABEL: test11:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %edx
; X32-NEXT: sarl $31, %edx
@@ -216,7 +216,7 @@ define i64 @test11(i8 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test11:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movsbq %dil, %rax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -226,14 +226,14 @@ define i64 @test11(i8 %x) nounwind {
define i16 @test12(i8 %x) nounwind {
; X32-LABEL: test12:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test12:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
@@ -244,13 +244,13 @@ define i16 @test12(i8 %x) nounwind {
define i32 @test13(i8 %x) nounwind {
; X32-LABEL: test13:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test13:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -260,14 +260,14 @@ define i32 @test13(i8 %x) nounwind {
define i64 @test14(i8 %x) nounwind {
; X32-LABEL: test14:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test14:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -277,13 +277,13 @@ define i64 @test14(i8 %x) nounwind {
define i32 @test15(i16 %x) nounwind {
; X32-LABEL: test15:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movswl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test15:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movswl %di, %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -293,7 +293,7 @@ define i32 @test15(i16 %x) nounwind {
define i64 @test16(i16 %x) nounwind {
; X32-LABEL: test16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movswl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %edx
; X32-NEXT: sarl $31, %edx
@@ -301,7 +301,7 @@ define i64 @test16(i16 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movswq %di, %rax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -311,13 +311,13 @@ define i64 @test16(i16 %x) nounwind {
define i32 @test17(i16 %x) nounwind {
; X32-LABEL: test17:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test17:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movzwl %di, %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -327,14 +327,14 @@ define i32 @test17(i16 %x) nounwind {
define i64 @test18(i16 %x) nounwind {
; X32-LABEL: test18:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test18:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movzwl %di, %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -344,7 +344,7 @@ define i64 @test18(i16 %x) nounwind {
define i64 @test19(i32 %x) nounwind {
; X32-LABEL: test19:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %edx
; X32-NEXT: sarl $31, %edx
@@ -352,7 +352,7 @@ define i64 @test19(i32 %x) nounwind {
; X32-NEXT: ## -- End function
;
; X64-LABEL: test19:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movslq %edi, %rax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
@@ -362,14 +362,14 @@ define i64 @test19(i32 %x) nounwind {
define i64 @test20(i32 %x) nounwind {
; X32-LABEL: test20:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test20:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
diff --git a/test/CodeGen/X86/fast-isel-shift.ll b/test/CodeGen/X86/fast-isel-shift.ll
index 2205976f3cb..ff6858c2b79 100644
--- a/test/CodeGen/X86/fast-isel-shift.ll
+++ b/test/CodeGen/X86/fast-isel-shift.ll
@@ -3,7 +3,7 @@
define i8 @shl_i8(i8 %a, i8 %b) {
; CHECK-LABEL: shl_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: shlb %cl, %dil
; CHECK-NEXT: movl %edi, %eax
@@ -14,7 +14,7 @@ define i8 @shl_i8(i8 %a, i8 %b) {
define i16 @shl_i16(i16 %a, i16 %b) {
; CHECK-LABEL: shl_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
; CHECK-NEXT: shlw %cl, %di
@@ -26,7 +26,7 @@ define i16 @shl_i16(i16 %a, i16 %b) {
define i32 @shl_i32(i32 %a, i32 %b) {
; CHECK-LABEL: shl_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
; CHECK-NEXT: shll %cl, %edi
@@ -38,7 +38,7 @@ define i32 @shl_i32(i32 %a, i32 %b) {
define i64 @shl_i64(i64 %a, i64 %b) {
; CHECK-LABEL: shl_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
; CHECK-NEXT: shlq %cl, %rdi
@@ -50,7 +50,7 @@ define i64 @shl_i64(i64 %a, i64 %b) {
define i8 @lshr_i8(i8 %a, i8 %b) {
; CHECK-LABEL: lshr_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: shrb %cl, %dil
; CHECK-NEXT: movl %edi, %eax
@@ -61,7 +61,7 @@ define i8 @lshr_i8(i8 %a, i8 %b) {
define i16 @lshr_i16(i16 %a, i16 %b) {
; CHECK-LABEL: lshr_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
; CHECK-NEXT: shrw %cl, %di
@@ -73,7 +73,7 @@ define i16 @lshr_i16(i16 %a, i16 %b) {
define i32 @lshr_i32(i32 %a, i32 %b) {
; CHECK-LABEL: lshr_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
; CHECK-NEXT: shrl %cl, %edi
@@ -85,7 +85,7 @@ define i32 @lshr_i32(i32 %a, i32 %b) {
define i64 @lshr_i64(i64 %a, i64 %b) {
; CHECK-LABEL: lshr_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
; CHECK-NEXT: shrq %cl, %rdi
@@ -97,7 +97,7 @@ define i64 @lshr_i64(i64 %a, i64 %b) {
define i8 @ashr_i8(i8 %a, i8 %b) {
; CHECK-LABEL: ashr_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: sarb %cl, %dil
; CHECK-NEXT: movl %edi, %eax
@@ -108,7 +108,7 @@ define i8 @ashr_i8(i8 %a, i8 %b) {
define i16 @ashr_i16(i16 %a, i16 %b) {
; CHECK-LABEL: ashr_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
; CHECK-NEXT: sarw %cl, %di
@@ -120,7 +120,7 @@ define i16 @ashr_i16(i16 %a, i16 %b) {
define i32 @ashr_i32(i32 %a, i32 %b) {
; CHECK-LABEL: ashr_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
; CHECK-NEXT: sarl %cl, %edi
@@ -132,7 +132,7 @@ define i32 @ashr_i32(i32 %a, i32 %b) {
define i64 @ashr_i64(i64 %a, i64 %b) {
; CHECK-LABEL: ashr_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
; CHECK-NEXT: sarq %cl, %rdi
@@ -144,7 +144,7 @@ define i64 @ashr_i64(i64 %a, i64 %b) {
define i8 @shl_imm1_i8(i8 %a) {
; CHECK-LABEL: shl_imm1_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shlb $1, %dil
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -154,7 +154,7 @@ define i8 @shl_imm1_i8(i8 %a) {
define i16 @shl_imm1_i16(i16 %a) {
; CHECK-LABEL: shl_imm1_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal (,%rdi,2), %eax
; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -165,7 +165,7 @@ define i16 @shl_imm1_i16(i16 %a) {
define i32 @shl_imm1_i32(i32 %a) {
; CHECK-LABEL: shl_imm1_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal (,%rdi,2), %eax
; CHECK-NEXT: retq
@@ -175,7 +175,7 @@ define i32 @shl_imm1_i32(i32 %a) {
define i64 @shl_imm1_i64(i64 %a) {
; CHECK-LABEL: shl_imm1_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: leaq (,%rdi,2), %rax
; CHECK-NEXT: retq
%c = shl i64 %a, 1
@@ -184,7 +184,7 @@ define i64 @shl_imm1_i64(i64 %a) {
define i8 @lshr_imm1_i8(i8 %a) {
; CHECK-LABEL: lshr_imm1_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shrb $1, %dil
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -194,7 +194,7 @@ define i8 @lshr_imm1_i8(i8 %a) {
define i16 @lshr_imm1_i16(i16 %a) {
; CHECK-LABEL: lshr_imm1_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shrw $1, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -204,7 +204,7 @@ define i16 @lshr_imm1_i16(i16 %a) {
define i32 @lshr_imm1_i32(i32 %a) {
; CHECK-LABEL: lshr_imm1_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shrl $1, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -214,7 +214,7 @@ define i32 @lshr_imm1_i32(i32 %a) {
define i64 @lshr_imm1_i64(i64 %a) {
; CHECK-LABEL: lshr_imm1_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shrq $1, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -224,7 +224,7 @@ define i64 @lshr_imm1_i64(i64 %a) {
define i8 @ashr_imm1_i8(i8 %a) {
; CHECK-LABEL: ashr_imm1_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: sarb $1, %dil
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -234,7 +234,7 @@ define i8 @ashr_imm1_i8(i8 %a) {
define i16 @ashr_imm1_i16(i16 %a) {
; CHECK-LABEL: ashr_imm1_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: sarw $1, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -244,7 +244,7 @@ define i16 @ashr_imm1_i16(i16 %a) {
define i32 @ashr_imm1_i32(i32 %a) {
; CHECK-LABEL: ashr_imm1_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: sarl $1, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -254,7 +254,7 @@ define i32 @ashr_imm1_i32(i32 %a) {
define i64 @ashr_imm1_i64(i64 %a) {
; CHECK-LABEL: ashr_imm1_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: sarq $1, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -264,7 +264,7 @@ define i64 @ashr_imm1_i64(i64 %a) {
define i8 @shl_imm4_i8(i8 %a) {
; CHECK-LABEL: shl_imm4_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shlb $4, %dil
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -274,7 +274,7 @@ define i8 @shl_imm4_i8(i8 %a) {
define i16 @shl_imm4_i16(i16 %a) {
; CHECK-LABEL: shl_imm4_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shlw $4, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -284,7 +284,7 @@ define i16 @shl_imm4_i16(i16 %a) {
define i32 @shl_imm4_i32(i32 %a) {
; CHECK-LABEL: shl_imm4_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shll $4, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -294,7 +294,7 @@ define i32 @shl_imm4_i32(i32 %a) {
define i64 @shl_imm4_i64(i64 %a) {
; CHECK-LABEL: shl_imm4_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shlq $4, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -304,7 +304,7 @@ define i64 @shl_imm4_i64(i64 %a) {
define i8 @lshr_imm4_i8(i8 %a) {
; CHECK-LABEL: lshr_imm4_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shrb $4, %dil
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -314,7 +314,7 @@ define i8 @lshr_imm4_i8(i8 %a) {
define i16 @lshr_imm4_i16(i16 %a) {
; CHECK-LABEL: lshr_imm4_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shrw $4, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -324,7 +324,7 @@ define i16 @lshr_imm4_i16(i16 %a) {
define i32 @lshr_imm4_i32(i32 %a) {
; CHECK-LABEL: lshr_imm4_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shrl $4, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -334,7 +334,7 @@ define i32 @lshr_imm4_i32(i32 %a) {
define i64 @lshr_imm4_i64(i64 %a) {
; CHECK-LABEL: lshr_imm4_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: shrq $4, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -344,7 +344,7 @@ define i64 @lshr_imm4_i64(i64 %a) {
define i8 @ashr_imm4_i8(i8 %a) {
; CHECK-LABEL: ashr_imm4_i8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: sarb $4, %dil
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -354,7 +354,7 @@ define i8 @ashr_imm4_i8(i8 %a) {
define i16 @ashr_imm4_i16(i16 %a) {
; CHECK-LABEL: ashr_imm4_i16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: sarw $4, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -364,7 +364,7 @@ define i16 @ashr_imm4_i16(i16 %a) {
define i32 @ashr_imm4_i32(i32 %a) {
; CHECK-LABEL: ashr_imm4_i32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: sarl $4, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -374,7 +374,7 @@ define i32 @ashr_imm4_i32(i32 %a) {
define i64 @ashr_imm4_i64(i64 %a) {
; CHECK-LABEL: ashr_imm4_i64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: sarq $4, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/fast-isel-store.ll b/test/CodeGen/X86/fast-isel-store.ll
index e359e620563..6468186d4ca 100644
--- a/test/CodeGen/X86/fast-isel-store.ll
+++ b/test/CodeGen/X86/fast-isel-store.ll
@@ -10,13 +10,13 @@
define i32 @test_store_32(i32* nocapture %addr, i32 %value) {
; ALL32-LABEL: test_store_32:
-; ALL32: # BB#0: # %entry
+; ALL32: # %bb.0: # %entry
; ALL32-NEXT: movl %esi, (%rdi)
; ALL32-NEXT: movl %esi, %eax
; ALL32-NEXT: retq
;
; ALL64-LABEL: test_store_32:
-; ALL64: # BB#0: # %entry
+; ALL64: # %bb.0: # %entry
; ALL64-NEXT: movl {{[0-9]+}}(%esp), %eax
; ALL64-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ALL64-NEXT: movl %eax, (%ecx)
@@ -28,13 +28,13 @@ entry:
define i16 @test_store_16(i16* nocapture %addr, i16 %value) {
; ALL32-LABEL: test_store_16:
-; ALL32: # BB#0: # %entry
+; ALL32: # %bb.0: # %entry
; ALL32-NEXT: movw %si, (%rdi)
; ALL32-NEXT: movl %esi, %eax
; ALL32-NEXT: retq
;
; ALL64-LABEL: test_store_16:
-; ALL64: # BB#0: # %entry
+; ALL64: # %bb.0: # %entry
; ALL64-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; ALL64-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ALL64-NEXT: movw %ax, (%ecx)
@@ -46,39 +46,39 @@ entry:
define <4 x i32> @test_store_4xi32(<4 x i32>* nocapture %addr, <4 x i32> %value, <4 x i32> %value2) {
; SSE32-LABEL: test_store_4xi32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: paddd %xmm1, %xmm0
; SSE32-NEXT: movdqu %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xi32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: paddd %xmm1, %xmm0
; SSE64-NEXT: movdqu %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_4xi32:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXONLY32-NEXT: vmovdqu %xmm0, (%rdi)
; AVXONLY32-NEXT: retq
;
; AVX64-LABEL: test_store_4xi32:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX64-NEXT: vmovdqu %xmm0, (%eax)
; AVX64-NEXT: retl
;
; KNL32-LABEL: test_store_4xi32:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; KNL32-NEXT: vmovdqu %xmm0, (%rdi)
; KNL32-NEXT: retq
;
; SKX32-LABEL: test_store_4xi32:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; SKX32-NEXT: vmovdqu %xmm0, (%rdi)
; SKX32-NEXT: retq
@@ -89,39 +89,39 @@ define <4 x i32> @test_store_4xi32(<4 x i32>* nocapture %addr, <4 x i32> %value,
define <4 x i32> @test_store_4xi32_aligned(<4 x i32>* nocapture %addr, <4 x i32> %value, <4 x i32> %value2) {
; SSE32-LABEL: test_store_4xi32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: paddd %xmm1, %xmm0
; SSE32-NEXT: movdqa %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xi32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: paddd %xmm1, %xmm0
; SSE64-NEXT: movdqa %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_4xi32_aligned:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXONLY32-NEXT: vmovdqa %xmm0, (%rdi)
; AVXONLY32-NEXT: retq
;
; AVX64-LABEL: test_store_4xi32_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX64-NEXT: vmovdqa %xmm0, (%eax)
; AVX64-NEXT: retl
;
; KNL32-LABEL: test_store_4xi32_aligned:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; KNL32-NEXT: vmovdqa %xmm0, (%rdi)
; KNL32-NEXT: retq
;
; SKX32-LABEL: test_store_4xi32_aligned:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; SKX32-NEXT: vmovdqa %xmm0, (%rdi)
; SKX32-NEXT: retq
@@ -132,23 +132,23 @@ define <4 x i32> @test_store_4xi32_aligned(<4 x i32>* nocapture %addr, <4 x i32>
define <4 x float> @test_store_4xf32(<4 x float>* nocapture %addr, <4 x float> %value) {
; SSE32-LABEL: test_store_4xf32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xf32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movups %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf32:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovups %xmm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_4xf32:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovups %xmm0, (%eax)
; AVX64-NEXT: retl
@@ -158,23 +158,23 @@ define <4 x float> @test_store_4xf32(<4 x float>* nocapture %addr, <4 x float> %
define <4 x float> @test_store_4xf32_aligned(<4 x float>* nocapture %addr, <4 x float> %value) {
; SSE32-LABEL: test_store_4xf32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xf32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movaps %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf32_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovaps %xmm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_4xf32_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovaps %xmm0, (%eax)
; AVX64-NEXT: retl
@@ -184,26 +184,26 @@ define <4 x float> @test_store_4xf32_aligned(<4 x float>* nocapture %addr, <4 x
define <2 x double> @test_store_2xf64(<2 x double>* nocapture %addr, <2 x double> %value, <2 x double> %value2) {
; SSE32-LABEL: test_store_2xf64:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm1, %xmm0
; SSE32-NEXT: movupd %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_2xf64:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: addpd %xmm1, %xmm0
; SSE64-NEXT: movupd %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_2xf64:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX32-NEXT: vmovupd %xmm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_2xf64:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX64-NEXT: vmovupd %xmm0, (%eax)
@@ -215,26 +215,26 @@ define <2 x double> @test_store_2xf64(<2 x double>* nocapture %addr, <2 x double
define <2 x double> @test_store_2xf64_aligned(<2 x double>* nocapture %addr, <2 x double> %value, <2 x double> %value2) {
; SSE32-LABEL: test_store_2xf64_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm1, %xmm0
; SSE32-NEXT: movapd %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_2xf64_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: addpd %xmm1, %xmm0
; SSE64-NEXT: movapd %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_2xf64_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX32-NEXT: vmovapd %xmm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_2xf64_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX64-NEXT: vmovapd %xmm0, (%eax)
@@ -246,25 +246,25 @@ define <2 x double> @test_store_2xf64_aligned(<2 x double>* nocapture %addr, <2
define <8 x i32> @test_store_8xi32(<8 x i32>* nocapture %addr, <8 x i32> %value) {
; SSE32-LABEL: test_store_8xi32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: movups %xmm1, 16(%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xi32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movups %xmm0, (%eax)
; SSE64-NEXT: movups %xmm1, 16(%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_8xi32:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovups %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_8xi32:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovups %ymm0, (%eax)
; AVX64-NEXT: retl
@@ -274,25 +274,25 @@ define <8 x i32> @test_store_8xi32(<8 x i32>* nocapture %addr, <8 x i32> %value)
define <8 x i32> @test_store_8xi32_aligned(<8 x i32>* nocapture %addr, <8 x i32> %value) {
; SSE32-LABEL: test_store_8xi32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: movaps %xmm1, 16(%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xi32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movaps %xmm0, (%eax)
; SSE64-NEXT: movaps %xmm1, 16(%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_8xi32_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovaps %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_8xi32_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovaps %ymm0, (%eax)
; AVX64-NEXT: retl
@@ -302,25 +302,25 @@ define <8 x i32> @test_store_8xi32_aligned(<8 x i32>* nocapture %addr, <8 x i32>
define <8 x float> @test_store_8xf32(<8 x float>* nocapture %addr, <8 x float> %value) {
; SSE32-LABEL: test_store_8xf32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: movups %xmm1, 16(%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xf32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movups %xmm0, (%eax)
; SSE64-NEXT: movups %xmm1, 16(%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_8xf32:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovups %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_8xf32:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovups %ymm0, (%eax)
; AVX64-NEXT: retl
@@ -330,25 +330,25 @@ define <8 x float> @test_store_8xf32(<8 x float>* nocapture %addr, <8 x float> %
define <8 x float> @test_store_8xf32_aligned(<8 x float>* nocapture %addr, <8 x float> %value) {
; SSE32-LABEL: test_store_8xf32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: movaps %xmm1, 16(%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xf32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movaps %xmm0, (%eax)
; SSE64-NEXT: movaps %xmm1, 16(%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_8xf32_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovaps %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_8xf32_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovaps %ymm0, (%eax)
; AVX64-NEXT: retl
@@ -358,7 +358,7 @@ define <8 x float> @test_store_8xf32_aligned(<8 x float>* nocapture %addr, <8 x
define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double> %value, <4 x double> %value2) {
; SSE32-LABEL: test_store_4xf64:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm3, %xmm1
; SSE32-NEXT: addpd %xmm2, %xmm0
; SSE32-NEXT: movupd %xmm0, (%rdi)
@@ -366,7 +366,7 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xf64:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -378,13 +378,13 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf64:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX32-NEXT: vmovupd %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_4xf64:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX64-NEXT: vmovupd %ymm0, (%eax)
@@ -396,7 +396,7 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double
define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4 x double> %value, <4 x double> %value2) {
; SSE32-LABEL: test_store_4xf64_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm3, %xmm1
; SSE32-NEXT: addpd %xmm2, %xmm0
; SSE32-NEXT: movapd %xmm0, (%rdi)
@@ -404,7 +404,7 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xf64_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -416,13 +416,13 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf64_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX32-NEXT: vmovapd %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_4xf64_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX64-NEXT: vmovapd %ymm0, (%eax)
@@ -434,7 +434,7 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %value) {
; SSE32-LABEL: test_store_16xi32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: movups %xmm1, 16(%rdi)
; SSE32-NEXT: movups %xmm2, 32(%rdi)
@@ -442,7 +442,7 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_16xi32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
@@ -455,25 +455,25 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xi32:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vmovups %ymm0, (%rdi)
; AVXONLY32-NEXT: vmovups %ymm1, 32(%rdi)
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_16xi32:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVXONLY64-NEXT: vmovups %ymm0, (%eax)
; AVXONLY64-NEXT: vmovups %ymm1, 32(%eax)
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_16xi32:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vmovups %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_16xi32:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vmovups %zmm0, (%eax)
; AVX51264-NEXT: retl
@@ -483,7 +483,7 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x i32> %value) {
; SSE32-LABEL: test_store_16xi32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: movaps %xmm1, 16(%rdi)
; SSE32-NEXT: movaps %xmm2, 32(%rdi)
@@ -491,7 +491,7 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_16xi32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
@@ -504,25 +504,25 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xi32_aligned:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vmovaps %ymm0, (%rdi)
; AVXONLY32-NEXT: vmovaps %ymm1, 32(%rdi)
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_16xi32_aligned:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVXONLY64-NEXT: vmovaps %ymm0, (%eax)
; AVXONLY64-NEXT: vmovaps %ymm1, 32(%eax)
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_16xi32_aligned:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vmovaps %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_16xi32_aligned:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vmovaps %zmm0, (%eax)
; AVX51264-NEXT: retl
@@ -532,7 +532,7 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x float> %value) {
; SSE32-LABEL: test_store_16xf32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: movups %xmm1, 16(%rdi)
; SSE32-NEXT: movups %xmm2, 32(%rdi)
@@ -540,7 +540,7 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_16xf32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
@@ -553,25 +553,25 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xf32:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vmovups %ymm0, (%rdi)
; AVXONLY32-NEXT: vmovups %ymm1, 32(%rdi)
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_16xf32:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVXONLY64-NEXT: vmovups %ymm0, (%eax)
; AVXONLY64-NEXT: vmovups %ymm1, 32(%eax)
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_16xf32:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vmovups %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_16xf32:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vmovups %zmm0, (%eax)
; AVX51264-NEXT: retl
@@ -581,7 +581,7 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <16 x float> %value) {
; SSE32-LABEL: test_store_16xf32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: movaps %xmm1, 16(%rdi)
; SSE32-NEXT: movaps %xmm2, 32(%rdi)
@@ -589,7 +589,7 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_16xf32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
@@ -602,25 +602,25 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xf32_aligned:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vmovaps %ymm0, (%rdi)
; AVXONLY32-NEXT: vmovaps %ymm1, 32(%rdi)
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_16xf32_aligned:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVXONLY64-NEXT: vmovaps %ymm0, (%eax)
; AVXONLY64-NEXT: vmovaps %ymm1, 32(%eax)
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_16xf32_aligned:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vmovaps %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_16xf32_aligned:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vmovaps %zmm0, (%eax)
; AVX51264-NEXT: retl
@@ -630,7 +630,7 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double> %value, <8 x double> %value2) {
; SSE32-LABEL: test_store_8xf64:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm7, %xmm3
; SSE32-NEXT: addpd %xmm6, %xmm2
; SSE32-NEXT: addpd %xmm5, %xmm1
@@ -642,7 +642,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xf64:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3
@@ -659,7 +659,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_8xf64:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVXONLY32-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVXONLY32-NEXT: vmovupd %ymm0, (%rdi)
@@ -667,7 +667,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_8xf64:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: pushl %ebp
; AVXONLY64-NEXT: .cfi_def_cfa_offset 8
; AVXONLY64-NEXT: .cfi_offset %ebp, -8
@@ -685,13 +685,13 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_8xf64:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX51232-NEXT: vmovupd %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_8xf64:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX51264-NEXT: vmovupd %zmm0, (%eax)
@@ -703,7 +703,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 x double> %value, <8 x double> %value2) {
; SSE32-LABEL: test_store_8xf64_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm7, %xmm3
; SSE32-NEXT: addpd %xmm6, %xmm2
; SSE32-NEXT: addpd %xmm5, %xmm1
@@ -715,7 +715,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xf64_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3
@@ -732,7 +732,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_8xf64_aligned:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVXONLY32-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVXONLY32-NEXT: vmovapd %ymm0, (%rdi)
@@ -740,7 +740,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_8xf64_aligned:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: pushl %ebp
; AVXONLY64-NEXT: .cfi_def_cfa_offset 8
; AVXONLY64-NEXT: .cfi_offset %ebp, -8
@@ -758,13 +758,13 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_8xf64_aligned:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX51232-NEXT: vmovapd %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_8xf64_aligned:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX51264-NEXT: vmovapd %zmm0, (%eax)
diff --git a/test/CodeGen/X86/fast-isel-vecload.ll b/test/CodeGen/X86/fast-isel-vecload.ll
index caa31dfc33c..31730493fb5 100644
--- a/test/CodeGen/X86/fast-isel-vecload.ll
+++ b/test/CodeGen/X86/fast-isel-vecload.ll
@@ -9,22 +9,22 @@
define <16 x i8> @test_v16i8(<16 x i8>* %V) {
; SSE-LABEL: test_v16i8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v16i8:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v16i8:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i8:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -34,22 +34,22 @@ entry:
define <8 x i16> @test_v8i16(<8 x i16>* %V) {
; SSE-LABEL: test_v8i16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8i16:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v8i16:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i16:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -59,22 +59,22 @@ entry:
define <4 x i32> @test_v4i32(<4 x i32>* %V) {
; SSE-LABEL: test_v4i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v4i32:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v4i32:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v4i32:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -84,22 +84,22 @@ entry:
define <2 x i64> @test_v2i64(<2 x i64>* %V) {
; SSE-LABEL: test_v2i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v2i64:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v2i64:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v2i64:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -109,22 +109,22 @@ entry:
define <16 x i8> @test_v16i8_unaligned(<16 x i8>* %V) {
; SSE-LABEL: test_v16i8_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqu (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v16i8_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqu (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v16i8_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqu (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i8_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -134,22 +134,22 @@ entry:
define <8 x i16> @test_v8i16_unaligned(<8 x i16>* %V) {
; SSE-LABEL: test_v8i16_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqu (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8i16_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqu (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v8i16_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqu (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i16_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -159,22 +159,22 @@ entry:
define <4 x i32> @test_v4i32_unaligned(<4 x i32>* %V) {
; SSE-LABEL: test_v4i32_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqu (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v4i32_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqu (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v4i32_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqu (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v4i32_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -184,22 +184,22 @@ entry:
define <2 x i64> @test_v2i64_unaligned(<2 x i64>* %V) {
; SSE-LABEL: test_v2i64_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqu (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v2i64_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqu (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v2i64_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqu (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v2i64_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -209,12 +209,12 @@ entry:
define <4 x float> @test_v4f32(<4 x float>* %V) {
; SSE-LABEL: test_v4f32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: retq
entry:
@@ -224,12 +224,12 @@ entry:
define <2 x double> @test_v2f64(<2 x double>* %V) {
; SSE-LABEL: test_v2f64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movapd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd (%rdi), %xmm0
; AVX-NEXT: retq
entry:
@@ -239,12 +239,12 @@ entry:
define <4 x float> @test_v4f32_unaligned(<4 x float>* %V) {
; SSE-LABEL: test_v4f32_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f32_unaligned:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
entry:
@@ -254,12 +254,12 @@ entry:
define <2 x double> @test_v2f64_unaligned(<2 x double>* %V) {
; SSE-LABEL: test_v2f64_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movupd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_unaligned:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovupd (%rdi), %xmm0
; AVX-NEXT: retq
entry:
@@ -269,22 +269,22 @@ entry:
define <16 x i8> @test_v16i8_abi_alignment(<16 x i8>* %V) {
; SSE-LABEL: test_v16i8_abi_alignment:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v16i8_abi_alignment:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v16i8_abi_alignment:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i8_abi_alignment:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -294,22 +294,22 @@ entry:
define <8 x i16> @test_v8i16_abi_alignment(<8 x i16>* %V) {
; SSE-LABEL: test_v8i16_abi_alignment:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8i16_abi_alignment:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v8i16_abi_alignment:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i16_abi_alignment:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -319,22 +319,22 @@ entry:
define <4 x i32> @test_v4i32_abi_alignment(<4 x i32>* %V) {
; SSE-LABEL: test_v4i32_abi_alignment:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v4i32_abi_alignment:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v4i32_abi_alignment:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v4i32_abi_alignment:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -344,22 +344,22 @@ entry:
define <2 x i64> @test_v2i64_abi_alignment(<2 x i64>* %V) {
; SSE-LABEL: test_v2i64_abi_alignment:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v2i64_abi_alignment:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v2i64_abi_alignment:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v2i64_abi_alignment:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %xmm0
; SKX-NEXT: retq
entry:
@@ -369,12 +369,12 @@ entry:
define <4 x float> @test_v4f32_abi_alignment(<4 x float>* %V) {
; SSE-LABEL: test_v4f32_abi_alignment:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f32_abi_alignment:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: retq
entry:
@@ -384,12 +384,12 @@ entry:
define <2 x double> @test_v2f64_abi_alignment(<2 x double>* %V) {
; SSE-LABEL: test_v2f64_abi_alignment:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movapd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_abi_alignment:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd (%rdi), %xmm0
; AVX-NEXT: retq
entry:
@@ -399,23 +399,23 @@ entry:
define <32 x i8> @test_v32i8(<32 x i8>* %V) {
; SSE-LABEL: test_v32i8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v32i8:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %ymm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v32i8:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v32i8:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %ymm0
; SKX-NEXT: retq
entry:
@@ -425,23 +425,23 @@ entry:
define <16 x i16> @test_v16i16(<16 x i16>* %V) {
; SSE-LABEL: test_v16i16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v16i16:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %ymm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v16i16:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i16:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %ymm0
; SKX-NEXT: retq
entry:
@@ -451,23 +451,23 @@ entry:
define <8 x i32> @test_v8i32(<8 x i32>* %V) {
; SSE-LABEL: test_v8i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8i32:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %ymm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v8i32:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i32:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %ymm0
; SKX-NEXT: retq
entry:
@@ -477,23 +477,23 @@ entry:
define <4 x i64> @test_v4i64(<4 x i64>* %V) {
; SSE-LABEL: test_v4i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v4i64:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqa (%rdi), %ymm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v4i64:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqa (%rdi), %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v4i64:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %ymm0
; SKX-NEXT: retq
entry:
@@ -503,23 +503,23 @@ entry:
define <32 x i8> @test_v32i8_unaligned(<32 x i8>* %V) {
; SSE-LABEL: test_v32i8_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v32i8_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqu (%rdi), %ymm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v32i8_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqu (%rdi), %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v32i8_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %ymm0
; SKX-NEXT: retq
entry:
@@ -529,23 +529,23 @@ entry:
define <16 x i16> @test_v16i16_unaligned(<16 x i16>* %V) {
; SSE-LABEL: test_v16i16_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v16i16_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqu (%rdi), %ymm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v16i16_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqu (%rdi), %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i16_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %ymm0
; SKX-NEXT: retq
entry:
@@ -555,23 +555,23 @@ entry:
define <8 x i32> @test_v8i32_unaligned(<8 x i32>* %V) {
; SSE-LABEL: test_v8i32_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8i32_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqu (%rdi), %ymm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v8i32_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqu (%rdi), %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i32_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %ymm0
; SKX-NEXT: retq
entry:
@@ -581,23 +581,23 @@ entry:
define <4 x i64> @test_v4i64_unaligned(<4 x i64>* %V) {
; SSE-LABEL: test_v4i64_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v4i64_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovdqu (%rdi), %ymm0
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v4i64_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovdqu (%rdi), %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_v4i64_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %ymm0
; SKX-NEXT: retq
entry:
@@ -607,13 +607,13 @@ entry:
define <8 x float> @test_v8f32(<8 x float>* %V) {
; SSE-LABEL: test_v8f32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: retq
entry:
@@ -623,13 +623,13 @@ entry:
define <4 x double> @test_v4f64(<4 x double>* %V) {
; SSE-LABEL: test_v4f64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movapd (%rdi), %xmm0
; SSE-NEXT: movapd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd (%rdi), %ymm0
; AVX-NEXT: retq
entry:
@@ -639,13 +639,13 @@ entry:
define <8 x float> @test_v8f32_unaligned(<8 x float>* %V) {
; SSE-LABEL: test_v8f32_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_unaligned:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
entry:
@@ -655,13 +655,13 @@ entry:
define <4 x double> @test_v4f64_unaligned(<4 x double>* %V) {
; SSE-LABEL: test_v4f64_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movupd (%rdi), %xmm0
; SSE-NEXT: movupd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_unaligned:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovupd (%rdi), %ymm0
; AVX-NEXT: retq
entry:
@@ -671,7 +671,7 @@ entry:
define <64 x i8> @test_v64i8(<64 x i8>* %V) {
; SSE-LABEL: test_v64i8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: movaps 32(%rdi), %xmm2
@@ -679,19 +679,19 @@ define <64 x i8> @test_v64i8(<64 x i8>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v64i8:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovaps (%rdi), %ymm0
; AVXONLY-NEXT: vmovaps 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v64i8:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovaps (%rdi), %ymm0
; KNL-NEXT: vmovaps 32(%rdi), %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: test_v64i8:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %zmm0
; SKX-NEXT: retq
entry:
@@ -701,7 +701,7 @@ entry:
define <32 x i16> @test_v32i16(<32 x i16>* %V) {
; SSE-LABEL: test_v32i16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: movaps 32(%rdi), %xmm2
@@ -709,19 +709,19 @@ define <32 x i16> @test_v32i16(<32 x i16>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v32i16:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovaps (%rdi), %ymm0
; AVXONLY-NEXT: vmovaps 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v32i16:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovaps (%rdi), %ymm0
; KNL-NEXT: vmovaps 32(%rdi), %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: test_v32i16:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqa64 (%rdi), %zmm0
; SKX-NEXT: retq
entry:
@@ -731,7 +731,7 @@ entry:
define <16 x i32> @test_v16i32(<16 x i32>* %V) {
; SSE-LABEL: test_v16i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: movaps 32(%rdi), %xmm2
@@ -739,13 +739,13 @@ define <16 x i32> @test_v16i32(<16 x i32>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v16i32:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovaps (%rdi), %ymm0
; AVXONLY-NEXT: vmovaps 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; AVX512-LABEL: test_v16i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -755,7 +755,7 @@ entry:
define <8 x i64> @test_v8i64(<8 x i64>* %V) {
; SSE-LABEL: test_v8i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: movaps 32(%rdi), %xmm2
@@ -763,13 +763,13 @@ define <8 x i64> @test_v8i64(<8 x i64>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8i64:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovaps (%rdi), %ymm0
; AVXONLY-NEXT: vmovaps 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; AVX512-LABEL: test_v8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -779,7 +779,7 @@ entry:
define <64 x i8> @test_v64i8_unaligned(<64 x i8>* %V) {
; SSE-LABEL: test_v64i8_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -787,19 +787,19 @@ define <64 x i8> @test_v64i8_unaligned(<64 x i8>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v64i8_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovups (%rdi), %ymm0
; AVXONLY-NEXT: vmovups 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v64i8_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovups (%rdi), %ymm0
; KNL-NEXT: vmovups 32(%rdi), %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: test_v64i8_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %zmm0
; SKX-NEXT: retq
entry:
@@ -809,7 +809,7 @@ entry:
define <32 x i16> @test_v32i16_unaligned(<32 x i16>* %V) {
; SSE-LABEL: test_v32i16_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -817,19 +817,19 @@ define <32 x i16> @test_v32i16_unaligned(<32 x i16>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v32i16_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovups (%rdi), %ymm0
; AVXONLY-NEXT: vmovups 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; KNL-LABEL: test_v32i16_unaligned:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vmovups (%rdi), %ymm0
; KNL-NEXT: vmovups 32(%rdi), %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: test_v32i16_unaligned:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vmovdqu64 (%rdi), %zmm0
; SKX-NEXT: retq
entry:
@@ -839,7 +839,7 @@ entry:
define <16 x i32> @test_v16i32_unaligned(<16 x i32>* %V) {
; SSE-LABEL: test_v16i32_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -847,13 +847,13 @@ define <16 x i32> @test_v16i32_unaligned(<16 x i32>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v16i32_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovups (%rdi), %ymm0
; AVXONLY-NEXT: vmovups 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; AVX512-LABEL: test_v16i32_unaligned:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -863,7 +863,7 @@ entry:
define <8 x i64> @test_v8i64_unaligned(<8 x i64>* %V) {
; SSE-LABEL: test_v8i64_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -871,13 +871,13 @@ define <8 x i64> @test_v8i64_unaligned(<8 x i64>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8i64_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovups (%rdi), %ymm0
; AVXONLY-NEXT: vmovups 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; AVX512-LABEL: test_v8i64_unaligned:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -887,13 +887,13 @@ entry:
define <8 x float> @test_v16f32(<8 x float>* %V) {
; SSE-LABEL: test_v16f32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16f32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: retq
entry:
@@ -903,7 +903,7 @@ entry:
define <8 x double> @test_v8f64(<8 x double>* %V) {
; SSE-LABEL: test_v8f64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movapd (%rdi), %xmm0
; SSE-NEXT: movapd 16(%rdi), %xmm1
; SSE-NEXT: movapd 32(%rdi), %xmm2
@@ -911,13 +911,13 @@ define <8 x double> @test_v8f64(<8 x double>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8f64:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovapd (%rdi), %ymm0
; AVXONLY-NEXT: vmovapd 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; AVX512-LABEL: test_v8f64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovapd (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -927,7 +927,7 @@ entry:
define <16 x float> @test_v16f32_unaligned(<16 x float>* %V) {
; SSE-LABEL: test_v16f32_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -935,13 +935,13 @@ define <16 x float> @test_v16f32_unaligned(<16 x float>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v16f32_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovups (%rdi), %ymm0
; AVXONLY-NEXT: vmovups 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; AVX512-LABEL: test_v16f32_unaligned:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
@@ -951,7 +951,7 @@ entry:
define <8 x double> @test_v8f64_unaligned(<8 x double>* %V) {
; SSE-LABEL: test_v8f64_unaligned:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movupd (%rdi), %xmm0
; SSE-NEXT: movupd 16(%rdi), %xmm1
; SSE-NEXT: movupd 32(%rdi), %xmm2
@@ -959,13 +959,13 @@ define <8 x double> @test_v8f64_unaligned(<8 x double>* %V) {
; SSE-NEXT: retq
;
; AVXONLY-LABEL: test_v8f64_unaligned:
-; AVXONLY: # BB#0: # %entry
+; AVXONLY: # %bb.0: # %entry
; AVXONLY-NEXT: vmovupd (%rdi), %ymm0
; AVXONLY-NEXT: vmovupd 32(%rdi), %ymm1
; AVXONLY-NEXT: retq
;
; AVX512-LABEL: test_v8f64_unaligned:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovupd (%rdi), %zmm0
; AVX512-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/fastisel-softfloat.ll b/test/CodeGen/X86/fastisel-softfloat.ll
index e4330db81e1..579637e8344 100644
--- a/test/CodeGen/X86/fastisel-softfloat.ll
+++ b/test/CodeGen/X86/fastisel-softfloat.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gnu"
define float @pr26522(float %pat) #0 {
; CHECK-LABEL: pr26522:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
ret float %pat
diff --git a/test/CodeGen/X86/fdiv-combine.ll b/test/CodeGen/X86/fdiv-combine.ll
index d9d9ac401fb..912110e75d2 100644
--- a/test/CodeGen/X86/fdiv-combine.ll
+++ b/test/CodeGen/X86/fdiv-combine.ll
@@ -7,7 +7,7 @@
define float @div1_arcp(float %x, float %y, float %z) {
; CHECK-LABEL: div1_arcp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: divss %xmm1, %xmm0
; CHECK-NEXT: retq
%div1 = fdiv arcp float %x, %y
@@ -18,7 +18,7 @@ define float @div1_arcp(float %x, float %y, float %z) {
define float @div2_arcp_all(float %x, float %y, float %z) {
; CHECK-LABEL: div2_arcp_all:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: divss %xmm2, %xmm3
; CHECK-NEXT: mulss %xmm3, %xmm0
@@ -35,7 +35,7 @@ define float @div2_arcp_all(float %x, float %y, float %z) {
define float @div2_arcp_partial1(float %x, float %y, float %z) {
; CHECK-LABEL: div2_arcp_partial1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: divss %xmm2, %xmm0
; CHECK-NEXT: mulss %xmm1, %xmm0
; CHECK-NEXT: divss %xmm2, %xmm0
@@ -50,7 +50,7 @@ define float @div2_arcp_partial1(float %x, float %y, float %z) {
define float @div2_arcp_partial2(float %x, float %y, float %z) {
; CHECK-LABEL: div2_arcp_partial2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: divss %xmm2, %xmm0
; CHECK-NEXT: mulss %xmm1, %xmm0
; CHECK-NEXT: divss %xmm2, %xmm0
@@ -65,7 +65,7 @@ define float @div2_arcp_partial2(float %x, float %y, float %z) {
define float @div2_arcp_partial3(float %x, float %y, float %z) {
; CHECK-LABEL: div2_arcp_partial3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: divss %xmm2, %xmm3
; CHECK-NEXT: mulss %xmm3, %xmm0
@@ -83,7 +83,7 @@ define float @div2_arcp_partial3(float %x, float %y, float %z) {
define double @div3_arcp(double %x, double %y, double %z) {
; CHECK-LABEL: div3_arcp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: divsd %xmm1, %xmm2
; CHECK-NEXT: mulsd %xmm2, %xmm0
diff --git a/test/CodeGen/X86/fdiv.ll b/test/CodeGen/X86/fdiv.ll
index 226e6d269c3..f3956ecc0ea 100644
--- a/test/CodeGen/X86/fdiv.ll
+++ b/test/CodeGen/X86/fdiv.ll
@@ -4,7 +4,7 @@
define double @exact(double %x) {
; Exact division by a constant converted to multiplication.
; CHECK-LABEL: exact:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%div = fdiv double %x, 2.0
@@ -14,7 +14,7 @@ define double @exact(double %x) {
define double @inexact(double %x) {
; Inexact division by a constant converted to multiplication.
; CHECK-LABEL: inexact:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: mulsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%div = fdiv double %x, 0x41DFFFFFFFC00000
@@ -24,7 +24,7 @@ define double @inexact(double %x) {
define double @funky(double %x) {
; No conversion to multiplication if too funky.
; CHECK-LABEL: funky:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: divsd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -35,7 +35,7 @@ define double @funky(double %x) {
define double @denormal1(double %x) {
; Don't generate multiplication by a denormal.
; CHECK-LABEL: denormal1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: divsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%div = fdiv double %x, 0x7FD0000000000001
@@ -45,7 +45,7 @@ define double @denormal1(double %x) {
define double @denormal2(double %x) {
; Don't generate multiplication by a denormal.
; CHECK-LABEL: denormal2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: divsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%div = fdiv double %x, 0x7FEFFFFFFFFFFFFF
@@ -56,7 +56,7 @@ define double @denormal2(double %x) {
define float @double_negative(float %x, float %y) #0 {
; CHECK-LABEL: double_negative:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: divss %xmm1, %xmm0
; CHECK-NEXT: retq
%neg1 = fsub float -0.0, %x
diff --git a/test/CodeGen/X86/fixup-bw-copy.ll b/test/CodeGen/X86/fixup-bw-copy.ll
index d9d822ff475..443fcf3f504 100644
--- a/test/CodeGen/X86/fixup-bw-copy.ll
+++ b/test/CodeGen/X86/fixup-bw-copy.ll
@@ -8,17 +8,17 @@ target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
define i8 @test_movb(i8 %a0) {
; BWON64-LABEL: test_movb:
-; BWON64: # BB#0:
+; BWON64: # %bb.0:
; BWON64-NEXT: movl %edi, %eax
; BWON64-NEXT: retq
;
; BWOFF64-LABEL: test_movb:
-; BWOFF64: # BB#0:
+; BWOFF64: # %bb.0:
; BWOFF64-NEXT: movb %dil, %al
; BWOFF64-NEXT: retq
;
; X32-LABEL: test_movb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: retl
ret i8 %a0
@@ -26,22 +26,22 @@ define i8 @test_movb(i8 %a0) {
define i16 @test_movw(i16 %a0) {
; BWON64-LABEL: test_movw:
-; BWON64: # BB#0:
+; BWON64: # %bb.0:
; BWON64-NEXT: movl %edi, %eax
; BWON64-NEXT: retq
;
; BWOFF64-LABEL: test_movw:
-; BWOFF64: # BB#0:
+; BWOFF64: # %bb.0:
; BWOFF64-NEXT: movw %di, %ax
; BWOFF64-NEXT: retq
;
; BWON32-LABEL: test_movw:
-; BWON32: # BB#0:
+; BWON32: # %bb.0:
; BWON32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; BWON32-NEXT: retl
;
; BWOFF32-LABEL: test_movw:
-; BWOFF32: # BB#0:
+; BWOFF32: # %bb.0:
; BWOFF32-NEXT: movw {{[0-9]+}}(%esp), %ax
; BWOFF32-NEXT: retl
ret i16 %a0
@@ -50,7 +50,7 @@ define i16 @test_movw(i16 %a0) {
; Verify we don't mess with H-reg copies (only generated in 32-bit mode).
define i8 @test_movb_hreg(i16 %a0) {
; X64-LABEL: test_movb_hreg:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrl $8, %eax
; X64-NEXT: addb %dil, %al
@@ -58,7 +58,7 @@ define i8 @test_movb_hreg(i16 %a0) {
; X64-NEXT: retq
;
; X32-LABEL: test_movb_hreg:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addb %al, %ah
; X32-NEXT: movb %ah, %al
diff --git a/test/CodeGen/X86/fma-commute-x86.ll b/test/CodeGen/X86/fma-commute-x86.ll
index bf8b9aaae13..f8ae88d68e0 100644
--- a/test/CodeGen/X86/fma-commute-x86.ll
+++ b/test/CodeGen/X86/fma-commute-x86.ll
@@ -8,7 +8,7 @@ attributes #0 = { nounwind }
declare <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_baa_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm1
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfmadd213ss %xmm1, %xmm1, %xmm0
@@ -19,7 +19,7 @@ define <4 x float> @test_x86_fmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_aba_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfmadd132ss (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -29,7 +29,7 @@ define <4 x float> @test_x86_fmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_bba_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfmadd213ss (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -40,7 +40,7 @@ define <4 x float> @test_x86_fmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_baa_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfmadd132ps (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -50,7 +50,7 @@ define <4 x float> @test_x86_fmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_aba_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfmadd231ps (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -60,7 +60,7 @@ define <4 x float> @test_x86_fmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_bba_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfmadd213ps (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -71,7 +71,7 @@ define <4 x float> @test_x86_fmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_baa_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %ymm0
; FMA-NEXT: vfmadd132ps (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -81,7 +81,7 @@ define <8 x float> @test_x86_fmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
define <8 x float> @test_x86_fmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_aba_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %ymm0
; FMA-NEXT: vfmadd231ps (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -91,7 +91,7 @@ define <8 x float> @test_x86_fmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
define <8 x float> @test_x86_fmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fmadd_bba_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %ymm0
; FMA-NEXT: vfmadd213ps (%rcx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -102,7 +102,7 @@ define <8 x float> @test_x86_fmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
declare <2 x double> @llvm.x86.fma.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_baa_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm1
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfmadd213sd %xmm1, %xmm1, %xmm0
@@ -113,7 +113,7 @@ define <2 x double> @test_x86_fmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_aba_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfmadd132sd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -123,7 +123,7 @@ define <2 x double> @test_x86_fmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_bba_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfmadd213sd (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -134,7 +134,7 @@ define <2 x double> @test_x86_fmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0
declare <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_baa_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfmadd132pd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -144,7 +144,7 @@ define <2 x double> @test_x86_fmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_aba_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfmadd231pd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -154,7 +154,7 @@ define <2 x double> @test_x86_fmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_bba_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfmadd213pd (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -165,7 +165,7 @@ define <2 x double> @test_x86_fmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0
declare <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_baa_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %ymm0
; FMA-NEXT: vfmadd132pd (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -175,7 +175,7 @@ define <4 x double> @test_x86_fmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #
define <4 x double> @test_x86_fmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_aba_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %ymm0
; FMA-NEXT: vfmadd231pd (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -185,7 +185,7 @@ define <4 x double> @test_x86_fmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #
define <4 x double> @test_x86_fmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fmadd_bba_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %ymm0
; FMA-NEXT: vfmadd213pd (%rcx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -197,7 +197,7 @@ define <4 x double> @test_x86_fmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #
declare <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fnmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_baa_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm1
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfnmadd213ss %xmm1, %xmm1, %xmm0
@@ -208,7 +208,7 @@ define <4 x float> @test_x86_fnmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_aba_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfnmadd132ss (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -218,7 +218,7 @@ define <4 x float> @test_x86_fnmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_bba_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfnmadd213ss (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -229,7 +229,7 @@ define <4 x float> @test_x86_fnmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
declare <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fnmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_baa_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfnmadd132ps (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -239,7 +239,7 @@ define <4 x float> @test_x86_fnmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_aba_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfnmadd231ps (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -249,7 +249,7 @@ define <4 x float> @test_x86_fnmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_bba_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfnmadd213ps (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -260,7 +260,7 @@ define <4 x float> @test_x86_fnmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
declare <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fnmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_baa_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %ymm0
; FMA-NEXT: vfnmadd132ps (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -270,7 +270,7 @@ define <8 x float> @test_x86_fnmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0
define <8 x float> @test_x86_fnmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_aba_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %ymm0
; FMA-NEXT: vfnmadd231ps (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -280,7 +280,7 @@ define <8 x float> @test_x86_fnmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0
define <8 x float> @test_x86_fnmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_bba_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %ymm0
; FMA-NEXT: vfnmadd213ps (%rcx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -291,7 +291,7 @@ define <8 x float> @test_x86_fnmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0
declare <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fnmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_baa_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm1
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfnmadd213sd %xmm1, %xmm1, %xmm0
@@ -302,7 +302,7 @@ define <2 x double> @test_x86_fnmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_aba_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfnmadd132sd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -312,7 +312,7 @@ define <2 x double> @test_x86_fnmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_bba_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfnmadd213sd (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -323,7 +323,7 @@ define <2 x double> @test_x86_fnmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0
declare <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fnmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_baa_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfnmadd132pd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -333,7 +333,7 @@ define <2 x double> @test_x86_fnmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_aba_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfnmadd231pd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -343,7 +343,7 @@ define <2 x double> @test_x86_fnmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_bba_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfnmadd213pd (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -354,7 +354,7 @@ define <2 x double> @test_x86_fnmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0
declare <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fnmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_baa_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %ymm0
; FMA-NEXT: vfnmadd132pd (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -364,7 +364,7 @@ define <4 x double> @test_x86_fnmadd_baa_pd_y(<4 x double> %a, <4 x double> %b)
define <4 x double> @test_x86_fnmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_aba_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %ymm0
; FMA-NEXT: vfnmadd231pd (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -374,7 +374,7 @@ define <4 x double> @test_x86_fnmadd_aba_pd_y(<4 x double> %a, <4 x double> %b)
define <4 x double> @test_x86_fnmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmadd_bba_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %ymm0
; FMA-NEXT: vfnmadd213pd (%rcx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -385,7 +385,7 @@ define <4 x double> @test_x86_fnmadd_bba_pd_y(<4 x double> %a, <4 x double> %b)
declare <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmsub_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_baa_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm1
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfmsub213ss %xmm1, %xmm1, %xmm0
@@ -396,7 +396,7 @@ define <4 x float> @test_x86_fmsub_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmsub_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_aba_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfmsub132ss (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -406,7 +406,7 @@ define <4 x float> @test_x86_fmsub_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmsub_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_bba_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfmsub213ss (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -417,7 +417,7 @@ define <4 x float> @test_x86_fmsub_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
declare <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_baa_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfmsub132ps (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -427,7 +427,7 @@ define <4 x float> @test_x86_fmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_aba_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfmsub231ps (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -437,7 +437,7 @@ define <4 x float> @test_x86_fmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_bba_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfmsub213ps (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -448,7 +448,7 @@ define <4 x float> @test_x86_fmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_baa_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %ymm0
; FMA-NEXT: vfmsub132ps (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -458,7 +458,7 @@ define <8 x float> @test_x86_fmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
define <8 x float> @test_x86_fmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_aba_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %ymm0
; FMA-NEXT: vfmsub231ps (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -468,7 +468,7 @@ define <8 x float> @test_x86_fmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
define <8 x float> @test_x86_fmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fmsub_bba_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %ymm0
; FMA-NEXT: vfmsub213ps (%rcx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -479,7 +479,7 @@ define <8 x float> @test_x86_fmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
declare <2 x double> @llvm.x86.fma.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmsub_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_baa_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm1
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfmsub213sd %xmm1, %xmm1, %xmm0
@@ -490,7 +490,7 @@ define <2 x double> @test_x86_fmsub_baa_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmsub_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_aba_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfmsub132sd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -500,7 +500,7 @@ define <2 x double> @test_x86_fmsub_aba_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmsub_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_bba_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfmsub213sd (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -511,7 +511,7 @@ define <2 x double> @test_x86_fmsub_bba_sd(<2 x double> %a, <2 x double> %b) #0
declare <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_baa_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfmsub132pd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -521,7 +521,7 @@ define <2 x double> @test_x86_fmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_aba_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfmsub231pd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -531,7 +531,7 @@ define <2 x double> @test_x86_fmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_bba_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfmsub213pd (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -542,7 +542,7 @@ define <2 x double> @test_x86_fmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0
declare <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_baa_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %ymm0
; FMA-NEXT: vfmsub132pd (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -552,7 +552,7 @@ define <4 x double> @test_x86_fmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #
define <4 x double> @test_x86_fmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_aba_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %ymm0
; FMA-NEXT: vfmsub231pd (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -562,7 +562,7 @@ define <4 x double> @test_x86_fmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #
define <4 x double> @test_x86_fmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fmsub_bba_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %ymm0
; FMA-NEXT: vfmsub213pd (%rcx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -574,7 +574,7 @@ define <4 x double> @test_x86_fmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #
declare <4 x float> @llvm.x86.fma.vfnmsub.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fnmsub_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_baa_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm1
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfnmsub213ss %xmm1, %xmm1, %xmm0
@@ -585,7 +585,7 @@ define <4 x float> @test_x86_fnmsub_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmsub_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_aba_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfnmsub132ss (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -595,7 +595,7 @@ define <4 x float> @test_x86_fnmsub_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmsub_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_bba_ss:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfnmsub213ss (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -606,7 +606,7 @@ define <4 x float> @test_x86_fnmsub_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
declare <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fnmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_baa_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfnmsub132ps (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -616,7 +616,7 @@ define <4 x float> @test_x86_fnmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_aba_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %xmm0
; FMA-NEXT: vfnmsub231ps (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -626,7 +626,7 @@ define <4 x float> @test_x86_fnmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_bba_ps:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %xmm0
; FMA-NEXT: vfnmsub213ps (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -637,7 +637,7 @@ define <4 x float> @test_x86_fnmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
declare <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fnmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_baa_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %ymm0
; FMA-NEXT: vfnmsub132ps (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -647,7 +647,7 @@ define <8 x float> @test_x86_fnmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0
define <8 x float> @test_x86_fnmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_aba_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rcx), %ymm0
; FMA-NEXT: vfnmsub231ps (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -657,7 +657,7 @@ define <8 x float> @test_x86_fnmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0
define <8 x float> @test_x86_fnmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_bba_ps_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovaps (%rdx), %ymm0
; FMA-NEXT: vfnmsub213ps (%rcx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -668,7 +668,7 @@ define <8 x float> @test_x86_fnmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0
declare <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fnmsub_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_baa_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm1
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfnmsub213sd %xmm1, %xmm1, %xmm0
@@ -679,7 +679,7 @@ define <2 x double> @test_x86_fnmsub_baa_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmsub_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_aba_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfnmsub132sd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -689,7 +689,7 @@ define <2 x double> @test_x86_fnmsub_aba_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmsub_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_bba_sd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfnmsub213sd (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -700,7 +700,7 @@ define <2 x double> @test_x86_fnmsub_bba_sd(<2 x double> %a, <2 x double> %b) #0
declare <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fnmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_baa_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfnmsub132pd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -710,7 +710,7 @@ define <2 x double> @test_x86_fnmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_aba_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %xmm0
; FMA-NEXT: vfnmsub231pd (%rdx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -720,7 +720,7 @@ define <2 x double> @test_x86_fnmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_bba_pd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %xmm0
; FMA-NEXT: vfnmsub213pd (%rcx), %xmm0, %xmm0
; FMA-NEXT: retq
@@ -731,7 +731,7 @@ define <2 x double> @test_x86_fnmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0
declare <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fnmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_baa_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %ymm0
; FMA-NEXT: vfnmsub132pd (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -741,7 +741,7 @@ define <4 x double> @test_x86_fnmsub_baa_pd_y(<4 x double> %a, <4 x double> %b)
define <4 x double> @test_x86_fnmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_aba_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rcx), %ymm0
; FMA-NEXT: vfnmsub231pd (%rdx), %ymm0, %ymm0
; FMA-NEXT: retq
@@ -751,7 +751,7 @@ define <4 x double> @test_x86_fnmsub_aba_pd_y(<4 x double> %a, <4 x double> %b)
define <4 x double> @test_x86_fnmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA-LABEL: test_x86_fnmsub_bba_pd_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmovapd (%rdx), %ymm0
; FMA-NEXT: vfnmsub213pd (%rcx), %ymm0, %ymm0
; FMA-NEXT: retq
diff --git a/test/CodeGen/X86/fma-fneg-combine.ll b/test/CodeGen/X86/fma-fneg-combine.ll
index 8247cb27978..8dacf2dcf97 100644
--- a/test/CodeGen/X86/fma-fneg-combine.ll
+++ b/test/CodeGen/X86/fma-fneg-combine.ll
@@ -7,7 +7,7 @@
define <16 x float> @test1(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -23,7 +23,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float>, <16 x fl
define <16 x float> @test2(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -34,7 +34,7 @@ entry:
define <16 x float> @test3(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -45,7 +45,7 @@ entry:
define <16 x float> @test4(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -56,7 +56,7 @@ entry:
define <16 x float> @test5(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsub213ps {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -67,7 +67,7 @@ entry:
define <16 x float> @test6(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -79,7 +79,7 @@ entry:
define <8 x float> @test7(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
@@ -90,13 +90,13 @@ entry:
define <8 x float> @test8(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; SKX-LABEL: test8:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vxorps {{.*}}(%rip){1to8}, %ymm2, %ymm2
; SKX-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; SKX-NEXT: retq
;
; KNL-LABEL: test8:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vbroadcastss {{.*#+}} ymm3 = [-0,-0,-0,-0,-0,-0,-0,-0]
; KNL-NEXT: vxorps %ymm3, %ymm2, %ymm2
; KNL-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
@@ -112,7 +112,7 @@ declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x f
define <8 x double> @test9(<8 x double> %a, <8 x double> %b, <8 x double> %c) {
; CHECK-LABEL: test9:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -125,7 +125,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a, <8 x d
define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test10:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0
; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -139,14 +139,14 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %a, <2 x doubl
define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 zeroext %mask) local_unnamed_addr #0 {
; SKX-LABEL: test11:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vxorps {{.*}}(%rip){1to4}, %xmm2, %xmm0
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test11:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vbroadcastss {{.*#+}} xmm0 = [-0,-0,-0,-0]
; KNL-NEXT: vxorps %xmm0, %xmm2, %xmm0
; KNL-NEXT: kmovw %edi, %k1
@@ -162,14 +162,14 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float> @test11b(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 zeroext %mask) local_unnamed_addr #0 {
; SKX-LABEL: test11b:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm1 {%k1}
; SKX-NEXT: vmovaps %xmm1, %xmm0
; SKX-NEXT: retq
;
; KNL-LABEL: test11b:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm1 {%k1}
; KNL-NEXT: vmovaps %xmm1, %xmm0
@@ -184,14 +184,14 @@ declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4
define <8 x double> @test12(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; SKX-LABEL: test12:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: vxorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
;
; KNL-LABEL: test12:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: vpxorq {{.*}}(%rip){1to8}, %zmm0, %zmm0
@@ -204,14 +204,14 @@ entry:
define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; SKX-LABEL: test13:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test13:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
@@ -225,14 +225,14 @@ entry:
define <16 x float> @test14(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; SKX-LABEL: test14:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfnmsub132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
;
; KNL-LABEL: test14:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfnmsub132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
@@ -245,7 +245,7 @@ entry:
define <16 x float> @test15(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; SKX-LABEL: test15:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm3
; SKX-NEXT: vfnmadd213ps {ru-sae}, %zmm2, %zmm0, %zmm1
@@ -255,7 +255,7 @@ define <16 x float> @test15(<16 x float> %a, <16 x float> %b, <16 x float> %c, i
; SKX-NEXT: retq
;
; KNL-LABEL: test15:
-; KNL: # BB#0: # %entry
+; KNL: # %bb.0: # %entry
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm3
; KNL-NEXT: vfnmadd213ps {ru-sae}, %zmm2, %zmm0, %zmm1
@@ -272,13 +272,13 @@ entry:
define <16 x float> @test16(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; SKX-LABEL: test16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmsubadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmsubadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: retq
@@ -290,13 +290,13 @@ declare <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float>, <16 x
define <8 x double> @test17(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; SKX-LABEL: test17:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: test17:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: retq
diff --git a/test/CodeGen/X86/fma-intrinsics-x86.ll b/test/CodeGen/X86/fma-intrinsics-x86.ll
index 6b28d0c19cf..db1e382ed6b 100644
--- a/test/CodeGen/X86/fma-intrinsics-x86.ll
+++ b/test/CodeGen/X86/fma-intrinsics-x86.ll
@@ -6,17 +6,17 @@
; VFMADD
define <4 x float> @test_x86_fma_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ss:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ss:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ss:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vfmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa9,0x00]
@@ -27,19 +27,19 @@ define <4 x float> @test_x86_fma_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4
define <4 x float> @test_x86_fma_vfmadd_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_bac_ss:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xa9,0xca]
; CHECK-FMA-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_bac_ss:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xca]
; CHECK-AVX512VL-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_ss:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa9,0x00]
@@ -51,17 +51,17 @@ declare <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float>, <4 x float>, <4 x float
define <2 x double> @test_x86_fma_vfmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_sd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_sd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_sd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vfmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa9,0x00]
@@ -72,19 +72,19 @@ define <2 x double> @test_x86_fma_vfmadd_sd(<2 x double> %a0, <2 x double> %a1,
define <2 x double> @test_x86_fma_vfmadd_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_bac_sd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
; CHECK-FMA-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_bac_sd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
; CHECK-AVX512VL-NEXT: vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_sd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa9,0x00]
@@ -96,17 +96,17 @@ declare <2 x double> @llvm.x86.fma.vfmadd.sd(<2 x double>, <2 x double>, <2 x do
define <4 x float> @test_x86_fma_vfmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ps:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ps:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ps:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa8,0x00]
@@ -118,17 +118,17 @@ declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float>, <4 x float>, <4 x float
define <2 x double> @test_x86_fma_vfmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_pd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_pd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_pd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa8,0x00]
@@ -140,17 +140,17 @@ declare <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double>, <2 x double>, <2 x do
define <8 x float> @test_x86_fma_vfmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ps_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ps_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ps_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa8,0x00]
@@ -162,17 +162,17 @@ declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x double> @test_x86_fma_vfmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmadd_pd_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_pd_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_pd_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa8,0x00]
@@ -185,17 +185,17 @@ declare <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double>, <4 x double>, <4
; VFMSUB
define <4 x float> @test_x86_fma_vfmsub_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ss:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xab,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ss:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xab,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ss:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vfmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xab,0x00]
@@ -206,19 +206,19 @@ define <4 x float> @test_x86_fma_vfmsub_ss(<4 x float> %a0, <4 x float> %a1, <4
define <4 x float> @test_x86_fma_vfmsub_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_bac_ss:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xab,0xca]
; CHECK-FMA-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_bac_ss:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0xca]
; CHECK-AVX512VL-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_ss:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xab,0x00]
@@ -230,17 +230,17 @@ declare <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float>, <4 x float>, <4 x float
define <2 x double> @test_x86_fma_vfmsub_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_sd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_sd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_sd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vfmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xab,0x00]
@@ -251,19 +251,19 @@ define <2 x double> @test_x86_fma_vfmsub_sd(<2 x double> %a0, <2 x double> %a1,
define <2 x double> @test_x86_fma_vfmsub_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_bac_sd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xab,0xca]
; CHECK-FMA-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_bac_sd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0xca]
; CHECK-AVX512VL-NEXT: vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_sd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xab,0x00]
@@ -275,17 +275,17 @@ declare <2 x double> @llvm.x86.fma.vfmsub.sd(<2 x double>, <2 x double>, <2 x do
define <4 x float> @test_x86_fma_vfmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ps:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ps:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ps:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaa,0x00]
@@ -297,17 +297,17 @@ declare <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float>, <4 x float>, <4 x float
define <2 x double> @test_x86_fma_vfmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_pd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_pd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_pd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaa,0x00]
@@ -319,17 +319,17 @@ declare <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double>, <2 x double>, <2 x do
define <8 x float> @test_x86_fma_vfmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ps_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ps_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ps_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xaa,0x00]
@@ -341,17 +341,17 @@ declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x double> @test_x86_fma_vfmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsub_pd_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_pd_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_pd_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xaa,0x00]
@@ -364,17 +364,17 @@ declare <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double>, <4 x double>, <4
; VFNMADD
define <4 x float> @test_x86_fma_vfnmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ss:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xad,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ss:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xad,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ss:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vfnmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xad,0x00]
@@ -385,19 +385,19 @@ define <4 x float> @test_x86_fma_vfnmadd_ss(<4 x float> %a0, <4 x float> %a1, <4
define <4 x float> @test_x86_fma_vfnmadd_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_bac_ss:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xad,0xca]
; CHECK-FMA-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_bac_ss:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0xca]
; CHECK-AVX512VL-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_ss:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xad,0x00]
@@ -409,17 +409,17 @@ declare <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_fma_vfnmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_sd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_sd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_sd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vfnmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xad,0x00]
@@ -430,19 +430,19 @@ define <2 x double> @test_x86_fma_vfnmadd_sd(<2 x double> %a0, <2 x double> %a1,
define <2 x double> @test_x86_fma_vfnmadd_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_bac_sd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xad,0xca]
; CHECK-FMA-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_bac_sd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0xca]
; CHECK-AVX512VL-NEXT: vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_sd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xad,0x00]
@@ -454,17 +454,17 @@ declare <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_x86_fma_vfnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ps:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xac,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ps:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xac,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ps:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xac,0x00]
@@ -476,17 +476,17 @@ declare <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_fma_vfnmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_pd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_pd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_pd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xac,0x00]
@@ -498,17 +498,17 @@ declare <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double>, <2 x double>, <2 x d
define <8 x float> @test_x86_fma_vfnmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ps_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xac,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ps_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xac,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ps_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xac,0x00]
@@ -520,17 +520,17 @@ declare <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x
define <4 x double> @test_x86_fma_vfnmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_pd_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_pd_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_pd_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xac,0x00]
@@ -543,17 +543,17 @@ declare <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double>, <4 x double>, <4
; VFNMSUB
define <4 x float> @test_x86_fma_vfnmsub_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ss:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ss:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ss:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vfnmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaf,0x00]
@@ -564,19 +564,19 @@ define <4 x float> @test_x86_fma_vfnmsub_ss(<4 x float> %a0, <4 x float> %a1, <4
define <4 x float> @test_x86_fma_vfnmsub_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_bac_ss:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xaf,0xca]
; CHECK-FMA-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_bac_ss:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0xca]
; CHECK-AVX512VL-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_ss:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaf,0x00]
@@ -588,17 +588,17 @@ declare <4 x float> @llvm.x86.fma.vfnmsub.ss(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_fma_vfnmsub_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_sd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_sd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_sd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vfnmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaf,0x00]
@@ -609,19 +609,19 @@ define <2 x double> @test_x86_fma_vfnmsub_sd(<2 x double> %a0, <2 x double> %a1,
define <2 x double> @test_x86_fma_vfnmsub_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_bac_sd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
; CHECK-FMA-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_bac_sd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
; CHECK-AVX512VL-NEXT: vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_sd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaf,0x00]
@@ -633,17 +633,17 @@ declare <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_x86_fma_vfnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ps:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xae,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ps:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xae,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ps:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xae,0x00]
@@ -655,17 +655,17 @@ declare <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_fma_vfnmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_pd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_pd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_pd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xae,0x00]
@@ -677,17 +677,17 @@ declare <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double>, <2 x double>, <2 x d
define <8 x float> @test_x86_fma_vfnmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ps_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xae,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ps_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xae,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ps_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xae,0x00]
@@ -699,17 +699,17 @@ declare <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x
define <4 x double> @test_x86_fma_vfnmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_pd_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_pd_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_pd_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xae,0x00]
@@ -722,17 +722,17 @@ declare <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double>, <4 x double>, <4
; VFMADDSUB
define <4 x float> @test_x86_fma_vfmaddsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_ps:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_ps:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_ps:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa6,0x00]
@@ -744,17 +744,17 @@ declare <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float>, <4 x float>, <4 x fl
define <2 x double> @test_x86_fma_vfmaddsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_pd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_pd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_pd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa6,0x00]
@@ -766,17 +766,17 @@ declare <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double>, <2 x double>, <2 x
define <8 x float> @test_x86_fma_vfmaddsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_ps_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_ps_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_ps_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa6,0x00]
@@ -788,17 +788,17 @@ declare <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8
define <4 x double> @test_x86_fma_vfmaddsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_pd_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_pd_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_pd_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa6,0x00]
@@ -811,17 +811,17 @@ declare <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double>, <4 x double>,
; VFMSUBADD
define <4 x float> @test_x86_fma_vfmsubadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_ps:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_ps:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_ps:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa7,0x00]
@@ -833,17 +833,17 @@ declare <4 x float> @llvm.x86.fma.vfmsubadd.ps(<4 x float>, <4 x float>, <4 x fl
define <2 x double> @test_x86_fma_vfmsubadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_pd:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_pd:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_pd:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa7,0x00]
@@ -855,17 +855,17 @@ declare <2 x double> @llvm.x86.fma.vfmsubadd.pd(<2 x double>, <2 x double>, <2 x
define <8 x float> @test_x86_fma_vfmsubadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_ps_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_ps_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_ps_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa7,0x00]
@@ -877,17 +877,17 @@ declare <8 x float> @llvm.x86.fma.vfmsubadd.ps.256(<8 x float>, <8 x float>, <8
define <4 x double> @test_x86_fma_vfmsubadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_pd_256:
-; CHECK-FMA: # BB#0:
+; CHECK-FMA: # %bb.0:
; CHECK-FMA-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_pd_256:
-; CHECK-AVX512VL: # BB#0:
+; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_pd_256:
-; CHECK-FMA-WIN: # BB#0:
+; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa7,0x00]
diff --git a/test/CodeGen/X86/fma-scalar-memfold.ll b/test/CodeGen/X86/fma-scalar-memfold.ll
index 23baeafe98d..7822139c3e1 100644
--- a/test/CodeGen/X86/fma-scalar-memfold.ll
+++ b/test/CodeGen/X86/fma-scalar-memfold.ll
@@ -16,7 +16,7 @@ declare <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double>, <2 x double>, <2 x d
define void @fmadd_aab_ss(float* %a, float* %b) {
; CHECK-LABEL: fmadd_aab_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfmadd213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -42,7 +42,7 @@ define void @fmadd_aab_ss(float* %a, float* %b) {
define void @fmadd_aba_ss(float* %a, float* %b) {
; CHECK-LABEL: fmadd_aba_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -68,7 +68,7 @@ define void @fmadd_aba_ss(float* %a, float* %b) {
define void @fmsub_aab_ss(float* %a, float* %b) {
; CHECK-LABEL: fmsub_aab_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfmsub213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -94,7 +94,7 @@ define void @fmsub_aab_ss(float* %a, float* %b) {
define void @fmsub_aba_ss(float* %a, float* %b) {
; CHECK-LABEL: fmsub_aba_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfmsub132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -120,7 +120,7 @@ define void @fmsub_aba_ss(float* %a, float* %b) {
define void @fnmadd_aab_ss(float* %a, float* %b) {
; CHECK-LABEL: fnmadd_aab_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfnmadd213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -146,7 +146,7 @@ define void @fnmadd_aab_ss(float* %a, float* %b) {
define void @fnmadd_aba_ss(float* %a, float* %b) {
; CHECK-LABEL: fnmadd_aba_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfnmadd132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -172,7 +172,7 @@ define void @fnmadd_aba_ss(float* %a, float* %b) {
define void @fnmsub_aab_ss(float* %a, float* %b) {
; CHECK-LABEL: fnmsub_aab_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfnmsub213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -198,7 +198,7 @@ define void @fnmsub_aab_ss(float* %a, float* %b) {
define void @fnmsub_aba_ss(float* %a, float* %b) {
; CHECK-LABEL: fnmsub_aba_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfnmsub132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -224,7 +224,7 @@ define void @fnmsub_aba_ss(float* %a, float* %b) {
define void @fmadd_aab_sd(double* %a, double* %b) {
; CHECK-LABEL: fmadd_aab_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfmadd213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -246,7 +246,7 @@ define void @fmadd_aab_sd(double* %a, double* %b) {
define void @fmadd_aba_sd(double* %a, double* %b) {
; CHECK-LABEL: fmadd_aba_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -268,7 +268,7 @@ define void @fmadd_aba_sd(double* %a, double* %b) {
define void @fmsub_aab_sd(double* %a, double* %b) {
; CHECK-LABEL: fmsub_aab_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfmsub213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -290,7 +290,7 @@ define void @fmsub_aab_sd(double* %a, double* %b) {
define void @fmsub_aba_sd(double* %a, double* %b) {
; CHECK-LABEL: fmsub_aba_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfmsub132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -312,7 +312,7 @@ define void @fmsub_aba_sd(double* %a, double* %b) {
define void @fnmadd_aab_sd(double* %a, double* %b) {
; CHECK-LABEL: fnmadd_aab_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfnmadd213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -334,7 +334,7 @@ define void @fnmadd_aab_sd(double* %a, double* %b) {
define void @fnmadd_aba_sd(double* %a, double* %b) {
; CHECK-LABEL: fnmadd_aba_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfnmadd132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -356,7 +356,7 @@ define void @fnmadd_aba_sd(double* %a, double* %b) {
define void @fnmsub_aab_sd(double* %a, double* %b) {
; CHECK-LABEL: fnmsub_aab_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfnmsub213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -378,7 +378,7 @@ define void @fnmsub_aab_sd(double* %a, double* %b) {
define void @fnmsub_aba_sd(double* %a, double* %b) {
; CHECK-LABEL: fnmsub_aba_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfnmsub132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
diff --git a/test/CodeGen/X86/fma-schedule.ll b/test/CodeGen/X86/fma-schedule.ll
index 121807a697e..2198436592d 100644
--- a/test/CodeGen/X86/fma-schedule.ll
+++ b/test/CodeGen/X86/fma-schedule.ll
@@ -17,43 +17,43 @@
define <2 x double> @test_vfmadd213pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfmadd213pd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmadd213pd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmadd213pd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmadd213pd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmadd213pd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmadd213pd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmadd213pd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -65,43 +65,43 @@ define <2 x double> @test_vfmadd213pd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <4 x double> @test_vfmadd213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
; GENERIC-LABEL: test_vfmadd213pd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmadd213pd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmadd213pd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmadd213pd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmadd213pd_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmadd213pd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmadd213pd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -113,43 +113,43 @@ define <4 x double> @test_vfmadd213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4
define <4 x float> @test_vfmadd213ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfmadd213ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmadd213ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmadd213ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmadd213ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmadd213ps:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmadd213ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmadd213ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -161,43 +161,43 @@ define <4 x float> @test_vfmadd213ps(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <8 x float> @test_vfmadd213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
; GENERIC-LABEL: test_vfmadd213ps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfmadd213ps (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmadd213ps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmadd213ps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmadd213ps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmadd213ps_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmadd213ps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmadd213ps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfmadd213ps (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -209,43 +209,43 @@ define <8 x float> @test_vfmadd213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x
define <2 x double> @test_vfmadd213sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfmadd213sd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmadd213sd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmadd213sd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmadd213sd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmadd213sd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmadd213sd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmadd213sd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -257,43 +257,43 @@ define <2 x double> @test_vfmadd213sd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <4 x float> @test_vfmadd213ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfmadd213ss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmadd213ss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmadd213ss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmadd213ss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmadd213ss:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmadd213ss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmadd213ss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -317,43 +317,43 @@ define <4 x float> @test_vfmadd213ss(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <2 x double> @test_vfmaddsubpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfmaddsubpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmaddsub213pd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmaddsubpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmaddsubpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmaddsubpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmaddsubpd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmaddsubpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmaddsubpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmaddsub213pd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -365,43 +365,43 @@ define <2 x double> @test_vfmaddsubpd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <4 x double> @test_vfmaddsubpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a4, <4 x double> *%a3) {
; GENERIC-LABEL: test_vfmaddsubpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfmaddsub213pd (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmaddsubpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmaddsubpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmaddsubpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmaddsubpd_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmaddsubpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmaddsubpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfmaddsub213pd (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -413,43 +413,43 @@ define <4 x double> @test_vfmaddsubpd_ymm(<4 x double> %a0, <4 x double> %a1, <4
define <4 x float> @test_vfmaddsubps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a4, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfmaddsubps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmaddsub213ps (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmaddsubps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmaddsubps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmaddsubps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmaddsubps:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmaddsubps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmaddsubps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmaddsub213ps (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -461,43 +461,43 @@ define <4 x float> @test_vfmaddsubps(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <8 x float> @test_vfmaddsubps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a8, <8 x float> *%a3) {
; GENERIC-LABEL: test_vfmaddsubps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfmaddsub213ps (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmaddsubps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmaddsubps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmaddsubps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmaddsubps_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmaddsubps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmaddsubps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfmaddsub213ps (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -521,43 +521,43 @@ define <8 x float> @test_vfmaddsubps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x
define <2 x double> @test_vfmsubaddpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfmsubaddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmsubadd213pd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsubaddpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsubaddpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsubaddpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsubaddpd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsubaddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsubaddpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmsubadd213pd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -569,43 +569,43 @@ define <2 x double> @test_vfmsubaddpd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <4 x double> @test_vfmsubaddpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a4, <4 x double> *%a3) {
; GENERIC-LABEL: test_vfmsubaddpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfmsubadd213pd (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsubaddpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsubaddpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsubaddpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsubaddpd_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsubaddpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsubaddpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfmsubadd213pd (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -617,43 +617,43 @@ define <4 x double> @test_vfmsubaddpd_ymm(<4 x double> %a0, <4 x double> %a1, <4
define <4 x float> @test_vfmsubaddps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a4, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfmsubaddps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmsubadd213ps (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsubaddps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsubaddps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsubaddps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsubaddps:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsubaddps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsubaddps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmsubadd213ps (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -665,43 +665,43 @@ define <4 x float> @test_vfmsubaddps(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <8 x float> @test_vfmsubaddps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a8, <8 x float> *%a3) {
; GENERIC-LABEL: test_vfmsubaddps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfmsubadd213ps (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsubaddps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsubaddps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsubaddps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsubaddps_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsubaddps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsubaddps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfmsubadd213ps (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -725,43 +725,43 @@ define <8 x float> @test_vfmsubaddps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x
define <2 x double> @test_vfmsub213pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfmsub213pd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmsub213pd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsub213pd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsub213pd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsub213pd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsub213pd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsub213pd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsub213pd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmsub213pd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -773,43 +773,43 @@ define <2 x double> @test_vfmsub213pd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <4 x double> @test_vfmsub213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
; GENERIC-LABEL: test_vfmsub213pd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfmsub213pd (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsub213pd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsub213pd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsub213pd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsub213pd_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsub213pd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsub213pd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfmsub213pd (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -821,43 +821,43 @@ define <4 x double> @test_vfmsub213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4
define <4 x float> @test_vfmsub213ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfmsub213ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmsub213ps (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsub213ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsub213ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsub213ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsub213ps:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsub213ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsub213ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmsub213ps (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -869,43 +869,43 @@ define <4 x float> @test_vfmsub213ps(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <8 x float> @test_vfmsub213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
; GENERIC-LABEL: test_vfmsub213ps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfmsub213ps (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsub213ps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsub213ps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsub213ps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsub213ps_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsub213ps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsub213ps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfmsub213ps (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -917,43 +917,43 @@ define <8 x float> @test_vfmsub213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x
define <2 x double> @test_vfmsub213sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfmsub213sd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsub213sd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsub213sd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsub213sd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsub213sd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsub213sd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsub213sd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -965,43 +965,43 @@ define <2 x double> @test_vfmsub213sd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <4 x float> @test_vfmsub213ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfmsub213ss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfmsub213ss (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfmsub213ss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfmsub213ss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfmsub213ss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfmsub213ss:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfmsub213ss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfmsub213ss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfmsub213ss (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1025,43 +1025,43 @@ define <4 x float> @test_vfmsub213ss(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <2 x double> @test_vfnmadd213pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfnmadd213pd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfnmadd213pd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmadd213pd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmadd213pd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmadd213pd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmadd213pd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmadd213pd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmadd213pd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfnmadd213pd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1073,43 +1073,43 @@ define <2 x double> @test_vfnmadd213pd(<2 x double> %a0, <2 x double> %a1, <2 x
define <4 x double> @test_vfnmadd213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
; GENERIC-LABEL: test_vfnmadd213pd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfnmadd213pd (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmadd213pd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmadd213pd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmadd213pd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmadd213pd_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmadd213pd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmadd213pd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfnmadd213pd (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1121,43 +1121,43 @@ define <4 x double> @test_vfnmadd213pd_ymm(<4 x double> %a0, <4 x double> %a1, <
define <4 x float> @test_vfnmadd213ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfnmadd213ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfnmadd213ps (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmadd213ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmadd213ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmadd213ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmadd213ps:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmadd213ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmadd213ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfnmadd213ps (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1169,43 +1169,43 @@ define <4 x float> @test_vfnmadd213ps(<4 x float> %a0, <4 x float> %a1, <4 x flo
define <8 x float> @test_vfnmadd213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
; GENERIC-LABEL: test_vfnmadd213ps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfnmadd213ps (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmadd213ps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmadd213ps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmadd213ps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmadd213ps_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmadd213ps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmadd213ps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfnmadd213ps (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1217,43 +1217,43 @@ define <8 x float> @test_vfnmadd213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x
define <2 x double> @test_vfnmadd213sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfnmadd213sd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfnmadd213sd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmadd213sd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmadd213sd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmadd213sd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmadd213sd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmadd213sd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmadd213sd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfnmadd213sd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1265,43 +1265,43 @@ define <2 x double> @test_vfnmadd213sd(<2 x double> %a0, <2 x double> %a1, <2 x
define <4 x float> @test_vfnmadd213ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfnmadd213ss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfnmadd213ss (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmadd213ss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmadd213ss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmadd213ss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmadd213ss:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmadd213ss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmadd213ss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfnmadd213ss (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1325,43 +1325,43 @@ define <4 x float> @test_vfnmadd213ss(<4 x float> %a0, <4 x float> %a1, <4 x flo
define <2 x double> @test_vfnmsub213pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfnmsub213pd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfnmsub213pd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmsub213pd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmsub213pd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmsub213pd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmsub213pd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmsub213pd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmsub213pd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfnmsub213pd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1373,43 +1373,43 @@ define <2 x double> @test_vfnmsub213pd(<2 x double> %a0, <2 x double> %a1, <2 x
define <4 x double> @test_vfnmsub213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
; GENERIC-LABEL: test_vfnmsub213pd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfnmsub213pd (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmsub213pd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmsub213pd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmsub213pd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmsub213pd_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmsub213pd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmsub213pd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfnmsub213pd (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1421,43 +1421,43 @@ define <4 x double> @test_vfnmsub213pd_ymm(<4 x double> %a0, <4 x double> %a1, <
define <4 x float> @test_vfnmsub213ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfnmsub213ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfnmsub213ps (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmsub213ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmsub213ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmsub213ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmsub213ps:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmsub213ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmsub213ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfnmsub213ps (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1469,43 +1469,43 @@ define <4 x float> @test_vfnmsub213ps(<4 x float> %a0, <4 x float> %a1, <4 x flo
define <8 x float> @test_vfnmsub213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
; GENERIC-LABEL: test_vfnmsub213ps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0
; GENERIC-NEXT: vfnmsub213ps (%rdi), %ymm1, %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmsub213ps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmsub213ps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmsub213ps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmsub213ps_ymm:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmsub213ps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmsub213ps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0
; ZNVER1-NEXT: vfnmsub213ps (%rdi), %ymm1, %ymm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1517,43 +1517,43 @@ define <8 x float> @test_vfnmsub213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x
define <2 x double> @test_vfnmsub213sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_vfnmsub213sd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfnmsub213sd (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmsub213sd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmsub213sd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmsub213sd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmsub213sd:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmsub213sd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmsub213sd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfnmsub213sd (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1565,43 +1565,43 @@ define <2 x double> @test_vfnmsub213sd(<2 x double> %a0, <2 x double> %a1, <2 x
define <4 x float> @test_vfnmsub213ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_vfnmsub213ss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0
; GENERIC-NEXT: vfnmsub213ss (%rdi), %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_vfnmsub213ss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_vfnmsub213ss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_vfnmsub213ss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; KNL-LABEL: test_vfnmsub213ss:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: test_vfnmsub213ss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_vfnmsub213ss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0
; ZNVER1-NEXT: vfnmsub213ss (%rdi), %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [1:0.50]
diff --git a/test/CodeGen/X86/fma.ll b/test/CodeGen/X86/fma.ll
index 2c942347d54..611f707d64c 100644
--- a/test/CodeGen/X86/fma.ll
+++ b/test/CodeGen/X86/fma.ll
@@ -10,7 +10,7 @@
define float @test_f32(float %a, float %b, float %c) #0 {
; FMA32-LABEL: test_f32:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: pushl %eax ## encoding: [0x50]
; FMA32-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
; FMA32-NEXT: ## xmm0 = mem[0],zero,zero,zero
@@ -23,29 +23,29 @@ define float @test_f32(float %a, float %b, float %c) #0 {
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMACALL32-LABEL: test_f32:
-; FMACALL32: ## BB#0: ## %entry
+; FMACALL32: ## %bb.0: ## %entry
; FMACALL32-NEXT: jmp _fmaf ## TAILCALL
; FMACALL32-NEXT: ## encoding: [0xeb,A]
; FMACALL32-NEXT: ## fixup A - offset: 1, value: _fmaf-1, kind: FK_PCRel_1
;
; FMA64-LABEL: test_f32:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; FMACALL64-LABEL: test_f32:
-; FMACALL64: ## BB#0: ## %entry
+; FMACALL64: ## %bb.0: ## %entry
; FMACALL64-NEXT: jmp _fmaf ## TAILCALL
; FMACALL64-NEXT: ## encoding: [0xeb,A]
; FMACALL64-NEXT: ## fixup A - offset: 1, value: _fmaf-1, kind: FK_PCRel_1
;
; AVX512-LABEL: test_f32:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_f32:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
@@ -55,7 +55,7 @@ entry:
define double @test_f64(double %a, double %b, double %c) #0 {
; FMA32-LABEL: test_f64:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: subl $12, %esp ## encoding: [0x83,0xec,0x0c]
; FMA32-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x10]
; FMA32-NEXT: ## xmm0 = mem[0],zero
@@ -68,29 +68,29 @@ define double @test_f64(double %a, double %b, double %c) #0 {
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMACALL32-LABEL: test_f64:
-; FMACALL32: ## BB#0: ## %entry
+; FMACALL32: ## %bb.0: ## %entry
; FMACALL32-NEXT: jmp _fma ## TAILCALL
; FMACALL32-NEXT: ## encoding: [0xeb,A]
; FMACALL32-NEXT: ## fixup A - offset: 1, value: _fma-1, kind: FK_PCRel_1
;
; FMA64-LABEL: test_f64:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; FMACALL64-LABEL: test_f64:
-; FMACALL64: ## BB#0: ## %entry
+; FMACALL64: ## %bb.0: ## %entry
; FMACALL64-NEXT: jmp _fma ## TAILCALL
; FMACALL64-NEXT: ## encoding: [0xeb,A]
; FMACALL64-NEXT: ## fixup A - offset: 1, value: _fma-1, kind: FK_PCRel_1
;
; AVX512-LABEL: test_f64:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_f64:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
@@ -100,7 +100,7 @@ entry:
define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
; FMA32-LABEL: test_f80:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: subl $60, %esp ## encoding: [0x83,0xec,0x3c]
; FMA32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x40]
; FMA32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -114,7 +114,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMACALL32-LABEL: test_f80:
-; FMACALL32: ## BB#0: ## %entry
+; FMACALL32: ## %bb.0: ## %entry
; FMACALL32-NEXT: subl $60, %esp ## encoding: [0x83,0xec,0x3c]
; FMACALL32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x40]
; FMACALL32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -128,7 +128,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
; FMACALL32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_f80:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
; FMA64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
; FMA64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -142,7 +142,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; FMACALL64-LABEL: test_f80:
-; FMACALL64: ## BB#0: ## %entry
+; FMACALL64: ## %bb.0: ## %entry
; FMACALL64-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
; FMACALL64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
; FMACALL64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -156,7 +156,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
; FMACALL64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_f80:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
; AVX512-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
; AVX512-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -170,7 +170,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_f80:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
; AVX512VL-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
; AVX512VL-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -189,40 +189,40 @@ entry:
define float @test_f32_cst() #0 {
; FMA32-LABEL: test_f32_cst:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: flds LCPI3_0 ## encoding: [0xd9,0x05,A,A,A,A]
; FMA32-NEXT: ## fixup A - offset: 2, value: LCPI3_0, kind: FK_Data_4
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMACALL32-LABEL: test_f32_cst:
-; FMACALL32: ## BB#0: ## %entry
+; FMACALL32: ## %bb.0: ## %entry
; FMACALL32-NEXT: flds LCPI3_0 ## encoding: [0xd9,0x05,A,A,A,A]
; FMACALL32-NEXT: ## fixup A - offset: 2, value: LCPI3_0, kind: FK_Data_4
; FMACALL32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_f32_cst:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vmovss {{.*}}(%rip), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; FMA64-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; FMA64-NEXT: ## xmm0 = mem[0],zero,zero,zero
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; FMACALL64-LABEL: test_f32_cst:
-; FMACALL64: ## BB#0: ## %entry
+; FMACALL64: ## %bb.0: ## %entry
; FMACALL64-NEXT: movss {{.*}}(%rip), %xmm0 ## encoding: [0xf3,0x0f,0x10,0x05,A,A,A,A]
; FMACALL64-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; FMACALL64-NEXT: ## xmm0 = mem[0],zero,zero,zero
; FMACALL64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_f32_cst:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; AVX512-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; AVX512-NEXT: ## xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_f32_cst:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; AVX512VL-NEXT: ## xmm0 = mem[0],zero,zero,zero
@@ -234,22 +234,22 @@ entry:
define <4 x float> @test_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 {
; FMA32-LABEL: test_v4f32:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v4f32:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v4f32:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v4f32:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
@@ -259,22 +259,22 @@ entry:
define <8 x float> @test_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #0 {
; FMA32-LABEL: test_v8f32:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v8f32:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v8f32:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v8f32:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
@@ -284,7 +284,7 @@ entry:
define <16 x float> @test_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c) #0 {
; FMA32-LABEL: test_v16f32:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: pushl %ebp ## encoding: [0x55]
; FMA32-NEXT: movl %esp, %ebp ## encoding: [0x89,0xe5]
; FMA32-NEXT: andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
@@ -296,18 +296,18 @@ define <16 x float> @test_v16f32(<16 x float> %a, <16 x float> %b, <16 x float>
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v16f32:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vfmadd213ps %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0x6d,0xa8,0xc4]
; FMA64-NEXT: vfmadd213ps %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0x65,0xa8,0xcd]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v16f32:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v16f32:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
@@ -317,22 +317,22 @@ entry:
define <2 x double> @test_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #0 {
; FMA32-LABEL: test_v2f64:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v2f64:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v2f64:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v2f64:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
@@ -342,22 +342,22 @@ entry:
define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 {
; FMA32-LABEL: test_v4f64:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v4f64:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v4f64:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v4f64:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
@@ -367,7 +367,7 @@ entry:
define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c) #0 {
; FMA32-LABEL: test_v8f64:
-; FMA32: ## BB#0: ## %entry
+; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: pushl %ebp ## encoding: [0x55]
; FMA32-NEXT: movl %esp, %ebp ## encoding: [0x89,0xe5]
; FMA32-NEXT: andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
@@ -379,18 +379,18 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMA32-NEXT: retl ## encoding: [0xc3]
;
; FMA64-LABEL: test_v8f64:
-; FMA64: ## BB#0: ## %entry
+; FMA64: ## %bb.0: ## %entry
; FMA64-NEXT: vfmadd213pd %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0xed,0xa8,0xc4]
; FMA64-NEXT: vfmadd213pd %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0xe5,0xa8,0xcd]
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_v8f64:
-; AVX512: ## BB#0: ## %entry
+; AVX512: ## %bb.0: ## %entry
; AVX512-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_v8f64:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: ## %bb.0: ## %entry
; AVX512VL-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
diff --git a/test/CodeGen/X86/fma4-commute-x86.ll b/test/CodeGen/X86/fma4-commute-x86.ll
index f47eb7c75a5..cfc6837e453 100644
--- a/test/CodeGen/X86/fma4-commute-x86.ll
+++ b/test/CodeGen/X86/fma4-commute-x86.ll
@@ -6,7 +6,7 @@ attributes #0 = { nounwind }
declare <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_ss:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmaddss %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -16,7 +16,7 @@ define <4 x float> @test_x86_fmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_ss:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmaddss %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -26,7 +26,7 @@ define <4 x float> @test_x86_fmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_ss:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfmaddss (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -37,7 +37,7 @@ define <4 x float> @test_x86_fmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmaddps %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -47,7 +47,7 @@ define <4 x float> @test_x86_fmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmaddps %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -57,7 +57,7 @@ define <4 x float> @test_x86_fmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfmaddps (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -68,7 +68,7 @@ define <4 x float> @test_x86_fmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfmaddps %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -78,7 +78,7 @@ define <8 x float> @test_x86_fmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
define <8 x float> @test_x86_fmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfmaddps %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -88,7 +88,7 @@ define <8 x float> @test_x86_fmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
define <8 x float> @test_x86_fmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %ymm0
; FMA4-NEXT: vfmaddps (%rcx), %ymm0, %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -99,7 +99,7 @@ define <8 x float> @test_x86_fmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
declare <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_sd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmaddsd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -109,7 +109,7 @@ define <2 x double> @test_x86_fmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_sd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmaddsd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -119,7 +119,7 @@ define <2 x double> @test_x86_fmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_sd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfmaddsd (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -130,7 +130,7 @@ define <2 x double> @test_x86_fmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0
declare <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmaddpd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -140,7 +140,7 @@ define <2 x double> @test_x86_fmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmaddpd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -150,7 +150,7 @@ define <2 x double> @test_x86_fmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfmaddpd (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -161,7 +161,7 @@ define <2 x double> @test_x86_fmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0
declare <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfmaddpd %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -171,7 +171,7 @@ define <4 x double> @test_x86_fmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #
define <4 x double> @test_x86_fmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfmaddpd %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -181,7 +181,7 @@ define <4 x double> @test_x86_fmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #
define <4 x double> @test_x86_fmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %ymm0
; FMA4-NEXT: vfmaddpd (%rcx), %ymm0, %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -192,7 +192,7 @@ define <4 x double> @test_x86_fmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #
declare <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fnmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_baa_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfnmaddps %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -202,7 +202,7 @@ define <4 x float> @test_x86_fnmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_aba_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfnmaddps %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -212,7 +212,7 @@ define <4 x float> @test_x86_fnmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_bba_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfnmaddps (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -223,7 +223,7 @@ define <4 x float> @test_x86_fnmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
declare <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fnmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_baa_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfnmaddps %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -233,7 +233,7 @@ define <8 x float> @test_x86_fnmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0
define <8 x float> @test_x86_fnmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_aba_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfnmaddps %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -243,7 +243,7 @@ define <8 x float> @test_x86_fnmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0
define <8 x float> @test_x86_fnmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_bba_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %ymm0
; FMA4-NEXT: vfnmaddps (%rcx), %ymm0, %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -254,7 +254,7 @@ define <8 x float> @test_x86_fnmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0
declare <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fnmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_baa_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfnmaddpd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -264,7 +264,7 @@ define <2 x double> @test_x86_fnmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_aba_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfnmaddpd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -274,7 +274,7 @@ define <2 x double> @test_x86_fnmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_bba_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfnmaddpd (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -285,7 +285,7 @@ define <2 x double> @test_x86_fnmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0
declare <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fnmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_baa_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfnmaddpd %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -295,7 +295,7 @@ define <4 x double> @test_x86_fnmadd_baa_pd_y(<4 x double> %a, <4 x double> %b)
define <4 x double> @test_x86_fnmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_aba_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfnmaddpd %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -305,7 +305,7 @@ define <4 x double> @test_x86_fnmadd_aba_pd_y(<4 x double> %a, <4 x double> %b)
define <4 x double> @test_x86_fnmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_bba_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %ymm0
; FMA4-NEXT: vfnmaddpd (%rcx), %ymm0, %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -316,7 +316,7 @@ define <4 x double> @test_x86_fnmadd_bba_pd_y(<4 x double> %a, <4 x double> %b)
declare <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_baa_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmsubps %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -326,7 +326,7 @@ define <4 x float> @test_x86_fmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_aba_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmsubps %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -336,7 +336,7 @@ define <4 x float> @test_x86_fmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_bba_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfmsubps (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -347,7 +347,7 @@ define <4 x float> @test_x86_fmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_baa_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfmsubps %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -357,7 +357,7 @@ define <8 x float> @test_x86_fmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
define <8 x float> @test_x86_fmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_aba_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfmsubps %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -367,7 +367,7 @@ define <8 x float> @test_x86_fmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
define <8 x float> @test_x86_fmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_bba_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %ymm0
; FMA4-NEXT: vfmsubps (%rcx), %ymm0, %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -378,7 +378,7 @@ define <8 x float> @test_x86_fmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
declare <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_baa_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmsubpd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -388,7 +388,7 @@ define <2 x double> @test_x86_fmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_aba_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmsubpd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -398,7 +398,7 @@ define <2 x double> @test_x86_fmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_bba_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfmsubpd (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -409,7 +409,7 @@ define <2 x double> @test_x86_fmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0
declare <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_baa_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfmsubpd %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -419,7 +419,7 @@ define <4 x double> @test_x86_fmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #
define <4 x double> @test_x86_fmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_aba_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfmsubpd %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -429,7 +429,7 @@ define <4 x double> @test_x86_fmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #
define <4 x double> @test_x86_fmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_bba_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %ymm0
; FMA4-NEXT: vfmsubpd (%rcx), %ymm0, %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -440,7 +440,7 @@ define <4 x double> @test_x86_fmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #
declare <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fnmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_baa_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfnmsubps %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -450,7 +450,7 @@ define <4 x float> @test_x86_fnmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_aba_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfnmsubps %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -460,7 +460,7 @@ define <4 x float> @test_x86_fnmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
define <4 x float> @test_x86_fnmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_bba_ps:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfnmsubps (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -471,7 +471,7 @@ define <4 x float> @test_x86_fnmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
declare <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fnmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_baa_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfnmsubps %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -481,7 +481,7 @@ define <8 x float> @test_x86_fnmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0
define <8 x float> @test_x86_fnmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_aba_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfnmsubps %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -491,7 +491,7 @@ define <8 x float> @test_x86_fnmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0
define <8 x float> @test_x86_fnmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_bba_ps_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %ymm0
; FMA4-NEXT: vfnmsubps (%rcx), %ymm0, %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -502,7 +502,7 @@ define <8 x float> @test_x86_fnmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0
declare <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fnmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_baa_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfnmsubpd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -512,7 +512,7 @@ define <2 x double> @test_x86_fnmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_aba_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfnmsubpd %xmm0, (%rdx), %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -522,7 +522,7 @@ define <2 x double> @test_x86_fnmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0
define <2 x double> @test_x86_fnmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_bba_pd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfnmsubpd (%rcx), %xmm0, %xmm0, %xmm0
; FMA4-NEXT: retq
@@ -533,7 +533,7 @@ define <2 x double> @test_x86_fnmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0
declare <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fnmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_baa_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfnmsubpd %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -543,7 +543,7 @@ define <4 x double> @test_x86_fnmsub_baa_pd_y(<4 x double> %a, <4 x double> %b)
define <4 x double> @test_x86_fnmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_aba_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfnmsubpd %ymm0, (%rdx), %ymm0, %ymm0
; FMA4-NEXT: retq
@@ -553,7 +553,7 @@ define <4 x double> @test_x86_fnmsub_aba_pd_y(<4 x double> %a, <4 x double> %b)
define <4 x double> @test_x86_fnmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_bba_pd_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %ymm0
; FMA4-NEXT: vfnmsubpd (%rcx), %ymm0, %ymm0, %ymm0
; FMA4-NEXT: retq
diff --git a/test/CodeGen/X86/fma4-fneg-combine.ll b/test/CodeGen/X86/fma4-fneg-combine.ll
index 69f90d1d011..771162a2c99 100644
--- a/test/CodeGen/X86/fma4-fneg-combine.ll
+++ b/test/CodeGen/X86/fma4-fneg-combine.ll
@@ -8,7 +8,7 @@ declare <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a, <2 x double> %b,
; TODO this can be negated
define <4 x float> @test1(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -19,7 +19,7 @@ define <4 x float> @test1(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
@@ -29,7 +29,7 @@ define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
@@ -39,7 +39,7 @@ define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
define <4 x float> @test4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -49,7 +49,7 @@ define <4 x float> @test4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
define <4 x float> @test5(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -60,7 +60,7 @@ define <4 x float> @test5(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -71,7 +71,7 @@ define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test7(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
@@ -81,7 +81,7 @@ define <2 x double> @test7(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test8(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %b
@@ -91,7 +91,7 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test9(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
@@ -101,7 +101,7 @@ define <2 x double> @test9(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
diff --git a/test/CodeGen/X86/fma4-intrinsics-x86.ll b/test/CodeGen/X86/fma4-intrinsics-x86.ll
index 0cdf251cfba..ee6a7ec1b55 100644
--- a/test/CodeGen/X86/fma4-intrinsics-x86.ll
+++ b/test/CodeGen/X86/fma4-intrinsics-x86.ll
@@ -5,7 +5,7 @@
; VFMADD
define <4 x float> @test_x86_fma4_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma4_vfmadd_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6a,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -14,7 +14,7 @@ define <4 x float> @test_x86_fma4_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4
define <4 x float> @test_x86_fma4_vfmadd_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma4_vfmadd_bac_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddss %xmm2, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0xf1,0x6a,0xc2,0x00]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
@@ -24,7 +24,7 @@ declare <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_fma4_vfmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma4_vfmadd_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6b,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -33,7 +33,7 @@ define <2 x double> @test_x86_fma4_vfmadd_sd(<2 x double> %a0, <2 x double> %a1,
define <2 x double> @test_x86_fma4_vfmadd_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma4_vfmadd_bac_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsd %xmm2, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0xf1,0x6b,0xc2,0x00]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
@@ -43,7 +43,7 @@ declare <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_x86_fma_vfmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmadd_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x68,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -53,7 +53,7 @@ declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float>, <4 x float>, <4 x float
define <2 x double> @test_x86_fma_vfmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmadd_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x69,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -63,7 +63,7 @@ declare <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double>, <2 x double>, <2 x do
define <8 x float> @test_x86_fma_vfmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmadd_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x68,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -73,7 +73,7 @@ declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x double> @test_x86_fma_vfmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmadd_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x69,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -84,7 +84,7 @@ declare <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double>, <4 x double>, <4
; VFMSUB
define <4 x float> @test_x86_fma_vfmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmsub_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6c,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -94,7 +94,7 @@ declare <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float>, <4 x float>, <4 x float
define <2 x double> @test_x86_fma_vfmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmsub_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6d,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -104,7 +104,7 @@ declare <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double>, <2 x double>, <2 x do
define <8 x float> @test_x86_fma_vfmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmsub_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x6c,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -114,7 +114,7 @@ declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x double> @test_x86_fma_vfmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmsub_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x6d,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -125,7 +125,7 @@ declare <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double>, <4 x double>, <4
; VFNMADD
define <4 x float> @test_x86_fma_vfnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmadd_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x78,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -135,7 +135,7 @@ declare <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_fma_vfnmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmadd_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmaddpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x79,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -145,7 +145,7 @@ declare <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double>, <2 x double>, <2 x d
define <8 x float> @test_x86_fma_vfnmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmadd_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmaddps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x78,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -155,7 +155,7 @@ declare <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x
define <4 x double> @test_x86_fma_vfnmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmadd_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmaddpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x79,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -166,7 +166,7 @@ declare <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double>, <4 x double>, <4
; VFNMSUB
define <4 x float> @test_x86_fma_vfnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmsub_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x7c,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -176,7 +176,7 @@ declare <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_fma_vfnmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmsub_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x7d,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -186,7 +186,7 @@ declare <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double>, <2 x double>, <2 x d
define <8 x float> @test_x86_fma_vfnmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmsub_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x7c,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -196,7 +196,7 @@ declare <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x
define <4 x double> @test_x86_fma_vfnmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfnmsub_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x7d,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -207,7 +207,7 @@ declare <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double>, <4 x double>, <4
; VFMADDSUB
define <4 x float> @test_x86_fma_vfmaddsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmaddsub_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5c,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -217,7 +217,7 @@ declare <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float>, <4 x float>, <4 x fl
define <2 x double> @test_x86_fma_vfmaddsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmaddsub_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5d,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -227,7 +227,7 @@ declare <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double>, <2 x double>, <2 x
define <8 x float> @test_x86_fma_vfmaddsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmaddsub_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5c,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -237,7 +237,7 @@ declare <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8
define <4 x double> @test_x86_fma_vfmaddsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmaddsub_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5d,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -248,7 +248,7 @@ declare <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double>, <4 x double>,
; VFMSUBADD
define <4 x float> @test_x86_fma_vfmsubadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmsubadd_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubaddps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5e,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.fma.vfmsubadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -258,7 +258,7 @@ declare <4 x float> @llvm.x86.fma.vfmsubadd.ps(<4 x float>, <4 x float>, <4 x fl
define <2 x double> @test_x86_fma_vfmsubadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmsubadd_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubaddpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5f,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <2 x double> @llvm.x86.fma.vfmsubadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -268,7 +268,7 @@ declare <2 x double> @llvm.x86.fma.vfmsubadd.pd(<2 x double>, <2 x double>, <2 x
define <8 x float> @test_x86_fma_vfmsubadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmsubadd_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubaddps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5e,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x float> @llvm.x86.fma.vfmsubadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -278,7 +278,7 @@ declare <8 x float> @llvm.x86.fma.vfmsubadd.ps.256(<8 x float>, <8 x float>, <8
define <4 x double> @test_x86_fma_vfmsubadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: test_x86_fma_vfmsubadd_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmsubaddpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5f,0xc2,0x10]
; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x double> @llvm.x86.fma.vfmsubadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
diff --git a/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll b/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
index a7f7500afb1..236f3ff19da 100644
--- a/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
+++ b/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
@@ -5,7 +5,7 @@
; VFMADD
define < 4 x float > @test_x86_fma4_vfmadd_ss_load(< 4 x float > %a0, < 4 x float > %a1, float* %a2) {
; CHECK-LABEL: test_x86_fma4_vfmadd_ss_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddss (%rdi), %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%x = load float , float *%a2
@@ -15,7 +15,7 @@ define < 4 x float > @test_x86_fma4_vfmadd_ss_load(< 4 x float > %a0, < 4 x floa
}
define < 4 x float > @test_x86_fma4_vfmadd_ss_load2(< 4 x float > %a0, float* %a1, < 4 x float > %a2) {
; CHECK-LABEL: test_x86_fma4_vfmadd_ss_load2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddss %xmm1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%x = load float , float *%a1
@@ -28,7 +28,7 @@ declare < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float >, < 4 x float >, < 4
define < 2 x double > @test_x86_fma4_vfmadd_sd_load(< 2 x double > %a0, < 2 x double > %a1, double* %a2) {
; CHECK-LABEL: test_x86_fma4_vfmadd_sd_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsd (%rdi), %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%x = load double , double *%a2
@@ -38,7 +38,7 @@ define < 2 x double > @test_x86_fma4_vfmadd_sd_load(< 2 x double > %a0, < 2 x do
}
define < 2 x double > @test_x86_fma4_vfmadd_sd_load2(< 2 x double > %a0, double* %a1, < 2 x double > %a2) {
; CHECK-LABEL: test_x86_fma4_vfmadd_sd_load2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddsd %xmm1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%x = load double , double *%a1
@@ -49,7 +49,7 @@ define < 2 x double > @test_x86_fma4_vfmadd_sd_load2(< 2 x double > %a0, double*
declare < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double >, < 2 x double >, < 2 x double >) nounwind readnone
define < 4 x float > @test_x86_fma_vfmadd_ps_load(< 4 x float > %a0, < 4 x float > %a1, < 4 x float >* %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_ps_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddps (%rdi), %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%x = load <4 x float>, <4 x float>* %a2
@@ -58,7 +58,7 @@ define < 4 x float > @test_x86_fma_vfmadd_ps_load(< 4 x float > %a0, < 4 x float
}
define < 4 x float > @test_x86_fma_vfmadd_ps_load2(< 4 x float > %a0, < 4 x float >* %a1, < 4 x float > %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_ps_load2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddps %xmm1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%x = load <4 x float>, <4 x float>* %a1
@@ -70,7 +70,7 @@ declare < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float >, < 4 x float >, < 4
; To test execution dependency
define < 4 x float > @test_x86_fma_vfmadd_ps_load3(< 4 x float >* %a0, < 4 x float >* %a1, < 4 x float > %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_ps_load3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm1
; CHECK-NEXT: vfmaddps %xmm0, (%rsi), %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -82,7 +82,7 @@ define < 4 x float > @test_x86_fma_vfmadd_ps_load3(< 4 x float >* %a0, < 4 x flo
define < 2 x double > @test_x86_fma_vfmadd_pd_load(< 2 x double > %a0, < 2 x double > %a1, < 2 x double >* %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_pd_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddpd (%rdi), %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%x = load <2 x double>, <2 x double>* %a2
@@ -91,7 +91,7 @@ define < 2 x double > @test_x86_fma_vfmadd_pd_load(< 2 x double > %a0, < 2 x dou
}
define < 2 x double > @test_x86_fma_vfmadd_pd_load2(< 2 x double > %a0, < 2 x double >* %a1, < 2 x double > %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_pd_load2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfmaddpd %xmm1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%x = load <2 x double>, <2 x double>* %a1
@@ -103,7 +103,7 @@ declare < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double >, < 2 x double >, <
; To test execution dependency
define < 2 x double > @test_x86_fma_vfmadd_pd_load3(< 2 x double >* %a0, < 2 x double >* %a1, < 2 x double > %a2) {
; CHECK-LABEL: test_x86_fma_vfmadd_pd_load3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %xmm1
; CHECK-NEXT: vfmaddpd %xmm0, (%rsi), %xmm1, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/fma4-scalar-memfold.ll b/test/CodeGen/X86/fma4-scalar-memfold.ll
index b43e800795f..204f6f99b16 100644
--- a/test/CodeGen/X86/fma4-scalar-memfold.ll
+++ b/test/CodeGen/X86/fma4-scalar-memfold.ll
@@ -8,7 +8,7 @@ declare <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double>, <2 x double>, <2 x d
define void @fmadd_aab_ss(float* %a, float* %b) {
; CHECK-LABEL: fmadd_aab_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfmaddss (%rsi), %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -34,7 +34,7 @@ define void @fmadd_aab_ss(float* %a, float* %b) {
define void @fmadd_aba_ss(float* %a, float* %b) {
; CHECK-LABEL: fmadd_aba_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vfmaddss %xmm0, (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -60,7 +60,7 @@ define void @fmadd_aba_ss(float* %a, float* %b) {
define void @fmadd_aab_sd(double* %a, double* %b) {
; CHECK-LABEL: fmadd_aab_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfmaddsd (%rsi), %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -82,7 +82,7 @@ define void @fmadd_aab_sd(double* %a, double* %b) {
define void @fmadd_aba_sd(double* %a, double* %b) {
; CHECK-LABEL: fmadd_aba_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vfmaddsd %xmm0, (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
diff --git a/test/CodeGen/X86/fma_patterns.ll b/test/CodeGen/X86/fma_patterns.ll
index f1698b5b310..2b4a686b0de 100644
--- a/test/CodeGen/X86/fma_patterns.ll
+++ b/test/CodeGen/X86/fma_patterns.ll
@@ -14,17 +14,17 @@
define float @test_f32_fmadd(float %a0, float %a1, float %a2) {
; FMA-LABEL: test_f32_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul float %a0, %a1
@@ -34,17 +34,17 @@ define float @test_f32_fmadd(float %a0, float %a1, float %a2) {
define <4 x float> @test_4f32_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul <4 x float> %a0, %a1
@@ -54,17 +54,17 @@ define <4 x float> @test_4f32_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x float
define <8 x float> @test_8f32_fmadd(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; FMA-LABEL: test_8f32_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f32_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f32_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%x = fmul <8 x float> %a0, %a1
@@ -74,17 +74,17 @@ define <8 x float> @test_8f32_fmadd(<8 x float> %a0, <8 x float> %a1, <8 x float
define double @test_f64_fmadd(double %a0, double %a1, double %a2) {
; FMA-LABEL: test_f64_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul double %a0, %a1
@@ -94,17 +94,17 @@ define double @test_f64_fmadd(double %a0, double %a1, double %a2) {
define <2 x double> @test_2f64_fmadd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddpd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul <2 x double> %a0, %a1
@@ -114,17 +114,17 @@ define <2 x double> @test_2f64_fmadd(<2 x double> %a0, <2 x double> %a1, <2 x do
define <4 x double> @test_4f64_fmadd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_4f64_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f64_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f64_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%x = fmul <4 x double> %a0, %a1
@@ -138,17 +138,17 @@ define <4 x double> @test_4f64_fmadd(<4 x double> %a0, <4 x double> %a1, <4 x do
define float @test_f32_fmsub(float %a0, float %a1, float %a2) {
; FMA-LABEL: test_f32_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubss %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul float %a0, %a1
@@ -158,17 +158,17 @@ define float @test_f32_fmsub(float %a0, float %a1, float %a2) {
define <4 x float> @test_4f32_fmsub(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul <4 x float> %a0, %a1
@@ -178,17 +178,17 @@ define <4 x float> @test_4f32_fmsub(<4 x float> %a0, <4 x float> %a1, <4 x float
define <8 x float> @test_8f32_fmsub(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; FMA-LABEL: test_8f32_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f32_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f32_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%x = fmul <8 x float> %a0, %a1
@@ -198,17 +198,17 @@ define <8 x float> @test_8f32_fmsub(<8 x float> %a0, <8 x float> %a1, <8 x float
define double @test_f64_fmsub(double %a0, double %a1, double %a2) {
; FMA-LABEL: test_f64_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubsd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul double %a0, %a1
@@ -218,17 +218,17 @@ define double @test_f64_fmsub(double %a0, double %a1, double %a2) {
define <2 x double> @test_2f64_fmsub(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul <2 x double> %a0, %a1
@@ -238,17 +238,17 @@ define <2 x double> @test_2f64_fmsub(<2 x double> %a0, <2 x double> %a1, <2 x do
define <4 x double> @test_4f64_fmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_4f64_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f64_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f64_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%x = fmul <4 x double> %a0, %a1
@@ -262,17 +262,17 @@ define <4 x double> @test_4f64_fmsub(<4 x double> %a0, <4 x double> %a1, <4 x do
define float @test_f32_fnmadd(float %a0, float %a1, float %a2) {
; FMA-LABEL: test_f32_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul float %a0, %a1
@@ -282,17 +282,17 @@ define float @test_f32_fnmadd(float %a0, float %a1, float %a2) {
define <4 x float> @test_4f32_fnmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul <4 x float> %a0, %a1
@@ -302,17 +302,17 @@ define <4 x float> @test_4f32_fnmadd(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <8 x float> @test_8f32_fnmadd(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; FMA-LABEL: test_8f32_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f32_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddps %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f32_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%x = fmul <8 x float> %a0, %a1
@@ -322,17 +322,17 @@ define <8 x float> @test_8f32_fnmadd(<8 x float> %a0, <8 x float> %a1, <8 x floa
define double @test_f64_fnmadd(double %a0, double %a1, double %a2) {
; FMA-LABEL: test_f64_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul double %a0, %a1
@@ -342,17 +342,17 @@ define double @test_f64_fnmadd(double %a0, double %a1, double %a2) {
define <2 x double> @test_2f64_fnmadd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddpd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul <2 x double> %a0, %a1
@@ -362,17 +362,17 @@ define <2 x double> @test_2f64_fnmadd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <4 x double> @test_4f64_fnmadd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_4f64_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f64_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f64_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%x = fmul <4 x double> %a0, %a1
@@ -386,17 +386,17 @@ define <4 x double> @test_4f64_fnmadd(<4 x double> %a0, <4 x double> %a1, <4 x d
define float @test_f32_fnmsub(float %a0, float %a1, float %a2) {
; FMA-LABEL: test_f32_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul float %a0, %a1
@@ -407,17 +407,17 @@ define float @test_f32_fnmsub(float %a0, float %a1, float %a2) {
define <4 x float> @test_4f32_fnmsub(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul <4 x float> %a0, %a1
@@ -428,17 +428,17 @@ define <4 x float> @test_4f32_fnmsub(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <8 x float> @test_8f32_fnmsub(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; FMA-LABEL: test_8f32_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f32_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f32_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%x = fmul <8 x float> %a0, %a1
@@ -449,17 +449,17 @@ define <8 x float> @test_8f32_fnmsub(<8 x float> %a0, <8 x float> %a1, <8 x floa
define double @test_f64_fnmsub(double %a0, double %a1, double %a2) {
; FMA-LABEL: test_f64_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul double %a0, %a1
@@ -470,17 +470,17 @@ define double @test_f64_fnmsub(double %a0, double %a1, double %a2) {
define <2 x double> @test_2f64_fnmsub(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubpd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%x = fmul <2 x double> %a0, %a1
@@ -491,17 +491,17 @@ define <2 x double> @test_2f64_fnmsub(<2 x double> %a0, <2 x double> %a1, <2 x d
define <4 x double> @test_4f64_fnmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; FMA-LABEL: test_4f64_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f64_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f64_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%x = fmul <4 x double> %a0, %a1
@@ -516,17 +516,17 @@ define <4 x double> @test_4f64_fnmsub(<4 x double> %a0, <4 x double> %a1, <4 x d
define <4 x float> @test_4f32_fmadd_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) {
; FMA-LABEL: test_4f32_fmadd_load:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd132ps (%rdi), %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_4f32_fmadd_load:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps %xmm1, (%rdi), %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_4f32_fmadd_load:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd132ps (%rdi), %xmm1, %xmm0
; AVX512-NEXT: retq
%x = load <4 x float>, <4 x float>* %a0
@@ -537,17 +537,17 @@ define <4 x float> @test_4f32_fmadd_load(<4 x float>* %a0, <4 x float> %a1, <4 x
define <2 x double> @test_2f64_fmsub_load(<2 x double>* %a0, <2 x double> %a1, <2 x double> %a2) {
; FMA-LABEL: test_2f64_fmsub_load:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub132pd (%rdi), %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_2f64_fmsub_load:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd %xmm1, (%rdi), %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_2f64_fmsub_load:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub132pd (%rdi), %xmm1, %xmm0
; AVX512-NEXT: retq
%x = load <2 x double>, <2 x double>* %a0
@@ -562,35 +562,35 @@ define <2 x double> @test_2f64_fmsub_load(<2 x double>* %a0, <2 x double> %a1, <
define <4 x float> @test_v4f32_mul_add_x_one_y(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_add_x_one_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; FMA-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_add_x_one_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_add_x_one_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_add_x_one_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_add_x_one_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_add_x_one_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%a = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -600,35 +600,35 @@ define <4 x float> @test_v4f32_mul_add_x_one_y(<4 x float> %x, <4 x float> %y) {
define <4 x float> @test_v4f32_mul_y_add_x_one(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_y_add_x_one:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; FMA-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_y_add_x_one:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_y_add_x_one:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_y_add_x_one:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_y_add_x_one:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_y_add_x_one:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%a = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -638,35 +638,35 @@ define <4 x float> @test_v4f32_mul_y_add_x_one(<4 x float> %x, <4 x float> %y) {
define <4 x float> @test_v4f32_mul_add_x_negone_y(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; FMA-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%a = fadd <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
@@ -676,35 +676,35 @@ define <4 x float> @test_v4f32_mul_add_x_negone_y(<4 x float> %x, <4 x float> %y
define <4 x float> @test_v4f32_mul_y_add_x_negone(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; FMA-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%a = fadd <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
@@ -714,38 +714,38 @@ define <4 x float> @test_v4f32_mul_y_add_x_negone(<4 x float> %x, <4 x float> %y
define <4 x float> @test_v4f32_mul_sub_one_x_y(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1]
; AVX512-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -755,38 +755,38 @@ define <4 x float> @test_v4f32_mul_sub_one_x_y(<4 x float> %x, <4 x float> %y) {
define <4 x float> @test_v4f32_mul_y_sub_one_x(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1]
; AVX512-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -796,38 +796,38 @@ define <4 x float> @test_v4f32_mul_y_sub_one_x(<4 x float> %x, <4 x float> %y) {
define <4 x float> @test_v4f32_mul_sub_negone_x_y(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA4-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1,-1,-1,-1]
; AVX512-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmsub213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmsubps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmsub213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
@@ -837,38 +837,38 @@ define <4 x float> @test_v4f32_mul_sub_negone_x_y(<4 x float> %x, <4 x float> %y
define <4 x float> @test_v4f32_mul_y_sub_negone_x(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA4-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} xmm2 = [-1,-1,-1,-1]
; AVX512-INFS-NEXT: vsubps %xmm0, %xmm2, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmsub213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmsubps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmsub213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
@@ -878,35 +878,35 @@ define <4 x float> @test_v4f32_mul_y_sub_negone_x(<4 x float> %x, <4 x float> %y
define <4 x float> @test_v4f32_mul_sub_x_one_y(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; FMA-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -916,35 +916,35 @@ define <4 x float> @test_v4f32_mul_sub_x_one_y(<4 x float> %x, <4 x float> %y) {
define <4 x float> @test_v4f32_mul_y_sub_x_one(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; FMA-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -954,35 +954,35 @@ define <4 x float> @test_v4f32_mul_y_sub_x_one(<4 x float> %x, <4 x float> %y) {
define <4 x float> @test_v4f32_mul_sub_x_negone_y(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; FMA-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
@@ -992,35 +992,35 @@ define <4 x float> @test_v4f32_mul_sub_x_negone_y(<4 x float> %x, <4 x float> %y
define <4 x float> @test_v4f32_mul_y_sub_x_negone(<4 x float> %x, <4 x float> %y) {
; FMA-INFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; FMA-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-INFS-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
@@ -1034,7 +1034,7 @@ define <4 x float> @test_v4f32_mul_y_sub_x_negone(<4 x float> %x, <4 x float> %y
define float @test_f32_interp(float %x, float %y, float %t) {
; FMA-INFS-LABEL: test_f32_interp:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; FMA-INFS-NEXT: vsubss %xmm2, %xmm3, %xmm3
; FMA-INFS-NEXT: vmulss %xmm3, %xmm1, %xmm1
@@ -1042,7 +1042,7 @@ define float @test_f32_interp(float %x, float %y, float %t) {
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_f32_interp:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; FMA4-INFS-NEXT: vsubss %xmm2, %xmm3, %xmm3
; FMA4-INFS-NEXT: vmulss %xmm3, %xmm1, %xmm1
@@ -1050,7 +1050,7 @@ define float @test_f32_interp(float %x, float %y, float %t) {
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_f32_interp:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX512-INFS-NEXT: vsubss %xmm2, %xmm3, %xmm3
; AVX512-INFS-NEXT: vmulss %xmm3, %xmm1, %xmm1
@@ -1058,19 +1058,19 @@ define float @test_f32_interp(float %x, float %y, float %t) {
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_f32_interp:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213ss %xmm1, %xmm2, %xmm1
; FMA-NOINFS-NEXT: vfmadd213ss %xmm1, %xmm2, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_f32_interp:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddss %xmm1, %xmm1, %xmm2, %xmm1
; FMA4-NOINFS-NEXT: vfmaddss %xmm1, %xmm2, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_f32_interp:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213ss %xmm1, %xmm2, %xmm1
; AVX512-NOINFS-NEXT: vfmadd213ss %xmm1, %xmm2, %xmm0
; AVX512-NOINFS-NEXT: retq
@@ -1083,7 +1083,7 @@ define float @test_f32_interp(float %x, float %y, float %t) {
define <4 x float> @test_v4f32_interp(<4 x float> %x, <4 x float> %y, <4 x float> %t) {
; FMA-INFS-LABEL: test_v4f32_interp:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubps %xmm2, %xmm3, %xmm3
; FMA-INFS-NEXT: vmulps %xmm3, %xmm1, %xmm1
@@ -1091,7 +1091,7 @@ define <4 x float> @test_v4f32_interp(<4 x float> %x, <4 x float> %y, <4 x float
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f32_interp:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubps %xmm2, %xmm3, %xmm3
; FMA4-INFS-NEXT: vmulps %xmm3, %xmm1, %xmm1
@@ -1099,7 +1099,7 @@ define <4 x float> @test_v4f32_interp(<4 x float> %x, <4 x float> %y, <4 x float
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f32_interp:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
; AVX512-INFS-NEXT: vsubps %xmm2, %xmm3, %xmm3
; AVX512-INFS-NEXT: vmulps %xmm3, %xmm1, %xmm1
@@ -1107,19 +1107,19 @@ define <4 x float> @test_v4f32_interp(<4 x float> %x, <4 x float> %y, <4 x float
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f32_interp:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213ps %xmm1, %xmm2, %xmm1
; FMA-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm2, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f32_interp:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddps %xmm1, %xmm1, %xmm2, %xmm1
; FMA4-NOINFS-NEXT: vfmaddps %xmm1, %xmm2, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f32_interp:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213ps %xmm1, %xmm2, %xmm1
; AVX512-NOINFS-NEXT: vfmadd213ps %xmm1, %xmm2, %xmm0
; AVX512-NOINFS-NEXT: retq
@@ -1132,7 +1132,7 @@ define <4 x float> @test_v4f32_interp(<4 x float> %x, <4 x float> %y, <4 x float
define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float> %t) {
; FMA-INFS-LABEL: test_v8f32_interp:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubps %ymm2, %ymm3, %ymm3
; FMA-INFS-NEXT: vmulps %ymm3, %ymm1, %ymm1
@@ -1140,7 +1140,7 @@ define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v8f32_interp:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubps %ymm2, %ymm3, %ymm3
; FMA4-INFS-NEXT: vmulps %ymm3, %ymm1, %ymm1
@@ -1148,7 +1148,7 @@ define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v8f32_interp:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1]
; AVX512-INFS-NEXT: vsubps %ymm2, %ymm3, %ymm3
; AVX512-INFS-NEXT: vmulps %ymm3, %ymm1, %ymm1
@@ -1156,19 +1156,19 @@ define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v8f32_interp:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213ps %ymm1, %ymm2, %ymm1
; FMA-NOINFS-NEXT: vfmadd213ps %ymm1, %ymm2, %ymm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v8f32_interp:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddps %ymm1, %ymm1, %ymm2, %ymm1
; FMA4-NOINFS-NEXT: vfmaddps %ymm1, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v8f32_interp:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213ps %ymm1, %ymm2, %ymm1
; AVX512-NOINFS-NEXT: vfmadd213ps %ymm1, %ymm2, %ymm0
; AVX512-NOINFS-NEXT: retq
@@ -1181,7 +1181,7 @@ define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float
define double @test_f64_interp(double %x, double %y, double %t) {
; FMA-INFS-LABEL: test_f64_interp:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; FMA-INFS-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; FMA-INFS-NEXT: vmulsd %xmm3, %xmm1, %xmm1
@@ -1189,7 +1189,7 @@ define double @test_f64_interp(double %x, double %y, double %t) {
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_f64_interp:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; FMA4-INFS-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; FMA4-INFS-NEXT: vmulsd %xmm3, %xmm1, %xmm1
@@ -1197,7 +1197,7 @@ define double @test_f64_interp(double %x, double %y, double %t) {
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_f64_interp:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512-INFS-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; AVX512-INFS-NEXT: vmulsd %xmm3, %xmm1, %xmm1
@@ -1205,19 +1205,19 @@ define double @test_f64_interp(double %x, double %y, double %t) {
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_f64_interp:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213sd %xmm1, %xmm2, %xmm1
; FMA-NOINFS-NEXT: vfmadd213sd %xmm1, %xmm2, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_f64_interp:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddsd %xmm1, %xmm1, %xmm2, %xmm1
; FMA4-NOINFS-NEXT: vfmaddsd %xmm1, %xmm2, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_f64_interp:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213sd %xmm1, %xmm2, %xmm1
; AVX512-NOINFS-NEXT: vfmadd213sd %xmm1, %xmm2, %xmm0
; AVX512-NOINFS-NEXT: retq
@@ -1230,7 +1230,7 @@ define double @test_f64_interp(double %x, double %y, double %t) {
define <2 x double> @test_v2f64_interp(<2 x double> %x, <2 x double> %y, <2 x double> %t) {
; FMA-INFS-LABEL: test_v2f64_interp:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubpd %xmm2, %xmm3, %xmm3
; FMA-INFS-NEXT: vmulpd %xmm3, %xmm1, %xmm1
@@ -1238,7 +1238,7 @@ define <2 x double> @test_v2f64_interp(<2 x double> %x, <2 x double> %y, <2 x do
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v2f64_interp:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubpd %xmm2, %xmm3, %xmm3
; FMA4-INFS-NEXT: vmulpd %xmm3, %xmm1, %xmm1
@@ -1246,7 +1246,7 @@ define <2 x double> @test_v2f64_interp(<2 x double> %x, <2 x double> %y, <2 x do
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v2f64_interp:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
; AVX512-INFS-NEXT: vsubpd %xmm2, %xmm3, %xmm3
; AVX512-INFS-NEXT: vmulpd %xmm3, %xmm1, %xmm1
@@ -1254,19 +1254,19 @@ define <2 x double> @test_v2f64_interp(<2 x double> %x, <2 x double> %y, <2 x do
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v2f64_interp:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213pd %xmm1, %xmm2, %xmm1
; FMA-NOINFS-NEXT: vfmadd213pd %xmm1, %xmm2, %xmm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v2f64_interp:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddpd %xmm1, %xmm1, %xmm2, %xmm1
; FMA4-NOINFS-NEXT: vfmaddpd %xmm1, %xmm2, %xmm0, %xmm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v2f64_interp:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213pd %xmm1, %xmm2, %xmm1
; AVX512-NOINFS-NEXT: vfmadd213pd %xmm1, %xmm2, %xmm0
; AVX512-NOINFS-NEXT: retq
@@ -1279,7 +1279,7 @@ define <2 x double> @test_v2f64_interp(<2 x double> %x, <2 x double> %y, <2 x do
define <4 x double> @test_v4f64_interp(<4 x double> %x, <4 x double> %y, <4 x double> %t) {
; FMA-INFS-LABEL: test_v4f64_interp:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubpd %ymm2, %ymm3, %ymm3
; FMA-INFS-NEXT: vmulpd %ymm3, %ymm1, %ymm1
@@ -1287,7 +1287,7 @@ define <4 x double> @test_v4f64_interp(<4 x double> %x, <4 x double> %y, <4 x do
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v4f64_interp:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubpd %ymm2, %ymm3, %ymm3
; FMA4-INFS-NEXT: vmulpd %ymm3, %ymm1, %ymm1
@@ -1295,7 +1295,7 @@ define <4 x double> @test_v4f64_interp(<4 x double> %x, <4 x double> %y, <4 x do
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v4f64_interp:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastsd {{.*#+}} ymm3 = [1,1,1,1]
; AVX512-INFS-NEXT: vsubpd %ymm2, %ymm3, %ymm3
; AVX512-INFS-NEXT: vmulpd %ymm3, %ymm1, %ymm1
@@ -1303,19 +1303,19 @@ define <4 x double> @test_v4f64_interp(<4 x double> %x, <4 x double> %y, <4 x do
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v4f64_interp:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213pd %ymm1, %ymm2, %ymm1
; FMA-NOINFS-NEXT: vfmadd213pd %ymm1, %ymm2, %ymm0
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v4f64_interp:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddpd %ymm1, %ymm1, %ymm2, %ymm1
; FMA4-NOINFS-NEXT: vfmaddpd %ymm1, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v4f64_interp:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213pd %ymm1, %ymm2, %ymm1
; AVX512-NOINFS-NEXT: vfmadd213pd %ymm1, %ymm2, %ymm0
; AVX512-NOINFS-NEXT: retq
@@ -1332,17 +1332,17 @@ define <4 x double> @test_v4f64_interp(<4 x double> %x, <4 x double> %y, <4 x do
define <4 x float> @test_v4f32_fneg_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; FMA-LABEL: test_v4f32_fneg_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fneg_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fneg_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%mul = fmul <4 x float> %a0, %a1
@@ -1353,17 +1353,17 @@ define <4 x float> @test_v4f32_fneg_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x
define <4 x double> @test_v4f64_fneg_fmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; FMA-LABEL: test_v4f64_fneg_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_fneg_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_fneg_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%mul = fmul <4 x double> %a0, %a1
@@ -1374,17 +1374,17 @@ define <4 x double> @test_v4f64_fneg_fmsub(<4 x double> %a0, <4 x double> %a1, <
define <4 x float> @test_v4f32_fneg_fnmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; FMA-LABEL: test_v4f32_fneg_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fneg_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fneg_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%mul = fmul <4 x float> %a0, %a1
@@ -1396,17 +1396,17 @@ define <4 x float> @test_v4f32_fneg_fnmadd(<4 x float> %a0, <4 x float> %a1, <4
define <4 x double> @test_v4f64_fneg_fnmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; FMA-LABEL: test_v4f64_fneg_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_fneg_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_fneg_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
%mul = fmul <4 x double> %a0, %a1
@@ -1422,17 +1422,17 @@ define <4 x double> @test_v4f64_fneg_fnmsub(<4 x double> %a0, <4 x double> %a1,
define <4 x float> @test_v4f32_fma_x_c1_fmul_x_c2(<4 x float> %x) #0 {
; FMA-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmulps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: retq
%m0 = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -1447,17 +1447,17 @@ define <4 x float> @test_v4f32_fma_x_c1_fmul_x_c2(<4 x float> %x) #0 {
define <4 x float> @test_v4f32_fma_fmul_x_c1_c2_y(<4 x float> %x, <4 x float> %y) #0 {
; FMA-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd132ps {{.*}}(%rip), %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps %xmm1, {{.*}}(%rip), %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd132ps {{.*}}(%rip), %xmm1, %xmm0
; AVX512-NEXT: retq
%m0 = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -1470,19 +1470,19 @@ define <4 x float> @test_v4f32_fma_fmul_x_c1_c2_y(<4 x float> %x, <4 x float> %y
define double @test_f64_fneg_fmul(double %x, double %y) #0 {
; FMA-LABEL: test_f64_fneg_fmul:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; FMA-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_fneg_fmul:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; FMA4-NEXT: vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_fneg_fmul:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
@@ -1493,19 +1493,19 @@ define double @test_f64_fneg_fmul(double %x, double %y) #0 {
define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 {
; FMA-LABEL: test_v4f32_fneg_fmul:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FMA-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_fneg_fmul:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FMA4-NEXT: vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_fneg_fmul:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
@@ -1516,19 +1516,19 @@ define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 {
define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) #0 {
; FMA-LABEL: test_v4f64_fneg_fmul:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; FMA-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_fneg_fmul:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; FMA4-NEXT: vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_fneg_fmul:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0
; AVX512-NEXT: retq
@@ -1539,19 +1539,19 @@ define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) #0 {
define <4 x double> @test_v4f64_fneg_fmul_no_nsz(<4 x double> %x, <4 x double> %y) #0 {
; FMA-LABEL: test_v4f64_fneg_fmul_no_nsz:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; FMA-NEXT: vxorpd {{.*}}(%rip), %ymm0, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_fneg_fmul_no_nsz:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; FMA4-NEXT: vxorpd {{.*}}(%rip), %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_fneg_fmul_no_nsz:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vxorpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
; AVX512-NEXT: retq
diff --git a/test/CodeGen/X86/fma_patterns_wide.ll b/test/CodeGen/X86/fma_patterns_wide.ll
index 2b12c37fa2f..9b2d7ff2bb9 100644
--- a/test/CodeGen/X86/fma_patterns_wide.ll
+++ b/test/CodeGen/X86/fma_patterns_wide.ll
@@ -14,19 +14,19 @@
define <16 x float> @test_16f32_fmadd(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; FMA-LABEL: test_16f32_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213ps %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfmadd213ps %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_16f32_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfmaddps %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_16f32_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -36,19 +36,19 @@ define <16 x float> @test_16f32_fmadd(<16 x float> %a0, <16 x float> %a1, <16 x
define <8 x double> @test_8f64_fmadd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; FMA-LABEL: test_8f64_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213pd %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfmadd213pd %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f64_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddpd %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfmaddpd %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f64_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%x = fmul <8 x double> %a0, %a1
@@ -62,19 +62,19 @@ define <8 x double> @test_8f64_fmadd(<8 x double> %a0, <8 x double> %a1, <8 x do
define <16 x float> @test_16f32_fmsub(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; FMA-LABEL: test_16f32_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfmsub213ps %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_16f32_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfmsubps %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_16f32_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -84,19 +84,19 @@ define <16 x float> @test_16f32_fmsub(<16 x float> %a0, <16 x float> %a1, <16 x
define <8 x double> @test_8f64_fmsub(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; FMA-LABEL: test_8f64_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213pd %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfmsub213pd %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f64_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfmsubpd %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f64_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213pd %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%x = fmul <8 x double> %a0, %a1
@@ -110,19 +110,19 @@ define <8 x double> @test_8f64_fmsub(<8 x double> %a0, <8 x double> %a1, <8 x do
define <16 x float> @test_16f32_fnmadd(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; FMA-LABEL: test_16f32_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213ps %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfnmadd213ps %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_16f32_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddps %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfnmaddps %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_16f32_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -132,19 +132,19 @@ define <16 x float> @test_16f32_fnmadd(<16 x float> %a0, <16 x float> %a1, <16 x
define <8 x double> @test_8f64_fnmadd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; FMA-LABEL: test_8f64_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213pd %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfnmadd213pd %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f64_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddpd %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfnmaddpd %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f64_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213pd %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%x = fmul <8 x double> %a0, %a1
@@ -158,19 +158,19 @@ define <8 x double> @test_8f64_fnmadd(<8 x double> %a0, <8 x double> %a1, <8 x d
define <16 x float> @test_16f32_fnmsub(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; FMA-LABEL: test_16f32_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfnmsub213ps %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_16f32_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfnmsubps %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_16f32_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -181,19 +181,19 @@ define <16 x float> @test_16f32_fnmsub(<16 x float> %a0, <16 x float> %a1, <16 x
define <8 x double> @test_8f64_fnmsub(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; FMA-LABEL: test_8f64_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213pd %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfnmsub213pd %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f64_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubpd %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfnmsubpd %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f64_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%x = fmul <8 x double> %a0, %a1
@@ -208,19 +208,19 @@ define <8 x double> @test_8f64_fnmsub(<8 x double> %a0, <8 x double> %a1, <8 x d
define <16 x float> @test_16f32_fmadd_load(<16 x float>* %a0, <16 x float> %a1, <16 x float> %a2) {
; FMA-LABEL: test_16f32_fmadd_load:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd132ps (%rdi), %ymm2, %ymm0
; FMA-NEXT: vfmadd132ps 32(%rdi), %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_16f32_fmadd_load:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps %ymm2, (%rdi), %ymm0, %ymm0
; FMA4-NEXT: vfmaddps %ymm3, 32(%rdi), %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_16f32_fmadd_load:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd132ps (%rdi), %zmm1, %zmm0
; AVX512-NEXT: retq
%x = load <16 x float>, <16 x float>* %a0
@@ -231,19 +231,19 @@ define <16 x float> @test_16f32_fmadd_load(<16 x float>* %a0, <16 x float> %a1,
define <8 x double> @test_8f64_fmsub_load(<8 x double>* %a0, <8 x double> %a1, <8 x double> %a2) {
; FMA-LABEL: test_8f64_fmsub_load:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub132pd (%rdi), %ymm2, %ymm0
; FMA-NEXT: vfmsub132pd 32(%rdi), %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_8f64_fmsub_load:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubpd %ymm2, (%rdi), %ymm0, %ymm0
; FMA4-NEXT: vfmsubpd %ymm3, 32(%rdi), %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_8f64_fmsub_load:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub132pd (%rdi), %zmm1, %zmm0
; AVX512-NEXT: retq
%x = load <8 x double>, <8 x double>* %a0
@@ -258,7 +258,7 @@ define <8 x double> @test_8f64_fmsub_load(<8 x double>* %a0, <8 x double> %a1, <
define <16 x float> @test_v16f32_mul_add_x_one_y(<16 x float> %x, <16 x float> %y) {
; FMA-INFS-LABEL: test_v16f32_mul_add_x_one_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vaddps %ymm4, %ymm1, %ymm1
; FMA-INFS-NEXT: vaddps %ymm4, %ymm0, %ymm0
@@ -267,7 +267,7 @@ define <16 x float> @test_v16f32_mul_add_x_one_y(<16 x float> %x, <16 x float> %
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v16f32_mul_add_x_one_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vaddps %ymm4, %ymm1, %ymm1
; FMA4-INFS-NEXT: vaddps %ymm4, %ymm0, %ymm0
@@ -276,25 +276,25 @@ define <16 x float> @test_v16f32_mul_add_x_one_y(<16 x float> %x, <16 x float> %
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v16f32_mul_add_x_one_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512-INFS-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v16f32_mul_add_x_one_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmadd213ps %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfmadd213ps %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v16f32_mul_add_x_one_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmaddps %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfmaddps %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v16f32_mul_add_x_one_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmadd213ps %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%a = fadd <16 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
@@ -304,7 +304,7 @@ define <16 x float> @test_v16f32_mul_add_x_one_y(<16 x float> %x, <16 x float> %
define <8 x double> @test_v8f64_mul_y_add_x_one(<8 x double> %x, <8 x double> %y) {
; FMA-INFS-LABEL: test_v8f64_mul_y_add_x_one:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vaddpd %ymm4, %ymm1, %ymm1
; FMA-INFS-NEXT: vaddpd %ymm4, %ymm0, %ymm0
@@ -313,7 +313,7 @@ define <8 x double> @test_v8f64_mul_y_add_x_one(<8 x double> %x, <8 x double> %y
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v8f64_mul_y_add_x_one:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vaddpd %ymm4, %ymm1, %ymm1
; FMA4-INFS-NEXT: vaddpd %ymm4, %ymm0, %ymm0
@@ -322,25 +322,25 @@ define <8 x double> @test_v8f64_mul_y_add_x_one(<8 x double> %x, <8 x double> %y
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v8f64_mul_y_add_x_one:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vaddpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512-INFS-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v8f64_mul_y_add_x_one:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmadd213pd %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfmadd213pd %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v8f64_mul_y_add_x_one:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmaddpd %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfmaddpd %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v8f64_mul_y_add_x_one:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmadd213pd %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%a = fadd <8 x double> %x, <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>
@@ -350,7 +350,7 @@ define <8 x double> @test_v8f64_mul_y_add_x_one(<8 x double> %x, <8 x double> %y
define <16 x float> @test_v16f32_mul_add_x_negone_y(<16 x float> %x, <16 x float> %y) {
; FMA-INFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA-INFS-NEXT: vaddps %ymm4, %ymm1, %ymm1
; FMA-INFS-NEXT: vaddps %ymm4, %ymm0, %ymm0
@@ -359,7 +359,7 @@ define <16 x float> @test_v16f32_mul_add_x_negone_y(<16 x float> %x, <16 x float
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA4-INFS-NEXT: vaddps %ymm4, %ymm1, %ymm1
; FMA4-INFS-NEXT: vaddps %ymm4, %ymm0, %ymm0
@@ -368,25 +368,25 @@ define <16 x float> @test_v16f32_mul_add_x_negone_y(<16 x float> %x, <16 x float
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512-INFS-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmsub213ps %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfmsub213ps %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmsubps %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfmsubps %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmsub213ps %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%a = fadd <16 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>
@@ -396,7 +396,7 @@ define <16 x float> @test_v16f32_mul_add_x_negone_y(<16 x float> %x, <16 x float
define <8 x double> @test_v8f64_mul_y_add_x_negone(<8 x double> %x, <8 x double> %y) {
; FMA-INFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA-INFS-NEXT: vaddpd %ymm4, %ymm1, %ymm1
; FMA-INFS-NEXT: vaddpd %ymm4, %ymm0, %ymm0
@@ -405,7 +405,7 @@ define <8 x double> @test_v8f64_mul_y_add_x_negone(<8 x double> %x, <8 x double>
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA4-INFS-NEXT: vaddpd %ymm4, %ymm1, %ymm1
; FMA4-INFS-NEXT: vaddpd %ymm4, %ymm0, %ymm0
@@ -414,25 +414,25 @@ define <8 x double> @test_v8f64_mul_y_add_x_negone(<8 x double> %x, <8 x double>
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vaddpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512-INFS-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmsub213pd %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfmsub213pd %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmsubpd %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfmsubpd %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmsub213pd %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%a = fadd <8 x double> %x, <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>
@@ -442,7 +442,7 @@ define <8 x double> @test_v8f64_mul_y_add_x_negone(<8 x double> %x, <8 x double>
define <16 x float> @test_v16f32_mul_sub_one_x_y(<16 x float> %x, <16 x float> %y) {
; FMA-INFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubps %ymm1, %ymm4, %ymm1
; FMA-INFS-NEXT: vsubps %ymm0, %ymm4, %ymm0
@@ -451,7 +451,7 @@ define <16 x float> @test_v16f32_mul_sub_one_x_y(<16 x float> %x, <16 x float> %
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubps %ymm1, %ymm4, %ymm1
; FMA4-INFS-NEXT: vsubps %ymm0, %ymm4, %ymm0
@@ -460,26 +460,26 @@ define <16 x float> @test_v16f32_mul_sub_one_x_y(<16 x float> %x, <16 x float> %
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} zmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512-INFS-NEXT: vsubps %zmm0, %zmm2, %zmm0
; AVX512-INFS-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213ps %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfnmadd213ps %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddps %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfnmaddps %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213ps %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -489,7 +489,7 @@ define <16 x float> @test_v16f32_mul_sub_one_x_y(<16 x float> %x, <16 x float> %
define <8 x double> @test_v8f64_mul_y_sub_one_x(<8 x double> %x, <8 x double> %y) {
; FMA-INFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubpd %ymm1, %ymm4, %ymm1
; FMA-INFS-NEXT: vsubpd %ymm0, %ymm4, %ymm0
@@ -498,7 +498,7 @@ define <8 x double> @test_v8f64_mul_y_sub_one_x(<8 x double> %x, <8 x double> %y
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubpd %ymm1, %ymm4, %ymm1
; FMA4-INFS-NEXT: vsubpd %ymm0, %ymm4, %ymm0
@@ -507,26 +507,26 @@ define <8 x double> @test_v8f64_mul_y_sub_one_x(<8 x double> %x, <8 x double> %y
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastsd {{.*#+}} zmm2 = [1,1,1,1,1,1,1,1]
; AVX512-INFS-NEXT: vsubpd %zmm0, %zmm2, %zmm0
; AVX512-INFS-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213pd %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfnmadd213pd %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddpd %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfnmaddpd %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213pd %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <8 x double> <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>, %x
@@ -536,7 +536,7 @@ define <8 x double> @test_v8f64_mul_y_sub_one_x(<8 x double> %x, <8 x double> %y
define <16 x float> @test_v16f32_mul_sub_negone_x_y(<16 x float> %x, <16 x float> %y) {
; FMA-INFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA-INFS-NEXT: vsubps %ymm1, %ymm4, %ymm1
; FMA-INFS-NEXT: vsubps %ymm0, %ymm4, %ymm0
@@ -545,7 +545,7 @@ define <16 x float> @test_v16f32_mul_sub_negone_x_y(<16 x float> %x, <16 x float
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA4-INFS-NEXT: vsubps %ymm1, %ymm4, %ymm1
; FMA4-INFS-NEXT: vsubps %ymm0, %ymm4, %ymm0
@@ -554,26 +554,26 @@ define <16 x float> @test_v16f32_mul_sub_negone_x_y(<16 x float> %x, <16 x float
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} zmm2 = [-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]
; AVX512-INFS-NEXT: vsubps %zmm0, %zmm2, %zmm0
; AVX512-INFS-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmsub213ps %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfnmsub213ps %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmsubps %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfnmsubps %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmsub213ps %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
 %s = fsub <16 x float> <float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>, %x
@@ -583,7 +583,7 @@ define <16 x float> @test_v16f32_mul_sub_negone_x_y(<16 x float> %x, <16 x float
define <8 x double> @test_v8f64_mul_y_sub_negone_x(<8 x double> %x, <8 x double> %y) {
; FMA-INFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA-INFS-NEXT: vsubpd %ymm1, %ymm4, %ymm1
; FMA-INFS-NEXT: vsubpd %ymm0, %ymm4, %ymm0
@@ -592,7 +592,7 @@ define <8 x double> @test_v8f64_mul_y_sub_negone_x(<8 x double> %x, <8 x double>
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA4-INFS-NEXT: vsubpd %ymm1, %ymm4, %ymm1
; FMA4-INFS-NEXT: vsubpd %ymm0, %ymm4, %ymm0
@@ -601,26 +601,26 @@ define <8 x double> @test_v8f64_mul_y_sub_negone_x(<8 x double> %x, <8 x double>
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastsd {{.*#+}} zmm2 = [-1,-1,-1,-1,-1,-1,-1,-1]
; AVX512-INFS-NEXT: vsubpd %zmm0, %zmm2, %zmm0
; AVX512-INFS-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmsub213pd %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfnmsub213pd %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmsubpd %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfnmsubpd %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmsub213pd %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <8 x double> <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>, %x
@@ -630,7 +630,7 @@ define <8 x double> @test_v8f64_mul_y_sub_negone_x(<8 x double> %x, <8 x double>
define <16 x float> @test_v16f32_mul_sub_x_one_y(<16 x float> %x, <16 x float> %y) {
; FMA-INFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubps %ymm4, %ymm1, %ymm1
; FMA-INFS-NEXT: vsubps %ymm4, %ymm0, %ymm0
@@ -639,7 +639,7 @@ define <16 x float> @test_v16f32_mul_sub_x_one_y(<16 x float> %x, <16 x float> %
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubps %ymm4, %ymm1, %ymm1
; FMA4-INFS-NEXT: vsubps %ymm4, %ymm0, %ymm0
@@ -648,25 +648,25 @@ define <16 x float> @test_v16f32_mul_sub_x_one_y(<16 x float> %x, <16 x float> %
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vsubps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512-INFS-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmsub213ps %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfmsub213ps %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmsubps %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfmsubps %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmsub213ps %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <16 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
@@ -676,7 +676,7 @@ define <16 x float> @test_v16f32_mul_sub_x_one_y(<16 x float> %x, <16 x float> %
define <8 x double> @test_v8f64_mul_y_sub_x_one(<8 x double> %x, <8 x double> %y) {
; FMA-INFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubpd %ymm4, %ymm1, %ymm1
; FMA-INFS-NEXT: vsubpd %ymm4, %ymm0, %ymm0
@@ -685,7 +685,7 @@ define <8 x double> @test_v8f64_mul_y_sub_x_one(<8 x double> %x, <8 x double> %y
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubpd %ymm4, %ymm1, %ymm1
; FMA4-INFS-NEXT: vsubpd %ymm4, %ymm0, %ymm0
@@ -694,25 +694,25 @@ define <8 x double> @test_v8f64_mul_y_sub_x_one(<8 x double> %x, <8 x double> %y
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vsubpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512-INFS-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmsub213pd %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfmsub213pd %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmsubpd %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfmsubpd %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmsub213pd %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <8 x double> %x, <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>
@@ -722,7 +722,7 @@ define <8 x double> @test_v8f64_mul_y_sub_x_one(<8 x double> %x, <8 x double> %y
define <16 x float> @test_v16f32_mul_sub_x_negone_y(<16 x float> %x, <16 x float> %y) {
; FMA-INFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA-INFS-NEXT: vsubps %ymm4, %ymm1, %ymm1
; FMA-INFS-NEXT: vsubps %ymm4, %ymm0, %ymm0
@@ -731,7 +731,7 @@ define <16 x float> @test_v16f32_mul_sub_x_negone_y(<16 x float> %x, <16 x float
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA4-INFS-NEXT: vsubps %ymm4, %ymm1, %ymm1
; FMA4-INFS-NEXT: vsubps %ymm4, %ymm0, %ymm0
@@ -740,25 +740,25 @@ define <16 x float> @test_v16f32_mul_sub_x_negone_y(<16 x float> %x, <16 x float
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vsubps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512-INFS-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmadd213ps %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfmadd213ps %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmaddps %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfmaddps %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmadd213ps %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <16 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>
@@ -768,7 +768,7 @@ define <16 x float> @test_v16f32_mul_sub_x_negone_y(<16 x float> %x, <16 x float
define <8 x double> @test_v8f64_mul_y_sub_x_negone(<8 x double> %x, <8 x double> %y) {
; FMA-INFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA-INFS-NEXT: vsubpd %ymm4, %ymm1, %ymm1
; FMA-INFS-NEXT: vsubpd %ymm4, %ymm0, %ymm0
@@ -777,7 +777,7 @@ define <8 x double> @test_v8f64_mul_y_sub_x_negone(<8 x double> %x, <8 x double>
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
; FMA4-INFS-NEXT: vsubpd %ymm4, %ymm1, %ymm1
; FMA4-INFS-NEXT: vsubpd %ymm4, %ymm0, %ymm0
@@ -786,25 +786,25 @@ define <8 x double> @test_v8f64_mul_y_sub_x_negone(<8 x double> %x, <8 x double>
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vsubpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512-INFS-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfmadd213pd %ymm2, %ymm2, %ymm0
; FMA-NOINFS-NEXT: vfmadd213pd %ymm3, %ymm3, %ymm1
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfmaddpd %ymm2, %ymm2, %ymm0, %ymm0
; FMA4-NOINFS-NEXT: vfmaddpd %ymm3, %ymm3, %ymm1, %ymm1
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfmadd213pd %zmm1, %zmm1, %zmm0
; AVX512-NOINFS-NEXT: retq
%s = fsub <8 x double> %x, <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>
@@ -818,7 +818,7 @@ define <8 x double> @test_v8f64_mul_y_sub_x_negone(<8 x double> %x, <8 x double>
define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x float> %t) {
; FMA-INFS-LABEL: test_v16f32_interp:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovaps {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubps %ymm4, %ymm6, %ymm7
; FMA-INFS-NEXT: vsubps %ymm5, %ymm6, %ymm6
@@ -829,7 +829,7 @@ define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v16f32_interp:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovaps {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubps %ymm4, %ymm6, %ymm7
; FMA4-INFS-NEXT: vsubps %ymm5, %ymm6, %ymm6
@@ -840,7 +840,7 @@ define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v16f32_interp:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastss {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512-INFS-NEXT: vsubps %zmm2, %zmm3, %zmm3
; AVX512-INFS-NEXT: vmulps %zmm3, %zmm1, %zmm1
@@ -848,7 +848,7 @@ define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v16f32_interp:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213ps %ymm3, %ymm5, %ymm3
; FMA-NOINFS-NEXT: vfnmadd213ps %ymm2, %ymm4, %ymm2
; FMA-NOINFS-NEXT: vfmadd213ps %ymm2, %ymm4, %ymm0
@@ -856,7 +856,7 @@ define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v16f32_interp:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddps %ymm3, %ymm3, %ymm5, %ymm3
; FMA4-NOINFS-NEXT: vfnmaddps %ymm2, %ymm2, %ymm4, %ymm2
; FMA4-NOINFS-NEXT: vfmaddps %ymm2, %ymm4, %ymm0, %ymm0
@@ -864,7 +864,7 @@ define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v16f32_interp:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213ps %zmm1, %zmm2, %zmm1
; AVX512-NOINFS-NEXT: vfmadd213ps %zmm1, %zmm2, %zmm0
; AVX512-NOINFS-NEXT: retq
@@ -877,7 +877,7 @@ define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x
define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x double> %t) {
; FMA-INFS-LABEL: test_v8f64_interp:
-; FMA-INFS: # BB#0:
+; FMA-INFS: # %bb.0:
; FMA-INFS-NEXT: vmovapd {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-INFS-NEXT: vsubpd %ymm4, %ymm6, %ymm7
; FMA-INFS-NEXT: vsubpd %ymm5, %ymm6, %ymm6
@@ -888,7 +888,7 @@ define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x do
; FMA-INFS-NEXT: retq
;
; FMA4-INFS-LABEL: test_v8f64_interp:
-; FMA4-INFS: # BB#0:
+; FMA4-INFS: # %bb.0:
; FMA4-INFS-NEXT: vmovapd {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA4-INFS-NEXT: vsubpd %ymm4, %ymm6, %ymm7
; FMA4-INFS-NEXT: vsubpd %ymm5, %ymm6, %ymm6
@@ -899,7 +899,7 @@ define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x do
; FMA4-INFS-NEXT: retq
;
; AVX512-INFS-LABEL: test_v8f64_interp:
-; AVX512-INFS: # BB#0:
+; AVX512-INFS: # %bb.0:
; AVX512-INFS-NEXT: vbroadcastsd {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
; AVX512-INFS-NEXT: vsubpd %zmm2, %zmm3, %zmm3
; AVX512-INFS-NEXT: vmulpd %zmm3, %zmm1, %zmm1
@@ -907,7 +907,7 @@ define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x do
; AVX512-INFS-NEXT: retq
;
; FMA-NOINFS-LABEL: test_v8f64_interp:
-; FMA-NOINFS: # BB#0:
+; FMA-NOINFS: # %bb.0:
; FMA-NOINFS-NEXT: vfnmadd213pd %ymm3, %ymm5, %ymm3
; FMA-NOINFS-NEXT: vfnmadd213pd %ymm2, %ymm4, %ymm2
; FMA-NOINFS-NEXT: vfmadd213pd %ymm2, %ymm4, %ymm0
@@ -915,7 +915,7 @@ define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x do
; FMA-NOINFS-NEXT: retq
;
; FMA4-NOINFS-LABEL: test_v8f64_interp:
-; FMA4-NOINFS: # BB#0:
+; FMA4-NOINFS: # %bb.0:
; FMA4-NOINFS-NEXT: vfnmaddpd %ymm3, %ymm3, %ymm5, %ymm3
; FMA4-NOINFS-NEXT: vfnmaddpd %ymm2, %ymm2, %ymm4, %ymm2
; FMA4-NOINFS-NEXT: vfmaddpd %ymm2, %ymm4, %ymm0, %ymm0
@@ -923,7 +923,7 @@ define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x do
; FMA4-NOINFS-NEXT: retq
;
; AVX512-NOINFS-LABEL: test_v8f64_interp:
-; AVX512-NOINFS: # BB#0:
+; AVX512-NOINFS: # %bb.0:
; AVX512-NOINFS-NEXT: vfnmadd213pd %zmm1, %zmm2, %zmm1
; AVX512-NOINFS-NEXT: vfmadd213pd %zmm1, %zmm2, %zmm0
; AVX512-NOINFS-NEXT: retq
@@ -940,19 +940,19 @@ define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x do
define <16 x float> @test_v16f32_fneg_fmadd(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) #0 {
; FMA-LABEL: test_v16f32_fneg_fmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmsub213ps %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfnmsub213ps %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_fneg_fmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmsubps %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfnmsubps %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_fneg_fmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%mul = fmul <16 x float> %a0, %a1
@@ -963,19 +963,19 @@ define <16 x float> @test_v16f32_fneg_fmadd(<16 x float> %a0, <16 x float> %a1,
define <8 x double> @test_v8f64_fneg_fmsub(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) #0 {
; FMA-LABEL: test_v8f64_fneg_fmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfnmadd213pd %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfnmadd213pd %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_fneg_fmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfnmaddpd %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfnmaddpd %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_fneg_fmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfnmadd213pd %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%mul = fmul <8 x double> %a0, %a1
@@ -986,19 +986,19 @@ define <8 x double> @test_v8f64_fneg_fmsub(<8 x double> %a0, <8 x double> %a1, <
define <16 x float> @test_v16f32_fneg_fnmadd(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) #0 {
; FMA-LABEL: test_v16f32_fneg_fnmadd:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmsub213ps %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfmsub213ps %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_fneg_fnmadd:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmsubps %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfmsubps %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_fneg_fnmadd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%mul = fmul <16 x float> %a0, %a1
@@ -1010,19 +1010,19 @@ define <16 x float> @test_v16f32_fneg_fnmadd(<16 x float> %a0, <16 x float> %a1,
define <8 x double> @test_v8f64_fneg_fnmsub(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) #0 {
; FMA-LABEL: test_v8f64_fneg_fnmsub:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd213pd %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfmadd213pd %ymm5, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_fneg_fnmsub:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddpd %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfmaddpd %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_fneg_fnmsub:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
%mul = fmul <8 x double> %a0, %a1
@@ -1038,19 +1038,19 @@ define <8 x double> @test_v8f64_fneg_fnmsub(<8 x double> %a0, <8 x double> %a1,
define <16 x float> @test_v16f32_fma_x_c1_fmul_x_c2(<16 x float> %x) #0 {
; FMA-LABEL: test_v16f32_fma_x_c1_fmul_x_c2:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
; FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_fma_x_c1_fmul_x_c2:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
; FMA4-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_fma_x_c1_fmul_x_c2:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%m0 = fmul <16 x float> %x, <float 17.0, float 16.0, float 15.0, float 14.0, float 13.0, float 12.0, float 11.0, float 10.0, float 9.0, float 8.0, float 7.0, float 6.0, float 5.0, float 4.0, float 3.0, float 2.0>
@@ -1065,19 +1065,19 @@ define <16 x float> @test_v16f32_fma_x_c1_fmul_x_c2(<16 x float> %x) #0 {
define <16 x float> @test_v16f32_fma_fmul_x_c1_c2_y(<16 x float> %x, <16 x float> %y) #0 {
; FMA-LABEL: test_v16f32_fma_fmul_x_c1_c2_y:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vfmadd132ps {{.*}}(%rip), %ymm2, %ymm0
; FMA-NEXT: vfmadd132ps {{.*}}(%rip), %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_fma_fmul_x_c1_c2_y:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vfmaddps %ymm2, {{.*}}(%rip), %ymm0, %ymm0
; FMA4-NEXT: vfmaddps %ymm3, {{.*}}(%rip), %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_fma_fmul_x_c1_c2_y:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vfmadd132ps {{.*}}(%rip), %zmm1, %zmm0
; AVX512-NEXT: retq
%m0 = fmul <16 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>
@@ -1090,21 +1090,21 @@ define <16 x float> @test_v16f32_fma_fmul_x_c1_c2_y(<16 x float> %x, <16 x float
define <16 x float> @test_v16f32_fneg_fmul(<16 x float> %x, <16 x float> %y) #0 {
; FMA-LABEL: test_v16f32_fneg_fmul:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vxorps %xmm4, %xmm4, %xmm4
; FMA-NEXT: vfnmsub213ps %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfnmsub213ps %ymm4, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_fneg_fmul:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vxorps %xmm4, %xmm4, %xmm4
; FMA4-NEXT: vfnmsubps %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfnmsubps %ymm4, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_fneg_fmul:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
@@ -1115,21 +1115,21 @@ define <16 x float> @test_v16f32_fneg_fmul(<16 x float> %x, <16 x float> %y) #0
define <8 x double> @test_v8f64_fneg_fmul(<8 x double> %x, <8 x double> %y) #0 {
; FMA-LABEL: test_v8f64_fneg_fmul:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; FMA-NEXT: vfnmsub213pd %ymm4, %ymm2, %ymm0
; FMA-NEXT: vfnmsub213pd %ymm4, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_fneg_fmul:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; FMA4-NEXT: vfnmsubpd %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfnmsubpd %ymm4, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_fneg_fmul:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
; AVX512-NEXT: retq
@@ -1140,7 +1140,7 @@ define <8 x double> @test_v8f64_fneg_fmul(<8 x double> %x, <8 x double> %y) #0 {
define <8 x double> @test_v8f64_fneg_fmul_no_nsz(<8 x double> %x, <8 x double> %y) #0 {
; FMA-LABEL: test_v8f64_fneg_fmul_no_nsz:
-; FMA: # BB#0:
+; FMA: # %bb.0:
; FMA-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; FMA-NEXT: vmulpd %ymm2, %ymm0, %ymm0
; FMA-NEXT: vmovapd {{.*#+}} ymm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
@@ -1149,7 +1149,7 @@ define <8 x double> @test_v8f64_fneg_fmul_no_nsz(<8 x double> %x, <8 x double> %
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_fneg_fmul_no_nsz:
-; FMA4: # BB#0:
+; FMA4: # %bb.0:
; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; FMA4-NEXT: vmulpd %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vmovapd {{.*#+}} ymm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
@@ -1158,7 +1158,7 @@ define <8 x double> @test_v8f64_fneg_fmul_no_nsz(<8 x double> %x, <8 x double> %
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_fneg_fmul_no_nsz:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vxorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512-NEXT: retq
diff --git a/test/CodeGen/X86/fmaddsub-combine.ll b/test/CodeGen/X86/fmaddsub-combine.ll
index 3ce5f132277..5c7cefdab31 100644
--- a/test/CodeGen/X86/fmaddsub-combine.ll
+++ b/test/CodeGen/X86/fmaddsub-combine.ll
@@ -7,12 +7,12 @@
define <2 x double> @mul_addsub_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
; FMA3-LABEL: mul_addsub_pd128:
-; FMA3: # BB#0: # %entry
+; FMA3: # %bb.0: # %entry
; FMA3-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_addsub_pd128:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vfmaddsubpd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
entry:
@@ -25,12 +25,12 @@ entry:
define <4 x float> @mul_addsub_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
; FMA3-LABEL: mul_addsub_ps128:
-; FMA3: # BB#0: # %entry
+; FMA3: # %bb.0: # %entry
; FMA3-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_addsub_ps128:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vfmaddsubps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
entry:
@@ -43,12 +43,12 @@ entry:
define <4 x double> @mul_addsub_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
; FMA3-LABEL: mul_addsub_pd256:
-; FMA3: # BB#0: # %entry
+; FMA3: # %bb.0: # %entry
; FMA3-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_addsub_pd256:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vfmaddsubpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
entry:
@@ -61,12 +61,12 @@ entry:
define <8 x float> @mul_addsub_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
; FMA3-LABEL: mul_addsub_ps256:
-; FMA3: # BB#0: # %entry
+; FMA3: # %bb.0: # %entry
; FMA3-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_addsub_ps256:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vfmaddsubps %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
entry:
@@ -79,18 +79,18 @@ entry:
define <8 x double> @mul_addsub_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
; FMA3_256-LABEL: mul_addsub_pd512:
-; FMA3_256: # BB#0: # %entry
+; FMA3_256: # %bb.0: # %entry
; FMA3_256-NEXT: vfmaddsub213pd %ymm4, %ymm2, %ymm0
; FMA3_256-NEXT: vfmaddsub213pd %ymm5, %ymm3, %ymm1
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_addsub_pd512:
-; FMA3_512: # BB#0: # %entry
+; FMA3_512: # %bb.0: # %entry
; FMA3_512-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0
; FMA3_512-NEXT: retq
;
; FMA4-LABEL: mul_addsub_pd512:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vfmaddsubpd %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfmaddsubpd %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
@@ -104,18 +104,18 @@ entry:
define <16 x float> @mul_addsub_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
; FMA3_256-LABEL: mul_addsub_ps512:
-; FMA3_256: # BB#0: # %entry
+; FMA3_256: # %bb.0: # %entry
; FMA3_256-NEXT: vfmaddsub213ps %ymm4, %ymm2, %ymm0
; FMA3_256-NEXT: vfmaddsub213ps %ymm5, %ymm3, %ymm1
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_addsub_ps512:
-; FMA3_512: # BB#0: # %entry
+; FMA3_512: # %bb.0: # %entry
; FMA3_512-NEXT: vfmaddsub213ps %zmm2, %zmm1, %zmm0
; FMA3_512-NEXT: retq
;
; FMA4-LABEL: mul_addsub_ps512:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vfmaddsubps %ymm4, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vfmaddsubps %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
diff --git a/test/CodeGen/X86/fmf-flags.ll b/test/CodeGen/X86/fmf-flags.ll
index 652c1d1b166..00c73c1ffb8 100644
--- a/test/CodeGen/X86/fmf-flags.ll
+++ b/test/CodeGen/X86/fmf-flags.ll
@@ -6,14 +6,14 @@ declare float @llvm.sqrt.f32(float %x);
define float @fast_recip_sqrt(float %x) {
; X64-LABEL: fast_recip_sqrt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sqrtss %xmm0, %xmm1
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: divss %xmm1, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: fast_recip_sqrt:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fsqrt
; X86-NEXT: fld1
@@ -28,7 +28,7 @@ declare float @llvm.fmuladd.f32(float %a, float %b, float %c);
define float @fast_fmuladd_opts(float %a , float %b , float %c) {
; X64-LABEL: fast_fmuladd_opts:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, %xmm1
; X64-NEXT: addss %xmm1, %xmm1
; X64-NEXT: addss %xmm0, %xmm1
@@ -36,7 +36,7 @@ define float @fast_fmuladd_opts(float %a , float %b , float %c) {
; X64-NEXT: retq
;
; X86-LABEL: fast_fmuladd_opts:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fld %st(0)
; X86-NEXT: fadd %st(1)
@@ -52,7 +52,7 @@ define float @fast_fmuladd_opts(float %a , float %b , float %c) {
define double @not_so_fast_mul_add(double %x) {
; X64-LABEL: not_so_fast_mul_add:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: mulsd %xmm0, %xmm1
; X64-NEXT: addsd %xmm1, %xmm0
@@ -60,7 +60,7 @@ define double @not_so_fast_mul_add(double %x) {
; X64-NEXT: retq
;
; X86-LABEL: not_so_fast_mul_add:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NEXT: fld %st(0)
; X86-NEXT: fmull {{\.LCPI.*}}
@@ -79,7 +79,7 @@ define double @not_so_fast_mul_add(double %x) {
define float @not_so_fast_recip_sqrt(float %x) {
; X64-LABEL: not_so_fast_recip_sqrt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sqrtss %xmm0, %xmm1
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: divss %xmm1, %xmm0
@@ -87,7 +87,7 @@ define float @not_so_fast_recip_sqrt(float %x) {
; X64-NEXT: retq
;
; X86-LABEL: not_so_fast_recip_sqrt:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fsqrt
; X86-NEXT: fld1
diff --git a/test/CodeGen/X86/fmsubadd-combine.ll b/test/CodeGen/X86/fmsubadd-combine.ll
index 338a95f6a80..814d61e2238 100644
--- a/test/CodeGen/X86/fmsubadd-combine.ll
+++ b/test/CodeGen/X86/fmsubadd-combine.ll
@@ -7,7 +7,7 @@
define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
; FMA3_256-LABEL: mul_subadd_pd128:
-; FMA3_256: # BB#0: # %entry
+; FMA3_256: # %bb.0: # %entry
; FMA3_256-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; FMA3_256-NEXT: vsubpd %xmm2, %xmm0, %xmm1
; FMA3_256-NEXT: vaddpd %xmm2, %xmm0, %xmm0
@@ -15,7 +15,7 @@ define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x dou
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_subadd_pd128:
-; FMA3_512: # BB#0: # %entry
+; FMA3_512: # %bb.0: # %entry
; FMA3_512-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; FMA3_512-NEXT: vsubpd %xmm2, %xmm0, %xmm1
; FMA3_512-NEXT: vaddpd %xmm2, %xmm0, %xmm0
@@ -23,7 +23,7 @@ define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x dou
; FMA3_512-NEXT: retq
;
; FMA4-LABEL: mul_subadd_pd128:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vsubpd %xmm2, %xmm0, %xmm1
; FMA4-NEXT: vaddpd %xmm2, %xmm0, %xmm0
@@ -39,7 +39,7 @@ entry:
define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
; FMA3-LABEL: mul_subadd_ps128:
-; FMA3: # BB#0: # %entry
+; FMA3: # %bb.0: # %entry
; FMA3-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA3-NEXT: vsubps %xmm2, %xmm0, %xmm1
; FMA3-NEXT: vaddps %xmm2, %xmm0, %xmm0
@@ -47,7 +47,7 @@ define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float>
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_subadd_ps128:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vsubps %xmm2, %xmm0, %xmm1
; FMA4-NEXT: vaddps %xmm2, %xmm0, %xmm0
@@ -63,7 +63,7 @@ entry:
define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
; FMA3-LABEL: mul_subadd_pd256:
-; FMA3: # BB#0: # %entry
+; FMA3: # %bb.0: # %entry
; FMA3-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; FMA3-NEXT: vsubpd %ymm2, %ymm0, %ymm1
; FMA3-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -71,7 +71,7 @@ define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x dou
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_subadd_pd256:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; FMA4-NEXT: vsubpd %ymm2, %ymm0, %ymm1
; FMA4-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -87,7 +87,7 @@ entry:
define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
; FMA3-LABEL: mul_subadd_ps256:
-; FMA3: # BB#0: # %entry
+; FMA3: # %bb.0: # %entry
; FMA3-NEXT: vmulps %ymm1, %ymm0, %ymm0
; FMA3-NEXT: vsubps %ymm2, %ymm0, %ymm1
; FMA3-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -95,7 +95,7 @@ define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float>
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_subadd_ps256:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vmulps %ymm1, %ymm0, %ymm0
; FMA4-NEXT: vsubps %ymm2, %ymm0, %ymm1
; FMA4-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -111,7 +111,7 @@ entry:
define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
; FMA3_256-LABEL: mul_subadd_pd512:
-; FMA3_256: # BB#0: # %entry
+; FMA3_256: # %bb.0: # %entry
; FMA3_256-NEXT: vmulpd %ymm2, %ymm0, %ymm0
; FMA3_256-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; FMA3_256-NEXT: vsubpd %ymm5, %ymm1, %ymm2
@@ -123,7 +123,7 @@ define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x dou
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_subadd_pd512:
-; FMA3_512: # BB#0: # %entry
+; FMA3_512: # %bb.0: # %entry
; FMA3_512-NEXT: vmulpd %zmm1, %zmm0, %zmm0
; FMA3_512-NEXT: vsubpd %zmm2, %zmm0, %zmm1
; FMA3_512-NEXT: vaddpd %zmm2, %zmm0, %zmm0
@@ -131,7 +131,7 @@ define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x dou
; FMA3_512-NEXT: retq
;
; FMA4-LABEL: mul_subadd_pd512:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vmulpd %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; FMA4-NEXT: vsubpd %ymm5, %ymm1, %ymm2
@@ -151,7 +151,7 @@ entry:
define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
; FMA3_256-LABEL: mul_subadd_ps512:
-; FMA3_256: # BB#0: # %entry
+; FMA3_256: # %bb.0: # %entry
; FMA3_256-NEXT: vmulps %ymm2, %ymm0, %ymm0
; FMA3_256-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA3_256-NEXT: vsubps %ymm5, %ymm1, %ymm2
@@ -163,7 +163,7 @@ define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x fl
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_subadd_ps512:
-; FMA3_512: # BB#0: # %entry
+; FMA3_512: # %bb.0: # %entry
; FMA3_512-NEXT: vmulps %zmm1, %zmm0, %zmm1
; FMA3_512-NEXT: vaddps %zmm2, %zmm1, %zmm0
; FMA3_512-NEXT: movw $-21846, %ax # imm = 0xAAAA
@@ -172,7 +172,7 @@ define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x fl
; FMA3_512-NEXT: retq
;
; FMA4-LABEL: mul_subadd_ps512:
-; FMA4: # BB#0: # %entry
+; FMA4: # %bb.0: # %entry
; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: vsubps %ymm5, %ymm1, %ymm2
diff --git a/test/CodeGen/X86/fold-load-binops.ll b/test/CodeGen/X86/fold-load-binops.ll
index 4662a1521a3..2d4fc723baa 100644
--- a/test/CodeGen/X86/fold-load-binops.ll
+++ b/test/CodeGen/X86/fold-load-binops.ll
@@ -9,12 +9,12 @@
define <4 x float> @addss(<4 x float> %va, float* %pb) {
; SSE-LABEL: addss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: addss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = extractelement <4 x float> %va, i32 0
@@ -26,12 +26,12 @@ define <4 x float> @addss(<4 x float> %va, float* %pb) {
define <2 x double> @addsd(<2 x double> %va, double* %pb) {
; SSE-LABEL: addsd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: addsd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = extractelement <2 x double> %va, i32 0
@@ -43,12 +43,12 @@ define <2 x double> @addsd(<2 x double> %va, double* %pb) {
define <4 x float> @subss(<4 x float> %va, float* %pb) {
; SSE-LABEL: subss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: subss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = extractelement <4 x float> %va, i32 0
@@ -60,12 +60,12 @@ define <4 x float> @subss(<4 x float> %va, float* %pb) {
define <2 x double> @subsd(<2 x double> %va, double* %pb) {
; SSE-LABEL: subsd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: subsd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = extractelement <2 x double> %va, i32 0
@@ -77,12 +77,12 @@ define <2 x double> @subsd(<2 x double> %va, double* %pb) {
define <4 x float> @mulss(<4 x float> %va, float* %pb) {
; SSE-LABEL: mulss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mulss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = extractelement <4 x float> %va, i32 0
@@ -94,12 +94,12 @@ define <4 x float> @mulss(<4 x float> %va, float* %pb) {
define <2 x double> @mulsd(<2 x double> %va, double* %pb) {
; SSE-LABEL: mulsd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulsd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mulsd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = extractelement <2 x double> %va, i32 0
@@ -111,12 +111,12 @@ define <2 x double> @mulsd(<2 x double> %va, double* %pb) {
define <4 x float> @divss(<4 x float> %va, float* %pb) {
; SSE-LABEL: divss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: divss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = extractelement <4 x float> %va, i32 0
@@ -128,12 +128,12 @@ define <4 x float> @divss(<4 x float> %va, float* %pb) {
define <2 x double> @divsd(<2 x double> %va, double* %pb) {
; SSE-LABEL: divsd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: divsd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%a = extractelement <2 x double> %va, i32 0
diff --git a/test/CodeGen/X86/fold-load-unops.ll b/test/CodeGen/X86/fold-load-unops.ll
index bf47c633c35..7feb66525e2 100644
--- a/test/CodeGen/X86/fold-load-unops.ll
+++ b/test/CodeGen/X86/fold-load-unops.ll
@@ -7,13 +7,13 @@
define float @rcpss(float* %a) {
; SSE-LABEL: rcpss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: rcpss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: rcpss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -26,13 +26,13 @@ define float @rcpss(float* %a) {
define float @rsqrtss(float* %a) {
; SSE-LABEL: rsqrtss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: rsqrtss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: rsqrtss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -45,13 +45,13 @@ define float @rsqrtss(float* %a) {
define float @sqrtss(float* %a) {
; SSE-LABEL: sqrtss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: sqrtss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sqrtss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -64,13 +64,13 @@ define float @sqrtss(float* %a) {
define double @sqrtsd(double* %a) {
; SSE-LABEL: sqrtsd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: sqrtsd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sqrtsd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -83,12 +83,12 @@ define double @sqrtsd(double* %a) {
define float @rcpss_size(float* %a) optsize {
; SSE-LABEL: rcpss_size:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: rcpss_size:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrcpss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%ld = load float, float* %a
@@ -100,12 +100,12 @@ define float @rcpss_size(float* %a) optsize {
define <4 x float> @rcpss_full_size(<4 x float>* %a) optsize {
; SSE-LABEL: rcpss_full_size:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: rcpss_full_size:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrcpss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%ld = load <4 x float>, <4 x float>* %a
@@ -115,12 +115,12 @@ define <4 x float> @rcpss_full_size(<4 x float>* %a) optsize {
define float @rsqrtss_size(float* %a) optsize {
; SSE-LABEL: rsqrtss_size:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rsqrtss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: rsqrtss_size:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrsqrtss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%ld = load float, float* %a
@@ -132,12 +132,12 @@ define float @rsqrtss_size(float* %a) optsize {
define <4 x float> @rsqrtss_full_size(<4 x float>* %a) optsize {
; SSE-LABEL: rsqrtss_full_size:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rsqrtss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: rsqrtss_full_size:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrsqrtss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%ld = load <4 x float>, <4 x float>* %a
@@ -147,12 +147,12 @@ define <4 x float> @rsqrtss_full_size(<4 x float>* %a) optsize {
define float @sqrtss_size(float* %a) optsize{
; SSE-LABEL: sqrtss_size:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sqrtss_size:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%ld = load float, float* %a
@@ -164,12 +164,12 @@ define float @sqrtss_size(float* %a) optsize{
define <4 x float> @sqrtss_full_size(<4 x float>* %a) optsize{
; SSE-LABEL: sqrtss_full_size:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sqrtss_full_size:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%ld = load <4 x float>, <4 x float>* %a
@@ -179,12 +179,12 @@ define <4 x float> @sqrtss_full_size(<4 x float>* %a) optsize{
define double @sqrtsd_size(double* %a) optsize {
; SSE-LABEL: sqrtsd_size:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtsd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sqrtsd_size:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%ld = load double, double* %a
@@ -196,12 +196,12 @@ define double @sqrtsd_size(double* %a) optsize {
define <2 x double> @sqrtsd_full_size(<2 x double>* %a) optsize {
; SSE-LABEL: sqrtsd_full_size:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtsd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sqrtsd_full_size:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%ld = load <2 x double>, <2 x double>* %a
diff --git a/test/CodeGen/X86/fold-rmw-ops.ll b/test/CodeGen/X86/fold-rmw-ops.ll
index b16571eb507..bb89d4b54ea 100644
--- a/test/CodeGen/X86/fold-rmw-ops.ll
+++ b/test/CodeGen/X86/fold-rmw-ops.ll
@@ -13,13 +13,13 @@ declare void @b()
define void @add64_imm32_br() nounwind {
; CHECK-LABEL: add64_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq $16777214, {{.*}}(%rip) # encoding: [0x48,0x81,0x05,A,A,A,A,0xfe,0xff,0xff,0x00]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0xFFFFFE
; CHECK-NEXT: js .LBB0_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB0_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -46,13 +46,13 @@ b:
define void @add64_sext_imm32_br() nounwind {
; CHECK-LABEL: add64_sext_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x05,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: js .LBB1_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB1_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -79,13 +79,13 @@ b:
define void @add64_imm32_via_sub_br() nounwind {
; CHECK-LABEL: add64_imm32_via_sub_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x2d,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: js .LBB2_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -113,14 +113,14 @@ b:
define void @add64_no_imm32_via_sub_due_to_cf_br() nounwind {
; CHECK-LABEL: add64_no_imm32_via_sub_due_to_cf_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $2147483648, %eax # encoding: [0xb8,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: addq %rax, {{.*}}(%rip) # encoding: [0x48,0x01,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: jae .LBB3_2 # encoding: [0x73,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB3_2-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#1: # %a
+; CHECK-NEXT: # %bb.1: # %a
; CHECK-NEXT: jmp a # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: a-1, kind: FK_PCRel_1
@@ -149,14 +149,14 @@ b:
define void @add64_too_large_imm32_br() nounwind {
; CHECK-LABEL: add64_too_large_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $2147483649, %eax # encoding: [0xb8,0x01,0x00,0x00,0x80]
; CHECK-NEXT: # imm = 0x80000001
; CHECK-NEXT: addq %rax, {{.*}}(%rip) # encoding: [0x48,0x01,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB4_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB4_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -184,12 +184,12 @@ b:
define void @add64_imm8_via_sub_br() nounwind {
; CHECK-LABEL: add64_imm8_via_sub_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $-128, {{.*}}(%rip) # encoding: [0x48,0x83,0x2d,A,A,A,A,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB5_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -217,12 +217,12 @@ b:
define void @add64_imm8_br() nounwind {
; CHECK-LABEL: add64_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq $42, {{.*}}(%rip) # encoding: [0x48,0x83,0x05,A,A,A,A,0x2a]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB6_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB6_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -248,12 +248,12 @@ b:
define void @add64_imm8_neg_br() nounwind {
; CHECK-LABEL: add64_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq $-42, {{.*}}(%rip) # encoding: [0x48,0x83,0x05,A,A,A,A,0xd6]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB7_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB7_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -279,13 +279,13 @@ b:
define void @add32_imm_br() nounwind {
; CHECK-LABEL: add32_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addl $-2147483648, {{.*}}(%rip) # encoding: [0x81,0x05,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: js .LBB8_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB8_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -312,12 +312,12 @@ b:
define void @add32_imm8_br() nounwind {
; CHECK-LABEL: add32_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addl $42, {{.*}}(%rip) # encoding: [0x83,0x05,A,A,A,A,0x2a]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB9_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB9_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -343,12 +343,12 @@ b:
define void @add32_imm8_neg_br() nounwind {
; CHECK-LABEL: add32_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addl $-42, {{.*}}(%rip) # encoding: [0x83,0x05,A,A,A,A,0xd6]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB10_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB10_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -374,13 +374,13 @@ b:
define void @add16_imm_br() nounwind {
; CHECK-LABEL: add16_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addw $-32768, {{.*}}(%rip) # encoding: [0x66,0x81,0x05,A,A,A,A,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-6, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x8000
; CHECK-NEXT: js .LBB11_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB11_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -407,12 +407,12 @@ b:
define void @add16_imm8_br() nounwind {
; CHECK-LABEL: add16_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addw $42, {{.*}}(%rip) # encoding: [0x66,0x83,0x05,A,A,A,A,0x2a]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB12_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB12_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -438,12 +438,12 @@ b:
define void @add16_imm8_neg_br() nounwind {
; CHECK-LABEL: add16_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addw $-42, {{.*}}(%rip) # encoding: [0x66,0x83,0x05,A,A,A,A,0xd6]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB13_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB13_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -469,12 +469,12 @@ b:
define void @add8_imm_br() nounwind {
; CHECK-LABEL: add8_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb $-2, {{.*}}(%rip) # encoding: [0x80,0x05,A,A,A,A,0xfe]
; CHECK-NEXT: # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB14_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB14_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -500,12 +500,12 @@ b:
define void @add64_reg_br(i64 %arg) nounwind {
; CHECK-LABEL: add64_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addq %rdi, {{.*}}(%rip) # encoding: [0x48,0x01,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB15_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB15_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -531,12 +531,12 @@ b:
define void @add32_reg_br(i32 %arg) nounwind {
; CHECK-LABEL: add32_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addl %edi, {{.*}}(%rip) # encoding: [0x01,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB16_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB16_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -562,12 +562,12 @@ b:
define void @add16_reg_br(i16 %arg) nounwind {
; CHECK-LABEL: add16_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addw %di, {{.*}}(%rip) # encoding: [0x66,0x01,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB17_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB17_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -593,12 +593,12 @@ b:
define void @add8_reg_br(i8 %arg) nounwind {
; CHECK-LABEL: add8_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb %dil, {{.*}}(%rip) # encoding: [0x40,0x00,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB18_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB18_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -624,13 +624,13 @@ b:
define void @sub64_imm32_br() nounwind {
; CHECK-LABEL: sub64_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x2d,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: js .LBB19_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB19_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -658,14 +658,14 @@ b:
define void @sub64_too_large_imm32_br() nounwind {
; CHECK-LABEL: sub64_too_large_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movabsq $-4294967295, %rax # encoding: [0x48,0xb8,0x01,0x00,0x00,0x00,0xff,0xff,0xff,0xff]
; CHECK-NEXT: # imm = 0xFFFFFFFF00000001
; CHECK-NEXT: addq %rax, {{.*}}(%rip) # encoding: [0x48,0x01,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB20_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB20_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -693,12 +693,12 @@ b:
define void @sub64_imm8_br() nounwind {
; CHECK-LABEL: sub64_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $-128, {{.*}}(%rip) # encoding: [0x48,0x83,0x2d,A,A,A,A,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB21_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB21_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -726,13 +726,13 @@ b:
define void @sub32_imm_br() nounwind {
; CHECK-LABEL: sub32_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addl $-2147483648, {{.*}}(%rip) # encoding: [0x81,0x05,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: js .LBB22_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB22_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -760,12 +760,12 @@ b:
define void @sub32_imm8_br() nounwind {
; CHECK-LABEL: sub32_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subl $-128, {{.*}}(%rip) # encoding: [0x83,0x2d,A,A,A,A,0x80]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB23_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB23_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -793,13 +793,13 @@ b:
define void @sub16_imm_br() nounwind {
; CHECK-LABEL: sub16_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addw $-32768, {{.*}}(%rip) # encoding: [0x66,0x81,0x05,A,A,A,A,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-6, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x8000
; CHECK-NEXT: js .LBB24_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB24_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -827,12 +827,12 @@ b:
define void @sub16_imm8_br() nounwind {
; CHECK-LABEL: sub16_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subw $-128, {{.*}}(%rip) # encoding: [0x66,0x83,0x2d,A,A,A,A,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB25_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB25_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -860,12 +860,12 @@ b:
define void @sub8_imm_br() nounwind {
; CHECK-LABEL: sub8_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addb $-128, {{.*}}(%rip) # encoding: [0x80,0x05,A,A,A,A,0x80]
; CHECK-NEXT: # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB26_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB26_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -893,12 +893,12 @@ b:
define void @sub64_reg_br(i64 %arg) nounwind {
; CHECK-LABEL: sub64_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq %rdi, {{.*}}(%rip) # encoding: [0x48,0x29,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB27_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB27_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -924,12 +924,12 @@ b:
define void @sub32_reg_br(i32 %arg) nounwind {
; CHECK-LABEL: sub32_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subl %edi, {{.*}}(%rip) # encoding: [0x29,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB28_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB28_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -955,12 +955,12 @@ b:
define void @sub16_reg_br(i16 %arg) nounwind {
; CHECK-LABEL: sub16_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subw %di, {{.*}}(%rip) # encoding: [0x66,0x29,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB29_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB29_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -986,12 +986,12 @@ b:
define void @sub8_reg_br(i8 %arg) nounwind {
; CHECK-LABEL: sub8_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subb %dil, {{.*}}(%rip) # encoding: [0x40,0x28,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
; CHECK-NEXT: js .LBB30_1 # encoding: [0x78,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB30_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1017,13 +1017,13 @@ b:
define void @and64_imm32_br() nounwind {
; CHECK-LABEL: and64_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andq $16777215, {{.*}}(%rip) # encoding: [0x48,0x81,0x25,A,A,A,A,0xff,0xff,0xff,0x00]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0xFFFFFF
; CHECK-NEXT: je .LBB31_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB31_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1050,13 +1050,13 @@ b:
define void @and64_sext_imm32_br() nounwind {
; CHECK-LABEL: and64_sext_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x25,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: je .LBB32_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB32_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1083,12 +1083,12 @@ b:
define void @and64_imm8_br() nounwind {
; CHECK-LABEL: and64_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andq $15, {{.*}}(%rip) # encoding: [0x48,0x83,0x25,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB33_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB33_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1114,12 +1114,12 @@ b:
define void @and64_imm8_neg_br() nounwind {
; CHECK-LABEL: and64_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andq $-4, {{.*}}(%rip) # encoding: [0x48,0x83,0x25,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB34_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB34_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1145,7 +1145,7 @@ b:
define void @and32_imm_br() nounwind {
; CHECK-LABEL: and32_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $-2147483648, %eax # encoding: [0xb8,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: andl {{.*}}(%rip), %eax # encoding: [0x23,0x05,A,A,A,A]
@@ -1154,7 +1154,7 @@ define void @and32_imm_br() nounwind {
; CHECK-NEXT: # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
; CHECK-NEXT: jne .LBB35_2 # encoding: [0x75,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB35_2-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#1: # %a
+; CHECK-NEXT: # %bb.1: # %a
; CHECK-NEXT: jmp a # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: a-1, kind: FK_PCRel_1
@@ -1181,12 +1181,12 @@ b:
define void @and32_imm8_br() nounwind {
; CHECK-LABEL: and32_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andl $15, {{.*}}(%rip) # encoding: [0x83,0x25,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB36_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB36_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1212,12 +1212,12 @@ b:
define void @and32_imm8_neg_br() nounwind {
; CHECK-LABEL: and32_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andl $-4, {{.*}}(%rip) # encoding: [0x83,0x25,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB37_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB37_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1243,7 +1243,7 @@ b:
define void @and16_imm_br() nounwind {
; CHECK-LABEL: and16_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movzwl {{.*}}(%rip), %eax # encoding: [0x0f,0xb7,0x05,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
; CHECK-NEXT: andl $32768, %eax # encoding: [0x25,0x00,0x80,0x00,0x00]
@@ -1253,7 +1253,7 @@ define void @and16_imm_br() nounwind {
; CHECK-NEXT: testw %ax, %ax # encoding: [0x66,0x85,0xc0]
; CHECK-NEXT: jne .LBB38_2 # encoding: [0x75,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB38_2-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#1: # %a
+; CHECK-NEXT: # %bb.1: # %a
; CHECK-NEXT: jmp a # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: a-1, kind: FK_PCRel_1
@@ -1279,12 +1279,12 @@ b:
define void @and16_imm8_br() nounwind {
; CHECK-LABEL: and16_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andw $15, {{.*}}(%rip) # encoding: [0x66,0x83,0x25,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB39_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB39_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1310,12 +1310,12 @@ b:
define void @and16_imm8_neg_br() nounwind {
; CHECK-LABEL: and16_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andw $-4, {{.*}}(%rip) # encoding: [0x66,0x83,0x25,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB40_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB40_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1341,12 +1341,12 @@ b:
define void @and8_imm_br() nounwind {
; CHECK-LABEL: and8_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andb $-4, {{.*}}(%rip) # encoding: [0x80,0x25,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB41_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB41_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1372,12 +1372,12 @@ b:
define void @and64_reg_br(i64 %arg) nounwind {
; CHECK-LABEL: and64_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andq %rdi, {{.*}}(%rip) # encoding: [0x48,0x21,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB42_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB42_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1403,12 +1403,12 @@ b:
define void @and32_reg_br(i32 %arg) nounwind {
; CHECK-LABEL: and32_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andl %edi, {{.*}}(%rip) # encoding: [0x21,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB43_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB43_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1434,12 +1434,12 @@ b:
define void @and16_reg_br(i16 %arg) nounwind {
; CHECK-LABEL: and16_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andw %di, {{.*}}(%rip) # encoding: [0x66,0x21,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB44_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB44_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1465,12 +1465,12 @@ b:
define void @and8_reg_br(i8 %arg) nounwind {
; CHECK-LABEL: and8_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andb %dil, {{.*}}(%rip) # encoding: [0x40,0x20,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB45_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB45_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1496,13 +1496,13 @@ b:
define void @or64_imm32_br() nounwind {
; CHECK-LABEL: or64_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orq $16777215, {{.*}}(%rip) # encoding: [0x48,0x81,0x0d,A,A,A,A,0xff,0xff,0xff,0x00]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0xFFFFFF
; CHECK-NEXT: je .LBB46_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB46_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1529,13 +1529,13 @@ b:
define void @or64_sext_imm32_br() nounwind {
; CHECK-LABEL: or64_sext_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x0d,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: je .LBB47_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB47_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1562,12 +1562,12 @@ b:
define void @or64_imm8_br() nounwind {
; CHECK-LABEL: or64_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orq $15, {{.*}}(%rip) # encoding: [0x48,0x83,0x0d,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB48_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB48_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1593,12 +1593,12 @@ b:
define void @or64_imm8_neg_br() nounwind {
; CHECK-LABEL: or64_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orq $-4, {{.*}}(%rip) # encoding: [0x48,0x83,0x0d,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB49_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB49_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1624,13 +1624,13 @@ b:
define void @or32_imm_br() nounwind {
; CHECK-LABEL: or32_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orl $-2147483648, {{.*}}(%rip) # encoding: [0x81,0x0d,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: je .LBB50_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB50_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1657,12 +1657,12 @@ b:
define void @or32_imm8_br() nounwind {
; CHECK-LABEL: or32_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orl $15, {{.*}}(%rip) # encoding: [0x83,0x0d,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB51_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB51_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1688,12 +1688,12 @@ b:
define void @or32_imm8_neg_br() nounwind {
; CHECK-LABEL: or32_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orl $-4, {{.*}}(%rip) # encoding: [0x83,0x0d,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB52_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB52_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1719,13 +1719,13 @@ b:
define void @or16_imm_br() nounwind {
; CHECK-LABEL: or16_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orw $-32768, {{.*}}(%rip) # encoding: [0x66,0x81,0x0d,A,A,A,A,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-6, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x8000
; CHECK-NEXT: je .LBB53_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB53_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1751,12 +1751,12 @@ b:
define void @or16_imm8_br() nounwind {
; CHECK-LABEL: or16_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orw $15, {{.*}}(%rip) # encoding: [0x66,0x83,0x0d,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB54_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB54_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1782,12 +1782,12 @@ b:
define void @or16_imm8_neg_br() nounwind {
; CHECK-LABEL: or16_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orw $-4, {{.*}}(%rip) # encoding: [0x66,0x83,0x0d,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB55_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB55_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1813,12 +1813,12 @@ b:
define void @or8_imm_br() nounwind {
; CHECK-LABEL: or8_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orb $-4, {{.*}}(%rip) # encoding: [0x80,0x0d,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB56_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB56_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1844,12 +1844,12 @@ b:
define void @or64_reg_br(i64 %arg) nounwind {
; CHECK-LABEL: or64_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orq %rdi, {{.*}}(%rip) # encoding: [0x48,0x09,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB57_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB57_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1875,12 +1875,12 @@ b:
define void @or32_reg_br(i32 %arg) nounwind {
; CHECK-LABEL: or32_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orl %edi, {{.*}}(%rip) # encoding: [0x09,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB58_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB58_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1906,12 +1906,12 @@ b:
define void @or16_reg_br(i16 %arg) nounwind {
; CHECK-LABEL: or16_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orw %di, {{.*}}(%rip) # encoding: [0x66,0x09,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB59_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB59_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1937,12 +1937,12 @@ b:
define void @or8_reg_br(i8 %arg) nounwind {
; CHECK-LABEL: or8_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orb %dil, {{.*}}(%rip) # encoding: [0x40,0x08,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB60_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB60_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1968,13 +1968,13 @@ b:
define void @xor64_imm32_br() nounwind {
; CHECK-LABEL: xor64_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorq $16777215, {{.*}}(%rip) # encoding: [0x48,0x81,0x35,A,A,A,A,0xff,0xff,0xff,0x00]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0xFFFFFF
; CHECK-NEXT: je .LBB61_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB61_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2001,13 +2001,13 @@ b:
define void @xor64_sext_imm32_br() nounwind {
; CHECK-LABEL: xor64_sext_imm32_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x35,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: je .LBB62_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB62_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2034,12 +2034,12 @@ b:
define void @xor64_imm8_br() nounwind {
; CHECK-LABEL: xor64_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorq $15, {{.*}}(%rip) # encoding: [0x48,0x83,0x35,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB63_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB63_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2065,12 +2065,12 @@ b:
define void @xor64_imm8_neg_br() nounwind {
; CHECK-LABEL: xor64_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorq $-4, {{.*}}(%rip) # encoding: [0x48,0x83,0x35,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB64_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB64_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2096,13 +2096,13 @@ b:
define void @xor32_imm_br() nounwind {
; CHECK-LABEL: xor32_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl $-2147483648, {{.*}}(%rip) # encoding: [0x81,0x35,A,A,A,A,0x00,0x00,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-8, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x80000000
; CHECK-NEXT: je .LBB65_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB65_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2129,12 +2129,12 @@ b:
define void @xor32_imm8_br() nounwind {
; CHECK-LABEL: xor32_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl $15, {{.*}}(%rip) # encoding: [0x83,0x35,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB66_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB66_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2160,12 +2160,12 @@ b:
define void @xor32_imm8_neg_br() nounwind {
; CHECK-LABEL: xor32_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl $-4, {{.*}}(%rip) # encoding: [0x83,0x35,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB67_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB67_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2191,13 +2191,13 @@ b:
define void @xor16_imm_br() nounwind {
; CHECK-LABEL: xor16_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorw $-32768, {{.*}}(%rip) # encoding: [0x66,0x81,0x35,A,A,A,A,0x00,0x80]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-6, kind: reloc_riprel_4byte
; CHECK-NEXT: # imm = 0x8000
; CHECK-NEXT: je .LBB68_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB68_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2223,12 +2223,12 @@ b:
define void @xor16_imm8_br() nounwind {
; CHECK-LABEL: xor16_imm8_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorw $15, {{.*}}(%rip) # encoding: [0x66,0x83,0x35,A,A,A,A,0x0f]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB69_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB69_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2254,12 +2254,12 @@ b:
define void @xor16_imm8_neg_br() nounwind {
; CHECK-LABEL: xor16_imm8_neg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorw $-4, {{.*}}(%rip) # encoding: [0x66,0x83,0x35,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB70_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB70_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2285,12 +2285,12 @@ b:
define void @xor8_imm_br() nounwind {
; CHECK-LABEL: xor8_imm_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorb $-4, {{.*}}(%rip) # encoding: [0x80,0x35,A,A,A,A,0xfc]
; CHECK-NEXT: # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB71_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB71_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2316,12 +2316,12 @@ b:
define void @xor64_reg_br(i64 %arg) nounwind {
; CHECK-LABEL: xor64_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorq %rdi, {{.*}}(%rip) # encoding: [0x48,0x31,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB72_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB72_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2347,12 +2347,12 @@ b:
define void @xor32_reg_br(i32 %arg) nounwind {
; CHECK-LABEL: xor32_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %edi, {{.*}}(%rip) # encoding: [0x31,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB73_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB73_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2378,12 +2378,12 @@ b:
define void @xor16_reg_br(i16 %arg) nounwind {
; CHECK-LABEL: xor16_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorw %di, {{.*}}(%rip) # encoding: [0x66,0x31,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB74_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB74_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2409,12 +2409,12 @@ b:
define void @xor8_reg_br(i8 %arg) nounwind {
; CHECK-LABEL: xor8_reg_br:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorb %dil, {{.*}}(%rip) # encoding: [0x40,0x30,0x3d,A,A,A,A]
; CHECK-NEXT: # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
; CHECK-NEXT: je .LBB75_1 # encoding: [0x74,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB75_1-1, kind: FK_PCRel_1
-; CHECK-NEXT: # BB#2: # %b
+; CHECK-NEXT: # %bb.2: # %b
; CHECK-NEXT: jmp b # TAILCALL
; CHECK-NEXT: # encoding: [0xeb,A]
; CHECK-NEXT: # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
diff --git a/test/CodeGen/X86/fold-vector-sext-crash.ll b/test/CodeGen/X86/fold-vector-sext-crash.ll
index 6928a3ea941..481f55e9e10 100644
--- a/test/CodeGen/X86/fold-vector-sext-crash.ll
+++ b/test/CodeGen/X86/fold-vector-sext-crash.ll
@@ -8,7 +8,7 @@
define <4 x i64> @foo(<4 x i64> %A) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vmovdqa %xmm1, %xmm1
; CHECK-NEXT: vandps %ymm0, %ymm1, %ymm0
diff --git a/test/CodeGen/X86/fold-vector-sext-crash2.ll b/test/CodeGen/X86/fold-vector-sext-crash2.ll
index ccc4b103926..ca1a1c1949e 100644
--- a/test/CodeGen/X86/fold-vector-sext-crash2.ll
+++ b/test/CodeGen/X86/fold-vector-sext-crash2.ll
@@ -6,7 +6,7 @@
define <2 x i256> @test_sext1() {
; X32-LABEL: test_sext1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $-1, 60(%eax)
; X32-NEXT: movl $-1, 56(%eax)
@@ -27,7 +27,7 @@ define <2 x i256> @test_sext1() {
; X32-NEXT: retl $4
;
; X64-LABEL: test_sext1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
@@ -44,7 +44,7 @@ define <2 x i256> @test_sext1() {
define <2 x i256> @test_sext2() {
; X32-LABEL: test_sext2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $-1, 60(%eax)
; X32-NEXT: movl $-1, 56(%eax)
@@ -65,7 +65,7 @@ define <2 x i256> @test_sext2() {
; X32-NEXT: retl $4
;
; X64-LABEL: test_sext2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
@@ -82,7 +82,7 @@ define <2 x i256> @test_sext2() {
define <2 x i256> @test_zext1() {
; X32-LABEL: test_zext1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, 60(%eax)
; X32-NEXT: movl $0, 56(%eax)
@@ -103,7 +103,7 @@ define <2 x i256> @test_zext1() {
; X32-NEXT: retl $4
;
; X64-LABEL: test_zext1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 16(%rdi)
@@ -119,7 +119,7 @@ define <2 x i256> @test_zext1() {
define <2 x i256> @test_zext2() {
; X32-LABEL: test_zext2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, 60(%eax)
; X32-NEXT: movl $0, 56(%eax)
@@ -140,7 +140,7 @@ define <2 x i256> @test_zext2() {
; X32-NEXT: retl $4
;
; X64-LABEL: test_zext2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 16(%rdi)
diff --git a/test/CodeGen/X86/fold-vector-sext-zext.ll b/test/CodeGen/X86/fold-vector-sext-zext.ll
index 39e728816b0..16274a0d819 100644
--- a/test/CodeGen/X86/fold-vector-sext-zext.ll
+++ b/test/CodeGen/X86/fold-vector-sext-zext.ll
@@ -10,12 +10,12 @@
define <4 x i16> @test_sext_4i8_4i16() {
; X32-LABEL: test_sext_4i8_4i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
; X32-NEXT: retl
;
; X64-LABEL: test_sext_4i8_4i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -28,12 +28,12 @@ define <4 x i16> @test_sext_4i8_4i16() {
define <4 x i16> @test_sext_4i8_4i16_undef() {
; X32-LABEL: test_sext_4i8_4i16_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = <u,4294967295,u,4294967293>
; X32-NEXT: retl
;
; X64-LABEL: test_sext_4i8_4i16_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = <u,4294967295,u,4294967293>
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -46,12 +46,12 @@ define <4 x i16> @test_sext_4i8_4i16_undef() {
define <4 x i32> @test_sext_4i8_4i32() {
; X32-LABEL: test_sext_4i8_4i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
; X32-NEXT: retl
;
; X64-LABEL: test_sext_4i8_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -64,12 +64,12 @@ define <4 x i32> @test_sext_4i8_4i32() {
define <4 x i32> @test_sext_4i8_4i32_undef() {
; X32-LABEL: test_sext_4i8_4i32_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = <u,4294967295,u,4294967293>
; X32-NEXT: retl
;
; X64-LABEL: test_sext_4i8_4i32_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = <u,4294967295,u,4294967293>
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -82,12 +82,12 @@ define <4 x i32> @test_sext_4i8_4i32_undef() {
define <4 x i64> @test_sext_4i8_4i64() {
; X32-LABEL: test_sext_4i8_4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,4294967295,4294967295,2,0,4294967293,4294967295]
; X32-NEXT: retl
;
; X64-LABEL: test_sext_4i8_4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [0,18446744073709551615,2,18446744073709551613]
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -100,12 +100,12 @@ define <4 x i64> @test_sext_4i8_4i64() {
define <4 x i64> @test_sext_4i8_4i64_undef() {
; X32-LABEL: test_sext_4i8_4i64_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = <u,u,4294967295,4294967295,u,u,4294967293,4294967295>
; X32-NEXT: retl
;
; X64-LABEL: test_sext_4i8_4i64_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = <u,18446744073709551615,u,18446744073709551613>
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -118,12 +118,12 @@ define <4 x i64> @test_sext_4i8_4i64_undef() {
define <8 x i16> @test_sext_8i8_8i16() {
; X32-LABEL: test_sext_8i8_8i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = <0,65535,2,65533,u,u,u,u>
; X32-NEXT: retl
;
; X64-LABEL: test_sext_8i8_8i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = <0,65535,2,65533,u,u,u,u>
; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -140,12 +140,12 @@ define <8 x i16> @test_sext_8i8_8i16() {
define <8 x i32> @test_sext_8i8_8i32() {
; X32-LABEL: test_sext_8i8_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = <0,4294967295,2,4294967293,u,u,u,u>
; X32-NEXT: retl
;
; X64-LABEL: test_sext_8i8_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = <0,4294967295,2,4294967293,u,u,u,u>
; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -162,12 +162,12 @@ define <8 x i32> @test_sext_8i8_8i32() {
define <8 x i16> @test_sext_8i8_8i16_undef() {
; X32-LABEL: test_sext_8i8_8i16_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = <u,65535,u,65533,u,u,u,u>
; X32-NEXT: retl
;
; X64-LABEL: test_sext_8i8_8i16_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = <u,65535,u,65533,u,u,u,u>
; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -184,12 +184,12 @@ define <8 x i16> @test_sext_8i8_8i16_undef() {
define <8 x i32> @test_sext_8i8_8i32_undef() {
; X32-LABEL: test_sext_8i8_8i32_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = <0,u,2,u,u,u,u,u>
; X32-NEXT: retl
;
; X64-LABEL: test_sext_8i8_8i32_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = <0,u,2,u,u,u,u,u>
; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -206,12 +206,12 @@ define <8 x i32> @test_sext_8i8_8i32_undef() {
define <4 x i16> @test_zext_4i8_4i16() {
; X32-LABEL: test_zext_4i8_4i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,2,253]
; X32-NEXT: retl
;
; X64-LABEL: test_zext_4i8_4i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,2,253]
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -224,12 +224,12 @@ define <4 x i16> @test_zext_4i8_4i16() {
define <4 x i32> @test_zext_4i8_4i32() {
; X32-LABEL: test_zext_4i8_4i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,2,253]
; X32-NEXT: retl
;
; X64-LABEL: test_zext_4i8_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,2,253]
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -242,12 +242,12 @@ define <4 x i32> @test_zext_4i8_4i32() {
define <4 x i64> @test_zext_4i8_4i64() {
; X32-LABEL: test_zext_4i8_4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,255,0,2,0,253,0]
; X32-NEXT: retl
;
; X64-LABEL: test_zext_4i8_4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,2,253]
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -260,12 +260,12 @@ define <4 x i64> @test_zext_4i8_4i64() {
define <4 x i16> @test_zext_4i8_4i16_undef() {
; X32-LABEL: test_zext_4i8_4i16_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = <u,255,u,253>
; X32-NEXT: retl
;
; X64-LABEL: test_zext_4i8_4i16_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = <u,255,u,253>
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -278,12 +278,12 @@ define <4 x i16> @test_zext_4i8_4i16_undef() {
define <4 x i32> @test_zext_4i8_4i32_undef() {
; X32-LABEL: test_zext_4i8_4i32_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = <0,u,2,u>
; X32-NEXT: retl
;
; X64-LABEL: test_zext_4i8_4i32_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = <0,u,2,u>
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -296,12 +296,12 @@ define <4 x i32> @test_zext_4i8_4i32_undef() {
define <4 x i64> @test_zext_4i8_4i64_undef() {
; X32-LABEL: test_zext_4i8_4i64_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = <u,u,255,0,2,0,u,u>
; X32-NEXT: retl
;
; X64-LABEL: test_zext_4i8_4i64_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = <u,255,2,u>
; X64-NEXT: retq
%1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -314,12 +314,12 @@ define <4 x i64> @test_zext_4i8_4i64_undef() {
define <8 x i16> @test_zext_8i8_8i16() {
; X32-LABEL: test_zext_8i8_8i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,2,253,4,251,6,249]
; X32-NEXT: retl
;
; X64-LABEL: test_zext_8i8_8i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,2,253,4,251,6,249]
; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -336,12 +336,12 @@ define <8 x i16> @test_zext_8i8_8i16() {
define <8 x i32> @test_zext_8i8_8i32() {
; X32-LABEL: test_zext_8i8_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,2,253,4,251,6,249]
; X32-NEXT: retl
;
; X64-LABEL: test_zext_8i8_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,2,253,4,251,6,249]
; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -358,12 +358,12 @@ define <8 x i32> @test_zext_8i8_8i32() {
define <8 x i16> @test_zext_8i8_8i16_undef() {
; X32-LABEL: test_zext_8i8_8i16_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = <u,255,u,253,u,251,u,249>
; X32-NEXT: retl
;
; X64-LABEL: test_zext_8i8_8i16_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = <u,255,u,253,u,251,u,249>
; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -380,12 +380,12 @@ define <8 x i16> @test_zext_8i8_8i16_undef() {
define <8 x i32> @test_zext_8i8_8i32_undef() {
; X32-LABEL: test_zext_8i8_8i32_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = <0,u,2,253,4,u,6,u>
; X32-NEXT: retl
;
; X64-LABEL: test_zext_8i8_8i32_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = <0,u,2,253,4,u,6,u>
; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 0, i32 0
diff --git a/test/CodeGen/X86/fp-fast.ll b/test/CodeGen/X86/fp-fast.ll
index fa31b9c9e12..c2b07ed1023 100644
--- a/test/CodeGen/X86/fp-fast.ll
+++ b/test/CodeGen/X86/fp-fast.ll
@@ -3,7 +3,7 @@
define float @test1(float %a) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fadd float %a, %a
@@ -13,7 +13,7 @@ define float @test1(float %a) {
define float @test2(float %a) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fmul float 4.0, %a
@@ -24,7 +24,7 @@ define float @test2(float %a) {
define float @test3(float %a) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fmul float %a, 4.0
@@ -35,7 +35,7 @@ define float @test3(float %a) {
define float @test4(float %a) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fadd float %a, %a
@@ -46,7 +46,7 @@ define float @test4(float %a) {
define float @test5(float %a) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fadd float %a, %a
@@ -57,7 +57,7 @@ define float @test5(float %a) {
define float @test6(float %a) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fmul float 2.0, %a
@@ -68,7 +68,7 @@ define float @test6(float %a) {
define float @test7(float %a) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fmul float %a, 2.0
@@ -79,7 +79,7 @@ define float @test7(float %a) {
define float @test8(float %a) {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%t1 = fmul float %a, 0.0
%t2 = fadd float %a, %t1
@@ -88,7 +88,7 @@ define float @test8(float %a) {
define float @test9(float %a) {
; CHECK-LABEL: test9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%t1 = fmul float 0.0, %a
%t2 = fadd float %t1, %a
@@ -97,7 +97,7 @@ define float @test9(float %a) {
define float @test10(float %a) {
; CHECK-LABEL: test10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fsub float -0.0, %a
@@ -107,7 +107,7 @@ define float @test10(float %a) {
define float @test11(float %a) {
; CHECK-LABEL: test11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%t1 = fsub float -0.0, %a
diff --git a/test/CodeGen/X86/fp-load-trunc.ll b/test/CodeGen/X86/fp-load-trunc.ll
index 4ef4903914b..582b648fdec 100644
--- a/test/CodeGen/X86/fp-load-trunc.ll
+++ b/test/CodeGen/X86/fp-load-trunc.ll
@@ -4,7 +4,7 @@
define <1 x float> @test1(<1 x double>* %p) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -15,7 +15,7 @@ define <1 x float> @test1(<1 x double>* %p) nounwind {
; CHECK-NEXT: retl
;
; AVX-LABEL: test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushl %eax
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -31,13 +31,13 @@ define <1 x float> @test1(<1 x double>* %p) nounwind {
define <2 x float> @test2(<2 x double>* %p) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: cvtpd2ps (%eax), %xmm0
; CHECK-NEXT: retl
;
; AVX-LABEL: test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vcvtpd2psx (%eax), %xmm0
; AVX-NEXT: retl
@@ -48,7 +48,7 @@ define <2 x float> @test2(<2 x double>* %p) nounwind {
define <4 x float> @test3(<4 x double>* %p) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: cvtpd2ps 16(%eax), %xmm1
; CHECK-NEXT: cvtpd2ps (%eax), %xmm0
@@ -56,7 +56,7 @@ define <4 x float> @test3(<4 x double>* %p) nounwind {
; CHECK-NEXT: retl
;
; AVX-LABEL: test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vcvtpd2psy (%eax), %xmm0
; AVX-NEXT: retl
@@ -67,7 +67,7 @@ define <4 x float> @test3(<4 x double>* %p) nounwind {
define <8 x float> @test4(<8 x double>* %p) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: cvtpd2ps 16(%eax), %xmm1
; CHECK-NEXT: cvtpd2ps (%eax), %xmm0
@@ -78,7 +78,7 @@ define <8 x float> @test4(<8 x double>* %p) nounwind {
; CHECK-NEXT: retl
;
; AVX-LABEL: test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vcvtpd2psy (%eax), %xmm0
; AVX-NEXT: vcvtpd2psy 32(%eax), %xmm1
diff --git a/test/CodeGen/X86/fp-logic-replace.ll b/test/CodeGen/X86/fp-logic-replace.ll
index e62b2f3db23..c1660ea696f 100644
--- a/test/CodeGen/X86/fp-logic-replace.ll
+++ b/test/CodeGen/X86/fp-logic-replace.ll
@@ -11,17 +11,17 @@
define double @FsANDPSrr(double %x, double %y) {
; SSE-LABEL: FsANDPSrr:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps %xmm1, %xmm0 # encoding: [0x0f,0x54,0xc1]
; SSE-NEXT: retq # encoding: [0xc3]
;
; AVX-LABEL: FsANDPSrr:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x54,0xc1]
; AVX-NEXT: retq # encoding: [0xc3]
;
; AVX512DQ-LABEL: FsANDPSrr:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vandps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
; AVX512DQ-NEXT: retq # encoding: [0xc3]
%bc1 = bitcast double %x to i64
@@ -33,18 +33,18 @@ define double @FsANDPSrr(double %x, double %y) {
define double @FsANDNPSrr(double %x, double %y) {
; SSE-LABEL: FsANDNPSrr:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andnps %xmm0, %xmm1 # encoding: [0x0f,0x55,0xc8]
; SSE-NEXT: movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
; SSE-NEXT: retq # encoding: [0xc3]
;
; AVX-LABEL: FsANDNPSrr:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandnps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x55,0xc0]
; AVX-NEXT: retq # encoding: [0xc3]
;
; AVX512DQ-LABEL: FsANDNPSrr:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vandnps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x55,0xc0]
; AVX512DQ-NEXT: retq # encoding: [0xc3]
%bc1 = bitcast double %x to i64
@@ -57,17 +57,17 @@ define double @FsANDNPSrr(double %x, double %y) {
define double @FsORPSrr(double %x, double %y) {
; SSE-LABEL: FsORPSrr:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: orps %xmm1, %xmm0 # encoding: [0x0f,0x56,0xc1]
; SSE-NEXT: retq # encoding: [0xc3]
;
; AVX-LABEL: FsORPSrr:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x56,0xc1]
; AVX-NEXT: retq # encoding: [0xc3]
;
; AVX512DQ-LABEL: FsORPSrr:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
; AVX512DQ-NEXT: retq # encoding: [0xc3]
%bc1 = bitcast double %x to i64
@@ -79,17 +79,17 @@ define double @FsORPSrr(double %x, double %y) {
define double @FsXORPSrr(double %x, double %y) {
; SSE-LABEL: FsXORPSrr:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm0 # encoding: [0x0f,0x57,0xc1]
; SSE-NEXT: retq # encoding: [0xc3]
;
; AVX-LABEL: FsXORPSrr:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc1]
; AVX-NEXT: retq # encoding: [0xc3]
;
; AVX512DQ-LABEL: FsXORPSrr:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vxorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
; AVX512DQ-NEXT: retq # encoding: [0xc3]
%bc1 = bitcast double %x to i64
diff --git a/test/CodeGen/X86/fp-logic.ll b/test/CodeGen/X86/fp-logic.ll
index 976470a8303..4402daceac7 100644
--- a/test/CodeGen/X86/fp-logic.ll
+++ b/test/CodeGen/X86/fp-logic.ll
@@ -18,7 +18,7 @@
define i32 @f1(float %x, i32 %y) {
; CHECK-LABEL: f1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl %edi, %eax
; CHECK-NEXT: retq
@@ -31,7 +31,7 @@ define i32 @f1(float %x, i32 %y) {
define i32 @f2(float %x, i32 %y) {
; CHECK-LABEL: f2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl %edi, %eax
; CHECK-NEXT: retq
@@ -44,7 +44,7 @@ define i32 @f2(float %x, i32 %y) {
define i32 @f3(float %x) {
; CHECK-LABEL: f3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: retq
@@ -57,7 +57,7 @@ define i32 @f3(float %x) {
define i32 @f4(float %x) {
; CHECK-LABEL: f4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl $2, %eax
; CHECK-NEXT: retq
@@ -70,7 +70,7 @@ define i32 @f4(float %x) {
define float @f5(float %x, i32 %y) {
; CHECK-LABEL: f5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd %edi, %xmm1
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -84,7 +84,7 @@ define float @f5(float %x, i32 %y) {
define float @f6(float %x, i32 %y) {
; CHECK-LABEL: f6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd %edi, %xmm1
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -98,7 +98,7 @@ define float @f6(float %x, i32 %y) {
define float @f7(float %x) {
; CHECK-LABEL: f7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -112,7 +112,7 @@ define float @f7(float %x) {
define float @f8(float %x) {
; CHECK-LABEL: f8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -126,7 +126,7 @@ define float @f8(float %x) {
define i32 @f9(float %x, float %y) {
; CHECK-LABEL: f9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: retq
@@ -140,7 +140,7 @@ define i32 @f9(float %x, float %y) {
define float @f10(float %x, float %y) {
; CHECK-LABEL: f10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
@@ -152,7 +152,7 @@ define float @f10(float %x, float %y) {
define float @or(float %x, float %y) {
; CHECK-LABEL: or:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: orps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
@@ -164,7 +164,7 @@ define float @or(float %x, float %y) {
define float @xor(float %x, float %y) {
; CHECK-LABEL: xor:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
@@ -176,7 +176,7 @@ define float @xor(float %x, float %y) {
define float @f7_or(float %x) {
; CHECK-LABEL: f7_or:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: orps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -188,7 +188,7 @@ define float @f7_or(float %x) {
define float @f7_xor(float %x) {
; CHECK-LABEL: f7_xor:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: xorps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -202,7 +202,7 @@ define float @f7_xor(float %x) {
define double @doubles(double %x, double %y) {
; CHECK-LABEL: doubles:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast double %x to i64
@@ -214,7 +214,7 @@ define double @doubles(double %x, double %y) {
define double @f7_double(double %x) {
; CHECK-LABEL: f7_double:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -230,7 +230,7 @@ define double @f7_double(double %x) {
define float @movmsk(float %x) {
; CHECK-LABEL: movmsk:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -242,7 +242,7 @@ define float @movmsk(float %x) {
define double @bitcast_fabs(double %x) {
; CHECK-LABEL: bitcast_fabs:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast double %x to i64
@@ -253,7 +253,7 @@ define double @bitcast_fabs(double %x) {
define float @bitcast_fneg(float %x) {
; CHECK-LABEL: bitcast_fneg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
@@ -264,7 +264,7 @@ define float @bitcast_fneg(float %x) {
define <2 x double> @bitcast_fabs_vec(<2 x double> %x) {
; CHECK-LABEL: bitcast_fabs_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast <2 x double> %x to <2 x i64>
@@ -275,7 +275,7 @@ define <2 x double> @bitcast_fabs_vec(<2 x double> %x) {
define <4 x float> @bitcast_fneg_vec(<4 x float> %x) {
; CHECK-LABEL: bitcast_fneg_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast <4 x float> %x to <4 x i32>
diff --git a/test/CodeGen/X86/fp-select-cmp-and.ll b/test/CodeGen/X86/fp-select-cmp-and.ll
index 651d7a3351c..0f6159d36ea 100644
--- a/test/CodeGen/X86/fp-select-cmp-and.ll
+++ b/test/CodeGen/X86/fp-select-cmp-and.ll
@@ -3,7 +3,7 @@
define double @test1(double %a, double %b, double %eps) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpltsd %xmm2, %xmm0
; CHECK-NEXT: andpd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -14,7 +14,7 @@ define double @test1(double %a, double %b, double %eps) {
define double @test2(double %a, double %b, double %eps) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmplesd %xmm2, %xmm0
; CHECK-NEXT: andpd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -25,7 +25,7 @@ define double @test2(double %a, double %b, double %eps) {
define double @test3(double %a, double %b, double %eps) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpltsd %xmm0, %xmm2
; CHECK-NEXT: andpd %xmm1, %xmm2
; CHECK-NEXT: movapd %xmm2, %xmm0
@@ -37,7 +37,7 @@ define double @test3(double %a, double %b, double %eps) {
define double @test4(double %a, double %b, double %eps) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmplesd %xmm0, %xmm2
; CHECK-NEXT: andpd %xmm1, %xmm2
; CHECK-NEXT: movapd %xmm2, %xmm0
@@ -49,7 +49,7 @@ define double @test4(double %a, double %b, double %eps) {
define double @test5(double %a, double %b, double %eps) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpltsd %xmm2, %xmm0
; CHECK-NEXT: andnpd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -60,7 +60,7 @@ define double @test5(double %a, double %b, double %eps) {
define double @test6(double %a, double %b, double %eps) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmplesd %xmm2, %xmm0
; CHECK-NEXT: andnpd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -71,7 +71,7 @@ define double @test6(double %a, double %b, double %eps) {
define double @test7(double %a, double %b, double %eps) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpltsd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm1, %xmm2
; CHECK-NEXT: movapd %xmm2, %xmm0
@@ -83,7 +83,7 @@ define double @test7(double %a, double %b, double %eps) {
define double @test8(double %a, double %b, double %eps) {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmplesd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm1, %xmm2
; CHECK-NEXT: movapd %xmm2, %xmm0
@@ -95,7 +95,7 @@ define double @test8(double %a, double %b, double %eps) {
define float @test9(float %a, float %b, float %eps) {
; CHECK-LABEL: test9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpltss %xmm2, %xmm0
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -106,7 +106,7 @@ define float @test9(float %a, float %b, float %eps) {
define float @test10(float %a, float %b, float %eps) {
; CHECK-LABEL: test10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpless %xmm2, %xmm0
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -117,7 +117,7 @@ define float @test10(float %a, float %b, float %eps) {
define float @test11(float %a, float %b, float %eps) {
; CHECK-LABEL: test11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpltss %xmm0, %xmm2
; CHECK-NEXT: andps %xmm1, %xmm2
; CHECK-NEXT: movaps %xmm2, %xmm0
@@ -129,7 +129,7 @@ define float @test11(float %a, float %b, float %eps) {
define float @test12(float %a, float %b, float %eps) {
; CHECK-LABEL: test12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpless %xmm0, %xmm2
; CHECK-NEXT: andps %xmm1, %xmm2
; CHECK-NEXT: movaps %xmm2, %xmm0
@@ -141,7 +141,7 @@ define float @test12(float %a, float %b, float %eps) {
define float @test13(float %a, float %b, float %eps) {
; CHECK-LABEL: test13:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpltss %xmm2, %xmm0
; CHECK-NEXT: andnps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -152,7 +152,7 @@ define float @test13(float %a, float %b, float %eps) {
define float @test14(float %a, float %b, float %eps) {
; CHECK-LABEL: test14:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpless %xmm2, %xmm0
; CHECK-NEXT: andnps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -163,7 +163,7 @@ define float @test14(float %a, float %b, float %eps) {
define float @test15(float %a, float %b, float %eps) {
; CHECK-LABEL: test15:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpltss %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm1, %xmm2
; CHECK-NEXT: movaps %xmm2, %xmm0
@@ -175,7 +175,7 @@ define float @test15(float %a, float %b, float %eps) {
define float @test16(float %a, float %b, float %eps) {
; CHECK-LABEL: test16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpless %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm1, %xmm2
; CHECK-NEXT: movaps %xmm2, %xmm0
@@ -187,7 +187,7 @@ define float @test16(float %a, float %b, float %eps) {
define float @test17(float %a, float %b, float %c, float %eps) {
; CHECK-LABEL: test17:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpless %xmm0, %xmm3
; CHECK-NEXT: andps %xmm3, %xmm2
; CHECK-NEXT: andnps %xmm1, %xmm3
@@ -201,7 +201,7 @@ define float @test17(float %a, float %b, float %c, float %eps) {
define double @test18(double %a, double %b, double %c, double %eps) {
; CHECK-LABEL: test18:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmplesd %xmm0, %xmm3
; CHECK-NEXT: andpd %xmm3, %xmm2
; CHECK-NEXT: andnpd %xmm1, %xmm3
diff --git a/test/CodeGen/X86/fp-trunc.ll b/test/CodeGen/X86/fp-trunc.ll
index 2f700cd4cc7..105db93749e 100644
--- a/test/CodeGen/X86/fp-trunc.ll
+++ b/test/CodeGen/X86/fp-trunc.ll
@@ -4,7 +4,7 @@
define <1 x float> @test1(<1 x double> %x) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
@@ -14,7 +14,7 @@ define <1 x float> @test1(<1 x double> %x) nounwind {
; CHECK-NEXT: retl
;
; AVX-LABEL: test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushl %eax
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
@@ -28,12 +28,12 @@ define <1 x float> @test1(<1 x double> %x) nounwind {
define <2 x float> @test2(<2 x double> %x) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cvtpd2ps %xmm0, %xmm0
; CHECK-NEXT: retl
;
; AVX-LABEL: test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
; AVX-NEXT: retl
%y = fptrunc <2 x double> %x to <2 x float>
@@ -42,14 +42,14 @@ define <2 x float> @test2(<2 x double> %x) nounwind {
define <4 x float> @test3(<4 x double> %x) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cvtpd2ps %xmm1, %xmm1
; CHECK-NEXT: cvtpd2ps %xmm0, %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retl
;
; AVX-LABEL: test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtpd2ps %ymm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retl
@@ -59,7 +59,7 @@ define <4 x float> @test3(<4 x double> %x) nounwind {
define <8 x float> @test4(<8 x double> %x) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: cvtpd2ps %xmm1, %xmm1
; CHECK-NEXT: cvtpd2ps %xmm0, %xmm0
@@ -71,7 +71,7 @@ define <8 x float> @test4(<8 x double> %x) nounwind {
; CHECK-NEXT: retl
;
; AVX-LABEL: test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtpd2ps %ymm0, %xmm0
; AVX-NEXT: vcvtpd2ps %ymm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/fp-une-cmp.ll b/test/CodeGen/X86/fp-une-cmp.ll
index 1b5af5aba36..9d208dc97e8 100644
--- a/test/CodeGen/X86/fp-une-cmp.ll
+++ b/test/CodeGen/X86/fp-une-cmp.ll
@@ -23,13 +23,13 @@
define double @rdar_7859988(double %x, double %y) nounwind readnone optsize ssp {
; CHECK-LABEL: rdar_7859988:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mulsd %xmm1, %xmm0
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: ucomisd %xmm1, %xmm0
; CHECK-NEXT: jne .LBB0_2
; CHECK-NEXT: jp .LBB0_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: addsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: .LBB0_2: # %bb2
; CHECK-NEXT: retq
@@ -50,7 +50,7 @@ bb2:
define double @profile_metadata(double %x, double %y) {
; CHECK-LABEL: profile_metadata:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mulsd %xmm1, %xmm0
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: ucomisd %xmm1, %xmm0
@@ -81,7 +81,7 @@ bb2:
define void @foo(float %f) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: ucomiss %xmm1, %xmm0
; CHECK-NEXT: jne .LBB2_2
diff --git a/test/CodeGen/X86/fp128-cast.ll b/test/CodeGen/X86/fp128-cast.ll
index 560892485d8..3e49f671508 100644
--- a/test/CodeGen/X86/fp128-cast.ll
+++ b/test/CodeGen/X86/fp128-cast.ll
@@ -363,7 +363,7 @@ cleanup: ; preds = %entry, %if.then
define i1 @PR34866(i128 %x) {
; X64-LABEL: PR34866:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: xorq -{{[0-9]+}}(%rsp), %rsi
@@ -373,13 +373,13 @@ define i1 @PR34866(i128 %x) {
; X64-NEXT: retq
;
; X64_NO_MMX-LABEL: PR34866:
-; X64_NO_MMX: # BB#0:
+; X64_NO_MMX: # %bb.0:
; X64_NO_MMX-NEXT: orq %rsi, %rdi
; X64_NO_MMX-NEXT: sete %al
; X64_NO_MMX-NEXT: retq
;
; X32-LABEL: PR34866:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
@@ -394,7 +394,7 @@ define i1 @PR34866(i128 %x) {
define i1 @PR34866_commute(i128 %x) {
; X64-LABEL: PR34866_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: xorq -{{[0-9]+}}(%rsp), %rsi
@@ -404,13 +404,13 @@ define i1 @PR34866_commute(i128 %x) {
; X64-NEXT: retq
;
; X64_NO_MMX-LABEL: PR34866_commute:
-; X64_NO_MMX: # BB#0:
+; X64_NO_MMX: # %bb.0:
; X64_NO_MMX-NEXT: orq %rsi, %rdi
; X64_NO_MMX-NEXT: sete %al
; X64_NO_MMX-NEXT: retq
;
; X32-LABEL: PR34866_commute:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
diff --git a/test/CodeGen/X86/fp128-i128.ll b/test/CodeGen/X86/fp128-i128.ll
index 98082ec611d..54e2aab37ec 100644
--- a/test/CodeGen/X86/fp128-i128.ll
+++ b/test/CodeGen/X86/fp128-i128.ll
@@ -43,7 +43,7 @@
; }
define void @TestUnionLD1(fp128 %s, i64 %n) #0 {
; CHECK-LABEL: TestUnionLD1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movabsq $281474976710655, %rcx # imm = 0xFFFFFFFFFFFF
@@ -78,7 +78,7 @@ entry:
; }
define fp128 @TestUnionLD2(fp128 %s) #0 {
; CHECK-LABEL: TestUnionLD2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
@@ -102,7 +102,7 @@ entry:
; }
define fp128 @TestI128_1(fp128 %x) #0 {
; CHECK-LABEL: TestI128_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
@@ -140,11 +140,11 @@ entry:
; }
define fp128 @TestI128_2(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: TestI128_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: cmpq $0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: jns .LBB3_2
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: .LBB3_2: # %entry
; CHECK-NEXT: retq
@@ -168,14 +168,14 @@ entry:
; }
define fp128 @TestI128_3(fp128 %x, i32* nocapture readnone %ex) #0 {
; CHECK-LABEL: TestI128_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movabsq $9223090561878065152, %rcx # imm = 0x7FFF000000000000
; CHECK-NEXT: testq %rcx, %rax
; CHECK-NEXT: je .LBB4_2
-; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; CHECK-NEXT: jmp .LBB4_3
; CHECK-NEXT: .LBB4_2: # %if.then
@@ -224,7 +224,7 @@ if.end: ; preds = %if.then, %entry
; }
define fp128 @TestI128_4(fp128 %x) #0 {
; CHECK-LABEL: TestI128_4:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
@@ -253,7 +253,7 @@ entry:
; }
define void @TestShift128_2() #2 {
; CHECK-LABEL: TestShift128_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq {{.*}}(%rip), %rax
; CHECK-NEXT: shlq $32, %rax
; CHECK-NEXT: movq {{.*}}(%rip), %rcx
@@ -272,7 +272,7 @@ entry:
define fp128 @acosl(fp128 %x) #0 {
; CHECK-LABEL: acosl:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
@@ -294,11 +294,11 @@ entry:
; Compare i128 values and check i128 constants.
define fp128 @TestComp(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: TestComp:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: cmpq $0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: jns .LBB8_2
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: .LBB8_2: # %entry
; CHECK-NEXT: retq
@@ -314,7 +314,7 @@ declare void @foo(fp128) #1
; Test logical operations on fp128 values.
define fp128 @TestFABS_LD(fp128 %x) #0 {
; CHECK-LABEL: TestFABS_LD:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
entry:
@@ -329,7 +329,7 @@ declare fp128 @copysignl(fp128, fp128) #1
; Test more complicated logical operations generated from copysignl.
define void @TestCopySign({ fp128, fp128 }* noalias nocapture sret %agg.result, { fp128, fp128 }* byval nocapture readonly align 16 %z) #0 {
; CHECK-LABEL: TestCopySign:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $40, %rsp
@@ -345,7 +345,7 @@ define void @TestCopySign({ fp128, fp128 }* noalias nocapture sret %agg.result,
; CHECK-NEXT: callq __subtf3
; CHECK-NEXT: testl %ebp, %ebp
; CHECK-NEXT: jle .LBB10_1
-; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: # %bb.2: # %if.then
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
diff --git a/test/CodeGen/X86/fp128-select.ll b/test/CodeGen/X86/fp128-select.ll
index 9416163357e..85f7d97c985 100644
--- a/test/CodeGen/X86/fp128-select.ll
+++ b/test/CodeGen/X86/fp128-select.ll
@@ -10,10 +10,10 @@
define void @test_select(fp128* %p, fp128* %q, i1 zeroext %c) {
; MMX-LABEL: test_select:
-; MMX: # BB#0:
+; MMX: # %bb.0:
; MMX-NEXT: testl %edx, %edx
; MMX-NEXT: jne .LBB0_1
-; MMX-NEXT: # BB#2:
+; MMX-NEXT: # %bb.2:
; MMX-NEXT: movaps {{.*}}(%rip), %xmm0
; MMX-NEXT: movaps %xmm0, (%rsi)
; MMX-NEXT: retq
@@ -23,7 +23,7 @@ define void @test_select(fp128* %p, fp128* %q, i1 zeroext %c) {
; MMX-NEXT: retq
;
; CHECK-LABEL: test_select:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: cmovneq (%rdi), %rax
diff --git a/test/CodeGen/X86/gfni-intrinsics.ll b/test/CodeGen/X86/gfni-intrinsics.ll
index 46e9efc12ca..76e201e3a41 100644
--- a/test/CodeGen/X86/gfni-intrinsics.ll
+++ b/test/CodeGen/X86/gfni-intrinsics.ll
@@ -4,7 +4,7 @@
declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
define <16 x i8> @test_gf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: test_gf2p8affineinvqb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: gf2p8affineinvqb $11, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xcf,0xc1,0x0b]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
@@ -14,7 +14,7 @@ define <16 x i8> @test_gf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2) {
declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
define <16 x i8> @test_gf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: test_gf2p8affineqb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: gf2p8affineqb $11, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xce,0xc1,0x0b]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
@@ -24,7 +24,7 @@ define <16 x i8> @test_gf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2) {
declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
define <16 x i8> @test_gf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: test_gf2p8mulb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: gf2p8mulb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0xcf,0xc1]
; CHECK-NEXT: retl ## encoding: [0xc3]
%1 = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> %src2)
diff --git a/test/CodeGen/X86/gpr-to-mask.ll b/test/CodeGen/X86/gpr-to-mask.ll
index 842faaae9b1..1928a6c80f5 100644
--- a/test/CodeGen/X86/gpr-to-mask.ll
+++ b/test/CodeGen/X86/gpr-to-mask.ll
@@ -3,10 +3,10 @@
define void @test_fcmp_storefloat(i1 %cond, float* %fptr, float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) {
; CHECK-LABEL: test_fcmp_storefloat:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB0_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: vcmpeqss %xmm3, %xmm2, %k1
; CHECK-NEXT: jmp .LBB0_3
; CHECK-NEXT: .LBB0_2: # %else
@@ -35,10 +35,10 @@ exit:
define void @test_fcmp_storei1(i1 %cond, float* %fptr, i1* %iptr, float %f1, float %f2, float %f3, float %f4) {
; CHECK-LABEL: test_fcmp_storei1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB1_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: vcmpeqss %xmm1, %xmm0, %k0
; CHECK-NEXT: jmp .LBB1_3
; CHECK-NEXT: .LBB1_2: # %else
@@ -67,10 +67,10 @@ exit:
define void @test_load_add(i1 %cond, float* %fptr, i1* %iptr1, i1* %iptr2, float %f1, float %f2) {
; CHECK-LABEL: test_load_add:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB2_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: kmovb (%rdx), %k0
; CHECK-NEXT: kmovb (%rcx), %k1
; CHECK-NEXT: kaddb %k1, %k0, %k1
@@ -103,10 +103,10 @@ exit:
define void @test_load_i1(i1 %cond, float* %fptr, i1* %iptr1, i1* %iptr2, float %f1, float %f2) {
; CHECK-LABEL: test_load_i1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB3_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: kmovb (%rdx), %k1
; CHECK-NEXT: jmp .LBB3_3
; CHECK-NEXT: .LBB3_2: # %else
@@ -135,10 +135,10 @@ exit:
define void @test_loadi1_storei1(i1 %cond, i1* %iptr1, i1* %iptr2, i1* %iptr3) {
; CHECK-LABEL: test_loadi1_storei1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB4_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: movb (%rsi), %al
; CHECK-NEXT: jmp .LBB4_3
; CHECK-NEXT: .LBB4_2: # %else
@@ -166,12 +166,12 @@ exit:
define void @test_shl1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_shl1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB5_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: kmovb (%rsi), %k0
; CHECK-NEXT: kaddb %k0, %k0, %k1
; CHECK-NEXT: jmp .LBB5_3
@@ -204,12 +204,12 @@ exit:
define void @test_shr1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_shr1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB6_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: movb (%rsi), %al
; CHECK-NEXT: shrb %al
; CHECK-NEXT: jmp .LBB6_3
@@ -243,12 +243,12 @@ exit:
define void @test_shr2(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_shr2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB7_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: kmovb (%rsi), %k0
; CHECK-NEXT: kshiftrb $2, %k0, %k1
; CHECK-NEXT: jmp .LBB7_3
@@ -281,12 +281,12 @@ exit:
define void @test_shl(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_shl:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB8_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: kmovb (%rsi), %k0
; CHECK-NEXT: kshiftlb $6, %k0, %k1
; CHECK-NEXT: jmp .LBB8_3
@@ -319,14 +319,14 @@ exit:
define void @test_add(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_add:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: kmovb (%rsi), %k0
; CHECK-NEXT: kmovb (%rdx), %k1
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB9_2
-; CHECK-NEXT: # BB#1: # %if
+; CHECK-NEXT: # %bb.1: # %if
; CHECK-NEXT: kandb %k1, %k0, %k1
; CHECK-NEXT: jmp .LBB9_3
; CHECK-NEXT: .LBB9_2: # %else
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
index e32c7452b0c..2b8b8c909d1 100644
--- a/test/CodeGen/X86/haddsub-2.ll
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -6,12 +6,12 @@
define <4 x float> @hadd_ps_test1(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: hadd_ps_test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hadd_ps_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 0
@@ -35,12 +35,12 @@ define <4 x float> @hadd_ps_test1(<4 x float> %A, <4 x float> %B) {
define <4 x float> @hadd_ps_test2(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: hadd_ps_test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hadd_ps_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 2
@@ -64,12 +64,12 @@ define <4 x float> @hadd_ps_test2(<4 x float> %A, <4 x float> %B) {
define <4 x float> @hsub_ps_test1(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: hsub_ps_test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: hsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hsub_ps_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 0
@@ -93,12 +93,12 @@ define <4 x float> @hsub_ps_test1(<4 x float> %A, <4 x float> %B) {
define <4 x float> @hsub_ps_test2(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: hsub_ps_test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: hsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hsub_ps_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 2
@@ -122,7 +122,7 @@ define <4 x float> @hsub_ps_test2(<4 x float> %A, <4 x float> %B) {
define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
; SSE3-LABEL: phadd_d_test1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; SSE3-NEXT: movd %xmm2, %ecx
@@ -151,12 +151,12 @@ define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: phadd_d_test1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phadd_d_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x i32> %A, i32 0
@@ -180,7 +180,7 @@ define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
; SSE3-LABEL: phadd_d_test2:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE3-NEXT: movd %xmm2, %eax
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -209,12 +209,12 @@ define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: phadd_d_test2:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phadd_d_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x i32> %A, i32 2
@@ -238,7 +238,7 @@ define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
; SSE3-LABEL: phsub_d_test1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; SSE3-NEXT: movd %xmm2, %ecx
@@ -267,12 +267,12 @@ define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: phsub_d_test1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsub_d_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x i32> %A, i32 0
@@ -296,7 +296,7 @@ define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
; SSE3-LABEL: phsub_d_test2:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE3-NEXT: movd %xmm2, %eax
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -325,12 +325,12 @@ define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: phsub_d_test2:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsub_d_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x i32> %A, i32 2
@@ -354,12 +354,12 @@ define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
define <2 x double> @hadd_pd_test1(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: hadd_pd_test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddpd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hadd_pd_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <2 x double> %A, i32 0
@@ -375,12 +375,12 @@ define <2 x double> @hadd_pd_test1(<2 x double> %A, <2 x double> %B) {
define <2 x double> @hadd_pd_test2(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: hadd_pd_test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddpd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hadd_pd_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <2 x double> %A, i32 1
@@ -396,12 +396,12 @@ define <2 x double> @hadd_pd_test2(<2 x double> %A, <2 x double> %B) {
define <2 x double> @hsub_pd_test1(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: hsub_pd_test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: hsubpd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hsub_pd_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <2 x double> %A, i32 0
@@ -417,12 +417,12 @@ define <2 x double> @hsub_pd_test1(<2 x double> %A, <2 x double> %B) {
define <2 x double> @hsub_pd_test2(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: hsub_pd_test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: hsubpd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hsub_pd_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <2 x double> %B, i32 0
@@ -438,14 +438,14 @@ define <2 x double> @hsub_pd_test2(<2 x double> %A, <2 x double> %B) {
define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
; SSE-LABEL: avx_vhadd_pd_test:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddpd %xmm1, %xmm0
; SSE-NEXT: haddpd %xmm3, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: avx_vhadd_pd_test:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vhaddpd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -473,14 +473,14 @@ define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
; SSE-LABEL: avx_vhsub_pd_test:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: hsubpd %xmm1, %xmm0
; SSE-NEXT: hsubpd %xmm3, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: avx_vhsub_pd_test:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vhsubpd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -508,7 +508,7 @@ define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
; SSE3-LABEL: avx2_vphadd_d_test:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
; SSE3-NEXT: movd %xmm4, %r8d
@@ -562,14 +562,14 @@ define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: avx2_vphadd_d_test:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm1, %xmm0
; SSSE3-NEXT: phaddd %xmm3, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: avx2_vphadd_d_test:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -578,7 +578,7 @@ define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avx2_vphadd_d_test:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
@@ -622,7 +622,7 @@ define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
; SSE3-LABEL: avx2_vphadd_w_test:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pushq %rbp
; SSE3-NEXT: .cfi_def_cfa_offset 16
; SSE3-NEXT: pushq %r15
@@ -732,14 +732,14 @@ define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: avx2_vphadd_w_test:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddw %xmm1, %xmm0
; SSSE3-NEXT: phaddw %xmm3, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: avx2_vphadd_w_test:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vphaddw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -748,7 +748,7 @@ define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avx2_vphadd_w_test:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vphaddw %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
@@ -826,7 +826,7 @@ define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
define <4 x i32> @not_a_hsub_1(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: not_a_hsub_1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; SSE-NEXT: movd %xmm2, %ecx
@@ -855,7 +855,7 @@ define <4 x i32> @not_a_hsub_1(<4 x i32> %A, <4 x i32> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: not_a_hsub_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: vpextrd $1, %xmm0, %ecx
; AVX-NEXT: subl %ecx, %eax
@@ -894,7 +894,7 @@ define <4 x i32> @not_a_hsub_1(<4 x i32> %A, <4 x i32> %B) {
define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: not_a_hsub_2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movaps %xmm0, %xmm3
@@ -915,7 +915,7 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: not_a_hsub_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX-NEXT: vsubss %xmm3, %xmm2, %xmm2
@@ -951,7 +951,7 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: not_a_hsub_3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: subsd %xmm2, %xmm1
@@ -963,7 +963,7 @@ define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: not_a_hsub_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vsubsd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
@@ -986,13 +986,13 @@ define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
define <8 x float> @avx_vhadd_ps(<8 x float> %a, <8 x float> %b) {
; SSE-LABEL: avx_vhadd_ps:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm2, %xmm0
; SSE-NEXT: haddps %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: avx_vhadd_ps:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%vecext = extractelement <8 x float> %a, i32 0
@@ -1032,13 +1032,13 @@ define <8 x float> @avx_vhadd_ps(<8 x float> %a, <8 x float> %b) {
define <8 x float> @avx_vhsub_ps(<8 x float> %a, <8 x float> %b) {
; SSE-LABEL: avx_vhsub_ps:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: hsubps %xmm2, %xmm0
; SSE-NEXT: hsubps %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: avx_vhsub_ps:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%vecext = extractelement <8 x float> %a, i32 0
@@ -1078,13 +1078,13 @@ define <8 x float> @avx_vhsub_ps(<8 x float> %a, <8 x float> %b) {
define <4 x double> @avx_hadd_pd(<4 x double> %a, <4 x double> %b) {
; SSE-LABEL: avx_hadd_pd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddpd %xmm2, %xmm0
; SSE-NEXT: haddpd %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: avx_hadd_pd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%vecext = extractelement <4 x double> %a, i32 0
@@ -1108,13 +1108,13 @@ define <4 x double> @avx_hadd_pd(<4 x double> %a, <4 x double> %b) {
define <4 x double> @avx_hsub_pd(<4 x double> %a, <4 x double> %b) {
; SSE-LABEL: avx_hsub_pd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: hsubpd %xmm2, %xmm0
; SSE-NEXT: hsubpd %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: avx_hsub_pd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%vecext = extractelement <4 x double> %a, i32 0
@@ -1140,7 +1140,7 @@ define <4 x double> @avx_hsub_pd(<4 x double> %a, <4 x double> %b) {
define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
; SSE3-LABEL: avx2_hadd_d:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movd %xmm0, %ecx
; SSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
; SSE3-NEXT: movd %xmm4, %r8d
@@ -1194,13 +1194,13 @@ define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: avx2_hadd_d:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm2, %xmm0
; SSSE3-NEXT: phaddd %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: avx2_hadd_d:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vphaddd %xmm2, %xmm3, %xmm2
@@ -1209,7 +1209,7 @@ define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avx2_hadd_d:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
@@ -1249,7 +1249,7 @@ define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
; SSE3-LABEL: avx2_hadd_w:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pushq %rbp
; SSE3-NEXT: .cfi_def_cfa_offset 16
; SSE3-NEXT: pushq %r15
@@ -1359,13 +1359,13 @@ define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: avx2_hadd_w:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddw %xmm2, %xmm0
; SSSE3-NEXT: phaddw %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: avx2_hadd_w:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vphaddw %xmm2, %xmm3, %xmm2
@@ -1374,7 +1374,7 @@ define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: avx2_hadd_w:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
%vecext = extractelement <16 x i16> %a, i32 0
diff --git a/test/CodeGen/X86/haddsub-shuf.ll b/test/CodeGen/X86/haddsub-shuf.ll
index 37597c415d6..3b126b7b6df 100644
--- a/test/CodeGen/X86/haddsub-shuf.ll
+++ b/test/CodeGen/X86/haddsub-shuf.ll
@@ -7,12 +7,12 @@
define <4 x float> @hadd_v4f32(<4 x float> %a) {
; SSSE3-LABEL: hadd_v4f32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: haddps %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hadd_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a02 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 2>
@@ -24,12 +24,12 @@ define <4 x float> @hadd_v4f32(<4 x float> %a) {
define <4 x float> @hsub_v4f32(<4 x float> %a) {
; SSSE3-LABEL: hsub_v4f32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: hsubps %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hsub_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a02 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 2>
@@ -41,12 +41,12 @@ define <4 x float> @hsub_v4f32(<4 x float> %a) {
define <2 x double> @hadd_v2f64(<2 x double> %a) {
; SSSE3-LABEL: hadd_v2f64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: haddpd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hadd_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a0 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
@@ -58,12 +58,12 @@ define <2 x double> @hadd_v2f64(<2 x double> %a) {
define <2 x double> @hsub_v2f64(<2 x double> %a) {
; SSSE3-LABEL: hsub_v2f64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: hsubpd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hsub_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a0 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
@@ -75,12 +75,12 @@ define <2 x double> @hsub_v2f64(<2 x double> %a) {
define <4 x i32> @hadd_v4i32(<4 x i32> %a) {
; SSSE3-LABEL: hadd_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hadd_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a02 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -92,12 +92,12 @@ define <4 x i32> @hadd_v4i32(<4 x i32> %a) {
define <4 x i32> @hsub_v4i32(<4 x i32> %a) {
; SSSE3-LABEL: hsub_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hsub_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a02 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -109,12 +109,12 @@ define <4 x i32> @hsub_v4i32(<4 x i32> %a) {
define <8 x i16> @hadd_v8i16(<8 x i16> %a) {
; SSSE3-LABEL: hadd_v8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddw %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hadd_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a0246 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -126,12 +126,12 @@ define <8 x i16> @hadd_v8i16(<8 x i16> %a) {
define <8 x i16> @hsub_v8i16(<8 x i16> %a) {
; SSSE3-LABEL: hsub_v8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubw %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hsub_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a0246 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/haddsub-undef.ll b/test/CodeGen/X86/haddsub-undef.ll
index e59ff79e0d8..d34f8985cff 100644
--- a/test/CodeGen/X86/haddsub-undef.ll
+++ b/test/CodeGen/X86/haddsub-undef.ll
@@ -7,12 +7,12 @@
define <4 x float> @test1_undef(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test1_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test1_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %a, i32 0
@@ -32,12 +32,12 @@ define <4 x float> @test1_undef(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test2_undef(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test2_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %a, i32 0
@@ -57,12 +57,12 @@ define <4 x float> @test2_undef(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test3_undef(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test3_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test3_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %a, i32 0
@@ -82,13 +82,13 @@ define <4 x float> @test3_undef(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test4_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test4_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -101,7 +101,7 @@ define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test5_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm0, %xmm1
@@ -109,7 +109,7 @@ define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
; SSE-NEXT: retq
;
; AVX-LABEL: test5_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -122,12 +122,12 @@ define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
define <4 x float> @test6_undef(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test6_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test6_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %a, i32 0
@@ -143,12 +143,12 @@ define <4 x float> @test6_undef(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test7_undef(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test7_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test7_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %b, i32 0
@@ -164,7 +164,7 @@ define <4 x float> @test7_undef(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test8_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
@@ -176,7 +176,7 @@ define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
; SSE-NEXT: retq
;
; AVX-LABEL: test8_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
@@ -197,12 +197,12 @@ define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test9_undef(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test9_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test9_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %a, i32 0
@@ -218,12 +218,12 @@ define <4 x float> @test9_undef(<4 x float> %a, <4 x float> %b) {
define <8 x float> @test10_undef(<8 x float> %a, <8 x float> %b) {
; SSE-LABEL: test10_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test10_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%vecext = extractelement <8 x float> %a, i32 0
@@ -239,7 +239,7 @@ define <8 x float> @test10_undef(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
; SSE-LABEL: test11_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
@@ -248,7 +248,7 @@ define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
; SSE-NEXT: retq
;
; AVX-LABEL: test11_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
%vecext = extractelement <8 x float> %a, i32 0
@@ -264,12 +264,12 @@ define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test12_undef(<8 x float> %a, <8 x float> %b) {
; SSE-LABEL: test12_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test12_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
%vecext = extractelement <8 x float> %a, i32 0
@@ -285,12 +285,12 @@ define <8 x float> @test12_undef(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test13_undef(<8 x float> %a, <8 x float> %b) {
; SSE-LABEL: test13_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test13_undef:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -315,17 +315,17 @@ define <8 x float> @test13_undef(<8 x float> %a, <8 x float> %b) {
define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: test14_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: phaddd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test14_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test14_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
@@ -344,7 +344,7 @@ define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
; integer horizontal adds instead of two scalar adds followed by vector inserts.
define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: test15_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE-NEXT: movd %xmm0, %ecx
@@ -359,7 +359,7 @@ define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test15_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vpextrd $1, %xmm0, %ecx
; AVX1-NEXT: addl %eax, %ecx
@@ -374,7 +374,7 @@ define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test15_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
@@ -390,17 +390,17 @@ define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: test16_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: phaddd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
@@ -416,18 +416,18 @@ define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: test17_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: phaddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test17_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test17_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/haddsub.ll b/test/CodeGen/X86/haddsub.ll
index bf5966e318f..030de9c7f14 100644
--- a/test/CodeGen/X86/haddsub.ll
+++ b/test/CodeGen/X86/haddsub.ll
@@ -4,12 +4,12 @@
define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
; SSE3-LABEL: haddpd1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddpd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddpd1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
@@ -20,12 +20,12 @@ define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
define <2 x double> @haddpd2(<2 x double> %x, <2 x double> %y) {
; SSE3-LABEL: haddpd2:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddpd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddpd2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 2>
@@ -36,12 +36,12 @@ define <2 x double> @haddpd2(<2 x double> %x, <2 x double> %y) {
define <2 x double> @haddpd3(<2 x double> %x) {
; SSE3-LABEL: haddpd3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddpd %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddpd3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
@@ -52,12 +52,12 @@ define <2 x double> @haddpd3(<2 x double> %x) {
define <4 x float> @haddps1(<4 x float> %x, <4 x float> %y) {
; SSE3-LABEL: haddps1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddps1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -68,12 +68,12 @@ define <4 x float> @haddps1(<4 x float> %x, <4 x float> %y) {
define <4 x float> @haddps2(<4 x float> %x, <4 x float> %y) {
; SSE3-LABEL: haddps2:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddps2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 2, i32 5, i32 6>
@@ -84,12 +84,12 @@ define <4 x float> @haddps2(<4 x float> %x, <4 x float> %y) {
define <4 x float> @haddps3(<4 x float> %x) {
; SSE3-LABEL: haddps3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddps3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
@@ -100,12 +100,12 @@ define <4 x float> @haddps3(<4 x float> %x) {
define <4 x float> @haddps4(<4 x float> %x) {
; SSE3-LABEL: haddps4:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddps4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -116,12 +116,12 @@ define <4 x float> @haddps4(<4 x float> %x) {
define <4 x float> @haddps5(<4 x float> %x) {
; SSE3-LABEL: haddps5:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddps5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 undef, i32 undef>
@@ -132,12 +132,12 @@ define <4 x float> @haddps5(<4 x float> %x) {
define <4 x float> @haddps6(<4 x float> %x) {
; SSE3-LABEL: haddps6:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddps6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -148,12 +148,12 @@ define <4 x float> @haddps6(<4 x float> %x) {
define <4 x float> @haddps7(<4 x float> %x) {
; SSE3-LABEL: haddps7:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddps7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 undef>
@@ -164,12 +164,12 @@ define <4 x float> @haddps7(<4 x float> %x) {
define <2 x double> @hsubpd1(<2 x double> %x, <2 x double> %y) {
; SSE3-LABEL: hsubpd1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubpd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: hsubpd1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
@@ -180,12 +180,12 @@ define <2 x double> @hsubpd1(<2 x double> %x, <2 x double> %y) {
define <2 x double> @hsubpd2(<2 x double> %x) {
; SSE3-LABEL: hsubpd2:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubpd %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: hsubpd2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
@@ -196,12 +196,12 @@ define <2 x double> @hsubpd2(<2 x double> %x) {
define <4 x float> @hsubps1(<4 x float> %x, <4 x float> %y) {
; SSE3-LABEL: hsubps1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: hsubps1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -212,12 +212,12 @@ define <4 x float> @hsubps1(<4 x float> %x, <4 x float> %y) {
define <4 x float> @hsubps2(<4 x float> %x) {
; SSE3-LABEL: hsubps2:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: hsubps2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
@@ -228,12 +228,12 @@ define <4 x float> @hsubps2(<4 x float> %x) {
define <4 x float> @hsubps3(<4 x float> %x) {
; SSE3-LABEL: hsubps3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: hsubps3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -244,12 +244,12 @@ define <4 x float> @hsubps3(<4 x float> %x) {
define <4 x float> @hsubps4(<4 x float> %x) {
; SSE3-LABEL: hsubps4:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: hsubps4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -260,13 +260,13 @@ define <4 x float> @hsubps4(<4 x float> %x) {
define <8 x float> @vhaddps1(<8 x float> %x, <8 x float> %y) {
; SSE3-LABEL: vhaddps1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm2, %xmm0
; SSE3-NEXT: haddps %xmm3, %xmm1
; SSE3-NEXT: retq
;
; AVX-LABEL: vhaddps1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -277,13 +277,13 @@ define <8 x float> @vhaddps1(<8 x float> %x, <8 x float> %y) {
define <8 x float> @vhaddps2(<8 x float> %x, <8 x float> %y) {
; SSE3-LABEL: vhaddps2:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm2, %xmm0
; SSE3-NEXT: haddps %xmm3, %xmm1
; SSE3-NEXT: retq
;
; AVX-LABEL: vhaddps2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 5, i32 6, i32 13, i32 14>
@@ -294,13 +294,13 @@ define <8 x float> @vhaddps2(<8 x float> %x, <8 x float> %y) {
define <8 x float> @vhaddps3(<8 x float> %x) {
; SSE3-LABEL: vhaddps3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm0, %xmm0
; SSE3-NEXT: haddps %xmm1, %xmm1
; SSE3-NEXT: retq
;
; AVX-LABEL: vhaddps3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
%a = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
@@ -311,13 +311,13 @@ define <8 x float> @vhaddps3(<8 x float> %x) {
define <8 x float> @vhsubps1(<8 x float> %x, <8 x float> %y) {
; SSE3-LABEL: vhsubps1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubps %xmm2, %xmm0
; SSE3-NEXT: hsubps %xmm3, %xmm1
; SSE3-NEXT: retq
;
; AVX-LABEL: vhsubps1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -328,13 +328,13 @@ define <8 x float> @vhsubps1(<8 x float> %x, <8 x float> %y) {
define <8 x float> @vhsubps3(<8 x float> %x) {
; SSE3-LABEL: vhsubps3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubps %xmm0, %xmm0
; SSE3-NEXT: hsubps %xmm1, %xmm1
; SSE3-NEXT: retq
;
; AVX-LABEL: vhsubps3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubps %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
%a = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
@@ -345,13 +345,13 @@ define <8 x float> @vhsubps3(<8 x float> %x) {
define <4 x double> @vhaddpd1(<4 x double> %x, <4 x double> %y) {
; SSE3-LABEL: vhaddpd1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddpd %xmm2, %xmm0
; SSE3-NEXT: haddpd %xmm3, %xmm1
; SSE3-NEXT: retq
;
; AVX-LABEL: vhaddpd1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%a = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -362,13 +362,13 @@ define <4 x double> @vhaddpd1(<4 x double> %x, <4 x double> %y) {
define <4 x double> @vhsubpd1(<4 x double> %x, <4 x double> %y) {
; SSE3-LABEL: vhsubpd1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: hsubpd %xmm2, %xmm0
; SSE3-NEXT: hsubpd %xmm3, %xmm1
; SSE3-NEXT: retq
;
; AVX-LABEL: vhsubpd1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhsubpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%a = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -379,12 +379,12 @@ define <4 x double> @vhsubpd1(<4 x double> %x, <4 x double> %y) {
define <2 x float> @haddps_v2f32(<4 x float> %v0) {
; SSE3-LABEL: haddps_v2f32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: haddps %xmm0, %xmm0
; SSE3-NEXT: retq
;
; AVX-LABEL: haddps_v2f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%v0.0 = extractelement <4 x float> %v0, i32 0
diff --git a/test/CodeGen/X86/half.ll b/test/CodeGen/X86/half.ll
index 9b0aa0ef931..d36a04b8459 100644
--- a/test/CodeGen/X86/half.ll
+++ b/test/CodeGen/X86/half.ll
@@ -10,19 +10,19 @@
define void @test_load_store(half* %in, half* %out) #0 {
; BWON-LABEL: test_load_store:
-; BWON: # BB#0:
+; BWON: # %bb.0:
; BWON-NEXT: movzwl (%rdi), %eax
; BWON-NEXT: movw %ax, (%rsi)
; BWON-NEXT: retq
;
; BWOFF-LABEL: test_load_store:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: movw (%rdi), %ax
; BWOFF-NEXT: movw %ax, (%rsi)
; BWOFF-NEXT: retq
;
; CHECK-I686-LABEL: test_load_store:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-I686-NEXT: movw (%ecx), %cx
@@ -35,17 +35,17 @@ define void @test_load_store(half* %in, half* %out) #0 {
define i16 @test_bitcast_from_half(half* %addr) #0 {
; BWON-LABEL: test_bitcast_from_half:
-; BWON: # BB#0:
+; BWON: # %bb.0:
; BWON-NEXT: movzwl (%rdi), %eax
; BWON-NEXT: retq
;
; BWOFF-LABEL: test_bitcast_from_half:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: movw (%rdi), %ax
; BWOFF-NEXT: retq
;
; CHECK-I686-LABEL: test_bitcast_from_half:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-I686-NEXT: movw (%eax), %ax
; CHECK-I686-NEXT: retl
@@ -56,12 +56,12 @@ define i16 @test_bitcast_from_half(half* %addr) #0 {
define void @test_bitcast_to_half(half* %addr, i16 %in) #0 {
; CHECK-LABEL: test_bitcast_to_half:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movw %si, (%rdi)
; CHECK-NEXT: retq
;
; CHECK-I686-LABEL: test_bitcast_to_half:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: movw {{[0-9]+}}(%esp), %ax
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-I686-NEXT: movw %ax, (%ecx)
@@ -73,19 +73,19 @@ define void @test_bitcast_to_half(half* %addr, i16 %in) #0 {
define float @test_extend32(half* %addr) #0 {
; CHECK-LIBCALL-LABEL: test_extend32:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
; CHECK-LIBCALL-NEXT: jmp __gnu_h2f_ieee # TAILCALL
;
; BWON-F16C-LABEL: test_extend32:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: movswl (%rdi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_extend32:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: subl $12, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-I686-NEXT: movzwl (%eax), %eax
@@ -100,7 +100,7 @@ define float @test_extend32(half* %addr) #0 {
define double @test_extend64(half* %addr) #0 {
; CHECK-LIBCALL-LABEL: test_extend64:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
@@ -109,7 +109,7 @@ define double @test_extend64(half* %addr) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_extend64:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: movswl (%rdi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -117,7 +117,7 @@ define double @test_extend64(half* %addr) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_extend64:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: subl $12, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-I686-NEXT: movzwl (%eax), %eax
@@ -132,7 +132,7 @@ define double @test_extend64(half* %addr) #0 {
define void @test_trunc32(float %in, half* %addr) #0 {
; CHECK-LIBCALL-LABEL: test_trunc32:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
@@ -141,14 +141,14 @@ define void @test_trunc32(float %in, half* %addr) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_trunc32:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; BWON-F16C-NEXT: vmovd %xmm0, %eax
; BWON-F16C-NEXT: movw %ax, (%rdi)
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_trunc32:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: pushl %esi
; CHECK-I686-NEXT: subl $8, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -166,7 +166,7 @@ define void @test_trunc32(float %in, half* %addr) #0 {
define void @test_trunc64(double %in, half* %addr) #0 {
; CHECK-LABEL: test_trunc64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: callq __truncdfhf2
@@ -175,7 +175,7 @@ define void @test_trunc64(double %in, half* %addr) #0 {
; CHECK-NEXT: retq
;
; CHECK-I686-LABEL: test_trunc64:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: pushl %esi
; CHECK-I686-NEXT: subl $8, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -193,7 +193,7 @@ define void @test_trunc64(double %in, half* %addr) #0 {
define i64 @test_fptosi_i64(half* %p) #0 {
; CHECK-LIBCALL-LABEL: test_fptosi_i64:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
@@ -202,7 +202,7 @@ define i64 @test_fptosi_i64(half* %p) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_fptosi_i64:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: movswl (%rdi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -210,7 +210,7 @@ define i64 @test_fptosi_i64(half* %p) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_fptosi_i64:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: subl $12, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-I686-NEXT: movzwl (%eax), %eax
@@ -227,7 +227,7 @@ define i64 @test_fptosi_i64(half* %p) #0 {
define void @test_sitofp_i64(i64 %a, half* %p) #0 {
; CHECK-LIBCALL-LABEL: test_sitofp_i64:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: movq %rsi, %rbx
; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, %xmm0
@@ -237,7 +237,7 @@ define void @test_sitofp_i64(i64 %a, half* %p) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_sitofp_i64:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; BWON-F16C-NEXT: vmovd %xmm0, %eax
@@ -245,7 +245,7 @@ define void @test_sitofp_i64(i64 %a, half* %p) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_sitofp_i64:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: pushl %esi
; CHECK-I686-NEXT: subl $24, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -267,7 +267,7 @@ define void @test_sitofp_i64(i64 %a, half* %p) #0 {
define i64 @test_fptoui_i64(half* %p) #0 {
; CHECK-LIBCALL-LABEL: test_fptoui_i64:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
@@ -284,7 +284,7 @@ define i64 @test_fptoui_i64(half* %p) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_fptoui_i64:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: movswl (%rdi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -299,7 +299,7 @@ define i64 @test_fptoui_i64(half* %p) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_fptoui_i64:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: subl $12, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-I686-NEXT: movzwl (%eax), %eax
@@ -316,12 +316,12 @@ define i64 @test_fptoui_i64(half* %p) #0 {
define void @test_uitofp_i64(i64 %a, half* %p) #0 {
; CHECK-LIBCALL-LABEL: test_uitofp_i64:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: movq %rsi, %rbx
; CHECK-LIBCALL-NEXT: testq %rdi, %rdi
; CHECK-LIBCALL-NEXT: js .LBB10_1
-; CHECK-LIBCALL-NEXT: # BB#2:
+; CHECK-LIBCALL-NEXT: # %bb.2:
; CHECK-LIBCALL-NEXT: cvtsi2ssq %rdi, %xmm0
; CHECK-LIBCALL-NEXT: jmp .LBB10_3
; CHECK-LIBCALL-NEXT: .LBB10_1:
@@ -338,10 +338,10 @@ define void @test_uitofp_i64(i64 %a, half* %p) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_uitofp_i64:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: testq %rdi, %rdi
; BWON-F16C-NEXT: js .LBB10_1
-; BWON-F16C-NEXT: # BB#2:
+; BWON-F16C-NEXT: # %bb.2:
; BWON-F16C-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; BWON-F16C-NEXT: jmp .LBB10_3
; BWON-F16C-NEXT: .LBB10_1:
@@ -358,7 +358,7 @@ define void @test_uitofp_i64(i64 %a, half* %p) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_uitofp_i64:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: pushl %esi
; CHECK-I686-NEXT: subl $24, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -382,7 +382,7 @@ define void @test_uitofp_i64(i64 %a, half* %p) #0 {
define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-LABEL: test_extend32_vec4:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $48, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
@@ -408,7 +408,7 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_extend32_vec4:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: movswl 6(%rdi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -427,7 +427,7 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_extend32_vec4:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: pushl %esi
; CHECK-I686-NEXT: subl $56, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -468,7 +468,7 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-LABEL: test_extend64_vec4:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $16, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
@@ -500,7 +500,7 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_extend64_vec4:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: movswl (%rdi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -523,7 +523,7 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_extend64_vec4:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: pushl %esi
; CHECK-I686-NEXT: subl $88, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -563,7 +563,7 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
; BWON-NOF16C-LABEL: test_trunc32_vec4:
-; BWON-NOF16C: # BB#0:
+; BWON-NOF16C: # %bb.0:
; BWON-NOF16C-NEXT: pushq %rbp
; BWON-NOF16C-NEXT: pushq %r15
; BWON-NOF16C-NEXT: pushq %r14
@@ -596,7 +596,7 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
; BWON-NOF16C-NEXT: retq
;
; BWOFF-LABEL: test_trunc32_vec4:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: pushq %rbp
; BWOFF-NEXT: pushq %r15
; BWOFF-NEXT: pushq %r14
@@ -629,7 +629,7 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
; BWOFF-NEXT: retq
;
; BWON-F16C-LABEL: test_trunc32_vec4:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; BWON-F16C-NEXT: vmovd %xmm1, %eax
@@ -648,7 +648,7 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_trunc32_vec4:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: pushl %ebp
; CHECK-I686-NEXT: pushl %ebx
; CHECK-I686-NEXT: pushl %edi
@@ -691,7 +691,7 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
; BWON-NOF16C-LABEL: test_trunc64_vec4:
-; BWON-NOF16C: # BB#0:
+; BWON-NOF16C: # %bb.0:
; BWON-NOF16C-NEXT: pushq %rbp
; BWON-NOF16C-NEXT: pushq %r15
; BWON-NOF16C-NEXT: pushq %r14
@@ -724,7 +724,7 @@ define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
; BWON-NOF16C-NEXT: retq
;
; BWOFF-LABEL: test_trunc64_vec4:
-; BWOFF: # BB#0:
+; BWOFF: # %bb.0:
; BWOFF-NEXT: pushq %rbp
; BWOFF-NEXT: pushq %r15
; BWOFF-NEXT: pushq %r14
@@ -757,7 +757,7 @@ define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
; BWOFF-NEXT: retq
;
; BWON-F16C-LABEL: test_trunc64_vec4:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: pushq %rbp
; BWON-F16C-NEXT: pushq %r15
; BWON-F16C-NEXT: pushq %r14
@@ -795,7 +795,7 @@ define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_trunc64_vec4:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: pushl %ebp
; CHECK-I686-NEXT: pushl %ebx
; CHECK-I686-NEXT: pushl %edi
@@ -840,7 +840,7 @@ declare float @test_floatret();
; fp_round and the subsequent fptrunc from float to half.
define half @test_f80trunc_nodagcombine() #0 {
; CHECK-LIBCALL-LABEL: test_f80trunc_nodagcombine:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
; CHECK-LIBCALL-NEXT: callq test_floatret
; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
@@ -850,7 +850,7 @@ define half @test_f80trunc_nodagcombine() #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_f80trunc_nodagcombine:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: pushq %rax
; BWON-F16C-NEXT: callq test_floatret
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
@@ -859,7 +859,7 @@ define half @test_f80trunc_nodagcombine() #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_f80trunc_nodagcombine:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: subl $12, %esp
; CHECK-I686-NEXT: calll test_floatret
; CHECK-I686-NEXT: fstps (%esp)
@@ -879,7 +879,7 @@ define half @test_f80trunc_nodagcombine() #0 {
define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
; CHECK-LIBCALL-LABEL: test_sitofp_fadd_i32:
-; CHECK-LIBCALL: # BB#0:
+; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $16, %rsp
; CHECK-LIBCALL-NEXT: movl %edi, %ebx
@@ -896,7 +896,7 @@ define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_sitofp_fadd_i32:
-; BWON-F16C: # BB#0:
+; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: movswl (%rsi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -907,7 +907,7 @@ define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
; BWON-F16C-NEXT: retq
;
; CHECK-I686-LABEL: test_sitofp_fadd_i32:
-; CHECK-I686: # BB#0:
+; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: subl $28, %esp
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-I686-NEXT: movzwl (%eax), %eax
diff --git a/test/CodeGen/X86/horizontal-reduce-smax.ll b/test/CodeGen/X86/horizontal-reduce-smax.ll
index 71294904b22..9e53aea03e9 100644
--- a/test/CodeGen/X86/horizontal-reduce-smax.ll
+++ b/test/CodeGen/X86/horizontal-reduce-smax.ll
@@ -15,7 +15,7 @@
define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v2i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -38,7 +38,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v2i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X86-SSE42-NEXT: pcmpgtq %xmm2, %xmm0
@@ -48,7 +48,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v2i64:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X86-AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -57,7 +57,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v2i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -78,7 +78,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v2i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X64-SSE42-NEXT: pcmpgtq %xmm2, %xmm0
@@ -87,7 +87,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v2i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X64-AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -95,7 +95,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v2i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -103,7 +103,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v2i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX512-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovq %xmm0, %rax
@@ -117,7 +117,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v4i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
@@ -134,7 +134,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v4i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pmaxsd %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -143,7 +143,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v4i32:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -152,7 +152,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v4i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm2
; X64-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
@@ -169,7 +169,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v4i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pmaxsd %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -178,7 +178,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v4i32:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -197,7 +197,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -210,7 +210,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
@@ -220,7 +220,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
@@ -230,7 +230,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -243,7 +243,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
@@ -253,7 +253,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
@@ -276,7 +276,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm2
@@ -308,7 +308,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pmaxsb %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -324,7 +324,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -338,7 +338,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm2
; X64-SSE2-NEXT: pcmpgtb %xmm1, %xmm2
@@ -370,7 +370,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pmaxsb %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -386,7 +386,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -420,7 +420,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v4i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -458,7 +458,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v4i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm2
; X86-SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; X86-SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -471,7 +471,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v4i64:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; X86-AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
@@ -489,7 +489,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v4i64:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; X86-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -502,7 +502,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v4i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; X64-SSE2-NEXT: movdqa %xmm1, %xmm3
; X64-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -538,7 +538,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v4i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm2
; X64-SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; X64-SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -550,7 +550,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v4i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; X64-AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
@@ -567,7 +567,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v4i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; X64-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -579,7 +579,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v4i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -599,7 +599,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; X86-SSE2-NEXT: pand %xmm2, %xmm0
@@ -621,7 +621,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxsd %xmm1, %xmm0
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pmaxsd %xmm0, %xmm1
@@ -631,7 +631,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v8i32:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -643,7 +643,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v8i32:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -655,7 +655,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa %xmm0, %xmm2
; X64-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; X64-SSE2-NEXT: pand %xmm2, %xmm0
@@ -677,7 +677,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxsd %xmm1, %xmm0
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pmaxsd %xmm0, %xmm1
@@ -687,7 +687,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i32:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -699,7 +699,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i32:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -711,7 +711,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -736,7 +736,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
@@ -750,7 +750,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxsw %xmm1, %xmm0
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
@@ -761,7 +761,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -774,7 +774,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v16i16:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -787,7 +787,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
@@ -801,7 +801,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxsw %xmm1, %xmm0
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
@@ -812,7 +812,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -825,7 +825,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i16:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -838,7 +838,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -867,7 +867,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v32i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; X86-SSE2-NEXT: pand %xmm2, %xmm0
@@ -904,7 +904,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pmaxsb %xmm0, %xmm1
@@ -921,7 +921,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -938,7 +938,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v32i8:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -955,7 +955,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v32i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa %xmm0, %xmm2
; X64-SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; X64-SSE2-NEXT: pand %xmm2, %xmm0
@@ -992,7 +992,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pmaxsb %xmm0, %xmm1
@@ -1009,7 +1009,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1026,7 +1026,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v32i8:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1043,7 +1043,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v32i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1083,7 +1083,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: subl $28, %esp
; X86-SSE2-NEXT: .cfi_def_cfa_offset 32
; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
@@ -1158,7 +1158,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm4
; X86-SSE42-NEXT: movdqa %xmm4, %xmm5
; X86-SSE42-NEXT: pcmpgtq %xmm2, %xmm5
@@ -1179,7 +1179,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v8i64:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1203,7 +1203,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v8i64:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; X86-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; X86-AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -1218,7 +1218,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; X64-SSE2-NEXT: movdqa %xmm3, %xmm5
; X64-SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1284,7 +1284,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm4
; X64-SSE42-NEXT: movdqa %xmm4, %xmm5
; X64-SSE42-NEXT: pcmpgtq %xmm2, %xmm5
@@ -1304,7 +1304,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1327,7 +1327,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; X64-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -1341,7 +1341,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1366,7 +1366,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm4
; X86-SSE2-NEXT: pcmpgtd %xmm2, %xmm4
; X86-SSE2-NEXT: movdqa %xmm1, %xmm5
@@ -1398,7 +1398,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxsd %xmm3, %xmm1
; X86-SSE42-NEXT: pmaxsd %xmm2, %xmm0
; X86-SSE42-NEXT: pmaxsd %xmm1, %xmm0
@@ -1410,7 +1410,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i32:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -1425,7 +1425,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v16i32:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
@@ -1438,7 +1438,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa %xmm0, %xmm4
; X64-SSE2-NEXT: pcmpgtd %xmm2, %xmm4
; X64-SSE2-NEXT: movdqa %xmm1, %xmm5
@@ -1470,7 +1470,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxsd %xmm3, %xmm1
; X64-SSE42-NEXT: pmaxsd %xmm2, %xmm0
; X64-SSE42-NEXT: pmaxsd %xmm1, %xmm0
@@ -1482,7 +1482,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i32:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -1497,7 +1497,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i32:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
@@ -1510,7 +1510,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1540,7 +1540,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v32i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pmaxsw %xmm3, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm2, %xmm0
; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
@@ -1556,7 +1556,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxsw %xmm3, %xmm1
; X86-SSE42-NEXT: pmaxsw %xmm2, %xmm0
; X86-SSE42-NEXT: pmaxsw %xmm1, %xmm0
@@ -1569,7 +1569,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -1585,7 +1585,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v32i16:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
@@ -1599,7 +1599,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v32i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pmaxsw %xmm3, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm2, %xmm0
; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
@@ -1615,7 +1615,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxsw %xmm3, %xmm1
; X64-SSE42-NEXT: pmaxsw %xmm2, %xmm0
; X64-SSE42-NEXT: pmaxsw %xmm1, %xmm0
@@ -1628,7 +1628,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -1644,7 +1644,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v32i16:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
@@ -1658,7 +1658,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v32i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1692,7 +1692,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v64i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm4
; X86-SSE2-NEXT: pcmpgtb %xmm2, %xmm4
; X86-SSE2-NEXT: movdqa %xmm1, %xmm5
@@ -1739,7 +1739,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxsb %xmm3, %xmm1
; X86-SSE42-NEXT: pmaxsb %xmm2, %xmm0
; X86-SSE42-NEXT: pmaxsb %xmm1, %xmm0
@@ -1758,7 +1758,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -1778,7 +1778,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v64i8:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
@@ -1796,7 +1796,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v64i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa %xmm0, %xmm4
; X64-SSE2-NEXT: pcmpgtb %xmm2, %xmm4
; X64-SSE2-NEXT: movdqa %xmm1, %xmm5
@@ -1843,7 +1843,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxsb %xmm3, %xmm1
; X64-SSE42-NEXT: pmaxsb %xmm2, %xmm0
; X64-SSE42-NEXT: pmaxsb %xmm1, %xmm0
@@ -1862,7 +1862,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -1882,7 +1882,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v64i8:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
@@ -1900,7 +1900,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v64i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/horizontal-reduce-smin.ll b/test/CodeGen/X86/horizontal-reduce-smin.ll
index 45025bf7cc7..e92dcc1072e 100644
--- a/test/CodeGen/X86/horizontal-reduce-smin.ll
+++ b/test/CodeGen/X86/horizontal-reduce-smin.ll
@@ -15,7 +15,7 @@
define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v2i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -38,7 +38,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v2i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X86-SSE42-NEXT: movdqa %xmm2, %xmm0
@@ -49,7 +49,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v2i64:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; X86-AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -58,7 +58,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v2i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -79,7 +79,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v2i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X64-SSE42-NEXT: movdqa %xmm2, %xmm0
@@ -89,7 +89,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v2i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; X64-AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -97,7 +97,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v2i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; X64-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -105,7 +105,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v2i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX512-NEXT: vpminsq %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovq %xmm0, %rax
@@ -119,7 +119,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v4i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
; X86-SSE2-NEXT: pcmpgtd %xmm0, %xmm2
@@ -136,7 +136,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v4i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pminsd %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -145,7 +145,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v4i32:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -154,7 +154,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v4i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa %xmm1, %xmm2
; X64-SSE2-NEXT: pcmpgtd %xmm0, %xmm2
@@ -171,7 +171,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v4i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pminsd %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -180,7 +180,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v4i32:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -199,7 +199,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -212,7 +212,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
@@ -222,7 +222,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
@@ -232,7 +232,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -245,7 +245,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
@@ -255,7 +255,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
@@ -278,7 +278,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
; X86-SSE2-NEXT: pcmpgtb %xmm0, %xmm2
@@ -310,7 +310,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pminsb %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -326,7 +326,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -340,7 +340,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa %xmm1, %xmm2
; X64-SSE2-NEXT: pcmpgtb %xmm0, %xmm2
@@ -372,7 +372,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pminsb %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -388,7 +388,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -422,7 +422,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v4i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -460,7 +460,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v4i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm2
; X86-SSE42-NEXT: movdqa %xmm1, %xmm0
; X86-SSE42-NEXT: pcmpgtq %xmm2, %xmm0
@@ -474,7 +474,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v4i64:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X86-AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm3
@@ -492,7 +492,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v4i64:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; X86-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -505,7 +505,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v4i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
; X64-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -541,7 +541,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v4i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm2
; X64-SSE42-NEXT: movdqa %xmm1, %xmm0
; X64-SSE42-NEXT: pcmpgtq %xmm2, %xmm0
@@ -554,7 +554,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v4i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X64-AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm3
@@ -571,7 +571,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v4i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; X64-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -583,7 +583,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v4i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpminsq %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -603,7 +603,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
; X86-SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; X86-SSE2-NEXT: pand %xmm2, %xmm0
@@ -625,7 +625,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminsd %xmm1, %xmm0
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pminsd %xmm0, %xmm1
@@ -635,7 +635,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v8i32:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -647,7 +647,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v8i32:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -659,7 +659,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa %xmm1, %xmm2
; X64-SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; X64-SSE2-NEXT: pand %xmm2, %xmm0
@@ -681,7 +681,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminsd %xmm1, %xmm0
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pminsd %xmm0, %xmm1
@@ -691,7 +691,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i32:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -703,7 +703,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i32:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -715,7 +715,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -740,7 +740,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
@@ -754,7 +754,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminsw %xmm1, %xmm0
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
@@ -765,7 +765,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -778,7 +778,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v16i16:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -791,7 +791,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
@@ -805,7 +805,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminsw %xmm1, %xmm0
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
@@ -816,7 +816,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -829,7 +829,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i16:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -842,7 +842,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -871,7 +871,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v32i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
; X86-SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; X86-SSE2-NEXT: pand %xmm2, %xmm0
@@ -908,7 +908,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminsb %xmm1, %xmm0
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pminsb %xmm0, %xmm1
@@ -925,7 +925,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -942,7 +942,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v32i8:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -959,7 +959,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v32i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa %xmm1, %xmm2
; X64-SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; X64-SSE2-NEXT: pand %xmm2, %xmm0
@@ -996,7 +996,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminsb %xmm1, %xmm0
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pminsb %xmm0, %xmm1
@@ -1013,7 +1013,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1030,7 +1030,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v32i8:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1047,7 +1047,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v32i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1087,7 +1087,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: subl $28, %esp
; X86-SSE2-NEXT: .cfi_def_cfa_offset 32
; X86-SSE2-NEXT: movdqa %xmm2, %xmm6
@@ -1160,7 +1160,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm4
; X86-SSE42-NEXT: movdqa %xmm3, %xmm5
; X86-SSE42-NEXT: pcmpgtq %xmm1, %xmm5
@@ -1181,7 +1181,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v8i64:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; X86-AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1205,7 +1205,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v8i64:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; X86-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; X86-AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -1220,7 +1220,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,0,2147483648,0]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm5
; X64-SSE2-NEXT: pxor %xmm9, %xmm5
@@ -1286,7 +1286,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm4
; X64-SSE42-NEXT: movdqa %xmm3, %xmm5
; X64-SSE42-NEXT: pcmpgtq %xmm1, %xmm5
@@ -1306,7 +1306,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; X64-AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1329,7 +1329,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; X64-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -1343,7 +1343,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1368,7 +1368,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa %xmm3, %xmm4
; X86-SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1400,7 +1400,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminsd %xmm3, %xmm1
; X86-SSE42-NEXT: pminsd %xmm2, %xmm0
; X86-SSE42-NEXT: pminsd %xmm1, %xmm0
@@ -1412,7 +1412,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i32:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -1427,7 +1427,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v16i32:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
@@ -1440,7 +1440,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa %xmm3, %xmm4
; X64-SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; X64-SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1472,7 +1472,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminsd %xmm3, %xmm1
; X64-SSE42-NEXT: pminsd %xmm2, %xmm0
; X64-SSE42-NEXT: pminsd %xmm1, %xmm0
@@ -1484,7 +1484,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i32:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -1499,7 +1499,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i32:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
@@ -1512,7 +1512,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpminsd %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1542,7 +1542,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v32i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pminsw %xmm3, %xmm1
; X86-SSE2-NEXT: pminsw %xmm2, %xmm0
; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
@@ -1558,7 +1558,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminsw %xmm3, %xmm1
; X86-SSE42-NEXT: pminsw %xmm2, %xmm0
; X86-SSE42-NEXT: pminsw %xmm1, %xmm0
@@ -1571,7 +1571,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -1587,7 +1587,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v32i16:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminsw %xmm1, %xmm0, %xmm0
@@ -1601,7 +1601,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v32i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pminsw %xmm3, %xmm1
; X64-SSE2-NEXT: pminsw %xmm2, %xmm0
; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
@@ -1617,7 +1617,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminsw %xmm3, %xmm1
; X64-SSE42-NEXT: pminsw %xmm2, %xmm0
; X64-SSE42-NEXT: pminsw %xmm1, %xmm0
@@ -1630,7 +1630,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -1646,7 +1646,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v32i16:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminsw %xmm1, %xmm0, %xmm0
@@ -1660,7 +1660,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v32i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1694,7 +1694,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v64i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa %xmm3, %xmm4
; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm4
; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1741,7 +1741,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminsb %xmm3, %xmm1
; X86-SSE42-NEXT: pminsb %xmm2, %xmm0
; X86-SSE42-NEXT: pminsb %xmm1, %xmm0
@@ -1760,7 +1760,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -1780,7 +1780,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v64i8:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
@@ -1798,7 +1798,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v64i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa %xmm3, %xmm4
; X64-SSE2-NEXT: pcmpgtb %xmm1, %xmm4
; X64-SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1845,7 +1845,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminsb %xmm3, %xmm1
; X64-SSE42-NEXT: pminsb %xmm2, %xmm0
; X64-SSE42-NEXT: pminsb %xmm1, %xmm0
@@ -1864,7 +1864,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -1884,7 +1884,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v64i8:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
@@ -1902,7 +1902,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v64i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpminsb %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/horizontal-reduce-umax.ll b/test/CodeGen/X86/horizontal-reduce-umax.ll
index 35d94e88f80..84020e1bd66 100644
--- a/test/CodeGen/X86/horizontal-reduce-umax.ll
+++ b/test/CodeGen/X86/horizontal-reduce-umax.ll
@@ -15,7 +15,7 @@
define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v2i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -38,7 +38,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v2i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
@@ -51,7 +51,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v2i64:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X86-AVX-NEXT: vpxor %xmm2, %xmm0, %xmm3
@@ -63,7 +63,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v2i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -84,7 +84,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v2i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
@@ -96,7 +96,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v2i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; X64-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
@@ -107,7 +107,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v2i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; X64-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
@@ -118,7 +118,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v2i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX512-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovq %xmm0, %rax
@@ -132,7 +132,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v4i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -155,7 +155,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v4i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pmaxud %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -164,7 +164,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v4i32:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -173,7 +173,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v4i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -196,7 +196,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v4i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pmaxud %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -205,7 +205,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v4i32:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -224,7 +224,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -258,7 +258,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
@@ -268,7 +268,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
@@ -278,7 +278,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -312,7 +312,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
@@ -322,7 +322,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
@@ -345,7 +345,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: pmaxub %xmm0, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -361,7 +361,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pmaxub %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -377,7 +377,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -391,7 +391,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: pmaxub %xmm0, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -407,7 +407,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pmaxub %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -423,7 +423,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -457,7 +457,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v4i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -495,7 +495,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v4i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm2
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
; X86-SSE42-NEXT: movdqa %xmm1, %xmm4
@@ -514,7 +514,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v4i64:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X86-AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
@@ -538,7 +538,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v4i64:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
@@ -556,7 +556,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v4i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm1, %xmm3
; X64-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -592,7 +592,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v4i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm2
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; X64-SSE42-NEXT: movdqa %xmm1, %xmm4
@@ -610,7 +610,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v4i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; X64-AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
@@ -633,7 +633,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v4i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; X64-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
@@ -650,7 +650,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v4i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpmaxuq %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -670,7 +670,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -701,7 +701,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxud %xmm1, %xmm0
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pmaxud %xmm0, %xmm1
@@ -711,7 +711,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v8i32:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -723,7 +723,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v8i32:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -735,7 +735,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm1, %xmm3
; X64-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -766,7 +766,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxud %xmm1, %xmm0
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pmaxud %xmm0, %xmm1
@@ -776,7 +776,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i32:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -788,7 +788,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i32:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -800,7 +800,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -825,7 +825,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -867,7 +867,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxuw %xmm1, %xmm0
; X86-SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
@@ -878,7 +878,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -891,7 +891,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v16i16:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -904,7 +904,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-SSE2-NEXT: movdqa %xmm1, %xmm3
; X64-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -946,7 +946,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxuw %xmm1, %xmm0
; X64-SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
@@ -957,7 +957,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -970,7 +970,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i16:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -983,7 +983,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -1012,7 +1012,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v32i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: pmaxub %xmm0, %xmm1
@@ -1029,7 +1029,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pmaxub %xmm0, %xmm1
@@ -1046,7 +1046,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1063,7 +1063,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v32i8:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1080,7 +1080,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v32i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: pmaxub %xmm0, %xmm1
@@ -1097,7 +1097,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pmaxub %xmm0, %xmm1
@@ -1114,7 +1114,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1131,7 +1131,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v32i8:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1148,7 +1148,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v32i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1188,7 +1188,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: subl $28, %esp
; X86-SSE2-NEXT: .cfi_def_cfa_offset 32
; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
@@ -1263,7 +1263,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm4
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm6 = [0,2147483648,0,2147483648]
; X86-SSE42-NEXT: movdqa %xmm3, %xmm0
@@ -1296,7 +1296,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v8i64:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
; X86-AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1330,7 +1330,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v8i64:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm4
@@ -1352,7 +1352,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm3, %xmm5
; X64-SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1418,7 +1418,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm4
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm6 = [9223372036854775808,9223372036854775808]
; X64-SSE42-NEXT: movdqa %xmm3, %xmm0
@@ -1450,7 +1450,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; X64-AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1483,7 +1483,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; X64-AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; X64-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm4
@@ -1504,7 +1504,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1529,7 +1529,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
; X86-SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1576,7 +1576,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxud %xmm3, %xmm1
; X86-SSE42-NEXT: pmaxud %xmm2, %xmm0
; X86-SSE42-NEXT: pmaxud %xmm1, %xmm0
@@ -1588,7 +1588,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i32:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -1603,7 +1603,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v16i32:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
@@ -1616,7 +1616,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm3, %xmm5
; X64-SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1663,7 +1663,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxud %xmm3, %xmm1
; X64-SSE42-NEXT: pmaxud %xmm2, %xmm0
; X64-SSE42-NEXT: pmaxud %xmm1, %xmm0
@@ -1675,7 +1675,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i32:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -1690,7 +1690,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i32:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
@@ -1703,7 +1703,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1733,7 +1733,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v32i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
; X86-SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1791,7 +1791,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxuw %xmm3, %xmm1
; X86-SSE42-NEXT: pmaxuw %xmm2, %xmm0
; X86-SSE42-NEXT: pmaxuw %xmm1, %xmm0
@@ -1804,7 +1804,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -1820,7 +1820,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v32i16:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
@@ -1834,7 +1834,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v32i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-SSE2-NEXT: movdqa %xmm3, %xmm5
; X64-SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1892,7 +1892,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxuw %xmm3, %xmm1
; X64-SSE42-NEXT: pmaxuw %xmm2, %xmm0
; X64-SSE42-NEXT: pmaxuw %xmm1, %xmm0
@@ -1905,7 +1905,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -1921,7 +1921,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v32i16:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
@@ -1935,7 +1935,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v32i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1969,7 +1969,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v64i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pmaxub %xmm3, %xmm1
; X86-SSE2-NEXT: pmaxub %xmm2, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
@@ -1988,7 +1988,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pmaxub %xmm3, %xmm1
; X86-SSE42-NEXT: pmaxub %xmm2, %xmm0
; X86-SSE42-NEXT: pmaxub %xmm1, %xmm0
@@ -2007,7 +2007,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -2027,7 +2027,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v64i8:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
@@ -2045,7 +2045,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v64i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pmaxub %xmm3, %xmm1
; X64-SSE2-NEXT: pmaxub %xmm2, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
@@ -2064,7 +2064,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pmaxub %xmm3, %xmm1
; X64-SSE42-NEXT: pmaxub %xmm2, %xmm0
; X64-SSE42-NEXT: pmaxub %xmm1, %xmm0
@@ -2083,7 +2083,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -2103,7 +2103,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v64i8:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
@@ -2121,7 +2121,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v64i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/horizontal-reduce-umin.ll b/test/CodeGen/X86/horizontal-reduce-umin.ll
index 9c2e3788b1c..749fe7ee4a4 100644
--- a/test/CodeGen/X86/horizontal-reduce-umin.ll
+++ b/test/CodeGen/X86/horizontal-reduce-umin.ll
@@ -15,7 +15,7 @@
define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v2i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -38,7 +38,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v2i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm0 = [0,2147483648,0,2147483648]
@@ -52,7 +52,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v2i64:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X86-AVX-NEXT: vpxor %xmm2, %xmm0, %xmm3
@@ -64,7 +64,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v2i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -85,7 +85,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v2i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
@@ -98,7 +98,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v2i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; X64-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
@@ -109,7 +109,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v2i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; X64-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
@@ -120,7 +120,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v2i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX512-NEXT: vpminuq %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovq %xmm0, %rax
@@ -134,7 +134,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v4i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -157,7 +157,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v4i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pminud %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -166,7 +166,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v4i32:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -175,7 +175,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v4i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -198,7 +198,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v4i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pminud %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -207,7 +207,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v4i32:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -226,7 +226,7 @@ define i32 @test_reduce_v4i32(<4 x i32> %a0) {
define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -260,21 +260,21 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -308,14 +308,14 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -335,7 +335,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: pminub %xmm0, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -351,7 +351,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -367,7 +367,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -381,7 +381,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: pminub %xmm0, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -397,7 +397,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -413,7 +413,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -447,7 +447,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v4i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -485,7 +485,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v4i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm2
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
; X86-SSE42-NEXT: movdqa %xmm2, %xmm4
@@ -506,7 +506,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v4i64:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X86-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
@@ -530,7 +530,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v4i64:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
@@ -548,7 +548,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v4i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
; X64-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -584,7 +584,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v4i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm2
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; X64-SSE42-NEXT: movdqa %xmm2, %xmm4
@@ -604,7 +604,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v4i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; X64-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
@@ -627,7 +627,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v4i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; X64-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
@@ -644,7 +644,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v4i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpminuq %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -664,7 +664,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -695,7 +695,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminud %xmm1, %xmm0
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pminud %xmm0, %xmm1
@@ -705,7 +705,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v8i32:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -717,7 +717,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v8i32:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -729,7 +729,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
; X64-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -760,7 +760,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminud %xmm1, %xmm0
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pminud %xmm0, %xmm1
@@ -770,7 +770,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i32:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -782,7 +782,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i32:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -794,7 +794,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -819,7 +819,7 @@ define i32 @test_reduce_v8i32(<8 x i32> %a0) {
define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -861,7 +861,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminuw %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
@@ -869,7 +869,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
@@ -879,7 +879,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v16i16:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
@@ -889,7 +889,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
; X64-SSE2-NEXT: pxor %xmm2, %xmm3
@@ -931,7 +931,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminuw %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
@@ -939,7 +939,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
@@ -949,7 +949,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i16:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
@@ -959,7 +959,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
@@ -985,7 +985,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v32i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE2-NEXT: pminub %xmm0, %xmm1
@@ -1002,7 +1002,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
; X86-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
@@ -1019,7 +1019,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1036,7 +1036,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v32i8:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1053,7 +1053,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v32i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE2-NEXT: pminub %xmm0, %xmm1
@@ -1070,7 +1070,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
; X64-SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
@@ -1087,7 +1087,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1104,7 +1104,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v32i8:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1121,7 +1121,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v32i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1161,7 +1161,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE2-LABEL: test_reduce_v8i64:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: subl $28, %esp
; X86-SSE2-NEXT: .cfi_def_cfa_offset 32
; X86-SSE2-NEXT: movdqa %xmm2, %xmm6
@@ -1234,7 +1234,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i64:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: movdqa %xmm0, %xmm5
; X86-SSE42-NEXT: movdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
; X86-SSE42-NEXT: pxor %xmm4, %xmm0
@@ -1267,7 +1267,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v8i64:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
; X86-AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1301,7 +1301,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v8i64:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; X86-AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm4
@@ -1323,7 +1323,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i64:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm5
; X64-SSE2-NEXT: pxor %xmm9, %xmm5
@@ -1389,7 +1389,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i64:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: movdqa %xmm0, %xmm5
; X64-SSE42-NEXT: movdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
; X64-SSE42-NEXT: pxor %xmm4, %xmm0
@@ -1421,7 +1421,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i64:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; X64-AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1454,7 +1454,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i64:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; X64-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; X64-AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm4
@@ -1475,7 +1475,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i64:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1500,7 +1500,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE2-LABEL: test_reduce_v16i32:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm6
; X86-SSE2-NEXT: pxor %xmm4, %xmm6
@@ -1547,7 +1547,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i32:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminud %xmm3, %xmm1
; X86-SSE42-NEXT: pminud %xmm2, %xmm0
; X86-SSE42-NEXT: pminud %xmm1, %xmm0
@@ -1559,7 +1559,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i32:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -1574,7 +1574,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v16i32:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
@@ -1587,7 +1587,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i32:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm6
; X64-SSE2-NEXT: pxor %xmm4, %xmm6
@@ -1634,7 +1634,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i32:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminud %xmm3, %xmm1
; X64-SSE42-NEXT: pminud %xmm2, %xmm0
; X64-SSE42-NEXT: pminud %xmm1, %xmm0
@@ -1646,7 +1646,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i32:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -1661,7 +1661,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i32:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
@@ -1674,7 +1674,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i32:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1704,7 +1704,7 @@ define i32 @test_reduce_v16i32(<16 x i32> %a0) {
define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-LABEL: test_reduce_v32i16:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm6
; X86-SSE2-NEXT: pxor %xmm4, %xmm6
@@ -1762,7 +1762,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminuw %xmm3, %xmm1
; X86-SSE42-NEXT: pminuw %xmm2, %xmm0
; X86-SSE42-NEXT: pminuw %xmm1, %xmm0
@@ -1772,7 +1772,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -1785,7 +1785,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v32i16:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
@@ -1796,7 +1796,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v32i16:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-SSE2-NEXT: movdqa %xmm0, %xmm6
; X64-SSE2-NEXT: pxor %xmm4, %xmm6
@@ -1854,7 +1854,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminuw %xmm3, %xmm1
; X64-SSE42-NEXT: pminuw %xmm2, %xmm0
; X64-SSE42-NEXT: pminuw %xmm1, %xmm0
@@ -1864,7 +1864,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -1877,7 +1877,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v32i16:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
@@ -1888,7 +1888,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v32i16:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1919,7 +1919,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-LABEL: test_reduce_v64i8:
-; X86-SSE2: ## BB#0:
+; X86-SSE2: ## %bb.0:
; X86-SSE2-NEXT: pminub %xmm3, %xmm1
; X86-SSE2-NEXT: pminub %xmm2, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
@@ -1938,7 +1938,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
-; X86-SSE42: ## BB#0:
+; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: pminub %xmm3, %xmm1
; X86-SSE42-NEXT: pminub %xmm2, %xmm0
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
@@ -1957,7 +1957,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
-; X86-AVX1: ## BB#0:
+; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X86-AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -1977,7 +1977,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_reduce_v64i8:
-; X86-AVX2: ## BB#0:
+; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
@@ -1995,7 +1995,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v64i8:
-; X64-SSE2: ## BB#0:
+; X64-SSE2: ## %bb.0:
; X64-SSE2-NEXT: pminub %xmm3, %xmm1
; X64-SSE2-NEXT: pminub %xmm2, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
@@ -2014,7 +2014,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
-; X64-SSE42: ## BB#0:
+; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: pminub %xmm3, %xmm1
; X64-SSE42-NEXT: pminub %xmm2, %xmm0
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
@@ -2033,7 +2033,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
-; X64-AVX1: ## BB#0:
+; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -2053,7 +2053,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v64i8:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
@@ -2071,7 +2071,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v64i8:
-; X64-AVX512: ## BB#0:
+; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; X64-AVX512-NEXT: vpminub %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/horizontal-shuffle.ll b/test/CodeGen/X86/horizontal-shuffle.ll
index c407a827a2e..70fc7fa4a1d 100644
--- a/test/CodeGen/X86/horizontal-shuffle.ll
+++ b/test/CodeGen/X86/horizontal-shuffle.ll
@@ -8,12 +8,12 @@
define <4 x float> @test_unpackl_fhadd_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
; X32-LABEL: test_unpackl_fhadd_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vhaddps %xmm2, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_fhadd_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vhaddps %xmm2, %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
@@ -24,12 +24,12 @@ define <4 x float> @test_unpackl_fhadd_128(<4 x float> %a0, <4 x float> %a1, <4
define <2 x double> @test_unpackh_fhadd_128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
; X32-LABEL: test_unpackh_fhadd_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vhaddpd %xmm3, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_fhadd_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vhaddpd %xmm3, %xmm1, %xmm0
; X64-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
@@ -40,12 +40,12 @@ define <2 x double> @test_unpackh_fhadd_128(<2 x double> %a0, <2 x double> %a1,
define <2 x double> @test_unpackl_fhsub_128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
; X32-LABEL: test_unpackl_fhsub_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vhsubpd %xmm2, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_fhsub_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vhsubpd %xmm2, %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
@@ -56,12 +56,12 @@ define <2 x double> @test_unpackl_fhsub_128(<2 x double> %a0, <2 x double> %a1,
define <4 x float> @test_unpackh_fhsub_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
; X32-LABEL: test_unpackh_fhsub_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vhsubps %xmm3, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_fhsub_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vhsubps %xmm3, %xmm1, %xmm0
; X64-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
@@ -72,12 +72,12 @@ define <4 x float> @test_unpackh_fhsub_128(<4 x float> %a0, <4 x float> %a1, <4
define <8 x i16> @test_unpackl_hadd_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; X32-LABEL: test_unpackl_hadd_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vphaddw %xmm2, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_hadd_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vphaddw %xmm2, %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -88,12 +88,12 @@ define <8 x i16> @test_unpackl_hadd_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>
define <4 x i32> @test_unpackh_hadd_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
; X32-LABEL: test_unpackh_hadd_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vphaddd %xmm3, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_hadd_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vphaddd %xmm3, %xmm1, %xmm0
; X64-NEXT: retq
%1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
@@ -104,12 +104,12 @@ define <4 x i32> @test_unpackh_hadd_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>
define <4 x i32> @test_unpackl_hsub_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
; X32-LABEL: test_unpackl_hsub_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vphsubd %xmm2, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_hsub_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vphsubd %xmm2, %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
@@ -120,12 +120,12 @@ define <4 x i32> @test_unpackl_hsub_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>
define <8 x i16> @test_unpackh_hsub_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; X32-LABEL: test_unpackh_hsub_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vphsubw %xmm3, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_hsub_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vphsubw %xmm3, %xmm1, %xmm0
; X64-NEXT: retq
%1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -136,12 +136,12 @@ define <8 x i16> @test_unpackh_hsub_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>
define <16 x i8> @test_unpackl_packss_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; X32-LABEL: test_unpackl_packss_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_packss_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -152,12 +152,12 @@ define <16 x i8> @test_unpackl_packss_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16
define <8 x i16> @test_unpackh_packss_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
; X32-LABEL: test_unpackh_packss_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpackssdw %xmm3, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_packss_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpackssdw %xmm3, %xmm1, %xmm0
; X64-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
@@ -168,12 +168,12 @@ define <8 x i16> @test_unpackh_packss_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32
define <8 x i16> @test_unpackl_packus_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
; X32-LABEL: test_unpackl_packus_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_packus_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
@@ -184,12 +184,12 @@ define <8 x i16> @test_unpackl_packus_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32
define <16 x i8> @test_unpackh_packus_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; X32-LABEL: test_unpackh_packus_128:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpackuswb %xmm3, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_packus_128:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpackuswb %xmm3, %xmm1, %xmm0
; X64-NEXT: retq
%1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -204,12 +204,12 @@ define <16 x i8> @test_unpackh_packus_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16
define <8 x float> @test_unpackl_fhadd_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> %a3) {
; X32-LABEL: test_unpackl_fhadd_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vhaddps %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_fhadd_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vhaddps %ymm2, %ymm0, %ymm0
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -220,12 +220,12 @@ define <8 x float> @test_unpackl_fhadd_256(<8 x float> %a0, <8 x float> %a1, <8
define <4 x double> @test_unpackh_fhadd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> %a3) {
; X32-LABEL: test_unpackh_fhadd_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vhaddpd %ymm3, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_fhadd_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vhaddpd %ymm3, %ymm1, %ymm0
; X64-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -236,12 +236,12 @@ define <4 x double> @test_unpackh_fhadd_256(<4 x double> %a0, <4 x double> %a1,
define <4 x double> @test_unpackl_fhsub_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> %a3) {
; X32-LABEL: test_unpackl_fhsub_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vhsubpd %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_fhsub_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vhsubpd %ymm2, %ymm0, %ymm0
; X64-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -252,12 +252,12 @@ define <4 x double> @test_unpackl_fhsub_256(<4 x double> %a0, <4 x double> %a1,
define <8 x float> @test_unpackh_fhsub_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> %a3) {
; X32-LABEL: test_unpackh_fhsub_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vhsubps %ymm3, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_fhsub_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vhsubps %ymm3, %ymm1, %ymm0
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -268,12 +268,12 @@ define <8 x float> @test_unpackh_fhsub_256(<8 x float> %a0, <8 x float> %a1, <8
define <16 x i16> @test_unpackl_hadd_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
; X32-LABEL: test_unpackl_hadd_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vphaddw %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_hadd_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vphaddw %ymm2, %ymm0, %ymm0
; X64-NEXT: retq
%1 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -284,12 +284,12 @@ define <16 x i16> @test_unpackl_hadd_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i
define <8 x i32> @test_unpackh_hadd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
; X32-LABEL: test_unpackh_hadd_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vphaddd %ymm3, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_hadd_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vphaddd %ymm3, %ymm1, %ymm0
; X64-NEXT: retq
%1 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -300,12 +300,12 @@ define <8 x i32> @test_unpackh_hadd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>
define <8 x i32> @test_unpackl_hsub_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
; X32-LABEL: test_unpackl_hsub_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vphsubd %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_hsub_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vphsubd %ymm2, %ymm0, %ymm0
; X64-NEXT: retq
%1 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -316,12 +316,12 @@ define <8 x i32> @test_unpackl_hsub_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>
define <16 x i16> @test_unpackh_hsub_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
; X32-LABEL: test_unpackh_hsub_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vphsubw %ymm3, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_hsub_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vphsubw %ymm3, %ymm1, %ymm0
; X64-NEXT: retq
%1 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -332,12 +332,12 @@ define <16 x i16> @test_unpackh_hsub_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i
define <32 x i8> @test_unpackl_packss_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
; X32-LABEL: test_unpackl_packss_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_packss_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
; X64-NEXT: retq
%1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
@@ -348,12 +348,12 @@ define <32 x i8> @test_unpackl_packss_256(<16 x i16> %a0, <16 x i16> %a1, <16 x
define <16 x i16> @test_unpackh_packss_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
; X32-LABEL: test_unpackh_packss_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpackssdw %ymm3, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_packss_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpackssdw %ymm3, %ymm1, %ymm0
; X64-NEXT: retq
%1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1)
@@ -364,12 +364,12 @@ define <16 x i16> @test_unpackh_packss_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3
define <16 x i16> @test_unpackl_packus_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
; X32-LABEL: test_unpackl_packus_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpackusdw %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackl_packus_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpackusdw %ymm2, %ymm0, %ymm0
; X64-NEXT: retq
%1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1)
@@ -380,12 +380,12 @@ define <16 x i16> @test_unpackl_packus_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3
define <32 x i8> @test_unpackh_packus_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
; X32-LABEL: test_unpackh_packus_256:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpacksswb %ymm3, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_unpackh_packus_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpacksswb %ymm3, %ymm1, %ymm0
; X64-NEXT: retq
%1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
diff --git a/test/CodeGen/X86/i256-add.ll b/test/CodeGen/X86/i256-add.ll
index 7b2656897e0..36d838a68cb 100644
--- a/test/CodeGen/X86/i256-add.ll
+++ b/test/CodeGen/X86/i256-add.ll
@@ -4,7 +4,7 @@
define void @add(i256* %p, i256* %q) nounwind {
; X32-LABEL: add:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
@@ -50,7 +50,7 @@ define void @add(i256* %p, i256* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: add:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq 16(%rdi), %rax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq 8(%rdi), %rdx
@@ -71,7 +71,7 @@ define void @add(i256* %p, i256* %q) nounwind {
}
define void @sub(i256* %p, i256* %q) nounwind {
; X32-LABEL: sub:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
@@ -114,7 +114,7 @@ define void @sub(i256* %p, i256* %q) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: sub:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq 16(%rdi), %rax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq 8(%rdi), %rdx
diff --git a/test/CodeGen/X86/i64-mem-copy.ll b/test/CodeGen/X86/i64-mem-copy.ll
index 7b1926da245..e14293797e8 100644
--- a/test/CodeGen/X86/i64-mem-copy.ll
+++ b/test/CodeGen/X86/i64-mem-copy.ll
@@ -7,13 +7,13 @@
define void @foo(i64* %x, i64* %y) {
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rsi), %rax
; X64-NEXT: movq %rax, (%rdi)
; X64-NEXT: retq
;
; X32-LABEL: foo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -29,13 +29,13 @@ define void @foo(i64* %x, i64* %y) {
define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, i64* %i) {
; X64-LABEL: store_i64_from_vector:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddw %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: retq
;
; X32-LABEL: store_i64_from_vector:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: paddw %xmm1, %xmm0
; X32-NEXT: movq %xmm0, (%eax)
@@ -49,7 +49,7 @@ define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, i64* %i) {
define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, i64* %i) {
; X32AVX-LABEL: store_i64_from_vector256:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X32AVX-NEXT: vextracti128 $1, %ymm0, %xmm0
diff --git a/test/CodeGen/X86/i64-to-float.ll b/test/CodeGen/X86/i64-to-float.ll
index 26553f5f352..0440b3d9575 100644
--- a/test/CodeGen/X86/i64-to-float.ll
+++ b/test/CodeGen/X86/i64-to-float.ll
@@ -8,27 +8,27 @@
define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_sitofp_2i64_2f64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_sitofp_2i64_2f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; X32-AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_sitofp_2i64_2f64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: mask_sitofp_2i64_2f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; X64-AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; X64-AVX-NEXT: retq
@@ -39,27 +39,27 @@ define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_uitofp_2i64_2f64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_uitofp_2i64_2f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; X32-AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_uitofp_2i64_2f64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: mask_uitofp_2i64_2f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; X64-AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; X64-AVX-NEXT: retq
@@ -70,14 +70,14 @@ define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_sitofp_4i64_4f32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_sitofp_4i64_4f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
@@ -86,14 +86,14 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_sitofp_4i64_4f32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: mask_sitofp_4i64_4f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -107,14 +107,14 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_uitofp_4i64_4f32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_uitofp_4i64_4f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
@@ -123,14 +123,14 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_uitofp_4i64_4f32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: mask_uitofp_4i64_4f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X64-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -144,7 +144,7 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X32-SSE-LABEL: clamp_sitofp_2i64_2f64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pushl %ebp
; X32-SSE-NEXT: movl %esp, %ebp
; X32-SSE-NEXT: andl $-8, %esp
@@ -194,7 +194,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: clamp_sitofp_2i64_2f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: pushl %ebp
; X32-AVX-NEXT: movl %esp, %ebp
; X32-AVX-NEXT: andl $-8, %esp
@@ -220,7 +220,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: clamp_sitofp_2i64_2f64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0]
; X64-SSE-NEXT: movdqa %xmm0, %xmm2
; X64-SSE-NEXT: pxor %xmm1, %xmm2
@@ -262,7 +262,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: clamp_sitofp_2i64_2f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551361,18446744073709551361]
; X64-AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; X64-AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/iabs.ll b/test/CodeGen/X86/iabs.ll
index 952c31c7814..32c3d8149ea 100644
--- a/test/CodeGen/X86/iabs.ll
+++ b/test/CodeGen/X86/iabs.ll
@@ -11,7 +11,7 @@
; rdar://10695237
define i8 @test_i8(i8 %a) nounwind {
; X86-LABEL: test_i8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: sarb $7, %cl
@@ -20,7 +20,7 @@ define i8 @test_i8(i8 %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: sarb $7, %al
; X64-NEXT: addb %al, %dil
@@ -35,7 +35,7 @@ define i8 @test_i8(i8 %a) nounwind {
define i16 @test_i16(i16 %a) nounwind {
; X86-NO-CMOV-LABEL: test_i16:
-; X86-NO-CMOV: # BB#0:
+; X86-NO-CMOV: # %bb.0:
; X86-NO-CMOV-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NO-CMOV-NEXT: movl %eax, %ecx
; X86-NO-CMOV-NEXT: sarw $15, %cx
@@ -45,7 +45,7 @@ define i16 @test_i16(i16 %a) nounwind {
; X86-NO-CMOV-NEXT: retl
;
; X86-CMOV-LABEL: test_i16:
-; X86-CMOV: # BB#0:
+; X86-CMOV: # %bb.0:
; X86-CMOV-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-CMOV-NEXT: movl %ecx, %eax
; X86-CMOV-NEXT: negw %ax
@@ -53,7 +53,7 @@ define i16 @test_i16(i16 %a) nounwind {
; X86-CMOV-NEXT: retl
;
; X64-LABEL: test_i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negw %ax
; X64-NEXT: cmovlw %di, %ax
@@ -66,7 +66,7 @@ define i16 @test_i16(i16 %a) nounwind {
define i32 @test_i32(i32 %a) nounwind {
; X86-NO-CMOV-LABEL: test_i32:
-; X86-NO-CMOV: # BB#0:
+; X86-NO-CMOV: # %bb.0:
; X86-NO-CMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-CMOV-NEXT: movl %eax, %ecx
; X86-NO-CMOV-NEXT: sarl $31, %ecx
@@ -75,7 +75,7 @@ define i32 @test_i32(i32 %a) nounwind {
; X86-NO-CMOV-NEXT: retl
;
; X86-CMOV-LABEL: test_i32:
-; X86-CMOV: # BB#0:
+; X86-CMOV: # %bb.0:
; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-CMOV-NEXT: movl %ecx, %eax
; X86-CMOV-NEXT: negl %eax
@@ -83,7 +83,7 @@ define i32 @test_i32(i32 %a) nounwind {
; X86-CMOV-NEXT: retl
;
; X64-LABEL: test_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negl %eax
; X64-NEXT: cmovll %edi, %eax
@@ -96,7 +96,7 @@ define i32 @test_i32(i32 %a) nounwind {
define i64 @test_i64(i64 %a) nounwind {
; X86-LABEL: test_i64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: sarl $31, %ecx
@@ -108,7 +108,7 @@ define i64 @test_i64(i64 %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: negq %rax
; X64-NEXT: cmovlq %rdi, %rax
diff --git a/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/test/CodeGen/X86/illegal-bitfield-loadstore.ll
index 39e11f2b96e..84a59a65052 100644
--- a/test/CodeGen/X86/illegal-bitfield-loadstore.ll
+++ b/test/CodeGen/X86/illegal-bitfield-loadstore.ll
@@ -4,7 +4,7 @@
define void @i24_or(i24* %a) {
; X86-LABEL: i24_or:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: movzbl 2(%ecx), %eax
@@ -16,7 +16,7 @@ define void @i24_or(i24* %a) {
; X86-NEXT: retl
;
; X64-LABEL: i24_or:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzbl 2(%rdi), %ecx
; X64-NEXT: movb %cl, 2(%rdi)
@@ -33,7 +33,7 @@ define void @i24_or(i24* %a) {
define void @i24_and_or(i24* %a) {
; X86-LABEL: i24_and_or:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: movzbl 2(%ecx), %eax
@@ -46,7 +46,7 @@ define void @i24_and_or(i24* %a) {
; X86-NEXT: retl
;
; X64-LABEL: i24_and_or:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzbl 2(%rdi), %ecx
; X64-NEXT: movb %cl, 2(%rdi)
@@ -65,7 +65,7 @@ define void @i24_and_or(i24* %a) {
define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
; X86-LABEL: i24_insert_bit:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -84,7 +84,7 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
; X86-NEXT: retl
;
; X64-LABEL: i24_insert_bit:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzbl 2(%rdi), %ecx
; X64-NEXT: movb %cl, 2(%rdi)
@@ -106,13 +106,13 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
define void @i56_or(i56* %a) {
; X86-LABEL: i56_or:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $384, (%eax) # imm = 0x180
; X86-NEXT: retl
;
; X64-LABEL: i56_or:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
@@ -135,7 +135,7 @@ define void @i56_or(i56* %a) {
define void @i56_and_or(i56* %a) {
; X86-LABEL: i56_and_or:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $384, %ecx # imm = 0x180
; X86-NEXT: orl (%eax), %ecx
@@ -144,7 +144,7 @@ define void @i56_and_or(i56* %a) {
; X86-NEXT: retl
;
; X64-LABEL: i56_and_or:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
@@ -170,7 +170,7 @@ define void @i56_and_or(i56* %a) {
define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
; X86-LABEL: i56_insert_bit:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: shll $13, %ecx
@@ -181,7 +181,7 @@ define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
; X86-NEXT: retl
;
; X64-LABEL: i56_insert_bit:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %esi, %eax
; X64-NEXT: movzwl 4(%rdi), %ecx
; X64-NEXT: movzbl 6(%rdi), %edx
diff --git a/test/CodeGen/X86/immediate_merging.ll b/test/CodeGen/X86/immediate_merging.ll
index 9c9e4830116..e1c29191498 100644
--- a/test/CodeGen/X86/immediate_merging.ll
+++ b/test/CodeGen/X86/immediate_merging.ll
@@ -15,7 +15,7 @@
; instructions.
define i32 @foo() optsize {
; X86-LABEL: foo:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl $1234, %eax # imm = 0x4D2
; X86-NEXT: movl %eax, a
; X86-NEXT: movl %eax, b
@@ -23,7 +23,7 @@ define i32 @foo() optsize {
; X86-NEXT: movl %eax, c
; X86-NEXT: cmpl %eax, e
; X86-NEXT: jne .LBB0_2
-; X86-NEXT: # BB#1: # %if.then
+; X86-NEXT: # %bb.1: # %if.then
; X86-NEXT: movl $1, x
; X86-NEXT: .LBB0_2: # %if.end
; X86-NEXT: movl $1234, f # imm = 0x4D2
@@ -34,7 +34,7 @@ define i32 @foo() optsize {
; X86-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl $1234, %eax # imm = 0x4D2
; X64-NEXT: movl %eax, {{.*}}(%rip)
; X64-NEXT: movl %eax, {{.*}}(%rip)
@@ -42,7 +42,7 @@ define i32 @foo() optsize {
; X64-NEXT: movl %eax, {{.*}}(%rip)
; X64-NEXT: cmpl %eax, {{.*}}(%rip)
; X64-NEXT: jne .LBB0_2
-; X64-NEXT: # BB#1: # %if.then
+; X64-NEXT: # %bb.1: # %if.then
; X64-NEXT: movl $1, {{.*}}(%rip)
; X64-NEXT: .LBB0_2: # %if.end
; X64-NEXT: movl $1234, {{.*}}(%rip) # imm = 0x4D2
@@ -76,14 +76,14 @@ if.end: ; preds = %if.then, %entry
; Test -O2 to make sure that all immediates get pulled in to their users.
define i32 @foo2() {
; X86-LABEL: foo2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl $1234, a # imm = 0x4D2
; X86-NEXT: movl $1234, b # imm = 0x4D2
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: foo2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl $1234, {{.*}}(%rip) # imm = 0x4D2
; X64-NEXT: movl $1234, {{.*}}(%rip) # imm = 0x4D2
; X64-NEXT: xorl %eax, %eax
@@ -103,7 +103,7 @@ declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #1
; sure we don't directly store the immediates.
define void @foomemset() optsize {
; X86-LABEL: foomemset:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl $555819297, %eax # imm = 0x21212121
; X86-NEXT: movl %eax, AA+20
; X86-NEXT: movl %eax, AA+16
@@ -114,7 +114,7 @@ define void @foomemset() optsize {
; X86-NEXT: retl
;
; X64-LABEL: foomemset:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movabsq $2387225703656530209, %rax # imm = 0x2121212121212121
; X64-NEXT: movq %rax, AA+{{.*}}(%rip)
; X64-NEXT: movq %rax, AA+{{.*}}(%rip)
diff --git a/test/CodeGen/X86/immediate_merging64.ll b/test/CodeGen/X86/immediate_merging64.ll
index 4bc9d4af644..57f5b3b79d9 100644
--- a/test/CodeGen/X86/immediate_merging64.ll
+++ b/test/CodeGen/X86/immediate_merging64.ll
@@ -8,7 +8,7 @@
; optimizing for code size.
define i1 @imm_multiple_users(i64 %a, i64* %b) optsize {
; CHECK-LABEL: imm_multiple_users:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq $-1, %rax
; CHECK-NEXT: movq %rax, (%rsi)
; CHECK-NEXT: cmpq %rax, %rdi
@@ -26,7 +26,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
; code size.
define void @memset_zero(i8* noalias nocapture %D) optsize {
; CHECK-LABEL: memset_zero:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: movq %rax, 7(%rdi)
; CHECK-NEXT: movq %rax, (%rdi)
diff --git a/test/CodeGen/X86/implicit-null-checks.mir b/test/CodeGen/X86/implicit-null-checks.mir
index 889c4834de2..31361ac27e3 100644
--- a/test/CodeGen/X86/implicit-null-checks.mir
+++ b/test/CodeGen/X86/implicit-null-checks.mir
@@ -391,15 +391,15 @@ liveins:
- { reg: '%esi' }
# CHECK: bb.0.entry:
# CHECK: %eax = MOV32ri 2200000
-# CHECK-NEXT: %eax = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, %eax, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK-NEXT: %eax = FAULTING_OP 1, %bb.3, {{[0-9]+}}, %eax, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT: JMP_1 %bb.1
body: |
bb.0.entry:
liveins: %esi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3.is_null, implicit %eflags
+ JE_1 %bb.3, implicit %eflags
bb.1.not_null:
liveins: %esi, %rdi
@@ -407,7 +407,7 @@ body: |
%eax = MOV32ri 2200000
%eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
CMP32rr killed %eax, killed %esi, implicit-def %eflags
- JE_1 %bb.4.ret_100, implicit %eflags
+ JE_1 %bb.4, implicit %eflags
bb.2.ret_200:
%eax = MOV32ri 200
@@ -433,7 +433,7 @@ liveins:
# CHECK: bb.0.entry:
# CHECK: %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr)
# CHECK-NEXT: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.3.is_null, implicit %eflags
+# CHECK-NEXT: JE_1 %bb.3, implicit %eflags
body: |
bb.0.entry:
@@ -441,7 +441,7 @@ body: |
%eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr)
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3.is_null, implicit %eflags
+ JE_1 %bb.3, implicit %eflags
bb.1.not_null:
liveins: %esi, %rdi
@@ -449,7 +449,7 @@ body: |
%eax = MOV32ri 2200000
%eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
CMP32rr killed %eax, killed %esi, implicit-def %eflags
- JE_1 %bb.4.ret_100, implicit %eflags
+ JE_1 %bb.4, implicit %eflags
bb.2.ret_200:
@@ -475,14 +475,14 @@ liveins:
- { reg: '%esi' }
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.3.is_null, implicit %eflags
+# CHECK-NEXT: JE_1 %bb.3, implicit %eflags
body: |
bb.0.entry:
liveins: %esi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3.is_null, implicit %eflags
+ JE_1 %bb.3, implicit %eflags
bb.1.not_null:
liveins: %esi, %rdi
@@ -491,7 +491,7 @@ body: |
%eax = ADD32ri killed %eax, 100, implicit-def dead %eflags
%eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
CMP32rr killed %eax, killed %esi, implicit-def %eflags
- JE_1 %bb.4.ret_100, implicit %eflags
+ JE_1 %bb.4, implicit %eflags
bb.2.ret_200:
%eax = MOV32ri 200
@@ -516,14 +516,14 @@ liveins:
- { reg: '%rsi' }
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.3.is_null, implicit %eflags
+# CHECK-NEXT: JE_1 %bb.3, implicit %eflags
body: |
bb.0.entry:
liveins: %rsi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3.is_null, implicit %eflags
+ JE_1 %bb.3, implicit %eflags
bb.1.not_null:
liveins: %rsi, %rdi
@@ -531,7 +531,7 @@ body: |
%rdi = MOV64ri 5000
%rdi = AND64rm killed %rdi, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
CMP64rr killed %rdi, killed %rsi, implicit-def %eflags
- JE_1 %bb.4.ret_100, implicit %eflags
+ JE_1 %bb.4, implicit %eflags
bb.2.ret_200:
%eax = MOV32ri 200
@@ -556,14 +556,14 @@ liveins:
- { reg: '%rsi' }
# CHECK: bb.0.entry:
# CHECK: %rbx = MOV64rr %rdx
-# CHECK-NEXT: %rbx = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, %rbx, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT: %rbx = FAULTING_OP 1, %bb.3, {{[0-9]+}}, %rbx, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
body: |
bb.0.entry:
liveins: %rsi, %rdi, %rdx
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3.is_null, implicit %eflags
+ JE_1 %bb.3, implicit %eflags
bb.1.not_null:
liveins: %rsi, %rdi, %rdx
@@ -572,7 +572,7 @@ body: |
%rbx = AND64rm killed %rbx, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
%rdx = MOV64ri 0
CMP64rr killed %rbx, killed %rsi, implicit-def %eflags
- JE_1 %bb.4.ret_100, implicit %eflags
+ JE_1 %bb.4, implicit %eflags
bb.2.ret_200:
%eax = MOV32ri 200
@@ -611,7 +611,7 @@ body: |
CFI_INSTRUCTION offset %rbx, -16
%rbx = MOV64rr %rdi
TEST64rr %rbx, %rbx, implicit-def %eflags
- JE_1 %bb.2.leave, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.stay:
liveins: %rbx
@@ -648,7 +648,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -668,8 +668,8 @@ body: |
name: use_alternate_load_op
# CHECK-LABEL: name: use_alternate_load_op
# CHECK: bb.0.entry:
-# CHECK: %rax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: %rax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg
+# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
@@ -682,7 +682,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -701,8 +701,8 @@ body: |
name: imp_null_check_gep_load_with_use_dep
# CHECK-LABEL: name: imp_null_check_gep_load_with_use_dep
# CHECK: bb.0.entry:
-# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x)
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: %eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x)
+# CHECK-NEXT: JMP_1 %bb.1
alignment: 4
tracksRegLiveness: true
liveins:
@@ -713,7 +713,7 @@ body: |
liveins: %rsi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.1.is_null, implicit %eflags
+ JE_1 %bb.1, implicit %eflags
bb.2.not_null:
liveins: %rdi, %rsi
@@ -733,8 +733,8 @@ name: imp_null_check_load_with_base_sep
# CHECK-LABEL: name: imp_null_check_load_with_base_sep
# CHECK: bb.0.entry:
# CHECK: %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
-# CHECK-NEXT: %esi = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK-NEXT: %esi = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags
+# CHECK-NEXT: JMP_1 %bb.1
alignment: 4
tracksRegLiveness: true
liveins:
@@ -745,7 +745,7 @@ body: |
liveins: %rsi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.1.is_null, implicit %eflags
+ JE_1 %bb.1, implicit %eflags
bb.2.not_null:
liveins: %rdi, %rsi
@@ -764,8 +764,8 @@ body: |
name: inc_store
# CHECK-LABEL: name: inc_store
# CHECK: bb.0.entry:
-# CHECK: %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %rsi
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %rsi
+# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
@@ -778,7 +778,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -794,8 +794,8 @@ body: |
name: inc_store_plus_offset
# CHECK-LABEL: inc_store_plus_offset
# CHECK: bb.0.entry:
-# CHECK: %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %rsi
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %rsi
+# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
@@ -808,7 +808,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -825,8 +825,8 @@ name: inc_store_with_dep
# CHECK-LABEL: inc_store_with_dep
# CHECK: bb.0.entry:
# CHECK: %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
-# CHECK-NEXT: %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK-NEXT: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
+# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
@@ -839,7 +839,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -857,7 +857,7 @@ name: inc_store_with_dep_in_null
# CHECK-LABEL: inc_store_with_dep_in_null
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -870,7 +870,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -892,7 +892,7 @@ name: inc_store_with_volatile
# CHECK-LABEL: inc_store_with_volatile
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -905,7 +905,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -922,7 +922,7 @@ name: inc_store_with_two_dep
# CHECK-LABEL: inc_store_with_two_dep
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -935,7 +935,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -954,7 +954,7 @@ name: inc_store_with_redefined_base
# CHECK-LABEL: inc_store_with_redefined_base
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -967,7 +967,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -984,8 +984,8 @@ body: |
name: inc_store_with_reused_base
# CHECK-LABEL: inc_store_with_reused_base
# CHECK: bb.0.entry:
-# CHECK: %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
+# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
@@ -998,7 +998,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -1017,7 +1017,7 @@ name: inc_store_across_call
# CHECK-LABEL: inc_store_across_call
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rbx, %rbx, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -1037,7 +1037,7 @@ body: |
CFI_INSTRUCTION offset %rbx, -16
%rbx = MOV64rr killed %rdi
TEST64rr %rbx, %rbx, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rbx
@@ -1059,7 +1059,7 @@ name: inc_store_with_dep_in_dep
# CHECK-LABEL: inc_store_with_dep_in_dep
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -1072,7 +1072,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -1092,7 +1092,7 @@ name: inc_store_with_load_over_store
# CHECK-LABEL: inc_store_with_load_over_store
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -1105,7 +1105,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -1124,7 +1124,7 @@ name: inc_store_with_store_over_load
# CHECK-LABEL: inc_store_with_store_over_load
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -1137,7 +1137,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -1156,7 +1156,7 @@ name: inc_store_with_store_over_store
# CHECK-LABEL: inc_store_with_store_over_store
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -1169,7 +1169,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -1186,8 +1186,8 @@ body: |
name: inc_store_with_load_and_store
# CHECK-LABEL: inc_store_with_load_and_store
# CHECK: bb.0.entry:
-# CHECK: %noreg = FAULTING_OP 2, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %esi, implicit-def %eflags
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: %noreg = FAULTING_OP 2, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %esi, implicit-def %eflags
+# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
@@ -1200,7 +1200,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -1217,8 +1217,8 @@ body: |
name: inc_store_and_load_no_alias
# CHECK-LABEL: inc_store_and_load_no_alias
# CHECK: bb.0.entry:
-# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: %eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
+# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
@@ -1231,7 +1231,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -1250,7 +1250,7 @@ name: inc_store_and_load_alias
# CHECK-LABEL: inc_store_and_load_alias
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -1263,7 +1263,7 @@ body: |
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
@@ -1282,7 +1282,7 @@ name: inc_spill_dep
# CHECK-LABEL: inc_spill_dep
# CHECK: bb.0.entry:
# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -1299,7 +1299,7 @@ body: |
%rsp = frame-setup SUB64ri8 %rsp, 8, implicit-def dead %eflags
MOV32mr %rsp, 1, %noreg, 0, %noreg, %esi :: (store 4 into %stack.0)
TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2.is_null, implicit killed %eflags
+ JE_1 %bb.2, implicit killed %eflags
bb.1.not_null:
liveins: %rdi, %rsi
diff --git a/test/CodeGen/X86/imul-lea-2.ll b/test/CodeGen/X86/imul-lea-2.ll
index a633e453c88..d1de25d02ef 100644
--- a/test/CodeGen/X86/imul-lea-2.ll
+++ b/test/CodeGen/X86/imul-lea-2.ll
@@ -4,7 +4,7 @@
define i64 @t1(i64 %a) nounwind readnone {
; CHECK-LABEL: t1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: leaq (%rdi,%rdi,8), %rax
; CHECK-NEXT: leaq (%rax,%rax,8), %rax
; CHECK-NEXT: retq
@@ -15,7 +15,7 @@ entry:
define i64 @t2(i64 %a) nounwind readnone {
; CHECK-LABEL: t2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shlq $3, %rdi
; CHECK-NEXT: leaq (%rdi,%rdi,4), %rax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/imul-lea.ll b/test/CodeGen/X86/imul-lea.ll
index 48490074ac3..777222ec0bf 100644
--- a/test/CodeGen/X86/imul-lea.ll
+++ b/test/CodeGen/X86/imul-lea.ll
@@ -5,7 +5,7 @@ declare i32 @foo()
define i32 @test() {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: calll foo
; CHECK-NEXT: leal (%eax,%eax,8), %eax
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/imul.ll b/test/CodeGen/X86/imul.ll
index 7aa698e0570..a3322aff205 100644
--- a/test/CodeGen/X86/imul.ll
+++ b/test/CodeGen/X86/imul.ll
@@ -174,14 +174,14 @@ define i64 @mul18446744073709551615_64(i64 %A) {
define i32 @test(i32 %a) {
; X64-LABEL: test:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: retq
;
; X86-LABEL: test:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
@@ -194,7 +194,7 @@ entry:
define i32 @test1(i32 %a) {
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
@@ -202,7 +202,7 @@ define i32 @test1(i32 %a) {
; X64-NEXT: retq
;
; X86-LABEL: test1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
@@ -217,7 +217,7 @@ entry:
define i32 @test2(i32 %a) {
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
@@ -225,7 +225,7 @@ define i32 @test2(i32 %a) {
; X64-NEXT: retq
;
; X86-LABEL: test2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
@@ -238,7 +238,7 @@ entry:
define i32 @test3(i32 %a) {
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
@@ -247,7 +247,7 @@ define i32 @test3(i32 %a) {
; X64-NEXT: retq
;
; X86-LABEL: test3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
@@ -261,14 +261,14 @@ entry:
define i64 @test4(i64 %a) {
; X64-LABEL: test4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: shlq $5, %rax
; X64-NEXT: subq %rdi, %rax
; X64-NEXT: retq
;
; X86-LABEL: test4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll $5, %ecx
@@ -284,7 +284,7 @@ entry:
define i64 @test5(i64 %a) {
; X64-LABEL: test5:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: shlq $5, %rax
; X64-NEXT: subq %rdi, %rax
@@ -292,7 +292,7 @@ define i64 @test5(i64 %a) {
; X64-NEXT: retq
;
; X86-LABEL: test5:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -316,14 +316,14 @@ entry:
define i64 @test6(i64 %a) {
; X64-LABEL: test6:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: shlq $5, %rax
; X64-NEXT: leaq (%rax,%rdi), %rax
; X64-NEXT: retq
;
; X86-LABEL: test6:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll $5, %ecx
@@ -339,7 +339,7 @@ entry:
define i64 @test7(i64 %a) {
; X64-LABEL: test7:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: shlq $5, %rax
; X64-NEXT: leaq (%rax,%rdi), %rax
@@ -347,7 +347,7 @@ define i64 @test7(i64 %a) {
; X64-NEXT: retq
;
; X86-LABEL: test7:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -370,13 +370,13 @@ entry:
define i64 @testOverflow(i64 %a) {
; X64-LABEL: testOverflow:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
; X64-NEXT: imulq %rdi, %rax
; X64-NEXT: retq
;
; X86-LABEL: testOverflow:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
diff --git a/test/CodeGen/X86/inline-0bh.ll b/test/CodeGen/X86/inline-0bh.ll
index ceef395aa14..b1e7e57e0b2 100644
--- a/test/CodeGen/X86/inline-0bh.ll
+++ b/test/CodeGen/X86/inline-0bh.ll
@@ -4,7 +4,7 @@
; Function Attrs: noinline nounwind
define i32 @PR31007() {
; CHECK-LABEL: PR31007:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: #APP
; CHECK: addb $11, %al
; CHECK: #NO_APP
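In the Darwin-targeted tests that follow, the same rename appears with a doubled marker, ## %bb.0: instead of # %bb.0:, since ## is evidently the assembly comment string for those triples; the substitution is otherwise identical:

  ; CHECK-LABEL: test1:
  ; CHECK:       ## %bb.0:
  ; CHECK-NEXT:  ## InlineAsm Start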
diff --git a/test/CodeGen/X86/inline-asm-fpstack.ll b/test/CodeGen/X86/inline-asm-fpstack.ll
index 61870d8d417..b07f830f9b1 100644
--- a/test/CodeGen/X86/inline-asm-fpstack.ll
+++ b/test/CodeGen/X86/inline-asm-fpstack.ll
@@ -4,7 +4,7 @@
; There should be no stack manipulations between the inline asm and ret.
define x86_fp80 @test1() {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fld0
; CHECK-NEXT: ## InlineAsm End
@@ -15,7 +15,7 @@ define x86_fp80 @test1() {
define double @test2() {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fld0
; CHECK-NEXT: ## InlineAsm End
@@ -28,7 +28,7 @@ define double @test2() {
; Asm consumes stack, nothing should be popped.
define void @test3(x86_fp80 %X) {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: frob
@@ -40,7 +40,7 @@ define void @test3(x86_fp80 %X) {
define void @test4(double %X) {
; CHECK-LABEL: test4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: frob
@@ -54,7 +54,7 @@ define void @test4(double %X) {
; The fadd can be done in xmm or x87 regs - we don't test that.
define void @test5(double %X) {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fadds LCPI4_0
; CHECK-NEXT: ## InlineAsm Start
@@ -68,7 +68,7 @@ define void @test5(double %X) {
define void @test6(double %A, double %B, double %C, double %D, double %E) nounwind {
; CHECK-LABEL: test6:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
@@ -113,7 +113,7 @@ entry:
; inline asm.
define void @testPR4185() {
; CHECK-LABEL: testPR4185:
-; CHECK: ## BB#0: ## %return
+; CHECK: ## %bb.0: ## %return
; CHECK-NEXT: flds LCPI6_0
; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: ## InlineAsm Start
@@ -135,7 +135,7 @@ return:
; A valid alternative would be to remat the constant pool load before each inline asm.
define void @testPR4185b() {
; CHECK-LABEL: testPR4185b:
-; CHECK: ## BB#0: ## %return
+; CHECK: ## %bb.0: ## %return
; CHECK-NEXT: flds LCPI7_0
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fistl %st(0)
@@ -154,7 +154,7 @@ return:
; The return value from ceil must be duped before being consumed by asm.
define void @testPR4459(x86_fp80 %a) {
; CHECK-LABEL: testPR4459:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
@@ -182,7 +182,7 @@ declare x86_fp80 @ceil(x86_fp80)
; Set up call to test.
define void @testPR4484(x86_fp80 %a) {
; CHECK-LABEL: testPR4484:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
@@ -206,7 +206,7 @@ entry:
; PR4485
define void @testPR4485(x86_fp80* %a) {
; CHECK-LABEL: testPR4485:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: fldt (%eax)
; CHECK-NEXT: flds LCPI10_0
@@ -247,7 +247,7 @@ entry:
; }
define void @fist1(x86_fp80 %x, i32* %p) nounwind ssp {
; CHECK-LABEL: fist1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: ## InlineAsm Start
@@ -271,7 +271,7 @@ entry:
; }
define x86_fp80 @fist2(x86_fp80 %x, i32* %p) nounwind ssp {
; CHECK-LABEL: fist2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: ## InlineAsm Start
@@ -291,7 +291,7 @@ entry:
; }
define void @fucomp1(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
; CHECK-LABEL: fucomp1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fxch %st(1)
@@ -318,7 +318,7 @@ entry:
;
define void @fucomp2(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
; CHECK-LABEL: fucomp2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fxch %st(1)
@@ -335,7 +335,7 @@ entry:
define void @fucomp3(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
; CHECK-LABEL: fucomp3:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
; CHECK-NEXT: fxch %st(1)
@@ -353,7 +353,7 @@ entry:
%complex = type { float, float }
define float @sincos1(float %x) nounwind ssp {
; CHECK-LABEL: sincos1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: sincos
@@ -370,7 +370,7 @@ entry:
; Same thing, swapped output operands.
define float @sincos2(float %x) nounwind ssp {
; CHECK-LABEL: sincos2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: sincos
@@ -391,7 +391,7 @@ entry:
; Discard both results.
define float @sincos3(float %x) nounwind ssp {
; CHECK-LABEL: sincos3:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: ## InlineAsm Start
@@ -416,7 +416,7 @@ entry:
; Pass the same value in two fixed stack slots.
define i32 @PR10602() nounwind ssp {
; CHECK-LABEL: PR10602:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: flds LCPI19_0
; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: fxch %st(1)
@@ -450,13 +450,13 @@ entry:
; Function Attrs: ssp
define void @test_live_st(i32 %a1) {
; CHECK-LABEL: test_live_st:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: fldt (%eax)
; CHECK-NEXT: cmpl $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: jne LBB20_2
-; CHECK-NEXT: ## BB#1: ## %sw.bb4.i
+; CHECK-NEXT: ## %bb.1: ## %sw.bb4.i
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: frndint
; CHECK-NEXT: ## InlineAsm End
@@ -502,7 +502,7 @@ return:
; Check that x87 stackifier is correctly rewriting FP registers to ST registers.
define double @test_operand_rewrite() {
; CHECK-LABEL: test_operand_rewrite:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: foo %st(0), %st(1)
; CHECK-NEXT: ## InlineAsm End
diff --git a/test/CodeGen/X86/inline-sse.ll b/test/CodeGen/X86/inline-sse.ll
index 08819b85829..ba6d4e9015f 100644
--- a/test/CodeGen/X86/inline-sse.ll
+++ b/test/CodeGen/X86/inline-sse.ll
@@ -7,7 +7,7 @@
define void @nop() nounwind {
; X32-LABEL: nop:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -20,7 +20,7 @@ define void @nop() nounwind {
; X32-NEXT: retl
;
; X64-LABEL: nop:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: #APP
; X64-NEXT: #NO_APP
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
diff --git a/test/CodeGen/X86/insert-into-constant-vector.ll b/test/CodeGen/X86/insert-into-constant-vector.ll
index c804b1bde09..03ce34dace7 100644
--- a/test/CodeGen/X86/insert-into-constant-vector.ll
+++ b/test/CodeGen/X86/insert-into-constant-vector.ll
@@ -10,7 +10,7 @@
define <16 x i8> @elt0_v16i8(i8 %x) {
; X32SSE2-LABEL: elt0_v16i8:
-; X32SSE2: # BB#0:
+; X32SSE2: # %bb.0:
; X32SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32SSE2-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; X32SSE2-NEXT: andnps %xmm1, %xmm0
@@ -18,7 +18,7 @@ define <16 x i8> @elt0_v16i8(i8 %x) {
; X32SSE2-NEXT: retl
;
; X64SSE2-LABEL: elt0_v16i8:
-; X64SSE2: # BB#0:
+; X64SSE2: # %bb.0:
; X64SSE2-NEXT: movd %edi, %xmm1
; X64SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; X64SSE2-NEXT: pandn %xmm1, %xmm0
@@ -26,25 +26,25 @@ define <16 x i8> @elt0_v16i8(i8 %x) {
; X64SSE2-NEXT: retq
;
; X32SSE4-LABEL: elt0_v16i8:
-; X32SSE4: # BB#0:
+; X32SSE4: # %bb.0:
; X32SSE4-NEXT: movdqa {{.*#+}} xmm0 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
; X32SSE4-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm0
; X32SSE4-NEXT: retl
;
; X64SSE4-LABEL: elt0_v16i8:
-; X64SSE4: # BB#0:
+; X64SSE4: # %bb.0:
; X64SSE4-NEXT: movdqa {{.*#+}} xmm0 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
; X64SSE4-NEXT: pinsrb $0, %edi, %xmm0
; X64SSE4-NEXT: retq
;
; X32AVX-LABEL: elt0_v16i8:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
; X32AVX-NEXT: vpinsrb $0, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32AVX-NEXT: retl
;
; X64AVX-LABEL: elt0_v16i8:
-; X64AVX: # BB#0:
+; X64AVX: # %bb.0:
; X64AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
; X64AVX-NEXT: vpinsrb $0, %edi, %xmm0, %xmm0
; X64AVX-NEXT: retq
@@ -54,25 +54,25 @@ define <16 x i8> @elt0_v16i8(i8 %x) {
define <8 x i16> @elt5_v8i16(i16 %x) {
; X32SSE-LABEL: elt5_v8i16:
-; X32SSE: # BB#0:
+; X32SSE: # %bb.0:
; X32SSE-NEXT: movdqa {{.*#+}} xmm0 = <42,1,2,3,4,u,6,7>
; X32SSE-NEXT: pinsrw $5, {{[0-9]+}}(%esp), %xmm0
; X32SSE-NEXT: retl
;
; X64SSE-LABEL: elt5_v8i16:
-; X64SSE: # BB#0:
+; X64SSE: # %bb.0:
; X64SSE-NEXT: movdqa {{.*#+}} xmm0 = <42,1,2,3,4,u,6,7>
; X64SSE-NEXT: pinsrw $5, %edi, %xmm0
; X64SSE-NEXT: retq
;
; X32AVX-LABEL: elt5_v8i16:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <42,1,2,3,4,u,6,7>
; X32AVX-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32AVX-NEXT: retl
;
; X64AVX-LABEL: elt5_v8i16:
-; X64AVX: # BB#0:
+; X64AVX: # %bb.0:
; X64AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <42,1,2,3,4,u,6,7>
; X64AVX-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0
; X64AVX-NEXT: retq
@@ -82,7 +82,7 @@ define <8 x i16> @elt5_v8i16(i16 %x) {
define <4 x i32> @elt3_v4i32(i32 %x) {
; X32SSE2-LABEL: elt3_v4i32:
-; X32SSE2: # BB#0:
+; X32SSE2: # %bb.0:
; X32SSE2-NEXT: movaps {{.*#+}} xmm0 = <42,1,2,u>
; X32SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
@@ -90,7 +90,7 @@ define <4 x i32> @elt3_v4i32(i32 %x) {
; X32SSE2-NEXT: retl
;
; X64SSE2-LABEL: elt3_v4i32:
-; X64SSE2: # BB#0:
+; X64SSE2: # %bb.0:
; X64SSE2-NEXT: movd %edi, %xmm1
; X64SSE2-NEXT: movaps {{.*#+}} xmm0 = <42,1,2,u>
; X64SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
@@ -98,25 +98,25 @@ define <4 x i32> @elt3_v4i32(i32 %x) {
; X64SSE2-NEXT: retq
;
; X32SSE4-LABEL: elt3_v4i32:
-; X32SSE4: # BB#0:
+; X32SSE4: # %bb.0:
; X32SSE4-NEXT: movdqa {{.*#+}} xmm0 = <42,1,2,u>
; X32SSE4-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0
; X32SSE4-NEXT: retl
;
; X64SSE4-LABEL: elt3_v4i32:
-; X64SSE4: # BB#0:
+; X64SSE4: # %bb.0:
; X64SSE4-NEXT: movdqa {{.*#+}} xmm0 = <42,1,2,u>
; X64SSE4-NEXT: pinsrd $3, %edi, %xmm0
; X64SSE4-NEXT: retq
;
; X32AVX-LABEL: elt3_v4i32:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <42,1,2,u>
; X32AVX-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32AVX-NEXT: retl
;
; X64AVX-LABEL: elt3_v4i32:
-; X64AVX: # BB#0:
+; X64AVX: # %bb.0:
; X64AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <42,1,2,u>
; X64AVX-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0
; X64AVX-NEXT: retq
@@ -126,7 +126,7 @@ define <4 x i32> @elt3_v4i32(i32 %x) {
define <2 x i64> @elt0_v2i64(i64 %x) {
; X32SSE-LABEL: elt0_v2i64:
-; X32SSE: # BB#0:
+; X32SSE: # %bb.0:
; X32SSE-NEXT: movl $1, %eax
; X32SSE-NEXT: movd %eax, %xmm1
; X32SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
@@ -134,20 +134,20 @@ define <2 x i64> @elt0_v2i64(i64 %x) {
; X32SSE-NEXT: retl
;
; X64SSE2-LABEL: elt0_v2i64:
-; X64SSE2: # BB#0:
+; X64SSE2: # %bb.0:
; X64SSE2-NEXT: movq %rdi, %xmm1
; X64SSE2-NEXT: movapd {{.*#+}} xmm0 = <u,1>
; X64SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X64SSE2-NEXT: retq
;
; X64SSE4-LABEL: elt0_v2i64:
-; X64SSE4: # BB#0:
+; X64SSE4: # %bb.0:
; X64SSE4-NEXT: movdqa {{.*#+}} xmm0 = <u,1>
; X64SSE4-NEXT: pinsrq $0, %rdi, %xmm0
; X64SSE4-NEXT: retq
;
; X32AVX-LABEL: elt0_v2i64:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: movl $1, %eax
; X32AVX-NEXT: vmovd %eax, %xmm0
; X32AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -155,7 +155,7 @@ define <2 x i64> @elt0_v2i64(i64 %x) {
; X32AVX-NEXT: retl
;
; X64AVX-LABEL: elt0_v2i64:
-; X64AVX: # BB#0:
+; X64AVX: # %bb.0:
; X64AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <u,1>
; X64AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm0
; X64AVX-NEXT: retq
@@ -165,7 +165,7 @@ define <2 x i64> @elt0_v2i64(i64 %x) {
define <4 x float> @elt1_v4f32(float %x) {
; X32SSE2-LABEL: elt1_v4f32:
-; X32SSE2: # BB#0:
+; X32SSE2: # %bb.0:
; X32SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32SSE2-NEXT: movaps {{.*#+}} xmm1 = <42,u,2,3>
; X32SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
@@ -173,33 +173,33 @@ define <4 x float> @elt1_v4f32(float %x) {
; X32SSE2-NEXT: retl
;
; X64SSE2-LABEL: elt1_v4f32:
-; X64SSE2: # BB#0:
+; X64SSE2: # %bb.0:
; X64SSE2-NEXT: movaps {{.*#+}} xmm1 = <42,u,2,3>
; X64SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; X64SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; X64SSE2-NEXT: retq
;
; X32SSE4-LABEL: elt1_v4f32:
-; X32SSE4: # BB#0:
+; X32SSE4: # %bb.0:
; X32SSE4-NEXT: movaps {{.*#+}} xmm0 = <42,u,2,3>
; X32SSE4-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X32SSE4-NEXT: retl
;
; X64SSE4-LABEL: elt1_v4f32:
-; X64SSE4: # BB#0:
+; X64SSE4: # %bb.0:
; X64SSE4-NEXT: movaps {{.*#+}} xmm1 = <42,u,2,3>
; X64SSE4-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
; X64SSE4-NEXT: movaps %xmm1, %xmm0
; X64SSE4-NEXT: retq
;
; X32AVX-LABEL: elt1_v4f32:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: vmovaps {{.*#+}} xmm0 = <42,u,2,3>
; X32AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X32AVX-NEXT: retl
;
; X64AVX-LABEL: elt1_v4f32:
-; X64AVX: # BB#0:
+; X64AVX: # %bb.0:
; X64AVX-NEXT: vmovaps {{.*#+}} xmm1 = <42,u,2,3>
; X64AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; X64AVX-NEXT: retq
@@ -209,26 +209,26 @@ define <4 x float> @elt1_v4f32(float %x) {
define <2 x double> @elt1_v2f64(double %x) {
; X32SSE-LABEL: elt1_v2f64:
-; X32SSE: # BB#0:
+; X32SSE: # %bb.0:
; X32SSE-NEXT: movapd {{.*#+}} xmm0 = <42,u>
; X32SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32SSE-NEXT: retl
;
; X64SSE-LABEL: elt1_v2f64:
-; X64SSE: # BB#0:
+; X64SSE: # %bb.0:
; X64SSE-NEXT: movaps {{.*#+}} xmm1 = <42,u>
; X64SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64SSE-NEXT: movaps %xmm1, %xmm0
; X64SSE-NEXT: retq
;
; X32AVX-LABEL: elt1_v2f64:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: vmovapd {{.*#+}} xmm0 = <42,u>
; X32AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32AVX-NEXT: retl
;
; X64AVX-LABEL: elt1_v2f64:
-; X64AVX: # BB#0:
+; X64AVX: # %bb.0:
; X64AVX-NEXT: vmovaps {{.*#+}} xmm1 = <42,u>
; X64AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64AVX-NEXT: retq
@@ -238,7 +238,7 @@ define <2 x double> @elt1_v2f64(double %x) {
define <8 x i32> @elt7_v8i32(i32 %x) {
; X32SSE2-LABEL: elt7_v8i32:
-; X32SSE2: # BB#0:
+; X32SSE2: # %bb.0:
; X32SSE2-NEXT: movaps {{.*#+}} xmm1 = <4,5,6,u>
; X32SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
@@ -247,7 +247,7 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
; X32SSE2-NEXT: retl
;
; X64SSE2-LABEL: elt7_v8i32:
-; X64SSE2: # BB#0:
+; X64SSE2: # %bb.0:
; X64SSE2-NEXT: movd %edi, %xmm0
; X64SSE2-NEXT: movaps {{.*#+}} xmm1 = <4,5,6,u>
; X64SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
@@ -256,21 +256,21 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
; X64SSE2-NEXT: retq
;
; X32SSE4-LABEL: elt7_v8i32:
-; X32SSE4: # BB#0:
+; X32SSE4: # %bb.0:
; X32SSE4-NEXT: movdqa {{.*#+}} xmm1 = <4,5,6,u>
; X32SSE4-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm1
; X32SSE4-NEXT: movaps {{.*#+}} xmm0 = [42,1,2,3]
; X32SSE4-NEXT: retl
;
; X64SSE4-LABEL: elt7_v8i32:
-; X64SSE4: # BB#0:
+; X64SSE4: # %bb.0:
; X64SSE4-NEXT: movdqa {{.*#+}} xmm1 = <4,5,6,u>
; X64SSE4-NEXT: pinsrd $3, %edi, %xmm1
; X64SSE4-NEXT: movaps {{.*#+}} xmm0 = [42,1,2,3]
; X64SSE4-NEXT: retq
;
; X32AVX-LABEL: elt7_v8i32:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: vmovdqa {{.*#+}} ymm0 = <42,1,2,3,4,5,6,u>
; X32AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32AVX-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
@@ -278,7 +278,7 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
; X32AVX-NEXT: retl
;
; X64AVX-LABEL: elt7_v8i32:
-; X64AVX: # BB#0:
+; X64AVX: # %bb.0:
; X64AVX-NEXT: vmovdqa {{.*#+}} ymm0 = <42,1,2,3,4,5,6,u>
; X64AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64AVX-NEXT: vpinsrd $3, %edi, %xmm1, %xmm1
@@ -290,7 +290,7 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
define <8 x float> @elt6_v8f32(float %x) {
; X32SSE2-LABEL: elt6_v8f32:
-; X32SSE2: # BB#0:
+; X32SSE2: # %bb.0:
; X32SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32SSE2-NEXT: movaps {{.*#+}} xmm1 = <4,5,u,7>
; X32SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
@@ -299,7 +299,7 @@ define <8 x float> @elt6_v8f32(float %x) {
; X32SSE2-NEXT: retl
;
; X64SSE2-LABEL: elt6_v8f32:
-; X64SSE2: # BB#0:
+; X64SSE2: # %bb.0:
; X64SSE2-NEXT: movaps {{.*#+}} xmm1 = <4,5,u,7>
; X64SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; X64SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -307,21 +307,21 @@ define <8 x float> @elt6_v8f32(float %x) {
; X64SSE2-NEXT: retq
;
; X32SSE4-LABEL: elt6_v8f32:
-; X32SSE4: # BB#0:
+; X32SSE4: # %bb.0:
; X32SSE4-NEXT: movaps {{.*#+}} xmm1 = <4,5,u,7>
; X32SSE4-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
; X32SSE4-NEXT: movaps {{.*#+}} xmm0 = [4.200000e+01,1.000000e+00,2.000000e+00,3.000000e+00]
; X32SSE4-NEXT: retl
;
; X64SSE4-LABEL: elt6_v8f32:
-; X64SSE4: # BB#0:
+; X64SSE4: # %bb.0:
; X64SSE4-NEXT: movaps {{.*#+}} xmm1 = <4,5,u,7>
; X64SSE4-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
; X64SSE4-NEXT: movaps {{.*#+}} xmm0 = [4.200000e+01,1.000000e+00,2.000000e+00,3.000000e+00]
; X64SSE4-NEXT: retq
;
; X32AVX-LABEL: elt6_v8f32:
-; X32AVX: # BB#0:
+; X32AVX: # %bb.0:
; X32AVX-NEXT: vmovaps {{.*#+}} ymm0 = <42,1,2,3,4,5,u,7>
; X32AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
@@ -329,7 +329,7 @@ define <8 x float> @elt6_v8f32(float %x) {
; X32AVX-NEXT: retl
;
; X64AVX-LABEL: elt6_v8f32:
-; X64AVX: # BB#0:
+; X64AVX: # %bb.0:
; X64AVX-NEXT: vmovaps {{.*#+}} ymm1 = <42,1,2,3,4,5,u,7>
; X64AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; X64AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1],xmm0[0],xmm2[3]
@@ -341,7 +341,7 @@ define <8 x float> @elt6_v8f32(float %x) {
define <8 x i64> @elt5_v8i64(i64 %x) {
; X32SSE-LABEL: elt5_v8i64:
-; X32SSE: # BB#0:
+; X32SSE: # %bb.0:
; X32SSE-NEXT: movl $4, %eax
; X32SSE-NEXT: movd %eax, %xmm2
; X32SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
@@ -352,7 +352,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X32SSE-NEXT: retl
;
; X64SSE2-LABEL: elt5_v8i64:
-; X64SSE2: # BB#0:
+; X64SSE2: # %bb.0:
; X64SSE2-NEXT: movq %rdi, %xmm0
; X64SSE2-NEXT: movdqa {{.*#+}} xmm2 = <4,u>
; X64SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
@@ -362,7 +362,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X64SSE2-NEXT: retq
;
; X64SSE4-LABEL: elt5_v8i64:
-; X64SSE4: # BB#0:
+; X64SSE4: # %bb.0:
; X64SSE4-NEXT: movdqa {{.*#+}} xmm2 = <4,u>
; X64SSE4-NEXT: pinsrq $1, %rdi, %xmm2
; X64SSE4-NEXT: movaps {{.*#+}} xmm0 = [42,1]
@@ -371,7 +371,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X64SSE4-NEXT: retq
;
; X32AVX2-LABEL: elt5_v8i64:
-; X32AVX2: # BB#0:
+; X32AVX2: # %bb.0:
; X32AVX2-NEXT: movl $4, %eax
; X32AVX2-NEXT: vmovd %eax, %xmm0
; X32AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -381,7 +381,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X32AVX2-NEXT: retl
;
; X64AVX2-LABEL: elt5_v8i64:
-; X64AVX2: # BB#0:
+; X64AVX2: # %bb.0:
; X64AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = <4,u,6,7>
; X64AVX2-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm1
; X64AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
@@ -389,7 +389,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X64AVX2-NEXT: retq
;
; X32AVX512F-LABEL: elt5_v8i64:
-; X32AVX512F: # BB#0:
+; X32AVX512F: # %bb.0:
; X32AVX512F-NEXT: vmovdqa {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0]
; X32AVX512F-NEXT: movl $4, %eax
; X32AVX512F-NEXT: vmovd %eax, %xmm1
@@ -400,7 +400,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X32AVX512F-NEXT: retl
;
; X64AVX512F-LABEL: elt5_v8i64:
-; X64AVX512F: # BB#0:
+; X64AVX512F: # %bb.0:
; X64AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <42,1,2,3,4,u,6,7>
; X64AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; X64AVX512F-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
@@ -412,7 +412,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
define <8 x double> @elt1_v8f64(double %x) {
; X32SSE-LABEL: elt1_v8f64:
-; X32SSE: # BB#0:
+; X32SSE: # %bb.0:
; X32SSE-NEXT: movapd {{.*#+}} xmm0 = <42,u>
; X32SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32SSE-NEXT: movaps {{.*#+}} xmm1 = [2.000000e+00,3.000000e+00]
@@ -421,7 +421,7 @@ define <8 x double> @elt1_v8f64(double %x) {
; X32SSE-NEXT: retl
;
; X64SSE-LABEL: elt1_v8f64:
-; X64SSE: # BB#0:
+; X64SSE: # %bb.0:
; X64SSE-NEXT: movaps {{.*#+}} xmm4 = <42,u>
; X64SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; X64SSE-NEXT: movaps {{.*#+}} xmm1 = [2.000000e+00,3.000000e+00]
@@ -431,7 +431,7 @@ define <8 x double> @elt1_v8f64(double %x) {
; X64SSE-NEXT: retq
;
; X32AVX2-LABEL: elt1_v8f64:
-; X32AVX2: # BB#0:
+; X32AVX2: # %bb.0:
; X32AVX2-NEXT: vmovapd {{.*#+}} ymm0 = <42,u,2,3>
; X32AVX2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm0[0],mem[0]
; X32AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
@@ -439,7 +439,7 @@ define <8 x double> @elt1_v8f64(double %x) {
; X32AVX2-NEXT: retl
;
; X64AVX2-LABEL: elt1_v8f64:
-; X64AVX2: # BB#0:
+; X64AVX2: # %bb.0:
; X64AVX2-NEXT: vmovapd {{.*#+}} ymm1 = <42,u,2,3>
; X64AVX2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
@@ -447,14 +447,14 @@ define <8 x double> @elt1_v8f64(double %x) {
; X64AVX2-NEXT: retq
;
; X32AVX512F-LABEL: elt1_v8f64:
-; X32AVX512F: # BB#0:
+; X32AVX512F: # %bb.0:
; X32AVX512F-NEXT: vmovapd {{.*#+}} zmm0 = <42,u,2,3,4,5,6,7>
; X32AVX512F-NEXT: vmovhpd {{.*#+}} xmm1 = xmm0[0],mem[0]
; X32AVX512F-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; X32AVX512F-NEXT: retl
;
; X64AVX512F-LABEL: elt1_v8f64:
-; X64AVX512F: # BB#0:
+; X64AVX512F: # %bb.0:
; X64AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <42,u,2,3,4,5,6,7>
; X64AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64AVX512F-NEXT: vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
diff --git a/test/CodeGen/X86/insertelement-duplicates.ll b/test/CodeGen/X86/insertelement-duplicates.ll
index 8356bd462db..2f32c5a2e6b 100644
--- a/test/CodeGen/X86/insertelement-duplicates.ll
+++ b/test/CodeGen/X86/insertelement-duplicates.ll
@@ -6,7 +6,7 @@
define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %dest) nounwind noinline {
; SSE-32-LABEL: PR15298:
-; SSE-32: # BB#0: # %L.entry
+; SSE-32: # %bb.0: # %L.entry
; SSE-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-32-NEXT: movaps 304(%ecx), %xmm0
@@ -18,7 +18,7 @@ define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %des
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: PR15298:
-; SSE-64: # BB#0: # %L.entry
+; SSE-64: # %bb.0: # %L.entry
; SSE-64-NEXT: movaps 304(%rdi), %xmm0
; SSE-64-NEXT: xorps %xmm1, %xmm1
; SSE-64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,1]
@@ -28,7 +28,7 @@ define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %des
; SSE-64-NEXT: retq
;
; AVX-32-LABEL: PR15298:
-; AVX-32: # BB#0: # %L.entry
+; AVX-32: # %bb.0: # %L.entry
; AVX-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX-32-NEXT: vbroadcastss 304(%ecx), %xmm0
@@ -39,7 +39,7 @@ define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %des
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: PR15298:
-; AVX-64: # BB#0: # %L.entry
+; AVX-64: # %bb.0: # %L.entry
; AVX-64-NEXT: vbroadcastss 304(%rdi), %xmm0
; AVX-64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6,7]
diff --git a/test/CodeGen/X86/insertelement-ones.ll b/test/CodeGen/X86/insertelement-ones.ll
index d63459d0257..ceb3217b7cf 100644
--- a/test/CodeGen/X86/insertelement-ones.ll
+++ b/test/CodeGen/X86/insertelement-ones.ll
@@ -10,40 +10,40 @@
define <2 x i64> @insert_v2i64_x1(<2 x i64> %a) {
; SSE2-LABEL: insert_v2i64_x1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v2i64_x1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v2i64_x1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v2i64_x1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v2i64_x1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v2i64_x1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: insert_v2i64_x1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX512-NEXT: retq
@@ -53,41 +53,41 @@ define <2 x i64> @insert_v2i64_x1(<2 x i64> %a) {
define <4 x i64> @insert_v4i64_01x3(<4 x i64> %a) {
; SSE2-LABEL: insert_v4i64_01x3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v4i64_01x3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v4i64_01x3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v4i64_01x3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v4i64_01x3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v4i64_01x3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
;
; AVX512-LABEL: insert_v4i64_01x3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX512-NEXT: retq
@@ -97,7 +97,7 @@ define <4 x i64> @insert_v4i64_01x3(<4 x i64> %a) {
define <4 x i32> @insert_v4i32_01x3(<4 x i32> %a) {
; SSE2-LABEL: insert_v4i32_01x3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movl $-1, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
@@ -105,7 +105,7 @@ define <4 x i32> @insert_v4i32_01x3(<4 x i32> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v4i32_01x3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movl $-1, %eax
; SSE3-NEXT: movd %eax, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
@@ -113,7 +113,7 @@ define <4 x i32> @insert_v4i32_01x3(<4 x i32> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v4i32_01x3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movl $-1, %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
@@ -121,25 +121,25 @@ define <4 x i32> @insert_v4i32_01x3(<4 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v4i32_01x3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v4i32_01x3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v4i32_01x3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: insert_v4i32_01x3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX512-NEXT: retq
@@ -149,7 +149,7 @@ define <4 x i32> @insert_v4i32_01x3(<4 x i32> %a) {
define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
; SSE2-LABEL: insert_v8i32_x12345x7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT: movl $-1, %eax
@@ -159,7 +159,7 @@ define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v8i32_x12345x7:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE3-NEXT: movl $-1, %eax
@@ -169,7 +169,7 @@ define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v8i32_x12345x7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSSE3-NEXT: movl $-1, %eax
@@ -179,27 +179,27 @@ define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v8i32_x12345x7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v8i32_x12345x7:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v8i32_x12345x7:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX2-NEXT: retq
;
; AVX512-LABEL: insert_v8i32_x12345x7:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX512-NEXT: retq
@@ -210,34 +210,34 @@ define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
define <8 x i16> @insert_v8i16_x12345x7(<8 x i16> %a) {
; SSE2-LABEL: insert_v8i16_x12345x7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movl $65535, %eax # imm = 0xFFFF
; SSE2-NEXT: pinsrw $0, %eax, %xmm0
; SSE2-NEXT: pinsrw $6, %eax, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v8i16_x12345x7:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movl $65535, %eax # imm = 0xFFFF
; SSE3-NEXT: pinsrw $0, %eax, %xmm0
; SSE3-NEXT: pinsrw $6, %eax, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v8i16_x12345x7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movl $65535, %eax # imm = 0xFFFF
; SSSE3-NEXT: pinsrw $0, %eax, %xmm0
; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v8i16_x12345x7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v8i16_x12345x7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; AVX-NEXT: retq
@@ -248,7 +248,7 @@ define <8 x i16> @insert_v8i16_x12345x7(<8 x i16> %a) {
define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
; SSE2-LABEL: insert_v16i16_x12345x789ABCDEx:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movl $65535, %eax # imm = 0xFFFF
; SSE2-NEXT: pinsrw $0, %eax, %xmm0
; SSE2-NEXT: pinsrw $6, %eax, %xmm0
@@ -256,7 +256,7 @@ define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v16i16_x12345x789ABCDEx:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movl $65535, %eax # imm = 0xFFFF
; SSE3-NEXT: pinsrw $0, %eax, %xmm0
; SSE3-NEXT: pinsrw $6, %eax, %xmm0
@@ -264,7 +264,7 @@ define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v16i16_x12345x789ABCDEx:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movl $65535, %eax # imm = 0xFFFF
; SSSE3-NEXT: pinsrw $0, %eax, %xmm0
; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
@@ -272,14 +272,14 @@ define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v16i16_x12345x789ABCDEx:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v16i16_x12345x789ABCDEx:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
@@ -289,7 +289,7 @@ define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v16i16_x12345x789ABCDEx:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
@@ -300,7 +300,7 @@ define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: insert_v16i16_x12345x789ABCDEx:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
@@ -311,7 +311,7 @@ define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: insert_v16i16_x12345x789ABCDEx:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VL-NEXT: movw $1, %ax
; AVX512VL-NEXT: kmovd %eax, %k1
@@ -331,7 +331,7 @@ define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
define <16 x i8> @insert_v16i8_x123456789ABCDEx(<16 x i8> %a) {
; SSE2-LABEL: insert_v16i8_x123456789ABCDEx:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: movl $255, %eax
@@ -344,7 +344,7 @@ define <16 x i8> @insert_v16i8_x123456789ABCDEx(<16 x i8> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v16i8_x123456789ABCDEx:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE3-NEXT: pand %xmm1, %xmm0
; SSE3-NEXT: movl $255, %eax
@@ -357,7 +357,7 @@ define <16 x i8> @insert_v16i8_x123456789ABCDEx(<16 x i8> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v16i8_x123456789ABCDEx:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; SSSE3-NEXT: movl $255, %eax
; SSSE3-NEXT: movd %eax, %xmm1
@@ -370,14 +370,14 @@ define <16 x i8> @insert_v16i8_x123456789ABCDEx(<16 x i8> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v16i8_x123456789ABCDEx:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movl $255, %eax
; SSE41-NEXT: pinsrb $0, %eax, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v16i8_x123456789ABCDEx:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movl $255, %eax
; AVX-NEXT: vpinsrb $0, %eax, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
@@ -389,7 +389,7 @@ define <16 x i8> @insert_v16i8_x123456789ABCDEx(<16 x i8> %a) {
define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
; SSE2-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: movl $255, %eax
@@ -411,7 +411,7 @@ define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE3-NEXT: pand %xmm2, %xmm0
; SSE3-NEXT: movl $255, %eax
@@ -433,7 +433,7 @@ define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; SSSE3-NEXT: movl $255, %eax
; SSSE3-NEXT: movd %eax, %xmm2
@@ -453,7 +453,7 @@ define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movl $255, %eax
; SSE41-NEXT: pinsrb $0, %eax, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
@@ -462,7 +462,7 @@ define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $255, %eax
; AVX1-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
@@ -474,7 +474,7 @@ define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movl $255, %eax
; AVX2-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
@@ -486,7 +486,7 @@ define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: movl $255, %eax
; AVX512-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
; AVX512-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/insertelement-shuffle.ll b/test/CodeGen/X86/insertelement-shuffle.ll
index fb01e18cd71..705ceba9487 100644
--- a/test/CodeGen/X86/insertelement-shuffle.ll
+++ b/test/CodeGen/X86/insertelement-shuffle.ll
@@ -6,7 +6,7 @@
define <8 x float> @insert_subvector_256(i16 %x0, i16 %x1, <8 x float> %v) nounwind {
; X32_AVX256-LABEL: insert_subvector_256:
-; X32_AVX256: # BB#0:
+; X32_AVX256: # %bb.0:
; X32_AVX256-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32_AVX256-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; X32_AVX256-NEXT: vpbroadcastd %xmm1, %xmm1
@@ -14,7 +14,7 @@ define <8 x float> @insert_subvector_256(i16 %x0, i16 %x1, <8 x float> %v) nounw
; X32_AVX256-NEXT: retl
;
; X64_AVX256-LABEL: insert_subvector_256:
-; X64_AVX256: # BB#0:
+; X64_AVX256: # %bb.0:
; X64_AVX256-NEXT: vmovd %edi, %xmm1
; X64_AVX256-NEXT: vpinsrw $1, %esi, %xmm1, %xmm1
; X64_AVX256-NEXT: vpbroadcastd %xmm1, %xmm1
@@ -22,7 +22,7 @@ define <8 x float> @insert_subvector_256(i16 %x0, i16 %x1, <8 x float> %v) nounw
; X64_AVX256-NEXT: retq
;
; X32_AVX512-LABEL: insert_subvector_256:
-; X32_AVX512: # BB#0:
+; X32_AVX512: # %bb.0:
; X32_AVX512-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32_AVX512-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; X32_AVX512-NEXT: vpbroadcastd %xmm1, %xmm1
@@ -30,7 +30,7 @@ define <8 x float> @insert_subvector_256(i16 %x0, i16 %x1, <8 x float> %v) nounw
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: insert_subvector_256:
-; X64_AVX512: # BB#0:
+; X64_AVX512: # %bb.0:
; X64_AVX512-NEXT: vmovd %edi, %xmm1
; X64_AVX512-NEXT: vpinsrw $1, %esi, %xmm1, %xmm1
; X64_AVX512-NEXT: vpbroadcastd %xmm1, %xmm1
@@ -45,7 +45,7 @@ define <8 x float> @insert_subvector_256(i16 %x0, i16 %x1, <8 x float> %v) nounw
define <8 x i64> @insert_subvector_512(i32 %x0, i32 %x1, <8 x i64> %v) nounwind {
; X32_AVX256-LABEL: insert_subvector_512:
-; X32_AVX256: # BB#0:
+; X32_AVX256: # %bb.0:
; X32_AVX256-NEXT: pushl %ebp
; X32_AVX256-NEXT: movl %esp, %ebp
; X32_AVX256-NEXT: andl $-8, %esp
@@ -61,7 +61,7 @@ define <8 x i64> @insert_subvector_512(i32 %x0, i32 %x1, <8 x i64> %v) nounwind
; X32_AVX256-NEXT: retl
;
; X64_AVX256-LABEL: insert_subvector_512:
-; X64_AVX256: # BB#0:
+; X64_AVX256: # %bb.0:
; X64_AVX256-NEXT: vmovd %edi, %xmm2
; X64_AVX256-NEXT: vpinsrd $1, %esi, %xmm2, %xmm2
; X64_AVX256-NEXT: vmovq %xmm2, %rax
@@ -71,14 +71,14 @@ define <8 x i64> @insert_subvector_512(i32 %x0, i32 %x1, <8 x i64> %v) nounwind
; X64_AVX256-NEXT: retq
;
; X32_AVX512-LABEL: insert_subvector_512:
-; X32_AVX512: # BB#0:
+; X32_AVX512: # %bb.0:
; X32_AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; X32_AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,1,0,8,0,3,0,4,0,5,0,6,0,7,0]
; X32_AVX512-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: insert_subvector_512:
-; X64_AVX512: # BB#0:
+; X64_AVX512: # %bb.0:
; X64_AVX512-NEXT: vmovd %edi, %xmm1
; X64_AVX512-NEXT: vpinsrd $1, %esi, %xmm1, %xmm1
; X64_AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,8,3,4,5,6,7]
@@ -96,7 +96,7 @@ define <8 x i64> @insert_subvector_512(i32 %x0, i32 %x1, <8 x i64> %v) nounwind
define <8 x i64> @insert_subvector_into_undef(i32 %x0, i32 %x1) nounwind {
; X32_AVX256-LABEL: insert_subvector_into_undef:
-; X32_AVX256: # BB#0:
+; X32_AVX256: # %bb.0:
; X32_AVX256-NEXT: pushl %ebp
; X32_AVX256-NEXT: movl %esp, %ebp
; X32_AVX256-NEXT: andl $-8, %esp
@@ -116,7 +116,7 @@ define <8 x i64> @insert_subvector_into_undef(i32 %x0, i32 %x1) nounwind {
; X32_AVX256-NEXT: retl
;
; X64_AVX256-LABEL: insert_subvector_into_undef:
-; X64_AVX256: # BB#0:
+; X64_AVX256: # %bb.0:
; X64_AVX256-NEXT: vmovd %edi, %xmm0
; X64_AVX256-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; X64_AVX256-NEXT: vpbroadcastq %xmm0, %ymm0
@@ -124,13 +124,13 @@ define <8 x i64> @insert_subvector_into_undef(i32 %x0, i32 %x1) nounwind {
; X64_AVX256-NEXT: retq
;
; X32_AVX512-LABEL: insert_subvector_into_undef:
-; X32_AVX512: # BB#0:
+; X32_AVX512: # %bb.0:
; X32_AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32_AVX512-NEXT: vbroadcastsd %xmm0, %zmm0
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: insert_subvector_into_undef:
-; X64_AVX512: # BB#0:
+; X64_AVX512: # %bb.0:
; X64_AVX512-NEXT: vmovd %edi, %xmm0
; X64_AVX512-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; X64_AVX512-NEXT: vpbroadcastq %xmm0, %zmm0
diff --git a/test/CodeGen/X86/insertelement-zero.ll b/test/CodeGen/X86/insertelement-zero.ll
index dff7a69dc50..0a26f6cd016 100644
--- a/test/CodeGen/X86/insertelement-zero.ll
+++ b/test/CodeGen/X86/insertelement-zero.ll
@@ -8,31 +8,31 @@
define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
; SSE2-LABEL: insert_v2f64_z1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v2f64_z1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorpd %xmm1, %xmm1
; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v2f64_z1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorpd %xmm1, %xmm1
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v2f64_z1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorpd %xmm1, %xmm1
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v2f64_z1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: retq
@@ -42,35 +42,35 @@ define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
; SSE2-LABEL: insert_v4f64_0zz3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE2-NEXT: xorpd %xmm2, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v4f64_0zz3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE3-NEXT: xorpd %xmm2, %xmm2
; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v4f64_0zz3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSSE3-NEXT: xorpd %xmm2, %xmm2
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v4f64_0zz3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE41-NEXT: xorpd %xmm2, %xmm2
; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v4f64_0zz3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; AVX-NEXT: retq
@@ -81,37 +81,37 @@ define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
define <2 x i64> @insert_v2i64_z1(<2 x i64> %a) {
; SSE2-LABEL: insert_v2i64_z1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v2i64_z1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorpd %xmm1, %xmm1
; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v2i64_z1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorpd %xmm1, %xmm1
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v2i64_z1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v2i64_z1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v2i64_z1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
@@ -121,37 +121,37 @@ define <2 x i64> @insert_v2i64_z1(<2 x i64> %a) {
define <4 x i64> @insert_v4i64_01z3(<4 x i64> %a) {
; SSE2-LABEL: insert_v4i64_01z3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm2, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v4i64_01z3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorpd %xmm2, %xmm2
; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v4i64_01z3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorpd %xmm2, %xmm2
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v4i64_01z3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v4i64_01z3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v4i64_01z3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
@@ -161,34 +161,34 @@ define <4 x i64> @insert_v4i64_01z3(<4 x i64> %a) {
define <4 x float> @insert_v4f32_01z3(<4 x float> %a) {
; SSE2-LABEL: insert_v4f32_01z3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v4f32_01z3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v4f32_01z3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v4f32_01z3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v4f32_01z3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX-NEXT: retq
@@ -198,7 +198,7 @@ define <4 x float> @insert_v4f32_01z3(<4 x float> %a) {
define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
; SSE2-LABEL: insert_v8f32_z12345z7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
@@ -206,7 +206,7 @@ define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v8f32_z12345z7:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm2, %xmm2
; SSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
@@ -214,7 +214,7 @@ define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v8f32_z12345z7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
@@ -222,14 +222,14 @@ define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v8f32_z12345z7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm2, %xmm2
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v8f32_z12345z7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX-NEXT: retq
@@ -240,40 +240,40 @@ define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
define <4 x i32> @insert_v4i32_01z3(<4 x i32> %a) {
; SSE2-LABEL: insert_v4i32_01z3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v4i32_01z3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v4i32_01z3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v4i32_01z3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v4i32_01z3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v4i32_01z3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX2-NEXT: retq
@@ -283,7 +283,7 @@ define <4 x i32> @insert_v4i32_01z3(<4 x i32> %a) {
define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
; SSE2-LABEL: insert_v8i32_z12345z7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT: xorps %xmm2, %xmm2
@@ -292,7 +292,7 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v8i32_z12345z7:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm2, %xmm2
; SSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE3-NEXT: xorps %xmm2, %xmm2
@@ -301,7 +301,7 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v8i32_z12345z7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSSE3-NEXT: xorps %xmm2, %xmm2
@@ -310,14 +310,14 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v8i32_z12345z7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v8i32_z12345z7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX-NEXT: retq
@@ -328,34 +328,34 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
; SSE2-LABEL: insert_v8i16_z12345z7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: pinsrw $0, %eax, %xmm0
; SSE2-NEXT: pinsrw $6, %eax, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v8i16_z12345z7:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorl %eax, %eax
; SSE3-NEXT: pinsrw $0, %eax, %xmm0
; SSE3-NEXT: pinsrw $6, %eax, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v8i16_z12345z7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorl %eax, %eax
; SSSE3-NEXT: pinsrw $0, %eax, %xmm0
; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v8i16_z12345z7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v8i16_z12345z7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; AVX-NEXT: retq
@@ -366,7 +366,7 @@ define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
define <16 x i16> @insert_v16i16_z12345z789ABCDEz(<16 x i16> %a) {
; SSE2-LABEL: insert_v16i16_z12345z789ABCDEz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: pinsrw $0, %eax, %xmm0
; SSE2-NEXT: pinsrw $6, %eax, %xmm0
@@ -374,7 +374,7 @@ define <16 x i16> @insert_v16i16_z12345z789ABCDEz(<16 x i16> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v16i16_z12345z789ABCDEz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorl %eax, %eax
; SSE3-NEXT: pinsrw $0, %eax, %xmm0
; SSE3-NEXT: pinsrw $6, %eax, %xmm0
@@ -382,7 +382,7 @@ define <16 x i16> @insert_v16i16_z12345z789ABCDEz(<16 x i16> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v16i16_z12345z789ABCDEz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorl %eax, %eax
; SSSE3-NEXT: pinsrw $0, %eax, %xmm0
; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
@@ -390,14 +390,14 @@ define <16 x i16> @insert_v16i16_z12345z789ABCDEz(<16 x i16> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v16i16_z12345z789ABCDEz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v16i16_z12345z789ABCDEz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%1 = insertelement <16 x i16> %a, i16 0, i32 0
@@ -408,29 +408,29 @@ define <16 x i16> @insert_v16i16_z12345z789ABCDEz(<16 x i16> %a) {
define <16 x i8> @insert_v16i8_z123456789ABCDEz(<16 x i8> %a) {
; SSE2-LABEL: insert_v16i8_z123456789ABCDEz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v16i8_z123456789ABCDEz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v16i8_z123456789ABCDEz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v16i8_z123456789ABCDEz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: pinsrb $0, %eax, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v16i8_z123456789ABCDEz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vpinsrb $0, %eax, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
@@ -442,25 +442,25 @@ define <16 x i8> @insert_v16i8_z123456789ABCDEz(<16 x i8> %a) {
define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
; SSE2-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE3-NEXT: andps {{.*}}(%rip), %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: pinsrb $0, %eax, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
@@ -469,7 +469,7 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: xorl %eax, %eax
; AVX1-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
@@ -480,7 +480,7 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/insertps-combine.ll b/test/CodeGen/X86/insertps-combine.ll
index 044ad072153..22a978eca07 100644
--- a/test/CodeGen/X86/insertps-combine.ll
+++ b/test/CodeGen/X86/insertps-combine.ll
@@ -5,12 +5,12 @@
define <4 x float> @shuffle_v4f32_0z27(<4 x float> %x, <4 x float> %a) {
; SSE-LABEL: shuffle_v4f32_0z27:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z27:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
@@ -23,12 +23,12 @@ define <4 x float> @shuffle_v4f32_0z27(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %xyzw, <4 x float> %abcd) {
; SSE-LABEL: shuffle_v4f32_0zz4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0zz4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %xyzw, i32 0
@@ -41,12 +41,12 @@ define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %xyzw, <4 x float> %abcd) {
define <4 x float> @shuffle_v4f32_0z24(<4 x float> %xyzw, <4 x float> %abcd) {
; SSE-LABEL: shuffle_v4f32_0z24:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z24:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[0]
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %xyzw, i32 0
@@ -59,12 +59,12 @@ define <4 x float> @shuffle_v4f32_0z24(<4 x float> %xyzw, <4 x float> %abcd) {
define <4 x float> @shuffle_v4f32_0zz0(float %a) {
; SSE-LABEL: shuffle_v4f32_0zz0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0zz0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
; AVX-NEXT: retq
%vecinit = insertelement <4 x float> undef, float %a, i32 0
@@ -76,12 +76,12 @@ define <4 x float> @shuffle_v4f32_0zz0(float %a) {
define <4 x float> @shuffle_v4f32_0z6z(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: shuffle_v4f32_0z6z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z6z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 0
@@ -95,13 +95,13 @@ define <4 x float> @shuffle_v4f32_0z6z(<4 x float> %A, <4 x float> %B) {
define <4 x float> @shuffle_v4f32_z06z(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_z06z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_z06z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm0[0],xmm1[2],zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 undef, i32 0, i32 6, i32 undef>
@@ -111,12 +111,12 @@ define <4 x float> @shuffle_v4f32_z06z(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_05zz(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_05zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_05zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 undef, i32 undef>
@@ -126,12 +126,12 @@ define <4 x float> @shuffle_v4f32_05zz(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insertps_undef_input0(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: insertps_undef_input0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: insertps {{.*#+}} xmm0 = zero,xmm1[0],zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: insertps_undef_input0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],zero,zero
; AVX-NEXT: retq
%res0 = fadd <4 x float> %a0, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -142,13 +142,13 @@ define <4 x float> @insertps_undef_input0(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @insertps_undef_input1(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: insertps_undef_input1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; SSE-NEXT: retq
;
; AVX-LABEL: insertps_undef_input1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX-NEXT: retq
@@ -160,7 +160,7 @@ define <4 x float> @insertps_undef_input1(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @insertps_zero_from_v2f64(<4 x float> %a0, <2 x double>* %a1) nounwind {
; SSE-LABEL: insertps_zero_from_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm1
; SSE-NEXT: addpd {{.*}}(%rip), %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -168,7 +168,7 @@ define <4 x float> @insertps_zero_from_v2f64(<4 x float> %a0, <2 x double>* %a1)
; SSE-NEXT: retq
;
; AVX-LABEL: insertps_zero_from_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vaddpd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -184,7 +184,7 @@ define <4 x float> @insertps_zero_from_v2f64(<4 x float> %a0, <2 x double>* %a1)
define <4 x float> @insertps_zero_from_v2i64(<4 x float> %a0, <2 x i64>* %a1) nounwind {
; SSE-LABEL: insertps_zero_from_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: paddq {{.*}}(%rip), %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -192,7 +192,7 @@ define <4 x float> @insertps_zero_from_v2i64(<4 x float> %a0, <2 x i64>* %a1) no
; SSE-NEXT: retq
;
; AVX-LABEL: insertps_zero_from_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm1
; AVX-NEXT: vpaddq {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -208,7 +208,7 @@ define <4 x float> @insertps_zero_from_v2i64(<4 x float> %a0, <2 x i64>* %a1) no
define <4 x float> @insertps_zero_from_v8i16(<4 x float> %a0, <8 x i16>* %a1) nounwind {
; SSE-LABEL: insertps_zero_from_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: paddw {{.*}}(%rip), %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -216,7 +216,7 @@ define <4 x float> @insertps_zero_from_v8i16(<4 x float> %a0, <8 x i16>* %a1) no
; SSE-NEXT: retq
;
; AVX-LABEL: insertps_zero_from_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm1
; AVX-NEXT: vpaddw {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -232,12 +232,12 @@ define <4 x float> @insertps_zero_from_v8i16(<4 x float> %a0, <8 x i16>* %a1) no
define <4 x float> @consecutive_load_insertps_04zz(float* %p) {
; SSE-LABEL: consecutive_load_insertps_04zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: consecutive_load_insertps_04zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
%p0 = getelementptr inbounds float, float* %p, i64 1
@@ -252,12 +252,12 @@ define <4 x float> @consecutive_load_insertps_04zz(float* %p) {
define float @extract_zero_insertps_z0z7(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: extract_zero_insertps_z0z7:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: extract_zero_insertps_z0z7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 21)
@@ -267,12 +267,12 @@ define float @extract_zero_insertps_z0z7(<4 x float> %a0, <4 x float> %a1) {
define float @extract_lane_insertps_5123(<4 x float> %a0, <4 x float> *%p1) {
; SSE-LABEL: extract_lane_insertps_5123:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: extract_lane_insertps_5123:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
%a1 = load <4 x float>, <4 x float> *%p1
diff --git a/test/CodeGen/X86/insertps-from-constantpool.ll b/test/CodeGen/X86/insertps-from-constantpool.ll
index cfcfeacad06..e0a371ebe40 100644
--- a/test/CodeGen/X86/insertps-from-constantpool.ll
+++ b/test/CodeGen/X86/insertps-from-constantpool.ll
@@ -5,12 +5,12 @@
define <4 x float> @fold_from_constantpool(<4 x float> %a) {
; X32-LABEL: fold_from_constantpool:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: fold_from_constantpool:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; X64-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> <float 0.0, float 1.0, float 0.0, float 0.0>, i8 64)
diff --git a/test/CodeGen/X86/insertps-unfold-load-bug.ll b/test/CodeGen/X86/insertps-unfold-load-bug.ll
index bf7c4bc4d7b..723b25d598c 100644
--- a/test/CodeGen/X86/insertps-unfold-load-bug.ll
+++ b/test/CodeGen/X86/insertps-unfold-load-bug.ll
@@ -6,7 +6,7 @@
define <4 x float> @insertps_unfold(<4 x float>* %v0, <4 x float>* %v1) {
; X32-LABEL: insertps_unfold:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -16,7 +16,7 @@ define <4 x float> @insertps_unfold(<4 x float>* %v0, <4 x float>* %v1) {
; X32-NEXT: retl
;
; X64-LABEL: insertps_unfold:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
diff --git a/test/CodeGen/X86/jump_sign.ll b/test/CodeGen/X86/jump_sign.ll
index c767e06948f..137edece053 100644
--- a/test/CodeGen/X86/jump_sign.ll
+++ b/test/CodeGen/X86/jump_sign.ll
@@ -3,11 +3,11 @@
define i32 @func_f(i32 %X) {
; CHECK-LABEL: func_f:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: incl %eax
; CHECK-NEXT: jns .LBB0_2
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: calll bar
; CHECK-NEXT: .LBB0_2: # %cond_next
; CHECK-NEXT: jmp baz # TAILCALL
@@ -32,7 +32,7 @@ declare i32 @baz(...)
; rdar://11355268
define i32 @func_g(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_g:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: subl {{[0-9]+}}(%esp), %eax
@@ -47,7 +47,7 @@ define i32 @func_g(i32 %a, i32 %b) nounwind {
; rdar://10734411
define i32 @func_h(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_h:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %edx, %edx
@@ -62,7 +62,7 @@ define i32 @func_h(i32 %a, i32 %b) nounwind {
define i32 @func_i(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_i:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: subl {{[0-9]+}}(%esp), %eax
@@ -76,7 +76,7 @@ define i32 @func_i(i32 %a, i32 %b) nounwind {
define i32 @func_j(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_j:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: subl {{[0-9]+}}(%esp), %eax
@@ -90,7 +90,7 @@ define i32 @func_j(i32 %a, i32 %b) nounwind {
define i32 @func_k(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_k:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %edx, %edx
@@ -106,7 +106,7 @@ define i32 @func_k(i32 %a, i32 %b) nounwind {
; redundant cmp instruction
define i32 @func_l(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_l:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: movl %edx, %eax
@@ -121,7 +121,7 @@ define i32 @func_l(i32 %a, i32 %b) nounwind {
define i32 @func_m(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_m:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: subl %ecx, %eax
@@ -137,14 +137,14 @@ define i32 @func_m(i32 %a, i32 %b) nounwind {
; a swapped sub.
define i32 @func_l2(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_l2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: subl %edx, %ecx
; CHECK-NEXT: cmpl %eax, %edx
; CHECK-NEXT: jne .LBB8_2
-; CHECK-NEXT: # BB#1: # %if.then
+; CHECK-NEXT: # %bb.1: # %if.then
; CHECK-NEXT: cmovgl %ecx, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB8_2: # %if.else
@@ -165,12 +165,12 @@ if.else:
define i32 @func_l3(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_l3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: subl %ecx, %eax
; CHECK-NEXT: jge .LBB9_2
-; CHECK-NEXT: # BB#1: # %if.then
+; CHECK-NEXT: # %bb.1: # %if.then
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB9_2: # %if.else
; CHECK-NEXT: incl %eax
@@ -191,7 +191,7 @@ if.else:
; When Movr0 is between sub and cmp, we need to move "Movr0" before sub.
define i32 @func_l4(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_l4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %edx, %edx
@@ -207,7 +207,7 @@ define i32 @func_l4(i32 %a, i32 %b) nounwind {
; rdar://11540023
define i32 @func_n(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func_n:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: cmpl %ecx, %eax
@@ -222,19 +222,19 @@ define i32 @func_n(i32 %x, i32 %y) nounwind {
; PR://13046
define void @func_o() nounwind uwtable {
; CHECK-LABEL: func_o:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je .LBB12_1
-; CHECK-NEXT: # BB#2: # %if.end.i
+; CHECK-NEXT: # %bb.2: # %if.end.i
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB12_5
-; CHECK-NEXT: # BB#3: # %sw.bb
+; CHECK-NEXT: # %bb.3: # %sw.bb
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB12_8
-; CHECK-NEXT: # BB#4: # %if.end29
+; CHECK-NEXT: # %bb.4: # %if.end29
; CHECK-NEXT: movzwl (%eax), %eax
; CHECK-NEXT: movzwl %ax, %eax
; CHECK-NEXT: imull $52429, %eax, %ecx # imm = 0xCCCD
@@ -247,13 +247,13 @@ define void @func_o() nounwind uwtable {
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je .LBB12_9
-; CHECK-NEXT: # BB#10: # %if.else.i104
+; CHECK-NEXT: # %bb.10: # %if.else.i104
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB12_5: # %sw.default
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB12_7
-; CHECK-NEXT: # BB#6: # %if.then.i96
+; CHECK-NEXT: # %bb.6: # %if.then.i96
; CHECK-NEXT: .LBB12_1: # %if.then.i
; CHECK-NEXT: .LBB12_9: # %if.then.i103
; CHECK-NEXT: .LBB12_7: # %if.else.i97
@@ -299,7 +299,7 @@ if.else.i104: ; preds = %if.then44
; rdar://11855129
define i32 @func_p(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: func_p:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -316,7 +316,7 @@ define i32 @func_p(i32 %a, i32 %b) nounwind {
; by sbb, we should not optimize cmp away.
define i32 @func_q(i32 %a0, i32 %a1, i32 %a2) {
; CHECK-LABEL: func_q:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl %ecx, %edx
@@ -335,13 +335,13 @@ define i32 @func_q(i32 %a0, i32 %a1, i32 %a2) {
; rdar://11873276
define i8* @func_r(i8* %base, i32* nocapture %offset, i32 %size) nounwind {
; CHECK-LABEL: func_r:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: movl (%edx), %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: subl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: jl .LBB15_2
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %ecx, (%edx)
; CHECK-NEXT: addl %ecx, %eax
@@ -366,7 +366,7 @@ return:
; Test optimizations of dec/inc.
define i32 @func_dec(i32 %a) nounwind {
; CHECK-LABEL: func_dec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: decl %eax
@@ -380,7 +380,7 @@ define i32 @func_dec(i32 %a) nounwind {
define i32 @func_inc(i32 %a) nounwind {
; CHECK-LABEL: func_inc:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: incl %eax
@@ -397,7 +397,7 @@ define i32 @func_inc(i32 %a) nounwind {
@a = common global i32 0, align 4
define i32 @func_test1(i32 %p1) nounwind uwtable {
; CHECK-LABEL: func_test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl b, %eax
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: setb %cl
@@ -405,7 +405,7 @@ define i32 @func_test1(i32 %p1) nounwind uwtable {
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: andb %cl, %dl
; CHECK-NEXT: je .LBB18_2
-; CHECK-NEXT: # BB#1: # %if.then
+; CHECK-NEXT: # %bb.1: # %if.then
; CHECK-NEXT: decl %eax
; CHECK-NEXT: movl %eax, a
; CHECK-NEXT: .LBB18_2: # %if.end
diff --git a/test/CodeGen/X86/known-bits-vector.ll b/test/CodeGen/X86/known-bits-vector.ll
index e9b2d6701b2..283d1f93dfb 100644
--- a/test/CodeGen/X86/known-bits-vector.ll
+++ b/test/CodeGen/X86/known-bits-vector.ll
@@ -4,13 +4,13 @@
define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_sext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpextrw $0, %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_extract_sext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpextrw $0, %xmm0, %eax
; X64-NEXT: retq
@@ -22,7 +22,7 @@ define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
@@ -34,7 +34,7 @@ define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_extract_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
; X64-NEXT: vmovq %xmm0, %rax
@@ -48,7 +48,7 @@ define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
define <4 x float> @knownbits_insert_uitofp(<4 x i32> %a0, i16 %a1, i16 %a2) nounwind {
; X32-LABEL: knownbits_insert_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0
@@ -58,7 +58,7 @@ define <4 x float> @knownbits_insert_uitofp(<4 x i32> %a0, i16 %a1, i16 %a2) nou
; X32-NEXT: retl
;
; X64-LABEL: knownbits_insert_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl %di, %eax
; X64-NEXT: movzwl %si, %ecx
; X64-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0
@@ -77,14 +77,14 @@ define <4 x float> @knownbits_insert_uitofp(<4 x i32> %a0, i16 %a1, i16 %a2) nou
define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_sext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_shuffle_sext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -97,14 +97,14 @@ define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_shuffle_sext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_shuffle_shuffle_sext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -118,14 +118,14 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind {
define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_shuffle_undef_sext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-NEXT: vpmovsxwd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_shuffle_shuffle_undef_sext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: vpmovsxwd %xmm0, %xmm0
@@ -139,14 +139,14 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounw
define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -159,7 +159,7 @@ define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_or_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vorps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -167,7 +167,7 @@ define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_or_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vorps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -182,7 +182,7 @@ define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_xor_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -190,7 +190,7 @@ define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_xor_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -205,12 +205,12 @@ define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
define <4 x i32> @knownbits_mask_shl_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shl_shuffle_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_shl_shuffle_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 -65536, i32 -7, i32 -7, i32 -65536>
@@ -222,12 +222,12 @@ define <4 x i32> @knownbits_mask_shl_shuffle_lshr(<4 x i32> %a0) nounwind {
define <4 x i32> @knownbits_mask_ashr_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_ashr_shuffle_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_ashr_shuffle_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 131071, i32 -1, i32 -1, i32 131071>
@@ -239,12 +239,12 @@ define <4 x i32> @knownbits_mask_ashr_shuffle_lshr(<4 x i32> %a0) nounwind {
define <4 x i32> @knownbits_mask_mul_shuffle_shl(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_mul_shuffle_shl:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_mul_shuffle_shl:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 -65536, i32 -7, i32 -7, i32 -65536>
@@ -256,12 +256,12 @@ define <4 x i32> @knownbits_mask_mul_shuffle_shl(<4 x i32> %a0, <4 x i32> %a1) n
define <4 x i32> @knownbits_mask_trunc_shuffle_shl(<4 x i64> %a0) nounwind {
; X32-LABEL: knownbits_mask_trunc_shuffle_shl:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_trunc_shuffle_shl:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i64> %a0, <i64 -65536, i64 -7, i64 7, i64 -65536>
@@ -273,12 +273,12 @@ define <4 x i32> @knownbits_mask_trunc_shuffle_shl(<4 x i64> %a0) nounwind {
define <4 x i32> @knownbits_mask_add_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_add_shuffle_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_add_shuffle_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -291,12 +291,12 @@ define <4 x i32> @knownbits_mask_add_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1)
define <4 x i32> @knownbits_mask_sub_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_sub_shuffle_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_sub_shuffle_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 15, i32 -1, i32 -1, i32 15>
@@ -308,12 +308,12 @@ define <4 x i32> @knownbits_mask_sub_shuffle_lshr(<4 x i32> %a0) nounwind {
define <4 x i32> @knownbits_mask_udiv_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_udiv_shuffle_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_udiv_shuffle_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -325,12 +325,12 @@ define <4 x i32> @knownbits_mask_udiv_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1)
define <4 x i32> @knownbits_urem_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_urem_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_urem_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = urem <4 x i32> %a0, <i32 16, i32 16, i32 16, i32 16>
@@ -340,12 +340,12 @@ define <4 x i32> @knownbits_urem_lshr(<4 x i32> %a0) nounwind {
define <4 x i32> @knownbits_mask_urem_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_urem_shuffle_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_urem_shuffle_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -358,12 +358,12 @@ define <4 x i32> @knownbits_mask_urem_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1)
define <4 x i32> @knownbits_mask_srem_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_srem_shuffle_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_srem_shuffle_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 -32768, i32 -1, i32 -1, i32 -32768>
@@ -375,12 +375,12 @@ define <4 x i32> @knownbits_mask_srem_shuffle_lshr(<4 x i32> %a0) nounwind {
define <4 x i32> @knownbits_mask_bswap_shuffle_shl(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_bswap_shuffle_shl:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_bswap_shuffle_shl:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -393,7 +393,7 @@ declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_concat_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vandps {{\.LCPI.*}}, %xmm1, %xmm1
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
@@ -403,7 +403,7 @@ define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) n
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_concat_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
@@ -420,14 +420,14 @@ define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) n
define <4 x float> @knownbits_lshr_bitcast_shuffle_uitofp(<2 x i64> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_lshr_bitcast_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlq $1, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_lshr_bitcast_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $1, %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -441,7 +441,7 @@ define <4 x float> @knownbits_lshr_bitcast_shuffle_uitofp(<2 x i64> %a0, <4 x i3
define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_smax_smin_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpminsd {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpmaxsd {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
@@ -453,7 +453,7 @@ define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_smax_smin_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
@@ -474,14 +474,14 @@ declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x float> @knownbits_umin_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_umin_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpminud {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_umin_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -496,13 +496,13 @@ declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @knownbits_umax_shuffle_ashr(<4 x i32> %a0) {
; X32-LABEL: knownbits_umax_shuffle_ashr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
; X32-NEXT: retl
;
; X64-LABEL: knownbits_umax_shuffle_ashr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
; X64-NEXT: retq
@@ -514,7 +514,7 @@ define <4 x i32> @knownbits_umax_shuffle_ashr(<4 x i32> %a0) {
define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_mask_umax_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
@@ -522,7 +522,7 @@ define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_umax_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
@@ -537,12 +537,12 @@ define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
define <4 x i32> @knownbits_mask_bitreverse_ashr(<4 x i32> %a0) {
; X32-LABEL: knownbits_mask_bitreverse_ashr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_bitreverse_ashr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 -2, i32 -2, i32 -2, i32 -2>
@@ -555,7 +555,7 @@ declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) nounwind readnone
; If we don't know that the input isn't INT_MIN we can't combine to sitofp
define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_abs_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpabsd %xmm0, %xmm0
; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X32-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -565,7 +565,7 @@ define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_abs_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpabsd %xmm0, %xmm0
; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X64-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -582,7 +582,7 @@ define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_or_abs_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpor {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
; X32-NEXT: vpabsd %xmm0, %xmm0
@@ -590,7 +590,7 @@ define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_or_abs_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
; X64-NEXT: vpabsd %xmm0, %xmm0
@@ -607,7 +607,7 @@ define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) {
define <4 x float> @knownbits_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) nounwind {
; X32-LABEL: knownbits_and_select_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -624,7 +624,7 @@ define <4 x float> @knownbits_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32>
; X32-NEXT: retl
;
; X64-LABEL: knownbits_and_select_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps {{.*}}(%rip), %xmm2, %xmm2
; X64-NEXT: vandps {{.*}}(%rip), %xmm3, %xmm3
; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -643,7 +643,7 @@ define <4 x float> @knownbits_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32>
define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) nounwind {
; X32-LABEL: knownbits_lshr_and_select_shuffle_uitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -662,7 +662,7 @@ define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x
; X32-NEXT: retl
;
; X64-LABEL: knownbits_lshr_and_select_shuffle_uitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrld $1, %xmm2, %xmm4
; X64-NEXT: vpsrld $5, %xmm2, %xmm2
; X64-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
diff --git a/test/CodeGen/X86/known-bits.ll b/test/CodeGen/X86/known-bits.ll
index dc2a88cf25d..91cde32d10e 100644
--- a/test/CodeGen/X86/known-bits.ll
+++ b/test/CodeGen/X86/known-bits.ll
@@ -4,7 +4,7 @@
define void @knownbits_zext_in_reg(i8*) nounwind {
; X32-LABEL: knownbits_zext_in_reg:
-; X32: # BB#0: # %BB
+; X32: # %bb.0: # %BB
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
@@ -47,7 +47,7 @@ define void @knownbits_zext_in_reg(i8*) nounwind {
; X32-NEXT: jmp .LBB0_1
;
; X64-LABEL: knownbits_zext_in_reg:
-; X64: # BB#0: # %BB
+; X64: # %bb.0: # %BB
; X64-NEXT: movzbl (%rdi), %eax
; X64-NEXT: imull $101, %eax, %eax
; X64-NEXT: shrl $14, %eax
@@ -106,12 +106,12 @@ CF246: ; preds = %CF237
define i32 @knownbits_mask_add_lshr(i32 %a0, i32 %a1) nounwind {
; X32-LABEL: knownbits_mask_add_lshr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_add_lshr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
%1 = and i32 %a0, 32767
@@ -123,7 +123,7 @@ define i32 @knownbits_mask_add_lshr(i32 %a0, i32 %a1) nounwind {
define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
; X32-LABEL: knownbits_mask_addc_shl:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -147,7 +147,7 @@ define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
; X32-NEXT: retl $4
;
; X64-LABEL: knownbits_mask_addc_shl:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andq $-1024, %rdi # imm = 0xFC00
; X64-NEXT: andq $-1024, %rsi # imm = 0xFC00
; X64-NEXT: addq %rdi, %rsi
@@ -169,7 +169,7 @@ define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
define {i32, i1} @knownbits_uaddo_saddo(i64 %a0, i64 %a1) nounwind {
; X32-LABEL: knownbits_uaddo_saddo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -193,7 +193,7 @@ define {i32, i1} @knownbits_uaddo_saddo(i64 %a0, i64 %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_uaddo_saddo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shlq $32, %rdi
; X64-NEXT: shlq $32, %rsi
; X64-NEXT: addq %rdi, %rsi
@@ -220,7 +220,7 @@ define {i32, i1} @knownbits_uaddo_saddo(i64 %a0, i64 %a1) nounwind {
define {i32, i1} @knownbits_usubo_ssubo(i64 %a0, i64 %a1) nounwind {
; X32-LABEL: knownbits_usubo_ssubo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -244,7 +244,7 @@ define {i32, i1} @knownbits_usubo_ssubo(i64 %a0, i64 %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: knownbits_usubo_ssubo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shlq $32, %rdi
; X64-NEXT: shlq $32, %rsi
; X64-NEXT: cmpq %rsi, %rdi
diff --git a/test/CodeGen/X86/known-signbits-vector.ll b/test/CodeGen/X86/known-signbits-vector.ll
index 0afbd425652..a003a5520d0 100644
--- a/test/CodeGen/X86/known-signbits-vector.ll
+++ b/test/CodeGen/X86/known-signbits-vector.ll
@@ -4,12 +4,12 @@
define <2 x double> @signbits_sext_v2i64_sitofp_v2f64(i32 %a0, i32 %a1) nounwind {
; X32-LABEL: signbits_sext_v2i64_sitofp_v2f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vcvtdq2pd {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: signbits_sext_v2i64_sitofp_v2f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; X64-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -24,7 +24,7 @@ define <2 x double> @signbits_sext_v2i64_sitofp_v2f64(i32 %a0, i32 %a1) nounwind
define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext %a1, i32 %a2, i32 %a3) nounwind {
; X32-LABEL: signbits_sext_v4i64_sitofp_v4f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movswl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovd %eax, %xmm0
@@ -46,7 +46,7 @@ define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext
; X32-NEXT: retl
;
; X64-LABEL: signbits_sext_v4i64_sitofp_v4f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movslq %edi, %rax
; X64-NEXT: movslq %esi, %rsi
; X64-NEXT: movslq %edx, %rdx
@@ -74,7 +74,7 @@ define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext
define float @signbits_ashr_extract_sitofp_0(<2 x i64> %a0) nounwind {
; X32-LABEL: signbits_ashr_extract_sitofp_0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: vextractps $1, %xmm0, %eax
; X32-NEXT: vcvtsi2ssl %eax, %xmm1, %xmm0
@@ -84,7 +84,7 @@ define float @signbits_ashr_extract_sitofp_0(<2 x i64> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_extract_sitofp_0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrad $31, %xmm0, %xmm1
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -99,7 +99,7 @@ define float @signbits_ashr_extract_sitofp_0(<2 x i64> %a0) nounwind {
define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
; X32-LABEL: signbits_ashr_extract_sitofp_1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
; X32-NEXT: vpsrlq $63, %xmm1, %xmm2
@@ -118,7 +118,7 @@ define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_extract_sitofp_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $63, %xmm0, %xmm1
; X64-NEXT: vpsrlq $32, %xmm0, %xmm0
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -136,7 +136,7 @@ define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
; X32-LABEL: signbits_ashr_shl_extract_sitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
; X32-NEXT: vpsrlq $60, %xmm1, %xmm2
@@ -156,7 +156,7 @@ define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_shl_extract_sitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $60, %xmm0, %xmm1
; X64-NEXT: vpsrlq $61, %xmm0, %xmm0
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -176,7 +176,7 @@ define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwind {
; X32-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -195,7 +195,7 @@ define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwin
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sarq $30, %rdi
; X64-NEXT: vmovq %rsi, %xmm0
; X64-NEXT: vmovq %rdi, %xmm1
@@ -217,7 +217,7 @@ define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwin
define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: signbits_sext_shuffle_sitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxdq %xmm0, %xmm1
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-NEXT: vpmovsxdq %xmm0, %xmm0
@@ -230,7 +230,7 @@ define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1)
; X32-NEXT: retl
;
; X64-LABEL: signbits_sext_shuffle_sitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxdq %xmm0, %xmm1
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: vpmovsxdq %xmm0, %xmm0
@@ -249,7 +249,7 @@ define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1)
define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrad $16, %xmm0, %xmm1
; X32-NEXT: vpsrlq $16, %xmm0, %xmm0
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -259,7 +259,7 @@ define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrad $16, %xmm0, %xmm1
; X64-NEXT: vpsrlq $16, %xmm0, %xmm0
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -278,7 +278,7 @@ define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4
define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2 x i64> %a1, i32 %a2) nounwind {
; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X32-NEXT: vpsrlq $60, %xmm2, %xmm3
@@ -306,7 +306,7 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $60, %xmm0, %xmm2
; X64-NEXT: vpsrlq $61, %xmm0, %xmm0
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
@@ -336,7 +336,7 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X32-NEXT: vpsrlq $60, %xmm2, %xmm3
@@ -359,7 +359,7 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $60, %xmm0, %xmm2
; X64-NEXT: vpsrlq $61, %xmm0, %xmm0
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
@@ -386,7 +386,7 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2, <4 x i32> %a3) nounwind {
; X32-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -423,7 +423,7 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm2, %xmm4
; X64-NEXT: vpsrlq $63, %xmm4, %xmm5
; X64-NEXT: vpsrlq $33, %xmm4, %xmm4
diff --git a/test/CodeGen/X86/lea-3.ll b/test/CodeGen/X86/lea-3.ll
index 09154e4fb78..94d11adc3ce 100644
--- a/test/CodeGen/X86/lea-3.ll
+++ b/test/CodeGen/X86/lea-3.ll
@@ -6,25 +6,25 @@
define i64 @test2(i64 %a) {
; LNX1-LABEL: test2:
-; LNX1: # BB#0:
+; LNX1: # %bb.0:
; LNX1-NEXT: leaq (,%rdi,4), %rax
; LNX1-NEXT: orq %rdi, %rax
; LNX1-NEXT: retq
;
; LNX2-LABEL: test2:
-; LNX2: # BB#0:
+; LNX2: # %bb.0:
; LNX2-NEXT: leaq (,%rdi,4), %rax
; LNX2-NEXT: orq %rdi, %rax
; LNX2-NEXT: retq
;
; NACL-LABEL: test2:
-; NACL: # BB#0:
+; NACL: # %bb.0:
; NACL-NEXT: leaq (,%rdi,4), %rax
; NACL-NEXT: orq %rdi, %rax
; NACL-NEXT: retq
;
; WIN-LABEL: test2:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: leaq (,%rcx,4), %rax
; WIN-NEXT: orq %rcx, %rax
; WIN-NEXT: retq
@@ -35,25 +35,25 @@ define i64 @test2(i64 %a) {
define i32 @test(i32 %a) {
; LNX1-LABEL: test:
-; LNX1: # BB#0:
+; LNX1: # %bb.0:
; LNX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; LNX1-NEXT: leal (%rdi,%rdi,2), %eax
; LNX1-NEXT: retq
;
; LNX2-LABEL: test:
-; LNX2: # BB#0:
+; LNX2: # %bb.0:
; LNX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; LNX2-NEXT: leal (%rdi,%rdi,2), %eax
; LNX2-NEXT: retq
;
; NACL-LABEL: test:
-; NACL: # BB#0:
+; NACL: # %bb.0:
; NACL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; NACL-NEXT: leal (%rdi,%rdi,2), %eax
; NACL-NEXT: retq
;
; WIN-LABEL: test:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; WIN-NEXT: leal (%rcx,%rcx,2), %eax
; WIN-NEXT: retq
@@ -63,22 +63,22 @@ define i32 @test(i32 %a) {
define i64 @test3(i64 %a) {
; LNX1-LABEL: test3:
-; LNX1: # BB#0:
+; LNX1: # %bb.0:
; LNX1-NEXT: leaq (,%rdi,8), %rax
; LNX1-NEXT: retq
;
; LNX2-LABEL: test3:
-; LNX2: # BB#0:
+; LNX2: # %bb.0:
; LNX2-NEXT: leaq (,%rdi,8), %rax
; LNX2-NEXT: retq
;
; NACL-LABEL: test3:
-; NACL: # BB#0:
+; NACL: # %bb.0:
; NACL-NEXT: leaq (,%rdi,8), %rax
; NACL-NEXT: retq
;
; WIN-LABEL: test3:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: leaq (,%rcx,8), %rax
; WIN-NEXT: retq
%tmp2 = shl i64 %a, 3
diff --git a/test/CodeGen/X86/lea-opt-cse1.ll b/test/CodeGen/X86/lea-opt-cse1.ll
index 05b47690e81..08241f6b5b8 100644
--- a/test/CodeGen/X86/lea-opt-cse1.ll
+++ b/test/CodeGen/X86/lea-opt-cse1.ll
@@ -6,7 +6,7 @@
define void @test_func(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr {
; X64-LABEL: test_func:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl 16(%rdi), %ecx
; X64-NEXT: leal (%rax,%rcx), %edx
@@ -17,7 +17,7 @@ define void @test_func(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr {
; X64-NEXT: retq
;
; X86-LABEL: test_func:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
diff --git a/test/CodeGen/X86/lea-opt-cse2.ll b/test/CodeGen/X86/lea-opt-cse2.ll
index 865dd49a6e1..429a7a5c0c8 100644
--- a/test/CodeGen/X86/lea-opt-cse2.ll
+++ b/test/CodeGen/X86/lea-opt-cse2.ll
@@ -6,7 +6,7 @@
define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB0_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
@@ -16,14 +16,14 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-NEXT: movl %edx, 12(%rdi)
; X64-NEXT: decl %esi
; X64-NEXT: jne .LBB0_1
-; X64-NEXT: # BB#2: # %exit
+; X64-NEXT: # %bb.2: # %exit
; X64-NEXT: addl %ecx, %eax
; X64-NEXT: leal 1(%rcx,%rax), %eax
; X64-NEXT: movl %eax, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: foo:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %edi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %esi
@@ -41,7 +41,7 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: decl %ecx
; X86-NEXT: jne .LBB0_1
-; X86-NEXT: # BB#2: # %exit
+; X86-NEXT: # %bb.2: # %exit
; X86-NEXT: addl %esi, %edx
; X86-NEXT: leal 1(%esi,%edx), %ecx
; X86-NEXT: movl %ecx, 16(%eax)
diff --git a/test/CodeGen/X86/lea-opt-cse3.ll b/test/CodeGen/X86/lea-opt-cse3.ll
index 48ab3130bf0..96e24a362ee 100644
--- a/test/CodeGen/X86/lea-opt-cse3.ll
+++ b/test/CodeGen/X86/lea-opt-cse3.ll
@@ -4,7 +4,7 @@
define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
@@ -13,7 +13,7 @@ define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: retq
;
; X86-LABEL: foo:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal 4(%ecx,%eax,2), %edx
@@ -32,7 +32,7 @@ entry:
define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
@@ -41,7 +41,7 @@ define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: retq
;
; X86-LABEL: foo1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal 4(%ecx,%eax,4), %edx
@@ -60,14 +60,14 @@ entry:
define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1_mult_basic_blocks:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $10, %ecx
; X64-NEXT: je .LBB2_2
-; X64-NEXT: # BB#1: # %mid
+; X64-NEXT: # %bb.1: # %mid
; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
; X64-NEXT: imull %eax, %ecx
; X64-NEXT: movl %ecx, %eax
@@ -75,7 +75,7 @@ define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: retq
;
; X86-LABEL: foo1_mult_basic_blocks:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -85,7 +85,7 @@ define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $10, %ecx
; X86-NEXT: je .LBB2_2
-; X86-NEXT: # BB#1: # %mid
+; X86-NEXT: # %bb.1: # %mid
; X86-NEXT: leal 4(%esi,%edx,8), %eax
; X86-NEXT: imull %eax, %ecx
; X86-NEXT: movl %ecx, %eax
@@ -112,14 +112,14 @@ exit:
define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1_mult_basic_blocks_illegal_scale:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $10, %ecx
; X64-NEXT: je .LBB3_2
-; X64-NEXT: # BB#1: # %mid
+; X64-NEXT: # %bb.1: # %mid
; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
; X64-NEXT: imull %eax, %ecx
; X64-NEXT: movl %ecx, %eax
@@ -127,7 +127,7 @@ define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_a
; X64-NEXT: retq
;
; X86-LABEL: foo1_mult_basic_blocks_illegal_scale:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -137,7 +137,7 @@ define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_a
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $10, %ecx
; X86-NEXT: je .LBB3_2
-; X86-NEXT: # BB#1: # %mid
+; X86-NEXT: # %bb.1: # %mid
; X86-NEXT: leal 4(%esi,%edx,8), %eax
; X86-NEXT: imull %eax, %ecx
; X86-NEXT: movl %ecx, %eax
diff --git a/test/CodeGen/X86/lea-opt-cse4.ll b/test/CodeGen/X86/lea-opt-cse4.ll
index 31f31a73d44..a295ac7129c 100644
--- a/test/CodeGen/X86/lea-opt-cse4.ll
+++ b/test/CodeGen/X86/lea-opt-cse4.ll
@@ -6,7 +6,7 @@
define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl 16(%rdi), %eax
; X64-NEXT: movl (%rdi), %ecx
; X64-NEXT: addl %eax, %ecx
@@ -20,7 +20,7 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-NEXT: retq
;
; X86-LABEL: foo:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -58,7 +58,7 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo_loop:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB1_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
@@ -68,7 +68,7 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
; X64-NEXT: movl %edx, 12(%rdi)
; X64-NEXT: decl %esi
; X64-NEXT: jne .LBB1_1
-; X64-NEXT: # BB#2: # %exit
+; X64-NEXT: # %bb.2: # %exit
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: leal 1(%rax,%rcx), %ecx
; X64-NEXT: addl %eax, %ecx
@@ -81,7 +81,7 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
; X64-NEXT: retq
;
; X86-LABEL: foo_loop:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %edi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %esi
@@ -99,7 +99,7 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: decl %edx
; X86-NEXT: jne .LBB1_1
-; X86-NEXT: # BB#2: # %exit
+; X86-NEXT: # %bb.2: # %exit
; X86-NEXT: addl %ecx, %esi
; X86-NEXT: leal 1(%ecx,%esi), %edx
; X86-NEXT: addl %ecx, %edx
diff --git a/test/CodeGen/X86/lea32-schedule.ll b/test/CodeGen/X86/lea32-schedule.ll
index 5ba3ddd3a3b..e1bc9af65ed 100644
--- a/test/CodeGen/X86/lea32-schedule.ll
+++ b/test/CodeGen/X86/lea32-schedule.ll
@@ -13,13 +13,13 @@
define i32 @test_lea_offset(i32) {
; GENERIC-LABEL: test_lea_offset:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -31,43 +31,43 @@ define i32 @test_lea_offset(i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_offset:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_offset:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -77,13 +77,13 @@ define i32 @test_lea_offset(i32) {
define i32 @test_lea_offset_big(i32) {
; GENERIC-LABEL: test_lea_offset_big:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset_big:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -95,43 +95,43 @@ define i32 @test_lea_offset_big(i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_offset_big:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset_big:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset_big:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_offset_big:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset_big:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset_big:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset_big:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -142,14 +142,14 @@ define i32 @test_lea_offset_big(i32) {
; Function Attrs: norecurse nounwind readnone uwtable
define i32 @test_lea_add(i32, i32) {
; GENERIC-LABEL: test_lea_add:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
@@ -162,49 +162,49 @@ define i32 @test_lea_add(i32, i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25]
@@ -215,7 +215,7 @@ define i32 @test_lea_add(i32, i32) {
define i32 @test_lea_add_offset(i32, i32) {
; GENERIC-LABEL: test_lea_add_offset:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -223,7 +223,7 @@ define i32 @test_lea_add_offset(i32, i32) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_offset:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
@@ -236,14 +236,14 @@ define i32 @test_lea_add_offset(i32, i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_offset:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -251,7 +251,7 @@ define i32 @test_lea_add_offset(i32, i32) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_offset:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -259,7 +259,7 @@ define i32 @test_lea_add_offset(i32, i32) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_offset:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -267,7 +267,7 @@ define i32 @test_lea_add_offset(i32, i32) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_offset:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -275,14 +275,14 @@ define i32 @test_lea_add_offset(i32, i32) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_offset:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
@@ -294,7 +294,7 @@ define i32 @test_lea_add_offset(i32, i32) {
define i32 @test_lea_add_offset_big(i32, i32) {
; GENERIC-LABEL: test_lea_add_offset_big:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -303,7 +303,7 @@ define i32 @test_lea_add_offset_big(i32, i32) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_offset_big:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
@@ -316,14 +316,14 @@ define i32 @test_lea_add_offset_big(i32, i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_offset_big:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset_big:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -332,7 +332,7 @@ define i32 @test_lea_add_offset_big(i32, i32) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_offset_big:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -341,7 +341,7 @@ define i32 @test_lea_add_offset_big(i32, i32) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_offset_big:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -350,7 +350,7 @@ define i32 @test_lea_add_offset_big(i32, i32) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_offset_big:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -359,14 +359,14 @@ define i32 @test_lea_add_offset_big(i32, i32) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_offset_big:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset_big:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
@@ -378,13 +378,13 @@ define i32 @test_lea_add_offset_big(i32, i32) {
define i32 @test_lea_mul(i32) {
; GENERIC-LABEL: test_lea_mul:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -396,43 +396,43 @@ define i32 @test_lea_mul(i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_mul:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -442,14 +442,14 @@ define i32 @test_lea_mul(i32) {
define i32 @test_lea_mul_offset(i32) {
; GENERIC-LABEL: test_lea_mul_offset:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-32, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul_offset:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -461,47 +461,47 @@ define i32 @test_lea_mul_offset(i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_mul_offset:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-32, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul_offset:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul_offset:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul_offset:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-32, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul_offset:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -512,7 +512,7 @@ define i32 @test_lea_mul_offset(i32) {
define i32 @test_lea_mul_offset_big(i32) {
; GENERIC-LABEL: test_lea_mul_offset_big:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $10000, %eax # imm = 0x2710
@@ -520,7 +520,7 @@ define i32 @test_lea_mul_offset_big(i32) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul_offset_big:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -532,13 +532,13 @@ define i32 @test_lea_mul_offset_big(i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_mul_offset_big:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset_big:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $10000, %eax # imm = 0x2710
@@ -546,7 +546,7 @@ define i32 @test_lea_mul_offset_big(i32) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul_offset_big:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $10000, %eax # imm = 0x2710
@@ -554,7 +554,7 @@ define i32 @test_lea_mul_offset_big(i32) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul_offset_big:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $10000, %eax # imm = 0x2710
@@ -562,7 +562,7 @@ define i32 @test_lea_mul_offset_big(i32) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul_offset_big:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $10000, %eax # imm = 0x2710
@@ -570,13 +570,13 @@ define i32 @test_lea_mul_offset_big(i32) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul_offset_big:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset_big:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -587,14 +587,14 @@ define i32 @test_lea_mul_offset_big(i32) {
define i32 @test_lea_add_scale(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
@@ -607,49 +607,49 @@ define i32 @test_lea_add_scale(i32, i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_scale:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
@@ -661,7 +661,7 @@ define i32 @test_lea_add_scale(i32, i32) {
define i32 @test_lea_add_scale_offset(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale_offset:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -669,7 +669,7 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale_offset:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
@@ -682,14 +682,14 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_scale_offset:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -697,7 +697,7 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale_offset:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -705,7 +705,7 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale_offset:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -713,7 +713,7 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale_offset:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -721,14 +721,14 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale_offset:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
@@ -741,7 +741,7 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
define i32 @test_lea_add_scale_offset_big(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale_offset_big:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -750,7 +750,7 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale_offset_big:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
@@ -763,14 +763,14 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_scale_offset_big:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset_big:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -779,7 +779,7 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale_offset_big:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -788,7 +788,7 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale_offset_big:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -797,7 +797,7 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale_offset_big:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -806,14 +806,14 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale_offset_big:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset_big:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]
diff --git a/test/CodeGen/X86/lea64-schedule.ll b/test/CodeGen/X86/lea64-schedule.ll
index 1177645a698..21c4bed92b6 100644
--- a/test/CodeGen/X86/lea64-schedule.ll
+++ b/test/CodeGen/X86/lea64-schedule.ll
@@ -13,12 +13,12 @@
define i64 @test_lea_offset(i64) {
; GENERIC-LABEL: test_lea_offset:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq -24(%rdi), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -29,37 +29,37 @@ define i64 @test_lea_offset(i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_offset:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq -24(%rdi), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_offset:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq -24(%rdi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i64 %0, -24
@@ -68,12 +68,12 @@ define i64 @test_lea_offset(i64) {
define i64 @test_lea_offset_big(i64) {
; GENERIC-LABEL: test_lea_offset_big:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset_big:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq 1024(%rdi), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -84,37 +84,37 @@ define i64 @test_lea_offset_big(i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_offset_big:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq 1024(%rdi), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset_big:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset_big:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_offset_big:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset_big:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset_big:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset_big:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i64 %0, 1024
@@ -124,12 +124,12 @@ define i64 @test_lea_offset_big(i64) {
; Function Attrs: norecurse nounwind readnone uwtable
define i64 @test_lea_add(i64, i64) {
; GENERIC-LABEL: test_lea_add:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -140,37 +140,37 @@ define i64 @test_lea_add(i64, i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add nsw i64 %1, %0
@@ -179,13 +179,13 @@ define i64 @test_lea_add(i64, i64) {
define i64 @test_lea_add_offset(i64, i64) {
; GENERIC-LABEL: test_lea_add_offset:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; GENERIC-NEXT: addq $16, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_offset:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -196,41 +196,41 @@ define i64 @test_lea_add_offset(i64, i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_offset:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; SANDY-NEXT: addq $16, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_offset:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq $16, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_offset:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq $16, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_offset:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq $16, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_offset:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i64 %0, 16
@@ -240,14 +240,14 @@ define i64 @test_lea_add_offset(i64, i64) {
define i64 @test_lea_add_offset_big(i64, i64) {
; GENERIC-LABEL: test_lea_add_offset_big:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; GENERIC-NEXT: addq $-4096, %rax # imm = 0xF000
; GENERIC-NEXT: # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_offset_big:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -258,45 +258,45 @@ define i64 @test_lea_add_offset_big(i64, i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_offset_big:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset_big:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; SANDY-NEXT: addq $-4096, %rax # imm = 0xF000
; SANDY-NEXT: # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_offset_big:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq $-4096, %rax # imm = 0xF000
; HASWELL-NEXT: # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_offset_big:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq $-4096, %rax # imm = 0xF000
; BROADWELL-NEXT: # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_offset_big:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq $-4096, %rax # imm = 0xF000
; SKYLAKE-NEXT: # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_offset_big:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset_big:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i64 %0, -4096
@@ -306,12 +306,12 @@ define i64 @test_lea_add_offset_big(i64, i64) {
define i64 @test_lea_mul(i64) {
; GENERIC-LABEL: test_lea_mul:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -322,37 +322,37 @@ define i64 @test_lea_mul(i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_mul:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 3
@@ -361,13 +361,13 @@ define i64 @test_lea_mul(i64) {
define i64 @test_lea_mul_offset(i64) {
; GENERIC-LABEL: test_lea_mul_offset:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; GENERIC-NEXT: addq $-32, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul_offset:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -378,41 +378,41 @@ define i64 @test_lea_mul_offset(i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_mul_offset:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; SANDY-NEXT: addq $-32, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul_offset:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq $-32, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul_offset:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq $-32, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul_offset:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq $-32, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul_offset:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 3
@@ -422,14 +422,14 @@ define i64 @test_lea_mul_offset(i64) {
define i64 @test_lea_mul_offset_big(i64) {
; GENERIC-LABEL: test_lea_mul_offset_big:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; GENERIC-NEXT: addq $10000, %rax # imm = 0x2710
; GENERIC-NEXT: # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul_offset_big:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -440,45 +440,45 @@ define i64 @test_lea_mul_offset_big(i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_mul_offset_big:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset_big:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; SANDY-NEXT: addq $10000, %rax # imm = 0x2710
; SANDY-NEXT: # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul_offset_big:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq $10000, %rax # imm = 0x2710
; HASWELL-NEXT: # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul_offset_big:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq $10000, %rax # imm = 0x2710
; BROADWELL-NEXT: # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul_offset_big:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq $10000, %rax # imm = 0x2710
; SKYLAKE-NEXT: # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul_offset_big:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset_big:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 9
@@ -488,12 +488,12 @@ define i64 @test_lea_mul_offset_big(i64) {
define i64 @test_lea_add_scale(i64, i64) {
; GENERIC-LABEL: test_lea_add_scale:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -504,37 +504,37 @@ define i64 @test_lea_add_scale(i64, i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_scale:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 1
@@ -544,13 +544,13 @@ define i64 @test_lea_add_scale(i64, i64) {
define i64 @test_lea_add_scale_offset(i64, i64) {
; GENERIC-LABEL: test_lea_add_scale_offset:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
; GENERIC-NEXT: addq $96, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale_offset:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -561,41 +561,41 @@ define i64 @test_lea_add_scale_offset(i64, i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_scale_offset:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
; SANDY-NEXT: addq $96, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale_offset:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq $96, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale_offset:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq $96, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale_offset:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq $96, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale_offset:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 2
@@ -606,14 +606,14 @@ define i64 @test_lea_add_scale_offset(i64, i64) {
define i64 @test_lea_add_scale_offset_big(i64, i64) {
; GENERIC-LABEL: test_lea_add_scale_offset_big:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
; GENERIC-NEXT: addq $-1200, %rax # imm = 0xFB50
; GENERIC-NEXT: # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale_offset_big:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -624,45 +624,45 @@ define i64 @test_lea_add_scale_offset_big(i64, i64) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lea_add_scale_offset_big:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset_big:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
; SANDY-NEXT: addq $-1200, %rax # imm = 0xFB50
; SANDY-NEXT: # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale_offset_big:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
; HASWELL-NEXT: addq $-1200, %rax # imm = 0xFB50
; HASWELL-NEXT: # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale_offset_big:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
; BROADWELL-NEXT: addq $-1200, %rax # imm = 0xFB50
; BROADWELL-NEXT: # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale_offset_big:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: addq $-1200, %rax # imm = 0xFB50
; SKYLAKE-NEXT: # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale_offset_big:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset_big:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 3
diff --git a/test/CodeGen/X86/legalize-shift-64.ll b/test/CodeGen/X86/legalize-shift-64.ll
index ca4cfa5b805..05fad9c6132 100644
--- a/test/CodeGen/X86/legalize-shift-64.ll
+++ b/test/CodeGen/X86/legalize-shift-64.ll
@@ -3,7 +3,7 @@
define i64 @test1(i32 %xx, i32 %test) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movb {{[0-9]+}}(%esp), %cl
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: andb $7, %cl
@@ -22,7 +22,7 @@ define i64 @test1(i32 %xx, i32 %test) nounwind {
define i64 @test2(i64 %xx, i32 %test) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -41,7 +41,7 @@ define i64 @test2(i64 %xx, i32 %test) nounwind {
define i64 @test3(i64 %xx, i32 %test) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: movb {{[0-9]+}}(%esp), %cl
@@ -57,7 +57,7 @@ define i64 @test3(i64 %xx, i32 %test) nounwind {
define i64 @test4(i64 %xx, i32 %test) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: movb {{[0-9]+}}(%esp), %cl
@@ -74,7 +74,7 @@ define i64 @test4(i64 %xx, i32 %test) nounwind {
; PR14668
define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: pushl %ebx
@@ -97,7 +97,7 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) {
; CHECK-NEXT: testb $32, %cl
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebp
; CHECK-NEXT: je .LBB4_2
-; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: movl %edi, %esi
; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: .LBB4_2:
@@ -108,7 +108,7 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) {
; CHECK-NEXT: shldl %cl, %edx, %ebp
; CHECK-NEXT: testb $32, %cl
; CHECK-NEXT: je .LBB4_4
-; CHECK-NEXT: # BB#3:
+; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: movl %ebx, %ebp
; CHECK-NEXT: xorl %ebx, %ebx
; CHECK-NEXT: .LBB4_4:
@@ -128,7 +128,7 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) {
; PR16108
define i32 @test6() {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %ebp, -8
@@ -144,7 +144,7 @@ define i32 @test6() {
; CHECK-NEXT: movb $32, %dl
; CHECK-NEXT: testb %dl, %dl
; CHECK-NEXT: jne .LBB5_2
-; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: .LBB5_2:
; CHECK-NEXT: sete %cl
@@ -152,7 +152,7 @@ define i32 @test6() {
; CHECK-NEXT: xorl $1, %eax
; CHECK-NEXT: orl %ecx, %eax
; CHECK-NEXT: je .LBB5_5
-; CHECK-NEXT: # BB#3: # %if.then
+; CHECK-NEXT: # %bb.3: # %if.then
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: jmp .LBB5_4
; CHECK-NEXT: .LBB5_5: # %if.end
diff --git a/test/CodeGen/X86/legalize-shl-vec.ll b/test/CodeGen/X86/legalize-shl-vec.ll
index 996dff54ace..a6238f26cbb 100644
--- a/test/CodeGen/X86/legalize-shl-vec.ll
+++ b/test/CodeGen/X86/legalize-shl-vec.ll
@@ -4,7 +4,7 @@
define <2 x i256> @test_shl(<2 x i256> %In) {
; X32-LABEL: test_shl:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, 60(%eax)
; X32-NEXT: movl $0, 56(%eax)
@@ -25,7 +25,7 @@ define <2 x i256> @test_shl(<2 x i256> %In) {
; X32-NEXT: retl $4
;
; X64-LABEL: test_shl:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 32(%rdi)
@@ -40,7 +40,7 @@ define <2 x i256> @test_shl(<2 x i256> %In) {
define <2 x i256> @test_srl(<2 x i256> %In) {
; X32-LABEL: test_srl:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, 60(%eax)
; X32-NEXT: movl $0, 56(%eax)
@@ -61,7 +61,7 @@ define <2 x i256> @test_srl(<2 x i256> %In) {
; X32-NEXT: retl $4
;
; X64-LABEL: test_srl:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 32(%rdi)
@@ -76,7 +76,7 @@ define <2 x i256> @test_srl(<2 x i256> %In) {
define <2 x i256> @test_sra(<2 x i256> %In) {
; X32-LABEL: test_sra:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, 60(%eax)
@@ -107,7 +107,7 @@ define <2 x i256> @test_sra(<2 x i256> %In) {
; X32-NEXT: retl $4
;
; X64-LABEL: test_sra:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx
diff --git a/test/CodeGen/X86/live-out-reg-info.ll b/test/CodeGen/X86/live-out-reg-info.ll
index b838065beea..e4644665d65 100644
--- a/test/CodeGen/X86/live-out-reg-info.ll
+++ b/test/CodeGen/X86/live-out-reg-info.ll
@@ -8,13 +8,13 @@ declare void @qux()
define void @foo(i32 %a) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: shrl $23, %edi
; CHECK-NEXT: btl $8, %edi
; CHECK-NEXT: jb .LBB0_2
-; CHECK-NEXT: # BB#1: # %true
+; CHECK-NEXT: # %bb.1: # %true
; CHECK-NEXT: callq qux
; CHECK-NEXT: .LBB0_2: # %false
; CHECK-NEXT: popq %rax
diff --git a/test/CodeGen/X86/load-combine.ll b/test/CodeGen/X86/load-combine.ll
index d1f5f41ac7b..c943b6d5ed7 100644
--- a/test/CodeGen/X86/load-combine.ll
+++ b/test/CodeGen/X86/load-combine.ll
@@ -8,13 +8,13 @@
; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
define i32 @load_i32_by_i8(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl (%rdi), %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
@@ -42,26 +42,26 @@ define i32 @load_i32_by_i8(i32* %arg) {
; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
define i32 @load_i32_by_i8_bswap(i32* %arg) {
; BSWAP-LABEL: load_i32_by_i8_bswap:
-; BSWAP: # BB#0:
+; BSWAP: # %bb.0:
; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
; BSWAP-NEXT: movl (%eax), %eax
; BSWAP-NEXT: bswapl %eax
; BSWAP-NEXT: retl
;
; MOVBE-LABEL: load_i32_by_i8_bswap:
-; MOVBE: # BB#0:
+; MOVBE: # %bb.0:
; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
; MOVBE-NEXT: movbel (%eax), %eax
; MOVBE-NEXT: retl
;
; BSWAP64-LABEL: load_i32_by_i8_bswap:
-; BSWAP64: # BB#0:
+; BSWAP64: # %bb.0:
; BSWAP64-NEXT: movl (%rdi), %eax
; BSWAP64-NEXT: bswapl %eax
; BSWAP64-NEXT: retq
;
; MOVBE64-LABEL: load_i32_by_i8_bswap:
-; MOVBE64: # BB#0:
+; MOVBE64: # %bb.0:
; MOVBE64-NEXT: movbel (%rdi), %eax
; MOVBE64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
@@ -89,13 +89,13 @@ define i32 @load_i32_by_i8_bswap(i32* %arg) {
; (i32) p[0] | ((i32) p[1] << 16)
define i32 @load_i32_by_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i16:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl (%rdi), %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i16*
@@ -114,13 +114,13 @@ define i32 @load_i32_by_i16(i32* %arg) {
; (i32) p_16[0] | ((i32) p[2] << 16) | ((i32) p[3] << 24)
define i32 @load_i32_by_i16_i8(i32* %arg) {
; CHECK-LABEL: load_i32_by_i16_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i16_i8:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl (%rdi), %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i16*
@@ -145,13 +145,13 @@ define i32 @load_i32_by_i16_i8(i32* %arg) {
; (i32) ((i16) p[0] | ((i16) p[1] << 8)) | (((i32) ((i16) p[3] | ((i16) p[4] << 8)) << 16)
define i32 @load_i32_by_i16_by_i8(i32* %arg) {
; CHECK-LABEL: load_i32_by_i16_by_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i16_by_i8:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl (%rdi), %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
@@ -181,26 +181,26 @@ define i32 @load_i32_by_i16_by_i8(i32* %arg) {
; ((i32) (((i16) p[0] << 8) | (i16) p[1]) << 16) | (i32) (((i16) p[3] << 8) | (i16) p[4])
define i32 @load_i32_by_i16_by_i8_bswap(i32* %arg) {
; BSWAP-LABEL: load_i32_by_i16_by_i8_bswap:
-; BSWAP: # BB#0:
+; BSWAP: # %bb.0:
; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
; BSWAP-NEXT: movl (%eax), %eax
; BSWAP-NEXT: bswapl %eax
; BSWAP-NEXT: retl
;
; MOVBE-LABEL: load_i32_by_i16_by_i8_bswap:
-; MOVBE: # BB#0:
+; MOVBE: # %bb.0:
; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
; MOVBE-NEXT: movbel (%eax), %eax
; MOVBE-NEXT: retl
;
; BSWAP64-LABEL: load_i32_by_i16_by_i8_bswap:
-; BSWAP64: # BB#0:
+; BSWAP64: # %bb.0:
; BSWAP64-NEXT: movl (%rdi), %eax
; BSWAP64-NEXT: bswapl %eax
; BSWAP64-NEXT: retq
;
; MOVBE64-LABEL: load_i32_by_i16_by_i8_bswap:
-; MOVBE64: # BB#0:
+; MOVBE64: # %bb.0:
; MOVBE64-NEXT: movbel (%rdi), %eax
; MOVBE64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
@@ -230,14 +230,14 @@ define i32 @load_i32_by_i16_by_i8_bswap(i32* %arg) {
; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
define i64 @load_i64_by_i8(i64* %arg) {
; CHECK-LABEL: load_i64_by_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl (%ecx), %eax
; CHECK-NEXT: movl 4(%ecx), %edx
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i64_by_i8:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movq (%rdi), %rax
; CHECK64-NEXT: retq
%tmp = bitcast i64* %arg to i8*
@@ -285,7 +285,7 @@ define i64 @load_i64_by_i8(i64* %arg) {
; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
define i64 @load_i64_by_i8_bswap(i64* %arg) {
; BSWAP-LABEL: load_i64_by_i8_bswap:
-; BSWAP: # BB#0:
+; BSWAP: # %bb.0:
; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
; BSWAP-NEXT: movl (%eax), %edx
; BSWAP-NEXT: movl 4(%eax), %eax
@@ -294,20 +294,20 @@ define i64 @load_i64_by_i8_bswap(i64* %arg) {
; BSWAP-NEXT: retl
;
; MOVBE-LABEL: load_i64_by_i8_bswap:
-; MOVBE: # BB#0:
+; MOVBE: # %bb.0:
; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; MOVBE-NEXT: movbel 4(%ecx), %eax
; MOVBE-NEXT: movbel (%ecx), %edx
; MOVBE-NEXT: retl
;
; BSWAP64-LABEL: load_i64_by_i8_bswap:
-; BSWAP64: # BB#0:
+; BSWAP64: # %bb.0:
; BSWAP64-NEXT: movq (%rdi), %rax
; BSWAP64-NEXT: bswapq %rax
; BSWAP64-NEXT: retq
;
; MOVBE64-LABEL: load_i64_by_i8_bswap:
-; MOVBE64: # BB#0:
+; MOVBE64: # %bb.0:
; MOVBE64-NEXT: movbeq (%rdi), %rax
; MOVBE64-NEXT: retq
%tmp = bitcast i64* %arg to i8*
@@ -358,7 +358,7 @@ define i64 @load_i64_by_i8_bswap(i64* %arg) {
; x | res
define i32 @load_i32_by_i8_bswap_uses(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_bswap_uses:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %esi, -8
@@ -379,7 +379,7 @@ define i32 @load_i32_by_i8_bswap_uses(i32* %arg) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_bswap_uses:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl (%rdi), %eax
; CHECK64-NEXT: shll $24, %eax
; CHECK64-NEXT: movzbl 1(%rdi), %ecx
@@ -422,7 +422,7 @@ define i32 @load_i32_by_i8_bswap_uses(i32* %arg) {
; ((i32) p0 << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
define i32 @load_i32_by_i8_bswap_volatile(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_bswap_volatile:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl (%eax), %ecx
; CHECK-NEXT: shll $24, %ecx
@@ -437,7 +437,7 @@ define i32 @load_i32_by_i8_bswap_volatile(i32* %arg) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_bswap_volatile:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl (%rdi), %eax
; CHECK64-NEXT: shll $24, %eax
; CHECK64-NEXT: movzbl 1(%rdi), %ecx
@@ -478,7 +478,7 @@ define i32 @load_i32_by_i8_bswap_volatile(i32* %arg) {
; res1 | res2
define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) {
; CHECK-LABEL: load_i32_by_i8_bswap_store_in_between:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %esi, -8
@@ -499,7 +499,7 @@ define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_bswap_store_in_between:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl (%rdi), %eax
; CHECK64-NEXT: shll $24, %eax
; CHECK64-NEXT: movzbl 1(%rdi), %ecx
@@ -540,7 +540,7 @@ define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) {
; ((i32) p[0] << 24) | ((i32) q[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
define i32 @load_i32_by_i8_bswap_unrelated_load(i32* %arg, i32* %arg1) {
; CHECK-LABEL: load_i32_by_i8_bswap_unrelated_load:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movzbl (%ecx), %edx
@@ -556,7 +556,7 @@ define i32 @load_i32_by_i8_bswap_unrelated_load(i32* %arg, i32* %arg1) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_bswap_unrelated_load:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl (%rdi), %eax
; CHECK64-NEXT: shll $24, %eax
; CHECK64-NEXT: movzbl 1(%rsi), %ecx
@@ -595,13 +595,13 @@ define i32 @load_i32_by_i8_bswap_unrelated_load(i32* %arg, i32* %arg1) {
; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl 1(%eax), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_nonzero_offset:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl 1(%rdi), %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
@@ -630,13 +630,13 @@ define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_neg_offset:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl -4(%eax), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_neg_offset:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl -4(%rdi), %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
@@ -665,26 +665,26 @@ define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
; BSWAP-LABEL: load_i32_by_i8_nonzero_offset_bswap:
-; BSWAP: # BB#0:
+; BSWAP: # %bb.0:
; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
; BSWAP-NEXT: movl 1(%eax), %eax
; BSWAP-NEXT: bswapl %eax
; BSWAP-NEXT: retl
;
; MOVBE-LABEL: load_i32_by_i8_nonzero_offset_bswap:
-; MOVBE: # BB#0:
+; MOVBE: # %bb.0:
; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
; MOVBE-NEXT: movbel 1(%eax), %eax
; MOVBE-NEXT: retl
;
; BSWAP64-LABEL: load_i32_by_i8_nonzero_offset_bswap:
-; BSWAP64: # BB#0:
+; BSWAP64: # %bb.0:
; BSWAP64-NEXT: movl 1(%rdi), %eax
; BSWAP64-NEXT: bswapl %eax
; BSWAP64-NEXT: retq
;
; MOVBE64-LABEL: load_i32_by_i8_nonzero_offset_bswap:
-; MOVBE64: # BB#0:
+; MOVBE64: # %bb.0:
; MOVBE64-NEXT: movbel 1(%rdi), %eax
; MOVBE64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
@@ -713,26 +713,26 @@ define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
; BSWAP-LABEL: load_i32_by_i8_neg_offset_bswap:
-; BSWAP: # BB#0:
+; BSWAP: # %bb.0:
; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
; BSWAP-NEXT: movl -4(%eax), %eax
; BSWAP-NEXT: bswapl %eax
; BSWAP-NEXT: retl
;
; MOVBE-LABEL: load_i32_by_i8_neg_offset_bswap:
-; MOVBE: # BB#0:
+; MOVBE: # %bb.0:
; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
; MOVBE-NEXT: movbel -4(%eax), %eax
; MOVBE-NEXT: retl
;
; BSWAP64-LABEL: load_i32_by_i8_neg_offset_bswap:
-; BSWAP64: # BB#0:
+; BSWAP64: # %bb.0:
; BSWAP64-NEXT: movl -4(%rdi), %eax
; BSWAP64-NEXT: bswapl %eax
; BSWAP64-NEXT: retq
;
; MOVBE64-LABEL: load_i32_by_i8_neg_offset_bswap:
-; MOVBE64: # BB#0:
+; MOVBE64: # %bb.0:
; MOVBE64-NEXT: movbel -4(%rdi), %eax
; MOVBE64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
@@ -761,7 +761,7 @@ define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
; ((i32) p[i] << 24) | ((i32) p[i + 1] << 16) | ((i32) p[i + 2] << 8) | (i32) p[i + 3]
define i32 @load_i32_by_i8_bswap_base_index_offset(i32* %arg, i32 %arg1) {
; BSWAP-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; BSWAP: # BB#0:
+; BSWAP: # %bb.0:
; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %ecx
; BSWAP-NEXT: movl (%ecx,%eax), %eax
@@ -769,21 +769,21 @@ define i32 @load_i32_by_i8_bswap_base_index_offset(i32* %arg, i32 %arg1) {
; BSWAP-NEXT: retl
;
; MOVBE-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; MOVBE: # BB#0:
+; MOVBE: # %bb.0:
; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; MOVBE-NEXT: movbel (%ecx,%eax), %eax
; MOVBE-NEXT: retl
;
; BSWAP64-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; BSWAP64: # BB#0:
+; BSWAP64: # %bb.0:
; BSWAP64-NEXT: movslq %esi, %rax
; BSWAP64-NEXT: movl (%rdi,%rax), %eax
; BSWAP64-NEXT: bswapl %eax
; BSWAP64-NEXT: retq
;
; MOVBE64-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; MOVBE64: # BB#0:
+; MOVBE64: # %bb.0:
; MOVBE64-NEXT: movslq %esi, %rax
; MOVBE64-NEXT: movbel (%rdi,%rax), %eax
; MOVBE64-NEXT: retq
@@ -815,14 +815,14 @@ define i32 @load_i32_by_i8_bswap_base_index_offset(i32* %arg, i32 %arg1) {
; Verify that we don't crash handling shl i32 %conv57, 32
define void @shift_i32_by_32(i8* %src1, i8* %src2, i64* %dst) {
; CHECK-LABEL: shift_i32_by_32:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl $-1, 4(%eax)
; CHECK-NEXT: movl $-1, (%eax)
; CHECK-NEXT: retl
;
; CHECK64-LABEL: shift_i32_by_32:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movq $-1, (%rdx)
; CHECK64-NEXT: retq
entry:
@@ -846,26 +846,26 @@ declare i16 @llvm.bswap.i16(i16)
; (i32) bswap(p[1]) | (i32) bswap(p[0] << 16)
define i32 @load_i32_by_bswap_i16(i32* %arg) {
; BSWAP-LABEL: load_i32_by_bswap_i16:
-; BSWAP: # BB#0:
+; BSWAP: # %bb.0:
; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
; BSWAP-NEXT: movl (%eax), %eax
; BSWAP-NEXT: bswapl %eax
; BSWAP-NEXT: retl
;
; MOVBE-LABEL: load_i32_by_bswap_i16:
-; MOVBE: # BB#0:
+; MOVBE: # %bb.0:
; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
; MOVBE-NEXT: movbel (%eax), %eax
; MOVBE-NEXT: retl
;
; BSWAP64-LABEL: load_i32_by_bswap_i16:
-; BSWAP64: # BB#0:
+; BSWAP64: # %bb.0:
; BSWAP64-NEXT: movl (%rdi), %eax
; BSWAP64-NEXT: bswapl %eax
; BSWAP64-NEXT: retq
;
; MOVBE64-LABEL: load_i32_by_bswap_i16:
-; MOVBE64: # BB#0:
+; MOVBE64: # %bb.0:
; MOVBE64-NEXT: movbel (%rdi), %eax
; MOVBE64-NEXT: retq
%tmp = bitcast i32* %arg to i16*
@@ -885,13 +885,13 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) {
; (i32) p[0] | (sext(p[1] << 16) to i32)
define i32 @load_i32_by_sext_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_sext_i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_sext_i16:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl (%rdi), %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i16*
@@ -910,14 +910,14 @@ define i32 @load_i32_by_sext_i16(i32* %arg) {
; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
; CHECK-LABEL: load_i32_by_i8_base_offset_index:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 12(%eax,%ecx), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_base_offset_index:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl %esi, %eax
; CHECK64-NEXT: movl 12(%rdi,%rax), %eax
; CHECK64-NEXT: retq
@@ -955,14 +955,14 @@ define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 13(%eax,%ecx), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_base_offset_index_2:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl %esi, %eax
; CHECK64-NEXT: movl 13(%rdi,%rax), %eax
; CHECK64-NEXT: retq
@@ -1011,14 +1011,14 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
; to zext and aext loads.
define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) {
; CHECK-LABEL: load_i32_by_i8_zaext_loads:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 12(%eax,%ecx), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_zaext_loads:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl %esi, %eax
; CHECK64-NEXT: movl 12(%rdi,%rax), %eax
; CHECK64-NEXT: retq
@@ -1067,14 +1067,14 @@ define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) {
; (i32) p0[12] | ((i32) p1[12] << 8) | ((i32) p2[12] << 16) | ((i32) p3[12] << 24)
define i32 @load_i32_by_i8_zsext_loads(i8* %arg, i32 %arg1) {
; CHECK-LABEL: load_i32_by_i8_zsext_loads:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 12(%eax,%ecx), %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_zsext_loads:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl %esi, %eax
; CHECK64-NEXT: movl 12(%rdi,%rax), %eax
; CHECK64-NEXT: retq
@@ -1115,7 +1115,7 @@ define i32 @load_i32_by_i8_zsext_loads(i8* %arg, i32 %arg1) {
; (i32) p[0] | ((i32) p[1] << 8)
define i32 @zext_load_i32_by_i8(i32* %arg) {
; CHECK-LABEL: zext_load_i32_by_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl (%eax), %ecx
; CHECK-NEXT: movzbl 1(%eax), %eax
@@ -1124,7 +1124,7 @@ define i32 @zext_load_i32_by_i8(i32* %arg) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: zext_load_i32_by_i8:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl (%rdi), %ecx
; CHECK64-NEXT: movzbl 1(%rdi), %eax
; CHECK64-NEXT: shll $8, %eax
@@ -1146,7 +1146,7 @@ define i32 @zext_load_i32_by_i8(i32* %arg) {
; ((i32) p[0] << 8) | ((i32) p[1] << 16)
define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl (%eax), %ecx
; CHECK-NEXT: shll $8, %ecx
@@ -1156,7 +1156,7 @@ define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: zext_load_i32_by_i8_shl_8:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl (%rdi), %ecx
; CHECK64-NEXT: shll $8, %ecx
; CHECK64-NEXT: movzbl 1(%rdi), %eax
@@ -1180,7 +1180,7 @@ define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
; ((i32) p[0] << 16) | ((i32) p[1] << 24)
define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl (%eax), %ecx
; CHECK-NEXT: shll $16, %ecx
@@ -1190,7 +1190,7 @@ define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: zext_load_i32_by_i8_shl_16:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl (%rdi), %ecx
; CHECK64-NEXT: shll $16, %ecx
; CHECK64-NEXT: movzbl 1(%rdi), %eax
@@ -1214,7 +1214,7 @@ define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
; (i32) p[1] | ((i32) p[0] << 8)
define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
; CHECK-LABEL: zext_load_i32_by_i8_bswap:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl 1(%eax), %ecx
; CHECK-NEXT: movzbl (%eax), %eax
@@ -1223,7 +1223,7 @@ define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: zext_load_i32_by_i8_bswap:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl 1(%rdi), %ecx
; CHECK64-NEXT: movzbl (%rdi), %eax
; CHECK64-NEXT: shll $8, %eax
@@ -1245,7 +1245,7 @@ define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
; ((i32) p[1] << 8) | ((i32) p[0] << 16)
define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl 1(%eax), %ecx
; CHECK-NEXT: shll $8, %ecx
@@ -1255,7 +1255,7 @@ define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: zext_load_i32_by_i8_bswap_shl_8:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl 1(%rdi), %ecx
; CHECK64-NEXT: shll $8, %ecx
; CHECK64-NEXT: movzbl (%rdi), %eax
@@ -1279,7 +1279,7 @@ define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
; ((i32) p[1] << 16) | ((i32) p[0] << 24)
define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl 1(%eax), %ecx
; CHECK-NEXT: shll $16, %ecx
@@ -1289,7 +1289,7 @@ define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
; CHECK-NEXT: retl
;
; CHECK64-LABEL: zext_load_i32_by_i8_bswap_shl_16:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movzbl 1(%rdi), %ecx
; CHECK64-NEXT: shll $16, %ecx
; CHECK64-NEXT: movzbl (%rdi), %eax
diff --git a/test/CodeGen/X86/logical-load-fold.ll b/test/CodeGen/X86/logical-load-fold.ll
index 5f06fce1b7b..3890c186941 100644
--- a/test/CodeGen/X86/logical-load-fold.ll
+++ b/test/CodeGen/X86/logical-load-fold.ll
@@ -12,14 +12,14 @@
define double @load_double_no_fold(double %x, double %y) {
; SSE2-LABEL: load_double_no_fold:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: cmplesd %xmm0, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: andpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: load_double_no_fold:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
@@ -33,14 +33,14 @@ define double @load_double_no_fold(double %x, double %y) {
define float @load_float_no_fold(float %x, float %y) {
; SSE2-LABEL: load_float_no_fold:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: cmpless %xmm0, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: andps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: load_float_no_fold:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpless %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/longlong-deadload.ll b/test/CodeGen/X86/longlong-deadload.ll
index 01888f07306..4166b0f204e 100644
--- a/test/CodeGen/X86/longlong-deadload.ll
+++ b/test/CodeGen/X86/longlong-deadload.ll
@@ -4,7 +4,7 @@
define void @test(i64* %P) nounwind {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %ecx
; CHECK-NEXT: xorl $1, %ecx
diff --git a/test/CodeGen/X86/loop-search.ll b/test/CodeGen/X86/loop-search.ll
index 85ae7518a37..e0a81d28a70 100644
--- a/test/CodeGen/X86/loop-search.ll
+++ b/test/CodeGen/X86/loop-search.ll
@@ -6,10 +6,10 @@
define zeroext i1 @search(i32 %needle, i32* nocapture readonly %haystack, i32 %count) {
; CHECK-LABEL: search:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: jle LBB0_1
-; CHECK-NEXT: ## BB#4: ## %for.body.preheader
+; CHECK-NEXT: ## %bb.4: ## %for.body.preheader
; CHECK-NEXT: movslq %edx, %rax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: .p2align 4, 0x90
@@ -17,13 +17,13 @@ define zeroext i1 @search(i32 %needle, i32* nocapture readonly %haystack, i32 %c
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: cmpl %edi, (%rsi,%rcx,4)
; CHECK-NEXT: je LBB0_6
-; CHECK-NEXT: ## BB#2: ## %for.cond
+; CHECK-NEXT: ## %bb.2: ## %for.cond
; CHECK-NEXT: ## in Loop: Header=BB0_5 Depth=1
; CHECK-NEXT: incq %rcx
; CHECK-NEXT: cmpq %rax, %rcx
; CHECK-NEXT: jl LBB0_5
-; ### FIXME: BB#3 and LBB0_1 should be merged
-; CHECK-NEXT: ## BB#3:
+; ### FIXME: %bb.3 and LBB0_1 should be merged
+; CHECK-NEXT: ## %bb.3:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/lower-bitcast.ll b/test/CodeGen/X86/lower-bitcast.ll
index 79f90f49c7c..11271f15529 100644
--- a/test/CodeGen/X86/lower-bitcast.ll
+++ b/test/CodeGen/X86/lower-bitcast.ll
@@ -8,14 +8,14 @@
define double @test1(double %A) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; CHECK-NEXT: paddd {{.*}}(%rip), %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test1:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: paddd {{.*}}(%rip), %xmm0
; CHECK-WIDE-NEXT: retq
%1 = bitcast double %A to <2 x i32>
@@ -26,12 +26,12 @@ define double @test1(double %A) {
define double @test2(double %A, double %B) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: paddd %xmm1, %xmm0
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test2:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: paddd %xmm1, %xmm0
; CHECK-WIDE-NEXT: retq
%1 = bitcast double %A to <2 x i32>
@@ -43,14 +43,14 @@ define double @test2(double %A, double %B) {
define i64 @test3(i64 %A) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %xmm0
; CHECK-NEXT: addps {{.*}}(%rip), %xmm0
; CHECK-NEXT: movq %xmm0, %rax
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test3:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: movq %rdi, %xmm0
; CHECK-WIDE-NEXT: addps {{.*}}(%rip), %xmm0
; CHECK-WIDE-NEXT: movq %xmm0, %rax
@@ -66,7 +66,7 @@ define i64 @test3(i64 %A) {
define i64 @test4(i64 %A) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; CHECK-NEXT: paddd {{.*}}(%rip), %xmm0
@@ -75,7 +75,7 @@ define i64 @test4(i64 %A) {
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test4:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: movq %rdi, %xmm0
; CHECK-WIDE-NEXT: paddd {{.*}}(%rip), %xmm0
; CHECK-WIDE-NEXT: movq %xmm0, %rax
@@ -88,12 +88,12 @@ define i64 @test4(i64 %A) {
define double @test5(double %A) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test5:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: addps {{.*}}(%rip), %xmm0
; CHECK-WIDE-NEXT: retq
%1 = bitcast double %A to <2 x float>
@@ -107,14 +107,14 @@ define double @test5(double %A) {
define double @test6(double %A) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; CHECK-NEXT: paddw {{.*}}(%rip), %xmm0
; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test6:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: paddw {{.*}}(%rip), %xmm0
; CHECK-WIDE-NEXT: retq
%1 = bitcast double %A to <4 x i16>
@@ -125,12 +125,12 @@ define double @test6(double %A) {
define double @test7(double %A, double %B) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: paddw %xmm1, %xmm0
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test7:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: paddw %xmm1, %xmm0
; CHECK-WIDE-NEXT: retq
%1 = bitcast double %A to <4 x i16>
@@ -146,14 +146,14 @@ define double @test7(double %A, double %B) {
define double @test8(double %A) {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; CHECK-NEXT: paddb {{.*}}(%rip), %xmm0
; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test8:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: paddb {{.*}}(%rip), %xmm0
; CHECK-WIDE-NEXT: retq
%1 = bitcast double %A to <8 x i8>
@@ -164,12 +164,12 @@ define double @test8(double %A) {
define double @test9(double %A, double %B) {
; CHECK-LABEL: test9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: paddb %xmm1, %xmm0
; CHECK-NEXT: retq
;
; CHECK-WIDE-LABEL: test9:
-; CHECK-WIDE: # BB#0:
+; CHECK-WIDE: # %bb.0:
; CHECK-WIDE-NEXT: paddb %xmm1, %xmm0
; CHECK-WIDE-NEXT: retq
%1 = bitcast double %A to <8 x i8>
diff --git a/test/CodeGen/X86/lower-vec-shift-2.ll b/test/CodeGen/X86/lower-vec-shift-2.ll
index a617f44d3f9..aeaac0e0e9d 100644
--- a/test/CodeGen/X86/lower-vec-shift-2.ll
+++ b/test/CodeGen/X86/lower-vec-shift-2.ll
@@ -4,14 +4,14 @@
define <8 x i16> @test1(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test1:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psllw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test1:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -23,14 +23,14 @@ entry:
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: pslld %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT: vpslld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -42,12 +42,12 @@ entry:
define <2 x i64> @test3(<2 x i64> %A, <2 x i64> %B) {
; SSE2-LABEL: test3:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psllq %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test3:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -58,14 +58,14 @@ entry:
define <8 x i16> @test4(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test4:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psrlw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test4:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -77,14 +77,14 @@ entry:
define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test5:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: psrld %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test5:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -96,12 +96,12 @@ entry:
define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) {
; SSE2-LABEL: test6:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrlq %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test6:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -112,14 +112,14 @@ entry:
define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test7:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psraw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test7:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -131,14 +131,14 @@ entry:
define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: psrad %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/lower-vec-shift.ll b/test/CodeGen/X86/lower-vec-shift.ll
index 936de7c761a..8474f7e7530 100644
--- a/test/CodeGen/X86/lower-vec-shift.ll
+++ b/test/CodeGen/X86/lower-vec-shift.ll
@@ -10,7 +10,7 @@
define <8 x i16> @test1(<8 x i16> %a) {
; SSE-LABEL: test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $3, %xmm1
; SSE-NEXT: psrlw $2, %xmm0
@@ -18,14 +18,14 @@ define <8 x i16> @test1(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlw $3, %xmm0, %xmm1
; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $3, %xmm0, %xmm1
; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm0
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
@@ -36,7 +36,7 @@ define <8 x i16> @test1(<8 x i16> %a) {
define <8 x i16> @test2(<8 x i16> %a) {
; SSE-LABEL: test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $3, %xmm1
; SSE-NEXT: psrlw $2, %xmm0
@@ -44,14 +44,14 @@ define <8 x i16> @test2(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
; AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm1
; AVX2-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
@@ -62,7 +62,7 @@ define <8 x i16> @test2(<8 x i16> %a) {
define <4 x i32> @test3(<4 x i32> %a) {
; SSE-LABEL: test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $3, %xmm1
; SSE-NEXT: psrld $2, %xmm0
@@ -70,14 +70,14 @@ define <4 x i32> @test3(<4 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $3, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $2, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
%lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
@@ -86,7 +86,7 @@ define <4 x i32> @test3(<4 x i32> %a) {
define <4 x i32> @test4(<4 x i32> %a) {
; SSE-LABEL: test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $3, %xmm1
; SSE-NEXT: psrld $2, %xmm0
@@ -94,14 +94,14 @@ define <4 x i32> @test4(<4 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $2, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $3, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
%lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
@@ -110,7 +110,7 @@ define <4 x i32> @test4(<4 x i32> %a) {
define <8 x i16> @test5(<8 x i16> %a) {
; SSE-LABEL: test5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psraw $3, %xmm1
; SSE-NEXT: psraw $2, %xmm0
@@ -118,14 +118,14 @@ define <8 x i16> @test5(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test5:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $3, %xmm0, %xmm1
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test5:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $3, %xmm0, %xmm1
; AVX2-NEXT: vpsraw $2, %xmm0, %xmm0
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
@@ -136,7 +136,7 @@ define <8 x i16> @test5(<8 x i16> %a) {
define <8 x i16> @test6(<8 x i16> %a) {
; SSE-LABEL: test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psraw $3, %xmm1
; SSE-NEXT: psraw $2, %xmm0
@@ -144,14 +144,14 @@ define <8 x i16> @test6(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test6:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
; AVX1-NEXT: vpsraw $3, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test6:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $2, %xmm0, %xmm1
; AVX2-NEXT: vpsraw $3, %xmm0, %xmm0
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
@@ -162,7 +162,7 @@ define <8 x i16> @test6(<8 x i16> %a) {
define <4 x i32> @test7(<4 x i32> %a) {
; SSE-LABEL: test7:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $3, %xmm1
; SSE-NEXT: psrad $2, %xmm0
@@ -170,14 +170,14 @@ define <4 x i32> @test7(<4 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test7:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $3, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $2, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test7:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
%lshr = ashr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
@@ -186,7 +186,7 @@ define <4 x i32> @test7(<4 x i32> %a) {
define <4 x i32> @test8(<4 x i32> %a) {
; SSE-LABEL: test8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $3, %xmm1
; SSE-NEXT: psrad $2, %xmm0
@@ -194,14 +194,14 @@ define <4 x i32> @test8(<4 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $2, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $3, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
%lshr = ashr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
diff --git a/test/CodeGen/X86/lower-vec-shuffle-bug.ll b/test/CodeGen/X86/lower-vec-shuffle-bug.ll
index 7a081b55686..0ae2fc1faba 100644
--- a/test/CodeGen/X86/lower-vec-shuffle-bug.ll
+++ b/test/CodeGen/X86/lower-vec-shuffle-bug.ll
@@ -3,7 +3,7 @@
define <4 x double> @test1(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -13,7 +13,7 @@ entry:
define <4 x double> @test2(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -23,7 +23,7 @@ entry:
define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -33,7 +33,7 @@ entry:
define <4 x double> @test4(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/lwp-intrinsics-x86_64.ll b/test/CodeGen/X86/lwp-intrinsics-x86_64.ll
index 9ee95267fc3..32206989d71 100644
--- a/test/CodeGen/X86/lwp-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/lwp-intrinsics-x86_64.ll
@@ -7,7 +7,7 @@
define i8 @test_lwpins64_rri(i64 %a0, i32 %a1) nounwind {
; X64-LABEL: test_lwpins64_rri:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: lwpins $-1985229329, %esi, %rdi # imm = 0x89ABCDEF
; X64-NEXT: setb %al
; X64-NEXT: retq
@@ -17,7 +17,7 @@ define i8 @test_lwpins64_rri(i64 %a0, i32 %a1) nounwind {
define i8 @test_lwpins64_rmi(i64 %a0, i32 *%p1) nounwind {
; X64-LABEL: test_lwpins64_rmi:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: lwpins $1985229328, (%rsi), %rdi # imm = 0x76543210
; X64-NEXT: setb %al
; X64-NEXT: retq
@@ -28,7 +28,7 @@ define i8 @test_lwpins64_rmi(i64 %a0, i32 *%p1) nounwind {
define void @test_lwpval64_rri(i64 %a0, i32 %a1) nounwind {
; X64-LABEL: test_lwpval64_rri:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: lwpval $-19088744, %esi, %rdi # imm = 0xFEDCBA98
; X64-NEXT: retq
tail call void @llvm.x86.lwpval64(i64 %a0, i32 %a1, i32 4275878552)
@@ -37,7 +37,7 @@ define void @test_lwpval64_rri(i64 %a0, i32 %a1) nounwind {
define void @test_lwpval64_rmi(i64 %a0, i32 *%p1) nounwind {
; X64-LABEL: test_lwpval64_rmi:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: lwpval $305419896, (%rsi), %rdi # imm = 0x12345678
; X64-NEXT: retq
%a1 = load i32, i32 *%p1
diff --git a/test/CodeGen/X86/lwp-intrinsics.ll b/test/CodeGen/X86/lwp-intrinsics.ll
index c949bc80608..f693b610614 100644
--- a/test/CodeGen/X86/lwp-intrinsics.ll
+++ b/test/CodeGen/X86/lwp-intrinsics.ll
@@ -12,13 +12,13 @@
define void @test_llwpcb(i8 *%a0) nounwind {
; X86-LABEL: test_llwpcb:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: llwpcb %eax
; X86-NEXT: retl
;
; X64-LABEL: test_llwpcb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: llwpcb %rdi
; X64-NEXT: retq
tail call void @llvm.x86.llwpcb(i8 *%a0)
@@ -27,12 +27,12 @@ define void @test_llwpcb(i8 *%a0) nounwind {
define i8* @test_slwpcb(i8 *%a0) nounwind {
; X86-LABEL: test_slwpcb:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: slwpcb %eax
; X86-NEXT: retl
;
; X64-LABEL: test_slwpcb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: slwpcb %rax
; X64-NEXT: retq
%1 = tail call i8* @llvm.x86.slwpcb()
@@ -41,7 +41,7 @@ define i8* @test_slwpcb(i8 *%a0) nounwind {
define i8 @test_lwpins32_rri(i32 %a0, i32 %a1) nounwind {
; X86-LABEL: test_lwpins32_rri:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: addl %ecx, %ecx
@@ -50,7 +50,7 @@ define i8 @test_lwpins32_rri(i32 %a0, i32 %a1) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_lwpins32_rri:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addl %esi, %esi
; X64-NEXT: lwpins $-1985229329, %esi, %edi # imm = 0x89ABCDEF
; X64-NEXT: setb %al
@@ -62,7 +62,7 @@ define i8 @test_lwpins32_rri(i32 %a0, i32 %a1) nounwind {
define i8 @test_lwpins32_rmi(i32 %a0, i32 *%p1) nounwind {
; X86-LABEL: test_lwpins32_rmi:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: lwpins $1985229328, (%eax), %ecx # imm = 0x76543210
@@ -70,7 +70,7 @@ define i8 @test_lwpins32_rmi(i32 %a0, i32 *%p1) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_lwpins32_rmi:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: lwpins $1985229328, (%rsi), %edi # imm = 0x76543210
; X64-NEXT: setb %al
; X64-NEXT: retq
@@ -81,7 +81,7 @@ define i8 @test_lwpins32_rmi(i32 %a0, i32 *%p1) nounwind {
define void @test_lwpval32_rri(i32 %a0, i32 %a1) nounwind {
; X86-LABEL: test_lwpval32_rri:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: addl %ecx, %ecx
@@ -89,7 +89,7 @@ define void @test_lwpval32_rri(i32 %a0, i32 %a1) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_lwpval32_rri:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addl %esi, %esi
; X64-NEXT: lwpval $-19088744, %esi, %edi # imm = 0xFEDCBA98
; X64-NEXT: retq
@@ -100,14 +100,14 @@ define void @test_lwpval32_rri(i32 %a0, i32 %a1) nounwind {
define void @test_lwpval32_rmi(i32 %a0, i32 *%p1) nounwind {
; X86-LABEL: test_lwpval32_rmi:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: lwpval $305419896, (%eax), %ecx # imm = 0x12345678
; X86-NEXT: retl
;
; X64-LABEL: test_lwpval32_rmi:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: lwpval $305419896, (%rsi), %edi # imm = 0x12345678
; X64-NEXT: retq
%a1 = load i32, i32 *%p1
diff --git a/test/CodeGen/X86/lzcnt-schedule.ll b/test/CodeGen/X86/lzcnt-schedule.ll
index 10f1935f88f..64874bdee81 100644
--- a/test/CodeGen/X86/lzcnt-schedule.ll
+++ b/test/CodeGen/X86/lzcnt-schedule.ll
@@ -9,7 +9,7 @@
define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-LABEL: test_ctlz_i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: lzcntw (%rsi), %cx
; GENERIC-NEXT: lzcntw %di, %ax
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
@@ -17,7 +17,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctlz_i16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: lzcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -25,7 +25,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctlz_i16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -33,7 +33,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctlz_i16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -41,7 +41,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctlz_i16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: lzcntw (%rsi), %cx
; BTVER2-NEXT: lzcntw %di, %ax
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
@@ -49,7 +49,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctlz_i16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: lzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -65,42 +65,42 @@ declare i16 @llvm.ctlz.i16(i16, i1)
define i32 @test_ctlz_i32(i32 %a0, i32 *%a1) {
; GENERIC-LABEL: test_ctlz_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: lzcntl (%rsi), %ecx
; GENERIC-NEXT: lzcntl %edi, %eax
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctlz_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: lzcntl (%rsi), %ecx # sched: [3:1.00]
; HASWELL-NEXT: lzcntl %edi, %eax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctlz_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: lzcntl (%rsi), %ecx # sched: [8:1.00]
; BROADWELL-NEXT: lzcntl %edi, %eax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctlz_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: lzcntl (%rsi), %ecx # sched: [8:1.00]
; SKYLAKE-NEXT: lzcntl %edi, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctlz_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: lzcntl (%rsi), %ecx
; BTVER2-NEXT: lzcntl %edi, %eax
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctlz_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: lzcntl (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -115,42 +115,42 @@ declare i32 @llvm.ctlz.i32(i32, i1)
define i64 @test_ctlz_i64(i64 %a0, i64 *%a1) {
; GENERIC-LABEL: test_ctlz_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: lzcntq (%rsi), %rcx
; GENERIC-NEXT: lzcntq %rdi, %rax
; GENERIC-NEXT: orq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctlz_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: lzcntq (%rsi), %rcx # sched: [3:1.00]
; HASWELL-NEXT: lzcntq %rdi, %rax # sched: [3:1.00]
; HASWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctlz_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: lzcntq (%rsi), %rcx # sched: [8:1.00]
; BROADWELL-NEXT: lzcntq %rdi, %rax # sched: [3:1.00]
; BROADWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctlz_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: lzcntq (%rsi), %rcx # sched: [8:1.00]
; SKYLAKE-NEXT: lzcntq %rdi, %rax # sched: [3:1.00]
; SKYLAKE-NEXT: orq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctlz_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: lzcntq (%rsi), %rcx
; BTVER2-NEXT: lzcntq %rdi, %rax
; BTVER2-NEXT: orq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctlz_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: lzcntq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
diff --git a/test/CodeGen/X86/lzcnt-zext-cmp.ll b/test/CodeGen/X86/lzcnt-zext-cmp.ll
index 5064579b511..6123bdfc0db 100644
--- a/test/CodeGen/X86/lzcnt-zext-cmp.ll
+++ b/test/CodeGen/X86/lzcnt-zext-cmp.ll
@@ -9,7 +9,7 @@
; Test one 32-bit input, output is 32-bit, no transformations expected.
define i32 @test_zext_cmp0(i32 %a) {
; ALL-LABEL: test_zext_cmp0:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: testl %edi, %edi
; ALL-NEXT: sete %al
@@ -23,7 +23,7 @@ entry:
; Test two 32-bit inputs, output is 32-bit.
define i32 @test_zext_cmp1(i32 %a, i32 %b) {
; FASTLZCNT-LABEL: test_zext_cmp1:
-; FASTLZCNT: # BB#0:
+; FASTLZCNT: # %bb.0:
; FASTLZCNT-NEXT: lzcntl %edi, %ecx
; FASTLZCNT-NEXT: lzcntl %esi, %eax
; FASTLZCNT-NEXT: orl %ecx, %eax
@@ -31,7 +31,7 @@ define i32 @test_zext_cmp1(i32 %a, i32 %b) {
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp1:
-; NOFASTLZCNT: # BB#0:
+; NOFASTLZCNT: # %bb.0:
; NOFASTLZCNT-NEXT: testl %edi, %edi
; NOFASTLZCNT-NEXT: sete %al
; NOFASTLZCNT-NEXT: testl %esi, %esi
@@ -49,7 +49,7 @@ define i32 @test_zext_cmp1(i32 %a, i32 %b) {
; Test two 64-bit inputs, output is 64-bit.
define i64 @test_zext_cmp2(i64 %a, i64 %b) {
; FASTLZCNT-LABEL: test_zext_cmp2:
-; FASTLZCNT: # BB#0:
+; FASTLZCNT: # %bb.0:
; FASTLZCNT-NEXT: lzcntq %rdi, %rcx
; FASTLZCNT-NEXT: lzcntq %rsi, %rax
; FASTLZCNT-NEXT: orl %ecx, %eax
@@ -57,7 +57,7 @@ define i64 @test_zext_cmp2(i64 %a, i64 %b) {
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp2:
-; NOFASTLZCNT: # BB#0:
+; NOFASTLZCNT: # %bb.0:
; NOFASTLZCNT-NEXT: testq %rdi, %rdi
; NOFASTLZCNT-NEXT: sete %al
; NOFASTLZCNT-NEXT: testq %rsi, %rsi
@@ -77,7 +77,7 @@ define i64 @test_zext_cmp2(i64 %a, i64 %b) {
; upper 16-bits, adding one more instruction.
define i16 @test_zext_cmp3(i16 %a, i16 %b) {
; ALL-LABEL: test_zext_cmp3:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: testw %di, %di
; ALL-NEXT: sete %al
; ALL-NEXT: testw %si, %si
@@ -96,7 +96,7 @@ define i16 @test_zext_cmp3(i16 %a, i16 %b) {
; Test two 32-bit inputs, output is 64-bit.
define i64 @test_zext_cmp4(i32 %a, i32 %b) {
; FASTLZCNT-LABEL: test_zext_cmp4:
-; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT: # %bb.0: # %entry
; FASTLZCNT-NEXT: lzcntl %edi, %ecx
; FASTLZCNT-NEXT: lzcntl %esi, %eax
; FASTLZCNT-NEXT: orl %ecx, %eax
@@ -104,7 +104,7 @@ define i64 @test_zext_cmp4(i32 %a, i32 %b) {
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp4:
-; NOFASTLZCNT: # BB#0: # %entry
+; NOFASTLZCNT: # %bb.0: # %entry
; NOFASTLZCNT-NEXT: testl %edi, %edi
; NOFASTLZCNT-NEXT: sete %al
; NOFASTLZCNT-NEXT: testl %esi, %esi
@@ -123,7 +123,7 @@ entry:
; Test two 64-bit inputs, output is 32-bit.
define i32 @test_zext_cmp5(i64 %a, i64 %b) {
; FASTLZCNT-LABEL: test_zext_cmp5:
-; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT: # %bb.0: # %entry
; FASTLZCNT-NEXT: lzcntq %rdi, %rcx
; FASTLZCNT-NEXT: lzcntq %rsi, %rax
; FASTLZCNT-NEXT: orl %ecx, %eax
@@ -132,7 +132,7 @@ define i32 @test_zext_cmp5(i64 %a, i64 %b) {
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp5:
-; NOFASTLZCNT: # BB#0: # %entry
+; NOFASTLZCNT: # %bb.0: # %entry
; NOFASTLZCNT-NEXT: testq %rdi, %rdi
; NOFASTLZCNT-NEXT: sete %al
; NOFASTLZCNT-NEXT: testq %rsi, %rsi
@@ -151,7 +151,7 @@ entry:
; Test three 32-bit inputs, output is 32-bit.
define i32 @test_zext_cmp6(i32 %a, i32 %b, i32 %c) {
; FASTLZCNT-LABEL: test_zext_cmp6:
-; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT: # %bb.0: # %entry
; FASTLZCNT-NEXT: lzcntl %edi, %eax
; FASTLZCNT-NEXT: lzcntl %esi, %ecx
; FASTLZCNT-NEXT: orl %eax, %ecx
@@ -161,7 +161,7 @@ define i32 @test_zext_cmp6(i32 %a, i32 %b, i32 %c) {
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp6:
-; NOFASTLZCNT: # BB#0: # %entry
+; NOFASTLZCNT: # %bb.0: # %entry
; NOFASTLZCNT-NEXT: testl %edi, %edi
; NOFASTLZCNT-NEXT: sete %al
; NOFASTLZCNT-NEXT: testl %esi, %esi
@@ -186,7 +186,7 @@ entry:
; %.cmp2 inputs' order is inverted.
define i32 @test_zext_cmp7(i32 %a, i32 %b, i32 %c) {
; FASTLZCNT-LABEL: test_zext_cmp7:
-; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT: # %bb.0: # %entry
; FASTLZCNT-NEXT: lzcntl %edi, %eax
; FASTLZCNT-NEXT: lzcntl %esi, %ecx
; FASTLZCNT-NEXT: orl %eax, %ecx
@@ -196,7 +196,7 @@ define i32 @test_zext_cmp7(i32 %a, i32 %b, i32 %c) {
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp7:
-; NOFASTLZCNT: # BB#0: # %entry
+; NOFASTLZCNT: # %bb.0: # %entry
; NOFASTLZCNT-NEXT: testl %edi, %edi
; NOFASTLZCNT-NEXT: sete %al
; NOFASTLZCNT-NEXT: testl %esi, %esi
@@ -220,7 +220,7 @@ entry:
; Test four 32-bit inputs, output is 32-bit.
define i32 @test_zext_cmp8(i32 %a, i32 %b, i32 %c, i32 %d) {
; FASTLZCNT-LABEL: test_zext_cmp8:
-; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT: # %bb.0: # %entry
; FASTLZCNT-NEXT: lzcntl %edi, %eax
; FASTLZCNT-NEXT: lzcntl %esi, %esi
; FASTLZCNT-NEXT: lzcntl %edx, %edx
@@ -232,7 +232,7 @@ define i32 @test_zext_cmp8(i32 %a, i32 %b, i32 %c, i32 %d) {
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp8:
-; NOFASTLZCNT: # BB#0: # %entry
+; NOFASTLZCNT: # %bb.0: # %entry
; NOFASTLZCNT-NEXT: testl %edi, %edi
; NOFASTLZCNT-NEXT: sete %dil
; NOFASTLZCNT-NEXT: testl %esi, %esi
@@ -261,7 +261,7 @@ entry:
; Test one 32-bit input, one 64-bit input, output is 32-bit.
define i32 @test_zext_cmp9(i32 %a, i64 %b) {
; FASTLZCNT-LABEL: test_zext_cmp9:
-; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT: # %bb.0: # %entry
; FASTLZCNT-NEXT: lzcntq %rsi, %rax
; FASTLZCNT-NEXT: lzcntl %edi, %ecx
; FASTLZCNT-NEXT: shrl $5, %ecx
@@ -271,7 +271,7 @@ define i32 @test_zext_cmp9(i32 %a, i64 %b) {
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp9:
-; NOFASTLZCNT: # BB#0: # %entry
+; NOFASTLZCNT: # %bb.0: # %entry
; NOFASTLZCNT-NEXT: testl %edi, %edi
; NOFASTLZCNT-NEXT: sete %al
; NOFASTLZCNT-NEXT: testq %rsi, %rsi
@@ -290,7 +290,7 @@ entry:
; Test 2 128-bit inputs, output is 32-bit, no transformations expected.
define i32 @test_zext_cmp10(i64 %a.coerce0, i64 %a.coerce1, i64 %b.coerce0, i64 %b.coerce1) {
; ALL-LABEL: test_zext_cmp10:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: orq %rsi, %rdi
; ALL-NEXT: sete %al
; ALL-NEXT: orq %rcx, %rdx
@@ -318,7 +318,7 @@ entry:
define i32 @test_zext_cmp11(double %a, double %b) "no-nans-fp-math"="true" {
;
; ALL-LABEL: test_zext_cmp11:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vxorps %xmm2, %xmm2, %xmm2
; ALL-NEXT: vucomisd %xmm2, %xmm0
; ALL-NEXT: sete %al
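
The transform behind the FASTLZCNT checks: for a 32-bit value, lzcnt returns 32 exactly when the input is zero, and since 32 has only bit 5 set, (lzcnt x) >> 5 computes x == 0 without a setcc (the shrl $5 is visible in the test_zext_cmp9 checks above). A sketch of the kind of input that drives test_zext_cmp1; the body is assumed, only the signature appears in the hunk:

    define i32 @zext_cmp_sketch(i32 %a, i32 %b) {
      ; (a == 0) | (b == 0), zero-extended: with fast lzcnt each compare
      ; becomes one lzcntl, the or combines them, and one shift extracts bit 5.
      %cmp.a = icmp eq i32 %a, 0
      %cmp.b = icmp eq i32 %b, 0
      %or = or i1 %cmp.a, %cmp.b
      %res = zext i1 %or to i32
      ret i32 %res
    }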
diff --git a/test/CodeGen/X86/machine-combiner-int-vec.ll b/test/CodeGen/X86/machine-combiner-int-vec.ll
index dc1ce77e13b..8aea7cd5f5e 100644
--- a/test/CodeGen/X86/machine-combiner-int-vec.ll
+++ b/test/CodeGen/X86/machine-combiner-int-vec.ll
@@ -5,14 +5,14 @@
define <4 x i32> @reassociate_and_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
; SSE-LABEL: reassociate_and_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_and_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm3, %xmm2, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -26,14 +26,14 @@ define <4 x i32> @reassociate_and_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>
define <4 x i32> @reassociate_or_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
; SSE-LABEL: reassociate_or_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_or_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm1
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -47,14 +47,14 @@ define <4 x i32> @reassociate_or_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %
define <4 x i32> @reassociate_xor_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
; SSE-LABEL: reassociate_xor_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: pxor %xmm3, %xmm2
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_xor_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -70,7 +70,7 @@ define <4 x i32> @reassociate_xor_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>
define <8 x i32> @reassociate_and_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
; AVX-LABEL: reassociate_and_v8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpand %ymm3, %ymm2, %ymm1
; AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -84,7 +84,7 @@ define <8 x i32> @reassociate_and_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>
define <8 x i32> @reassociate_or_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
; AVX-LABEL: reassociate_or_v8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpor %ymm3, %ymm2, %ymm1
; AVX-NEXT: vpor %ymm1, %ymm0, %ymm0
@@ -98,7 +98,7 @@ define <8 x i32> @reassociate_or_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %
define <8 x i32> @reassociate_xor_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
; AVX-LABEL: reassociate_xor_v8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpxor %ymm3, %ymm2, %ymm1
; AVX-NEXT: vpxor %ymm1, %ymm0, %ymm0
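
The machine-combiner checks above show reassociation of vector logic chains: the serial form ((x0 + x1) op x2) op x3 is rewritten as (x0 + x1) op (x2 op x3), which is why each test emits paddd followed by two independent pand/por/pxor instead of three dependent ops. A sketch of the assumed IR input for the and case:

    define <4 x i32> @reassoc_and_sketch(<4 x i32> %x0, <4 x i32> %x1,
                                         <4 x i32> %x2, <4 x i32> %x3) {
      ; Written as a serial chain; the machine combiner pairs %x2 with %x3
      ; so the pand of %xmm3, %xmm2 can issue in parallel with the paddd.
      %t0 = add <4 x i32> %x0, %x1
      %t1 = and <4 x i32> %t0, %x2
      %t2 = and <4 x i32> %t1, %x3
      ret <4 x i32> %t2
    }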
diff --git a/test/CodeGen/X86/machine-combiner-int.ll b/test/CodeGen/X86/machine-combiner-int.ll
index df35abd9534..e26b7401941 100644
--- a/test/CodeGen/X86/machine-combiner-int.ll
+++ b/test/CodeGen/X86/machine-combiner-int.ll
@@ -9,7 +9,7 @@
define i16 @reassociate_muls_i16(i16 %x0, i16 %x1, i16 %x2, i16 %x3) {
; CHECK-LABEL: reassociate_muls_i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill
; CHECK-NEXT: # kill
; CHECK-NEXT: leal (%rdi,%rsi), %eax
@@ -25,7 +25,7 @@ define i16 @reassociate_muls_i16(i16 %x0, i16 %x1, i16 %x2, i16 %x3) {
define i32 @reassociate_muls_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: reassociate_muls_i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill
; CHECK-NEXT: # kill
; CHECK-NEXT: leal (%rdi,%rsi), %eax
@@ -45,7 +45,7 @@ define i32 @reassociate_muls_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
define i64 @reassociate_muls_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
; CHECK-LABEL: reassociate_muls_i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq (%rdi,%rsi), %rax
; CHECK-NEXT: imulq %rcx, %rdx
; CHECK-NEXT: imulq %rdx, %rax
@@ -61,7 +61,7 @@ define i64 @reassociate_muls_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
define i8 @reassociate_ands_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
; CHECK-LABEL: reassociate_ands_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subb %sil, %dil
; CHECK-NEXT: andb %cl, %dl
; CHECK-NEXT: andb %dil, %dl
@@ -77,7 +77,7 @@ define i8 @reassociate_ands_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
define i32 @reassociate_ands_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: reassociate_ands_i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subl %esi, %edi
; CHECK-NEXT: andl %ecx, %edx
; CHECK-NEXT: andl %edi, %edx
@@ -91,7 +91,7 @@ define i32 @reassociate_ands_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
define i64 @reassociate_ands_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
; CHECK-LABEL: reassociate_ands_i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq %rsi, %rdi
; CHECK-NEXT: andq %rcx, %rdx
; CHECK-NEXT: andq %rdi, %rdx
@@ -108,7 +108,7 @@ define i64 @reassociate_ands_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
define i8 @reassociate_ors_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
; CHECK-LABEL: reassociate_ors_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subb %sil, %dil
; CHECK-NEXT: orb %cl, %dl
; CHECK-NEXT: orb %dil, %dl
@@ -124,7 +124,7 @@ define i8 @reassociate_ors_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
define i32 @reassociate_ors_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: reassociate_ors_i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subl %esi, %edi
; CHECK-NEXT: orl %ecx, %edx
; CHECK-NEXT: orl %edi, %edx
@@ -138,7 +138,7 @@ define i32 @reassociate_ors_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
define i64 @reassociate_ors_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
; CHECK-LABEL: reassociate_ors_i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq %rsi, %rdi
; CHECK-NEXT: orq %rcx, %rdx
; CHECK-NEXT: orq %rdi, %rdx
@@ -155,7 +155,7 @@ define i64 @reassociate_ors_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
define i8 @reassociate_xors_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
; CHECK-LABEL: reassociate_xors_i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subb %sil, %dil
; CHECK-NEXT: xorb %cl, %dl
; CHECK-NEXT: xorb %dil, %dl
@@ -171,7 +171,7 @@ define i8 @reassociate_xors_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
define i32 @reassociate_xors_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: reassociate_xors_i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subl %esi, %edi
; CHECK-NEXT: xorl %ecx, %edx
; CHECK-NEXT: xorl %edi, %edx
@@ -185,7 +185,7 @@ define i32 @reassociate_xors_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
define i64 @reassociate_xors_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
; CHECK-LABEL: reassociate_xors_i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq %rsi, %rdi
; CHECK-NEXT: xorq %rcx, %rdx
; CHECK-NEXT: xorq %rdi, %rdx
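
The scalar tests follow the same reassociation shape, with a sub (or an add selected as lea for the mul tests) at the head of the chain. A sketch matching the subl/andl/andl sequence checked for reassociate_ands_i32; the body is assumed:

    define i32 @reassoc_ands_sketch(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
      ; ((x0 - x1) & x2) & x3 is rewritten as (x0 - x1) & (x2 & x3).
      %t0 = sub i32 %x0, %x1
      %t1 = and i32 %t0, %x2
      %t2 = and i32 %t1, %x3
      ret i32 %t2
    }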
diff --git a/test/CodeGen/X86/machine-combiner.ll b/test/CodeGen/X86/machine-combiner.ll
index 048d30b6b24..d634dbb6569 100644
--- a/test/CodeGen/X86/machine-combiner.ll
+++ b/test/CodeGen/X86/machine-combiner.ll
@@ -11,14 +11,14 @@
define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -31,14 +31,14 @@ define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -51,14 +51,14 @@ define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -71,14 +71,14 @@ define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -94,7 +94,7 @@ define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
; SSE-LABEL: reassociate_adds5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
@@ -105,7 +105,7 @@ define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, floa
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -131,14 +131,14 @@ define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, floa
define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -153,14 +153,14 @@ define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_muls1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: mulss %xmm3, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_muls1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmulss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
@@ -175,14 +175,14 @@ define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
; SSE-LABEL: reassociate_adds_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds_double:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
@@ -197,14 +197,14 @@ define double @reassociate_adds_double(double %x0, double %x1, double %x2, doubl
define double @reassociate_muls_double(double %x0, double %x1, double %x2, double %x3) {
; SSE-LABEL: reassociate_muls_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: mulsd %xmm3, %xmm2
; SSE-NEXT: mulsd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_muls_double:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmulsd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
@@ -219,14 +219,14 @@ define double @reassociate_muls_double(double %x0, double %x1, double %x2, doubl
define <4 x float> @reassociate_adds_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_adds_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulps %xmm1, %xmm0
; SSE-NEXT: addps %xmm3, %xmm2
; SSE-NEXT: addps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
@@ -241,14 +241,14 @@ define <4 x float> @reassociate_adds_v4f32(<4 x float> %x0, <4 x float> %x1, <4
define <2 x double> @reassociate_adds_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_adds_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulpd %xmm1, %xmm0
; SSE-NEXT: addpd %xmm3, %xmm2
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_adds_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
@@ -263,14 +263,14 @@ define <2 x double> @reassociate_adds_v2f64(<2 x double> %x0, <2 x double> %x1,
define <4 x float> @reassociate_muls_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_muls_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_muls_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmulps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
@@ -285,14 +285,14 @@ define <4 x float> @reassociate_muls_v4f32(<4 x float> %x0, <4 x float> %x1, <4
define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_muls_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: mulpd %xmm3, %xmm2
; SSE-NEXT: mulpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_muls_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmulpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
@@ -307,7 +307,7 @@ define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1,
define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
; AVX-LABEL: reassociate_adds_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vaddps %ymm3, %ymm2, %ymm1
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -322,7 +322,7 @@ define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8
define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
; AVX-LABEL: reassociate_adds_v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vaddpd %ymm3, %ymm2, %ymm1
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -337,7 +337,7 @@ define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1,
define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
; AVX-LABEL: reassociate_muls_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmulps %ymm3, %ymm2, %ymm1
; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
@@ -352,7 +352,7 @@ define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8
define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
; AVX-LABEL: reassociate_muls_v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmulpd %ymm3, %ymm2, %ymm1
; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
@@ -367,14 +367,14 @@ define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1,
define float @reassociate_mins_single(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_mins_single:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: minss %xmm3, %xmm2
; SSE-NEXT: minss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_mins_single:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminss %xmm1, %xmm0, %xmm0
@@ -391,14 +391,14 @@ define float @reassociate_mins_single(float %x0, float %x1, float %x2, float %x3
define float @reassociate_maxs_single(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_maxs_single:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: maxss %xmm3, %xmm2
; SSE-NEXT: maxss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_maxs_single:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
@@ -415,14 +415,14 @@ define float @reassociate_maxs_single(float %x0, float %x1, float %x2, float %x3
define double @reassociate_mins_double(double %x0, double %x1, double %x2, double %x3) {
; SSE-LABEL: reassociate_mins_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: minsd %xmm3, %xmm2
; SSE-NEXT: minsd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_mins_double:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminsd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminsd %xmm1, %xmm0, %xmm0
@@ -439,14 +439,14 @@ define double @reassociate_mins_double(double %x0, double %x1, double %x2, doubl
define double @reassociate_maxs_double(double %x0, double %x1, double %x2, double %x3) {
; SSE-LABEL: reassociate_maxs_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: maxsd %xmm3, %xmm2
; SSE-NEXT: maxsd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_maxs_double:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxsd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxsd %xmm1, %xmm0, %xmm0
@@ -463,14 +463,14 @@ define double @reassociate_maxs_double(double %x0, double %x1, double %x2, doubl
define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_mins_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: minps %xmm3, %xmm2
; SSE-NEXT: minps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_mins_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminps %xmm1, %xmm0, %xmm0
@@ -487,14 +487,14 @@ define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4
define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_maxs_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: maxps %xmm3, %xmm2
; SSE-NEXT: maxps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_maxs_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm0
@@ -511,14 +511,14 @@ define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4
define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_mins_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: minpd %xmm3, %xmm2
; SSE-NEXT: minpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_mins_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminpd %xmm1, %xmm0, %xmm0
@@ -535,14 +535,14 @@ define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1,
define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_maxs_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: maxpd %xmm3, %xmm2
; SSE-NEXT: maxpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_maxs_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxpd %xmm1, %xmm0, %xmm0
@@ -559,7 +559,7 @@ define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1,
define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
; AVX-LABEL: reassociate_mins_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vminps %ymm3, %ymm2, %ymm1
; AVX-NEXT: vminps %ymm1, %ymm0, %ymm0
@@ -576,7 +576,7 @@ define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8
define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
; AVX-LABEL: reassociate_maxs_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmaxps %ymm3, %ymm2, %ymm1
; AVX-NEXT: vmaxps %ymm1, %ymm0, %ymm0
@@ -593,7 +593,7 @@ define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8
define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
; AVX-LABEL: reassociate_mins_v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vminpd %ymm3, %ymm2, %ymm1
; AVX-NEXT: vminpd %ymm1, %ymm0, %ymm0
@@ -610,7 +610,7 @@ define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1,
define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
; AVX-LABEL: reassociate_maxs_v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmaxpd %ymm3, %ymm2, %ymm1
; AVX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
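
Floating-point reassociation is only legal under relaxed FP semantics, and these tests make the payoff visible: a long-latency divide feeds the chain, so pairing x2 with x3 lets both adds (or mins/maxes) overlap the divide instead of waiting for it. A sketch of the adds6 shape, assuming per-instruction fast flags (the actual tests may enable this through function attributes or llc options instead):

    define float @reassoc_fadd_sketch(float %x0, float %x1,
                                      float %x2, float %x3) {
      ; ((x0 / x1) + x2) + x3 becomes (x0 / x1) + (x2 + x3): the addss of
      ; %xmm3, %xmm2 no longer depends on the divss result.
      %t0 = fdiv fast float %x0, %x1
      %t1 = fadd fast float %t0, %x2
      %t2 = fadd fast float %t1, %x3
      ret float %t2
    }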
diff --git a/test/CodeGen/X86/machine-cp.ll b/test/CodeGen/X86/machine-cp.ll
index a4fe112e1a7..cbac8e31d9a 100644
--- a/test/CodeGen/X86/machine-cp.ll
+++ b/test/CodeGen/X86/machine-cp.ll
@@ -5,7 +5,7 @@
; rdar://10640363
define i32 @t1(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: t1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: testl %edx, %edx
@@ -19,7 +19,7 @@ define i32 @t1(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: jne LBB0_2
-; CHECK-NEXT: ## BB#3: ## %while.end
+; CHECK-NEXT: ## %bb.3: ## %while.end
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: LBB0_1:
@@ -44,7 +44,7 @@ while.end: ; preds = %while.body, %entry
; rdar://10428165
define <8 x i16> @t2(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; CHECK-LABEL: t2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -56,7 +56,7 @@ entry:
define i32 @t3(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: t3:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movq %rsi, %rdx
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: testq %rdx, %rdx
@@ -70,7 +70,7 @@ define i32 @t3(i64 %a, i64 %b) nounwind {
; CHECK-NEXT: testq %rdx, %rdx
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: jne LBB2_2
-; CHECK-NEXT: ## BB#3: ## %while.end
+; CHECK-NEXT: ## %bb.3: ## %while.end
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: LBB2_1:
@@ -98,7 +98,7 @@ while.end: ; preds = %while.body, %entry
; ... = op2 dst <-- this is used here.
define <16 x float> @foo(<16 x float> %x) {
; CHECK-LABEL: foo:
-; CHECK: ## BB#0: ## %bb
+; CHECK: ## %bb.0: ## %bb
; CHECK-NEXT: movaps %xmm3, %xmm8
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: pxor %xmm6, %xmm6
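
The @t1/@t3 checks exercise machine copy propagation around a loop whose exit block, %while.end, is reachable from both %entry and %while.body (the preds comment is visible above). A sketch of that control-flow shape; the loop body is assumed, only the block structure is shown in the hunks:

    define i32 @loop_sketch(i32 %a, i32 %b) nounwind {
    entry:
      %done = icmp eq i32 %b, 0
      br i1 %done, label %while.end, label %while.body

    while.body:
      ; Values rotate through the loop, forcing the register copies
      ; (e.g. movl %ecx, %eax) that the pass must keep or forward correctly.
      %a.cur = phi i32 [ %a, %entry ], [ %b.cur, %while.body ]
      %b.cur = phi i32 [ %b, %entry ], [ %rem, %while.body ]
      %rem = urem i32 %a.cur, %b.cur
      %again = icmp ne i32 %rem, 0
      br i1 %again, label %while.body, label %while.end

    while.end:
      %res = phi i32 [ %a, %entry ], [ %b.cur, %while.body ]
      ret i32 %res
    }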
diff --git a/test/CodeGen/X86/machine-cse.ll b/test/CodeGen/X86/machine-cse.ll
index b7441577c63..e5e9e6c1163 100644
--- a/test/CodeGen/X86/machine-cse.ll
+++ b/test/CodeGen/X86/machine-cse.ll
@@ -9,7 +9,7 @@
define fastcc i8* @t(i32 %base) nounwind {
; CHECK-LABEL: t:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: shlq $9, %rax
@@ -17,7 +17,7 @@ define fastcc i8* @t(i32 %base) nounwind {
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB0_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: callq bar
; CHECK-NEXT: .LBB0_2: # %bb2
; CHECK-NEXT: callq foo
@@ -49,17 +49,17 @@ declare void @printf(...) nounwind
define void @commute(i32 %test_case, i32 %scale) nounwind ssp {
; CHECK-LABEL: commute:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: cmpl $2, %eax
; CHECK-NEXT: ja .LBB1_4
-; CHECK-NEXT: # BB#1: # %sw.bb
+; CHECK-NEXT: # %bb.1: # %sw.bb
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB1_4
-; CHECK-NEXT: # BB#2: # %if.end34
+; CHECK-NEXT: # %bb.2: # %if.end34
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: imull %edi, %esi
; CHECK-NEXT: leal (%rsi,%rsi,2), %esi
@@ -107,11 +107,11 @@ sw.bb307:
; rdar://10660865
define i32 @cross_mbb_phys_cse(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: cross_mbb_phys_cse:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: ja .LBB2_2
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: .LBB2_2: # %return
; CHECK-NEXT: retq
@@ -132,17 +132,17 @@ return:
; rdar://11393714
define i8* @bsd_memchr(i8* %s, i32 %a, i32 %c, i64 %n) nounwind ssp {
; CHECK-LABEL: bsd_memchr:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testq %rcx, %rcx
; CHECK-NEXT: je .LBB3_4
-; CHECK-NEXT: # BB#1: # %preheader
+; CHECK-NEXT: # %bb.1: # %preheader
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB3_2: # %do.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: cmpl %eax, %esi
; CHECK-NEXT: je .LBB3_5
-; CHECK-NEXT: # BB#3: # %do.cond
+; CHECK-NEXT: # %bb.3: # %do.cond
; CHECK-NEXT: # in Loop: Header=BB3_2 Depth=1
; CHECK-NEXT: incq %rdi
; CHECK-NEXT: decq %rcx
@@ -184,13 +184,13 @@ declare i1 @t2_func()
define i32 @t2() nounwind {
; CHECK-LABEL: t2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: movl $42, {{.*}}(%rip)
; CHECK-NEXT: callq t2_func
; CHECK-NEXT: testb $1, %al
; CHECK-NEXT: je .LBB4_2
-; CHECK-NEXT: # BB#1: # %a
+; CHECK-NEXT: # %bb.1: # %a
; CHECK-NEXT: movl {{.*}}(%rip), %eax
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
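
Note how @cross_mbb_phys_cse above keeps exactly one cmpl even though its EFLAGS result is consumed in two blocks (the ja in %entry and the sbbl in %if.end): machine CSE recognizes a second compare of the same operands as redundant across the block boundary. A sketch of IR with that shape, assumed to match the reduced rdar case:

    define i32 @cse_sketch(i32 %a, i32 %b) nounwind ssp {
    entry:
      %gt = icmp ugt i32 %a, %b
      br i1 %gt, label %return, label %if.end

    if.end:
      ; Same operands compared again: machine CSE reuses the flags from the
      ; first cmpl instead of re-emitting it.
      %lt = icmp ult i32 %a, %b
      %neg = sext i1 %lt to i32          ; materialized by sbbl %eax, %eax
      br label %return

    return:
      %ret = phi i32 [ 1, %entry ], [ %neg, %if.end ]
      ret i32 %ret
    }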
diff --git a/test/CodeGen/X86/machine-region-info.mir b/test/CodeGen/X86/machine-region-info.mir
index 78823a3eb00..7704cb28560 100644
--- a/test/CodeGen/X86/machine-region-info.mir
+++ b/test/CodeGen/X86/machine-region-info.mir
@@ -53,12 +53,12 @@ body: |
...
# CHECK: Region tree:
-# CHECK-NEXT: [0] BB#0 => <Function Return>
-# CHECK-NEXT: [1] BB#0 => BB#11
-# CHECK-NEXT: [2] BB#7 => BB#9
-# CHECK-NEXT: [2] BB#9 => BB#11
-# CHECK-NEXT: [2] BB#1 => BB#11
-# CHECK-NEXT: [3] BB#2 => BB#5
-# CHECK-NEXT: [4] BB#3 => BB#5
-# CHECK-NEXT: [3] BB#5 => BB#11
+# CHECK-NEXT: [0] %bb.0 => <Function Return>
+# CHECK-NEXT: [1] %bb.0 => %bb.11
+# CHECK-NEXT: [2] %bb.7 => %bb.9
+# CHECK-NEXT: [2] %bb.9 => %bb.11
+# CHECK-NEXT: [2] %bb.1 => %bb.11
+# CHECK-NEXT: [3] %bb.2 => %bb.5
+# CHECK-NEXT: [4] %bb.3 => %bb.5
+# CHECK-NEXT: [3] %bb.5 => %bb.11
# CHECK-NEXT: End region tree
diff --git a/test/CodeGen/X86/madd.ll b/test/CodeGen/X86/madd.ll
index ae0ed8b3d61..44e7b91eef8 100644
--- a/test/CodeGen/X86/madd.ll
+++ b/test/CodeGen/X86/madd.ll
@@ -6,7 +6,7 @@
define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: _Z10test_shortPsS_i:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movl %edx, %eax
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: xorl %ecx, %ecx
@@ -21,7 +21,7 @@ define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly
; SSE2-NEXT: addq $8, %rcx
; SSE2-NEXT: cmpq %rcx, %rax
; SSE2-NEXT: jne .LBB0_1
-; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: # %bb.2: # %middle.block
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: paddd %xmm1, %xmm0
@@ -31,7 +31,7 @@ define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly
; SSE2-NEXT: retq
;
; AVX2-LABEL: _Z10test_shortPsS_i:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movl %edx, %eax
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: xorl %ecx, %ecx
@@ -44,7 +44,7 @@ define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly
; AVX2-NEXT: addq $8, %rcx
; AVX2-NEXT: cmpq %rcx, %rax
; AVX2-NEXT: jne .LBB0_1
-; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: # %bb.2: # %middle.block
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -55,7 +55,7 @@ define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly
; AVX2-NEXT: retq
;
; AVX512-LABEL: _Z10test_shortPsS_i:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: movl %edx, %eax
; AVX512-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512-NEXT: xorl %ecx, %ecx
@@ -68,7 +68,7 @@ define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly
; AVX512-NEXT: addq $8, %rcx
; AVX512-NEXT: cmpq %rcx, %rax
; AVX512-NEXT: jne .LBB0_1
-; AVX512-NEXT: # BB#2: # %middle.block
+; AVX512-NEXT: # %bb.2: # %middle.block
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -111,7 +111,7 @@ middle.block:
define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: test_unsigned_short:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movl %edx, %eax
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: xorl %ecx, %ecx
@@ -132,7 +132,7 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly
; SSE2-NEXT: addq $8, %rcx
; SSE2-NEXT: cmpq %rcx, %rax
; SSE2-NEXT: jne .LBB1_1
-; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: # %bb.2: # %middle.block
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: paddd %xmm0, %xmm1
@@ -142,7 +142,7 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly
; SSE2-NEXT: retq
;
; AVX2-LABEL: test_unsigned_short:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movl %edx, %eax
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: xorl %ecx, %ecx
@@ -156,7 +156,7 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly
; AVX2-NEXT: addq $8, %rcx
; AVX2-NEXT: cmpq %rcx, %rax
; AVX2-NEXT: jne .LBB1_1
-; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: # %bb.2: # %middle.block
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -167,7 +167,7 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_unsigned_short:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: movl %edx, %eax
; AVX512-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512-NEXT: xorl %ecx, %ecx
@@ -181,7 +181,7 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly
; AVX512-NEXT: addq $8, %rcx
; AVX512-NEXT: cmpq %rcx, %rax
; AVX512-NEXT: jne .LBB1_1
-; AVX512-NEXT: # BB#2: # %middle.block
+; AVX512-NEXT: # %bb.2: # %middle.block
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -224,7 +224,7 @@ middle.block:
define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: _Z9test_charPcS_i:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movl %edx, %eax
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: xorl %ecx, %ecx
@@ -263,7 +263,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
; SSE2-NEXT: addq $16, %rcx
; SSE2-NEXT: cmpq %rcx, %rax
; SSE2-NEXT: jne .LBB2_1
-; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: # %bb.2: # %middle.block
; SSE2-NEXT: paddd %xmm3, %xmm0
; SSE2-NEXT: paddd %xmm2, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm1
@@ -275,7 +275,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
; SSE2-NEXT: retq
;
; AVX2-LABEL: _Z9test_charPcS_i:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movl %edx, %eax
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: xorl %ecx, %ecx
@@ -290,7 +290,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
; AVX2-NEXT: addq $16, %rcx
; AVX2-NEXT: cmpq %rcx, %rax
; AVX2-NEXT: jne .LBB2_1
-; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: # %bb.2: # %middle.block
; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
@@ -302,7 +302,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
; AVX2-NEXT: retq
;
; AVX512-LABEL: _Z9test_charPcS_i:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: movl %edx, %eax
; AVX512-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512-NEXT: xorl %ecx, %ecx
@@ -316,7 +316,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
; AVX512-NEXT: addq $16, %rcx
; AVX512-NEXT: cmpq %rcx, %rax
; AVX512-NEXT: jne .LBB2_1
-; AVX512-NEXT: # BB#2: # %middle.block
+; AVX512-NEXT: # %bb.2: # %middle.block
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
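
The madd tests feed already-vectorized loops to llc (note the %middle.block horizontal reductions checked above) and verify that widening multiplies of sign-extended i16 data select to the multiply-add instructions. The scalar loop these kernels correspond to, sketched under that assumption (and assuming %n is at least 1):

    define i32 @mac_sketch(i16* %a, i16* %b, i32 %n) {
    entry:
      br label %loop

    loop:
      ; sum += (i32)a[i] * (i32)b[i] over i16 inputs: the sext/sext/mul/add
      ; pattern is what pmaddwd computes two lanes at a time.
      %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
      %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
      %pa = getelementptr inbounds i16, i16* %a, i32 %i
      %pb = getelementptr inbounds i16, i16* %b, i32 %i
      %va = load i16, i16* %pa
      %vb = load i16, i16* %pb
      %sa = sext i16 %va to i32
      %sb = sext i16 %vb to i32
      %mul = mul nsw i32 %sa, %sb
      %sum.next = add nsw i32 %sum, %mul
      %i.next = add nuw nsw i32 %i, 1
      %more = icmp slt i32 %i.next, %n
      br i1 %more, label %loop, label %exit

    exit:
      ret i32 %sum.next
    }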
diff --git a/test/CodeGen/X86/mask-negated-bool.ll b/test/CodeGen/X86/mask-negated-bool.ll
index 29ecbf01c0f..b0147c3bb58 100644
--- a/test/CodeGen/X86/mask-negated-bool.ll
+++ b/test/CodeGen/X86/mask-negated-bool.ll
@@ -3,7 +3,7 @@
define i32 @mask_negated_zext_bool1(i1 %x) {
; CHECK-LABEL: mask_negated_zext_bool1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -15,7 +15,7 @@ define i32 @mask_negated_zext_bool1(i1 %x) {
define i32 @mask_negated_zext_bool2(i1 zeroext %x) {
; CHECK-LABEL: mask_negated_zext_bool2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%ext = zext i1 %x to i32
@@ -26,7 +26,7 @@ define i32 @mask_negated_zext_bool2(i1 zeroext %x) {
define <4 x i32> @mask_negated_zext_bool_vec(<4 x i1> %x) {
; CHECK-LABEL: mask_negated_zext_bool_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%ext = zext <4 x i1> %x to <4 x i32>
@@ -37,7 +37,7 @@ define <4 x i32> @mask_negated_zext_bool_vec(<4 x i1> %x) {
define i32 @mask_negated_sext_bool1(i1 %x) {
; CHECK-LABEL: mask_negated_sext_bool1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -49,7 +49,7 @@ define i32 @mask_negated_sext_bool1(i1 %x) {
define i32 @mask_negated_sext_bool2(i1 zeroext %x) {
; CHECK-LABEL: mask_negated_sext_bool2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%ext = sext i1 %x to i32
@@ -60,7 +60,7 @@ define i32 @mask_negated_sext_bool2(i1 zeroext %x) {
define <4 x i32> @mask_negated_sext_bool_vec(<4 x i1> %x) {
; CHECK-LABEL: mask_negated_sext_bool_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%ext = sext <4 x i1> %x to <4 x i32>
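
The fold behind these checks: negating a zero-extended bool yields 0 or -1 (all ones), so masking the negation recovers the bool itself, and the vector cases collapse to a single andps with a constant-pool mask. A sketch of the scalar zext case, assuming the usual sub/and body around the %ext lines visible above:

    define i32 @mask_negated_sketch(i1 %x) {
      ; 0 - zext(%x) is 0 or 0xFFFFFFFF, so (& 1) leaves exactly zext(%x);
      ; llc emits just andl $1, %edi.
      %ext = zext i1 %x to i32
      %neg = sub i32 0, %ext
      %and = and i32 %neg, 1
      ret i32 %and
    }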
diff --git a/test/CodeGen/X86/masked_gather_scatter.ll b/test/CodeGen/X86/masked_gather_scatter.ll
index 1ec12d9c61d..d7bd9318e8e 100644
--- a/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/test/CodeGen/X86/masked_gather_scatter.ll
@@ -18,14 +18,14 @@
define <16 x float> @test1(float* %base, <16 x i32> %ind) {
; KNL_64-LABEL: test1:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; KNL_64-NEXT: vmovaps %zmm1, %zmm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test1:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -33,14 +33,14 @@ define <16 x float> @test1(float* %base, <16 x i32> %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; SKX-NEXT: vmovaps %zmm1, %zmm0
; SKX-NEXT: retq
;
; SKX_32-LABEL: test1:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -75,14 +75,14 @@ declare <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> , i32, <8 x i1> ,
define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) {
; KNL_64-LABEL: test2:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kmovw %esi, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; KNL_64-NEXT: vmovaps %zmm1, %zmm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test2:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -90,14 +90,14 @@ define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw %esi, %k1
; SKX-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; SKX-NEXT: vmovaps %zmm1, %zmm0
; SKX-NEXT: retq
;
; SKX_32-LABEL: test2:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; SKX_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -116,14 +116,14 @@ define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) {
define <16 x i32> @test3(i32* %base, <16 x i32> %ind, i16 %mask) {
; KNL_64-LABEL: test3:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kmovw %esi, %k1
; KNL_64-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k1}
; KNL_64-NEXT: vmovdqa64 %zmm1, %zmm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test3:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; KNL_32-NEXT: vpgatherdd (%eax,%zmm0,4), %zmm1 {%k1}
@@ -131,14 +131,14 @@ define <16 x i32> @test3(i32* %base, <16 x i32> %ind, i16 %mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw %esi, %k1
; SKX-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k1}
; SKX-NEXT: vmovdqa64 %zmm1, %zmm0
; SKX-NEXT: retq
;
; SKX_32-LABEL: test3:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; SKX_32-NEXT: vpgatherdd (%eax,%zmm0,4), %zmm1 {%k1}
@@ -158,7 +158,7 @@ define <16 x i32> @test3(i32* %base, <16 x i32> %ind, i16 %mask) {
define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) {
; KNL_64-LABEL: test4:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kmovw %esi, %k1
; KNL_64-NEXT: kmovw %k1, %k2
; KNL_64-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k2}
@@ -168,7 +168,7 @@ define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test4:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; KNL_32-NEXT: kmovw %k1, %k2
@@ -179,7 +179,7 @@ define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw %esi, %k1
; SKX-NEXT: kmovw %k1, %k2
; SKX-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k2}
@@ -189,7 +189,7 @@ define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test4:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; SKX_32-NEXT: kmovw %k1, %k2
@@ -227,7 +227,7 @@ define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) {
define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
; KNL_64-LABEL: test5:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kmovw %esi, %k1
; KNL_64-NEXT: kmovw %k1, %k2
; KNL_64-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k2}
@@ -236,7 +236,7 @@ define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test5:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; KNL_32-NEXT: kmovw %k1, %k2
@@ -246,7 +246,7 @@ define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test5:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw %esi, %k1
; SKX-NEXT: kmovw %k1, %k2
; SKX-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k2}
@@ -255,7 +255,7 @@ define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test5:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; SKX_32-NEXT: kmovw %k1, %k2
@@ -289,7 +289,7 @@ declare void @llvm.masked.scatter.v16i32.v16p0i32(<16 x i32> , <16 x i32*> , i32
define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
; KNL_64-LABEL: test6:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: kxnorw %k0, %k0, %k2
; KNL_64-NEXT: vpgatherqd (,%zmm1), %ymm2 {%k2}
@@ -298,7 +298,7 @@ define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test6:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpmovsxdq %ymm1, %zmm2
; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: kxnorw %k0, %k0, %k2
@@ -308,7 +308,7 @@ define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test6:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: kxnorw %k0, %k0, %k2
; SKX-NEXT: vpgatherqd (,%zmm1), %ymm2 {%k2}
@@ -317,7 +317,7 @@ define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test6:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: kxnorw %k0, %k0, %k2
; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm2 {%k2}
@@ -334,7 +334,7 @@ define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
;
; KNL_64-LABEL: test7:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kmovw %esi, %k1
; KNL_64-NEXT: vpmovsxdq %ymm0, %zmm0
; KNL_64-NEXT: kmovw %k1, %k2
@@ -345,7 +345,7 @@ define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test7:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; KNL_32-NEXT: kmovw %ecx, %k1
@@ -358,7 +358,7 @@ define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test7:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw %esi, %k1
; SKX-NEXT: kmovw %k1, %k2
; SKX-NEXT: vpgatherdd (%rdi,%ymm0,4), %ymm1 {%k2}
@@ -368,7 +368,7 @@ define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test7:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: kmovb {{[0-9]+}}(%esp), %k1
; SKX_32-NEXT: kmovw %k1, %k2
@@ -393,7 +393,7 @@ define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
; each gather call will be split into two
define <16 x i32> @test8(<16 x i32*> %ptr.random, <16 x i32> %ind, i16 %mask) {
; KNL_64-LABEL: test8:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kmovw %edi, %k1
; KNL_64-NEXT: kshiftrw $8, %k1, %k2
; KNL_64-NEXT: kmovw %k2, %k3
@@ -408,7 +408,7 @@ define <16 x i32> @test8(<16 x i32*> %ptr.random, <16 x i32> %ind, i16 %mask) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test8:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; KNL_32-NEXT: kmovw %k1, %k2
; KNL_32-NEXT: vpgatherdd (,%zmm0), %zmm1 {%k2}
@@ -418,7 +418,7 @@ define <16 x i32> @test8(<16 x i32*> %ptr.random, <16 x i32> %ind, i16 %mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovw %edi, %k1
; SKX-NEXT: kshiftrw $8, %k1, %k2
; SKX-NEXT: kmovw %k2, %k3
@@ -433,7 +433,7 @@ define <16 x i32> @test8(<16 x i32*> %ptr.random, <16 x i32> %ind, i16 %mask) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test8:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; SKX_32-NEXT: kmovw %k1, %k2
; SKX_32-NEXT: vpgatherdd (,%zmm0), %zmm1 {%k2}
@@ -458,7 +458,7 @@ define <16 x i32> @test8(<16 x i32*> %ptr.random, <16 x i32> %ind, i16 %mask) {
define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
; KNL_64-LABEL: test9:
-; KNL_64: # BB#0: # %entry
+; KNL_64: # %bb.0: # %entry
; KNL_64-NEXT: vpbroadcastq %rdi, %zmm2
; KNL_64-NEXT: vpbroadcastq {{.*#+}} zmm3 = [824,824,824,824,824,824,824,824]
; KNL_64-NEXT: vpmuludq %zmm3, %zmm0, %zmm4
@@ -476,7 +476,7 @@ define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test9:
-; KNL_32: # BB#0: # %entry
+; KNL_32: # %bb.0: # %entry
; KNL_32-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %ymm2
; KNL_32-NEXT: vpbroadcastd {{.*#+}} ymm3 = [80,80,80,80,80,80,80,80]
; KNL_32-NEXT: vpmulld %ymm3, %ymm1, %ymm1
@@ -493,7 +493,7 @@ define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
; KNL_32-NEXT: retl
;
; SKX_SMALL-LABEL: test9:
-; SKX_SMALL: # BB#0: # %entry
+; SKX_SMALL: # %bb.0: # %entry
; SKX_SMALL-NEXT: vpbroadcastq %rdi, %zmm2
; SKX_SMALL-NEXT: vpmullq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX_SMALL-NEXT: vpmovsxdq %ymm1, %zmm1
@@ -506,7 +506,7 @@ define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
; SKX_SMALL-NEXT: retq
;
; SKX_LARGE-LABEL: test9:
-; SKX_LARGE: # BB#0: # %entry
+; SKX_LARGE: # %bb.0: # %entry
; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2
; SKX_LARGE-NEXT: vpmovsxdq %ymm1, %zmm1
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
@@ -522,7 +522,7 @@ define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
; SKX_LARGE-NEXT: retq
;
; SKX_32-LABEL: test9:
-; SKX_32: # BB#0: # %entry
+; SKX_32: # %bb.0: # %entry
; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1
; SKX_32-NEXT: vpmovqd %zmm0, %ymm0
; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
@@ -543,7 +543,7 @@ entry:
define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
; KNL_64-LABEL: test10:
-; KNL_64: # BB#0: # %entry
+; KNL_64: # %bb.0: # %entry
; KNL_64-NEXT: vpbroadcastq %rdi, %zmm2
; KNL_64-NEXT: vpbroadcastq {{.*#+}} zmm3 = [824,824,824,824,824,824,824,824]
; KNL_64-NEXT: vpmuludq %zmm3, %zmm0, %zmm4
@@ -561,7 +561,7 @@ define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test10:
-; KNL_32: # BB#0: # %entry
+; KNL_32: # %bb.0: # %entry
; KNL_32-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %ymm2
; KNL_32-NEXT: vpbroadcastd {{.*#+}} ymm3 = [80,80,80,80,80,80,80,80]
; KNL_32-NEXT: vpmulld %ymm3, %ymm1, %ymm1
@@ -578,7 +578,7 @@ define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
; KNL_32-NEXT: retl
;
; SKX_SMALL-LABEL: test10:
-; SKX_SMALL: # BB#0: # %entry
+; SKX_SMALL: # %bb.0: # %entry
; SKX_SMALL-NEXT: vpbroadcastq %rdi, %zmm2
; SKX_SMALL-NEXT: vpmullq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX_SMALL-NEXT: vpmovsxdq %ymm1, %zmm1
@@ -591,7 +591,7 @@ define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
; SKX_SMALL-NEXT: retq
;
; SKX_LARGE-LABEL: test10:
-; SKX_LARGE: # BB#0: # %entry
+; SKX_LARGE: # %bb.0: # %entry
; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2
; SKX_LARGE-NEXT: vpmovsxdq %ymm1, %zmm1
; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
@@ -607,7 +607,7 @@ define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
; SKX_LARGE-NEXT: retq
;
; SKX_32-LABEL: test10:
-; SKX_32: # BB#0: # %entry
+; SKX_32: # %bb.0: # %entry
; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1
; SKX_32-NEXT: vpmovqd %zmm0, %ymm0
; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
@@ -629,14 +629,14 @@ entry:
; Splat index in GEP, requires broadcast
define <16 x float> @test11(float* %base, i32 %ind) {
; KNL_64-LABEL: test11:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpbroadcastd %esi, %zmm1
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test11:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %zmm1
; KNL_32-NEXT: kxnorw %k0, %k0, %k1
@@ -644,14 +644,14 @@ define <16 x float> @test11(float* %base, i32 %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test11:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastd %esi, %zmm1
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
; SKX-NEXT: retq
;
; SKX_32-LABEL: test11:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %zmm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
@@ -670,14 +670,14 @@ define <16 x float> @test11(float* %base, i32 %ind) {
; We are checking the uniform base here. It is taken directly from input to vgatherdps
define <16 x float> @test12(float* %base, <16 x i32> %ind) {
; KNL_64-LABEL: test12:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; KNL_64-NEXT: vmovaps %zmm1, %zmm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test12:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -685,14 +685,14 @@ define <16 x float> @test12(float* %base, <16 x i32> %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test12:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; SKX-NEXT: vmovaps %zmm1, %zmm0
; SKX-NEXT: retq
;
; SKX_32-LABEL: test12:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -709,14 +709,14 @@ define <16 x float> @test12(float* %base, <16 x i32> %ind) {
; The same as the previous, but the mask is undefined
define <16 x float> @test13(float* %base, <16 x i32> %ind) {
; KNL_64-LABEL: test13:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; KNL_64-NEXT: vmovaps %zmm1, %zmm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test13:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -724,14 +724,14 @@ define <16 x float> @test13(float* %base, <16 x i32> %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test13:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; SKX-NEXT: vmovaps %zmm1, %zmm0
; SKX-NEXT: retq
;
; SKX_32-LABEL: test13:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -748,7 +748,7 @@ define <16 x float> @test13(float* %base, <16 x i32> %ind) {
; The base pointer is not splat, can't find uniform base
define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
; KNL_64-LABEL: test14:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
; KNL_64-NEXT: vpbroadcastq %xmm0, %zmm0
; KNL_64-NEXT: vmovd %esi, %xmm1
@@ -762,7 +762,7 @@ define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test14:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; KNL_32-NEXT: vpbroadcastd %xmm0, %zmm0
; KNL_32-NEXT: vpslld $2, {{[0-9]+}}(%esp){1to16}, %zmm1
@@ -772,7 +772,7 @@ define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test14:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
; SKX-NEXT: vpbroadcastq %xmm0, %zmm0
; SKX-NEXT: vpbroadcastd %esi, %ymm1
@@ -785,7 +785,7 @@ define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test14:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; SKX_32-NEXT: vpbroadcastd %xmm0, %zmm0
; SKX_32-NEXT: vpslld $2, {{[0-9]+}}(%esp){1to16}, %zmm1
@@ -810,7 +810,7 @@ declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*>, i32, <2 x
; Gather smaller than existing instruction
define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; KNL_64-LABEL: test15:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_64-NEXT: vmovdqa %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %ymm0, %zmm2
@@ -822,7 +822,7 @@ define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test15:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vmovdqa %xmm1, %xmm1
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -835,7 +835,7 @@ define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test15:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vgatherdps (%rdi,%xmm0,4), %xmm1 {%k1}
@@ -843,7 +843,7 @@ define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test15:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpslld $31, %xmm1, %xmm1
; SKX_32-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -860,7 +860,7 @@ define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; Gather smaller than existing instruction
define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x double> %src0) {
; KNL_64-LABEL: test16:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
@@ -875,7 +875,7 @@ define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test16:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
@@ -891,7 +891,7 @@ define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x
; KNL_32-NEXT: retl
;
; SKX-LABEL: test16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vgatherdpd (%rdi,%xmm0,8), %ymm2 {%k1}
@@ -899,7 +899,7 @@ define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x
; SKX-NEXT: retq
;
; SKX_32-LABEL: test16:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpslld $31, %xmm1, %xmm1
; SKX_32-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -915,7 +915,7 @@ define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x
define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x double> %src0) {
; KNL_64-LABEL: test17:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
@@ -928,7 +928,7 @@ define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test17:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
@@ -942,7 +942,7 @@ define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x
; KNL_32-NEXT: retl
;
; SKX-LABEL: test17:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $32, %xmm0, %xmm0
; SKX-NEXT: vpsraq $32, %xmm0, %xmm0
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -952,7 +952,7 @@ define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x
; SKX-NEXT: retq
;
; SKX_32-LABEL: test17:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $32, %xmm0, %xmm0
; SKX_32-NEXT: vpsraq $32, %xmm0, %xmm0
; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -976,7 +976,7 @@ declare void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> , <2 x float*> , i32
define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; KNL_64-LABEL: test18:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_64-NEXT: vmovdqa %xmm2, %xmm2
@@ -987,7 +987,7 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test18:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vmovdqa %xmm2, %xmm2
@@ -999,7 +999,7 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test18:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm2, %xmm2
; SKX-NEXT: vptestmd %xmm2, %xmm2, %k1
; SKX-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
@@ -1007,7 +1007,7 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test18:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpslld $31, %xmm2, %xmm2
; SKX_32-NEXT: vptestmd %xmm2, %xmm2, %k1
; SKX_32-NEXT: vpscatterdd %xmm0, (,%xmm1) {%k1}
@@ -1018,7 +1018,7 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind) {
; KNL_64-LABEL: test19:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; KNL_64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
@@ -1032,7 +1032,7 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test19:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; KNL_32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
@@ -1047,7 +1047,7 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
; KNL_32-NEXT: retl
;
; SKX-LABEL: test19:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vscatterqpd %ymm0, (%rdi,%ymm2,8) {%k1}
@@ -1055,7 +1055,7 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
; SKX-NEXT: retq
;
; SKX_32-LABEL: test19:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpslld $31, %xmm1, %xmm1
; SKX_32-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1070,7 +1070,7 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
; Data type requires widening
define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; KNL_64-LABEL: test20:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
@@ -1082,7 +1082,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test20:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; KNL_32-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
@@ -1095,7 +1095,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test20:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k1
@@ -1104,7 +1104,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test20:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX_32-NEXT: vptestmq %xmm2, %xmm2, %k1
@@ -1117,7 +1117,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; Data type requires promotion
define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; KNL_64-LABEL: test21:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_64-NEXT: vmovdqa %xmm2, %xmm2
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1128,7 +1128,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test21:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpsllq $32, %xmm1, %xmm1
; KNL_32-NEXT: vpsraq $32, %zmm1, %zmm1
; KNL_32-NEXT: vmovdqa %xmm2, %xmm2
@@ -1140,7 +1140,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test21:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k1
@@ -1150,7 +1150,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test21:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $32, %xmm1, %xmm1
; SKX_32-NEXT: vpsraq $32, %xmm1, %xmm1
; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
@@ -1168,7 +1168,7 @@ declare <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*>, i32, <2 x i1
define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: test22:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1182,7 +1182,7 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test22:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1197,7 +1197,7 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
; KNL_32-NEXT: retl
;
; SKX-LABEL: test22:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX-NEXT: vptestmq %xmm1, %xmm1, %k1
@@ -1206,7 +1206,7 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
; SKX-NEXT: retq
;
; SKX_32-LABEL: test22:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX_32-NEXT: vptestmq %xmm1, %xmm1, %k1
@@ -1222,7 +1222,7 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: test22a:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1235,7 +1235,7 @@ define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x f
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test22a:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1249,7 +1249,7 @@ define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x f
; KNL_32-NEXT: retl
;
; SKX-LABEL: test22a:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX-NEXT: vptestmq %xmm1, %xmm1, %k1
; SKX-NEXT: vgatherqps (%rdi,%xmm0,4), %xmm2 {%k1}
@@ -1257,7 +1257,7 @@ define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x f
; SKX-NEXT: retq
;
; SKX_32-LABEL: test22a:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX_32-NEXT: vptestmq %xmm1, %xmm1, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1274,7 +1274,7 @@ declare <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*>, i32, <2 x i1>, <
define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %src0) {
; KNL_64-LABEL: test23:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vpmovsxdq %ymm0, %zmm0
@@ -1288,7 +1288,7 @@ define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test23:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1303,7 +1303,7 @@ define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %
; KNL_32-NEXT: retl
;
; SKX-LABEL: test23:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX-NEXT: vptestmq %xmm1, %xmm1, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1313,7 +1313,7 @@ define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %
; SKX-NEXT: retq
;
; SKX_32-LABEL: test23:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX_32-NEXT: vptestmq %xmm1, %xmm1, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1330,7 +1330,7 @@ define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %
define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32> %src0) {
; KNL_64-LABEL: test23b:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_64-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1343,7 +1343,7 @@ define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32>
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test23b:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
@@ -1357,7 +1357,7 @@ define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32>
; KNL_32-NEXT: retl
;
; SKX-LABEL: test23b:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX-NEXT: vptestmq %xmm1, %xmm1, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
@@ -1366,7 +1366,7 @@ define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32>
; SKX-NEXT: retq
;
; SKX_32-LABEL: test23b:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX_32-NEXT: vptestmq %xmm1, %xmm1, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1381,7 +1381,7 @@ define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32>
define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
; KNL_64-LABEL: test24:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vpmovsxdq %ymm0, %zmm0
; KNL_64-NEXT: movb $3, %al
@@ -1392,7 +1392,7 @@ define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test24:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_32-NEXT: vpmovsxdq %ymm0, %zmm0
@@ -1404,7 +1404,7 @@ define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test24:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movb $3, %al
; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1413,7 +1413,7 @@ define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test24:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: movb $3, %cl
; SKX_32-NEXT: kmovw %ecx, %k1
@@ -1429,7 +1429,7 @@ define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %src0) {
; KNL_64-LABEL: test25:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
@@ -1442,7 +1442,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test25:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
@@ -1456,7 +1456,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %
; KNL_32-NEXT: retl
;
; SKX-LABEL: test25:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $32, %xmm0, %xmm0
; SKX-NEXT: vpsraq $32, %xmm0, %xmm0
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -1466,7 +1466,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %
; SKX-NEXT: retq
;
; SKX_32-LABEL: test25:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $32, %xmm0, %xmm0
; SKX_32-NEXT: vpsraq $32, %xmm0, %xmm0
; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -1483,7 +1483,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %
define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; KNL_64-LABEL: test26:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
@@ -1495,7 +1495,7 @@ define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test26:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
@@ -1509,7 +1509,7 @@ define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test26:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $32, %xmm0, %xmm0
; SKX-NEXT: vpsraq $32, %xmm0, %xmm0
; SKX-NEXT: kxnorw %k0, %k0, %k1
@@ -1518,7 +1518,7 @@ define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test26:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $32, %xmm0, %xmm0
; SKX_32-NEXT: vpsraq $32, %xmm0, %xmm0
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1535,7 +1535,7 @@ define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; Result type requires widening; all-ones mask
define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; KNL_64-LABEL: test27:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vpmovsxdq %ymm0, %zmm1
; KNL_64-NEXT: movb $3, %al
@@ -1546,7 +1546,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test27:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpmovsxdq %ymm0, %zmm1
@@ -1558,7 +1558,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test27:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SKX-NEXT: movb $3, %al
; SKX-NEXT: kmovw %eax, %k1
@@ -1566,7 +1566,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test27:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: movb $3, %cl
@@ -1582,7 +1582,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; Data type requires promotion, mask is all-ones
define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; KNL_64-LABEL: test28:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: movb $3, %al
@@ -1592,7 +1592,7 @@ define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test28:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpsllq $32, %xmm1, %xmm1
; KNL_32-NEXT: vpsraq $32, %zmm1, %zmm1
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1604,7 +1604,7 @@ define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test28:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; SKX-NEXT: movb $3, %al
; SKX-NEXT: kmovw %eax, %k1
@@ -1614,7 +1614,7 @@ define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test28:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $32, %xmm1, %xmm1
; SKX_32-NEXT: vpsraq $32, %xmm1, %xmm1
; SKX_32-NEXT: movb $3, %al
@@ -1636,7 +1636,7 @@ define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
define <16 x float> @test29(float* %base, <16 x i32> %ind) {
; KNL_64-LABEL: test29:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: movw $44, %ax
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
@@ -1644,7 +1644,7 @@ define <16 x float> @test29(float* %base, <16 x i32> %ind) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test29:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: movw $44, %cx
; KNL_32-NEXT: kmovw %ecx, %k1
@@ -1653,7 +1653,7 @@ define <16 x float> @test29(float* %base, <16 x i32> %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: test29:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movw $44, %ax
; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
@@ -1661,7 +1661,7 @@ define <16 x float> @test29(float* %base, <16 x i32> %ind) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test29:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: movw $44, %cx
; SKX_32-NEXT: kmovw %ecx, %k1
@@ -1683,7 +1683,7 @@ define <16 x float> @test29(float* %base, <16 x i32> %ind) {
declare <3 x i32> @llvm.masked.gather.v3i32.v3p0i32(<3 x i32*>, i32, <3 x i1>, <3 x i32>)
define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) {
; KNL_64-LABEL: test30:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kmovw %edx, %k0
; KNL_64-NEXT: kmovw %esi, %k2
; KNL_64-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -1692,7 +1692,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; KNL_64-NEXT: testb $1, %dil
; KNL_64-NEXT: # implicit-def: %xmm0
; KNL_64-NEXT: je .LBB31_2
-; KNL_64-NEXT: # BB#1: # %cond.load
+; KNL_64-NEXT: # %bb.1: # %cond.load
; KNL_64-NEXT: vmovq %xmm1, %rax
; KNL_64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; KNL_64-NEXT: .LBB31_2: # %else
@@ -1702,7 +1702,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; KNL_64-NEXT: kmovw %k2, %eax
; KNL_64-NEXT: testb $1, %al
; KNL_64-NEXT: je .LBB31_4
-; KNL_64-NEXT: # BB#3: # %cond.load1
+; KNL_64-NEXT: # %bb.3: # %cond.load1
; KNL_64-NEXT: vpextrq $1, %xmm1, %rax
; KNL_64-NEXT: vpinsrd $1, (%rax), %xmm0, %xmm0
; KNL_64-NEXT: .LBB31_4: # %else2
@@ -1711,7 +1711,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; KNL_64-NEXT: kmovw %k0, %eax
; KNL_64-NEXT: testb $1, %al
; KNL_64-NEXT: je .LBB31_6
-; KNL_64-NEXT: # BB#5: # %cond.load4
+; KNL_64-NEXT: # %bb.5: # %cond.load4
; KNL_64-NEXT: vextracti128 $1, %ymm1, %xmm1
; KNL_64-NEXT: vmovq %xmm1, %rax
; KNL_64-NEXT: vpinsrd $2, (%rax), %xmm0, %xmm0
@@ -1730,7 +1730,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test30:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: kmovw %eax, %k0
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1741,7 +1741,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; KNL_32-NEXT: testb $1, %al
; KNL_32-NEXT: # implicit-def: %xmm0
; KNL_32-NEXT: je .LBB31_2
-; KNL_32-NEXT: # BB#1: # %cond.load
+; KNL_32-NEXT: # %bb.1: # %cond.load
; KNL_32-NEXT: vmovd %xmm1, %ecx
; KNL_32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; KNL_32-NEXT: .LBB31_2: # %else
@@ -1751,7 +1751,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; KNL_32-NEXT: kmovw %k2, %eax
; KNL_32-NEXT: testb $1, %al
; KNL_32-NEXT: je .LBB31_4
-; KNL_32-NEXT: # BB#3: # %cond.load1
+; KNL_32-NEXT: # %bb.3: # %cond.load1
; KNL_32-NEXT: vpextrd $1, %xmm1, %eax
; KNL_32-NEXT: vpinsrd $1, (%eax), %xmm0, %xmm0
; KNL_32-NEXT: .LBB31_4: # %else2
@@ -1760,7 +1760,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; KNL_32-NEXT: kmovw %k0, %eax
; KNL_32-NEXT: testb $1, %al
; KNL_32-NEXT: je .LBB31_6
-; KNL_32-NEXT: # BB#5: # %cond.load4
+; KNL_32-NEXT: # %bb.5: # %cond.load4
; KNL_32-NEXT: vpextrd $2, %xmm1, %eax
; KNL_32-NEXT: vpinsrd $2, (%eax), %xmm0, %xmm0
; KNL_32-NEXT: .LBB31_6: # %else5
@@ -1777,7 +1777,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; KNL_32-NEXT: retl
;
; SKX-LABEL: test30:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm2, %xmm2
; SKX-NEXT: vptestmd %xmm2, %xmm2, %k1
; SKX-NEXT: kshiftlw $15, %k1, %k0
@@ -1789,7 +1789,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; SKX-NEXT: testb $1, %al
; SKX-NEXT: # implicit-def: %xmm0
; SKX-NEXT: je .LBB31_2
-; SKX-NEXT: # BB#1: # %cond.load
+; SKX-NEXT: # %bb.1: # %cond.load
; SKX-NEXT: vmovq %xmm1, %rax
; SKX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SKX-NEXT: .LBB31_2: # %else
@@ -1798,7 +1798,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; SKX-NEXT: kmovw %k0, %eax
; SKX-NEXT: testb $1, %al
; SKX-NEXT: je .LBB31_4
-; SKX-NEXT: # BB#3: # %cond.load1
+; SKX-NEXT: # %bb.3: # %cond.load1
; SKX-NEXT: vpextrq $1, %xmm1, %rax
; SKX-NEXT: vpinsrd $1, (%rax), %xmm0, %xmm0
; SKX-NEXT: .LBB31_4: # %else2
@@ -1807,7 +1807,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; SKX-NEXT: kmovw %k0, %eax
; SKX-NEXT: testb $1, %al
; SKX-NEXT: je .LBB31_6
-; SKX-NEXT: # BB#5: # %cond.load4
+; SKX-NEXT: # %bb.5: # %cond.load4
; SKX-NEXT: vextracti128 $1, %ymm1, %xmm1
; SKX-NEXT: vmovq %xmm1, %rax
; SKX-NEXT: vpinsrd $2, (%rax), %xmm0, %xmm0
@@ -1818,7 +1818,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; SKX-NEXT: retq
;
; SKX_32-LABEL: test30:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: subl $12, %esp
; SKX_32-NEXT: .cfi_def_cfa_offset 16
; SKX_32-NEXT: vpslld $31, %xmm2, %xmm2
@@ -1831,7 +1831,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; SKX_32-NEXT: testb $1, %al
; SKX_32-NEXT: # implicit-def: %xmm1
; SKX_32-NEXT: je .LBB31_2
-; SKX_32-NEXT: # BB#1: # %cond.load
+; SKX_32-NEXT: # %bb.1: # %cond.load
; SKX_32-NEXT: vmovd %xmm2, %eax
; SKX_32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SKX_32-NEXT: .LBB31_2: # %else
@@ -1840,7 +1840,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; SKX_32-NEXT: kmovw %k0, %eax
; SKX_32-NEXT: testb $1, %al
; SKX_32-NEXT: je .LBB31_4
-; SKX_32-NEXT: # BB#3: # %cond.load1
+; SKX_32-NEXT: # %bb.3: # %cond.load1
; SKX_32-NEXT: vpextrd $1, %xmm2, %eax
; SKX_32-NEXT: vpinsrd $1, (%eax), %xmm1, %xmm1
; SKX_32-NEXT: .LBB31_4: # %else2
@@ -1850,7 +1850,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
; SKX_32-NEXT: kmovw %k0, %eax
; SKX_32-NEXT: testb $1, %al
; SKX_32-NEXT: je .LBB31_6
-; SKX_32-NEXT: # BB#5: # %cond.load4
+; SKX_32-NEXT: # %bb.5: # %cond.load4
; SKX_32-NEXT: vpextrd $2, %xmm2, %eax
; SKX_32-NEXT: vpinsrd $2, (%eax), %xmm1, %xmm1
; SKX_32-NEXT: .LBB31_6: # %else5
@@ -1867,7 +1867,7 @@ define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x
declare <16 x float*> @llvm.masked.gather.v16p0f32.v16p0p0f32(<16 x float**>, i32, <16 x i1>, <16 x float*>)
define <16 x float*> @test31(<16 x float**> %ptrs) {
; KNL_64-LABEL: test31:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: kxnorw %k0, %k0, %k2
; KNL_64-NEXT: vpgatherqq (,%zmm0), %zmm2 {%k2}
@@ -1877,14 +1877,14 @@ define <16 x float*> @test31(<16 x float**> %ptrs) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test31:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vpgatherdd (,%zmm0), %zmm1 {%k1}
; KNL_32-NEXT: vmovdqa64 %zmm1, %zmm0
; KNL_32-NEXT: retl
;
; SKX-LABEL: test31:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: kxnorw %k0, %k0, %k2
; SKX-NEXT: vpgatherqq (,%zmm0), %zmm2 {%k2}
@@ -1894,7 +1894,7 @@ define <16 x float*> @test31(<16 x float**> %ptrs) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: test31:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vpgatherdd (,%zmm0), %zmm1 {%k1}
; SKX_32-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1906,7 +1906,7 @@ define <16 x float*> @test31(<16 x float**> %ptrs) {
define <16 x i32> @test_gather_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %src0) {
; KNL_64-LABEL: test_gather_16i32:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL_64-NEXT: vpslld $31, %zmm2, %zmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -1918,7 +1918,7 @@ define <16 x i32> @test_gather_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_gather_16i32:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL_32-NEXT: vpslld $31, %zmm1, %zmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -1927,7 +1927,7 @@ define <16 x i32> @test_gather_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_gather_16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm2, %zmm2
; SKX-NEXT: vpslld $31, %zmm2, %zmm2
; SKX-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -1939,7 +1939,7 @@ define <16 x i32> @test_gather_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_gather_16i32:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpmovsxbd %xmm1, %zmm1
; SKX_32-NEXT: vpslld $31, %zmm1, %zmm1
; SKX_32-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -1951,7 +1951,7 @@ define <16 x i32> @test_gather_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i
}
define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %src0) {
; KNL_64-LABEL: test_gather_16i64:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL_64-NEXT: vpslld $31, %zmm2, %zmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -1963,7 +1963,7 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_gather_16i64:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: pushl %ebp
; KNL_32-NEXT: .cfi_def_cfa_offset 8
; KNL_32-NEXT: .cfi_offset %ebp, -8
@@ -1985,7 +1985,7 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_gather_16i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm2, %zmm2
; SKX-NEXT: vpslld $31, %zmm2, %zmm2
; SKX-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -1997,7 +1997,7 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_gather_16i64:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: pushl %ebp
; SKX_32-NEXT: .cfi_def_cfa_offset 8
; SKX_32-NEXT: .cfi_offset %ebp, -8
@@ -2023,7 +2023,7 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
declare <16 x i64> @llvm.masked.gather.v16i64.v16p0i64(<16 x i64*> %ptrs, i32, <16 x i1> %mask, <16 x i64> %src0)
define <16 x float> @test_gather_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x float> %src0) {
; KNL_64-LABEL: test_gather_16f32:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL_64-NEXT: vpslld $31, %zmm2, %zmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2035,7 +2035,7 @@ define <16 x float> @test_gather_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_gather_16f32:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL_32-NEXT: vpslld $31, %zmm1, %zmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -2044,7 +2044,7 @@ define <16 x float> @test_gather_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_gather_16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm2, %zmm2
; SKX-NEXT: vpslld $31, %zmm2, %zmm2
; SKX-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2056,7 +2056,7 @@ define <16 x float> @test_gather_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_gather_16f32:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpmovsxbd %xmm1, %zmm1
; SKX_32-NEXT: vpslld $31, %zmm1, %zmm1
; SKX_32-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -2068,7 +2068,7 @@ define <16 x float> @test_gather_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16
}
define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x double> %src0) {
; KNL_64-LABEL: test_gather_16f64:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL_64-NEXT: vpslld $31, %zmm2, %zmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2080,7 +2080,7 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_gather_16f64:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: pushl %ebp
; KNL_32-NEXT: .cfi_def_cfa_offset 8
; KNL_32-NEXT: .cfi_offset %ebp, -8
@@ -2102,7 +2102,7 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_gather_16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm2, %zmm2
; SKX-NEXT: vpslld $31, %zmm2, %zmm2
; SKX-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2114,7 +2114,7 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_gather_16f64:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: pushl %ebp
; SKX_32-NEXT: .cfi_def_cfa_offset 8
; SKX_32-NEXT: .cfi_offset %ebp, -8
@@ -2140,7 +2140,7 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
declare <16 x double> @llvm.masked.gather.v16f64.v16p0f64(<16 x double*> %ptrs, i32, <16 x i1> %mask, <16 x double> %src0)
define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %src0) {
; KNL_64-LABEL: test_scatter_16i32:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL_64-NEXT: vpslld $31, %zmm2, %zmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2152,7 +2152,7 @@ define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_scatter_16i32:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL_32-NEXT: vpslld $31, %zmm1, %zmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -2161,7 +2161,7 @@ define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_scatter_16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm2, %zmm2
; SKX-NEXT: vpslld $31, %zmm2, %zmm2
; SKX-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2173,7 +2173,7 @@ define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_scatter_16i32:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpmovsxbd %xmm1, %zmm1
; SKX_32-NEXT: vpslld $31, %zmm1, %zmm1
; SKX_32-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -2185,7 +2185,7 @@ define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %
}
define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %src0) {
; KNL_64-LABEL: test_scatter_16i64:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL_64-NEXT: vpslld $31, %zmm2, %zmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2196,7 +2196,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_scatter_16i64:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: pushl %ebp
; KNL_32-NEXT: .cfi_def_cfa_offset 8
; KNL_32-NEXT: .cfi_offset %ebp, -8
@@ -2218,7 +2218,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_scatter_16i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm2, %zmm2
; SKX-NEXT: vpslld $31, %zmm2, %zmm2
; SKX-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2229,7 +2229,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_scatter_16i64:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: pushl %ebp
; SKX_32-NEXT: .cfi_def_cfa_offset 8
; SKX_32-NEXT: .cfi_offset %ebp, -8
@@ -2255,7 +2255,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
declare void @llvm.masked.scatter.v16i64.v16p0i64(<16 x i64> %src0, <16 x i64*> %ptrs, i32, <16 x i1> %mask)
define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x float> %src0) {
; KNL_64-LABEL: test_scatter_16f32:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL_64-NEXT: vpslld $31, %zmm2, %zmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2267,7 +2267,7 @@ define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x floa
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_scatter_16f32:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL_32-NEXT: vpslld $31, %zmm1, %zmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -2276,7 +2276,7 @@ define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x floa
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_scatter_16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm2, %zmm2
; SKX-NEXT: vpslld $31, %zmm2, %zmm2
; SKX-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2288,7 +2288,7 @@ define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x floa
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_scatter_16f32:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpmovsxbd %xmm1, %zmm1
; SKX_32-NEXT: vpslld $31, %zmm1, %zmm1
; SKX_32-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -2301,7 +2301,7 @@ define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x floa
declare void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float> %src0, <16 x float*> %ptrs, i32, <16 x i1> %mask)
define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x double> %src0) {
; KNL_64-LABEL: test_scatter_16f64:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL_64-NEXT: vpslld $31, %zmm2, %zmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2312,7 +2312,7 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_scatter_16f64:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: pushl %ebp
; KNL_32-NEXT: .cfi_def_cfa_offset 8
; KNL_32-NEXT: .cfi_offset %ebp, -8
@@ -2334,7 +2334,7 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_scatter_16f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm2, %zmm2
; SKX-NEXT: vpslld $31, %zmm2, %zmm2
; SKX-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -2345,7 +2345,7 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_scatter_16f64:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: pushl %ebp
; SKX_32-NEXT: .cfi_def_cfa_offset 8
; SKX_32-NEXT: .cfi_offset %ebp, -8
@@ -2372,7 +2372,7 @@ declare void @llvm.masked.scatter.v16f64.v16p0f64(<16 x double> %src0, <16 x dou
define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i64> %d) {
; KNL_64-LABEL: test_pr28312:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vpsrad $31, %xmm1, %xmm1
@@ -2386,7 +2386,7 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_pr28312:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: pushl %ebp
; KNL_32-NEXT: .cfi_def_cfa_offset 8
; KNL_32-NEXT: .cfi_offset %ebp, -8
@@ -2410,7 +2410,7 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_pr28312:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vpgatherqq (,%ymm0), %ymm1 {%k1}
@@ -2419,7 +2419,7 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_pr28312:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: pushl %ebp
; SKX_32-NEXT: .cfi_def_cfa_offset 8
; SKX_32-NEXT: .cfi_offset %ebp, -8
@@ -2446,28 +2446,28 @@ declare <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*>, i32, <4 x i1>, <
define <8 x i32> @test_global_array(<8 x i64> %indxs) {
; KNL_64-LABEL: test_global_array:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: vpgatherqd glob_array(,%zmm0,4), %ymm1 {%k1}
; KNL_64-NEXT: vmovdqa %ymm1, %ymm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_global_array:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vpgatherqd glob_array(,%zmm0,4), %ymm1 {%k1}
; KNL_32-NEXT: vmovdqa %ymm1, %ymm0
; KNL_32-NEXT: retl
;
; SKX_SMALL-LABEL: test_global_array:
-; SKX_SMALL: # BB#0:
+; SKX_SMALL: # %bb.0:
; SKX_SMALL-NEXT: kxnorw %k0, %k0, %k1
; SKX_SMALL-NEXT: vpgatherqd glob_array(,%zmm0,4), %ymm1 {%k1}
; SKX_SMALL-NEXT: vmovdqa %ymm1, %ymm0
; SKX_SMALL-NEXT: retq
;
; SKX_LARGE-LABEL: test_global_array:
-; SKX_LARGE: # BB#0:
+; SKX_LARGE: # %bb.0:
; SKX_LARGE-NEXT: movabsq $glob_array, %rax
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vpgatherqd (%rax,%zmm0,4), %ymm1 {%k1}
@@ -2475,7 +2475,7 @@ define <8 x i32> @test_global_array(<8 x i64> %indxs) {
; SKX_LARGE-NEXT: retq
;
; SKX_32-LABEL: test_global_array:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vpgatherqd glob_array(,%zmm0,4), %ymm1 {%k1}
; SKX_32-NEXT: vmovdqa %ymm1, %ymm0
@@ -2487,20 +2487,20 @@ define <8 x i32> @test_global_array(<8 x i64> %indxs) {
define void @v1_scatter(<1 x i32>%a1, <1 x i32*> %ptr, <1 x i1> %mask) {
; KNL_64-LABEL: v1_scatter:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: testb $1, %dl
; KNL_64-NEXT: jne .LBB43_1
-; KNL_64-NEXT: # BB#2: # %else
+; KNL_64-NEXT: # %bb.2: # %else
; KNL_64-NEXT: retq
; KNL_64-NEXT: .LBB43_1: # %cond.store
; KNL_64-NEXT: movl %edi, (%rsi)
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: v1_scatter:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: testb $1, {{[0-9]+}}(%esp)
; KNL_32-NEXT: jne .LBB43_1
-; KNL_32-NEXT: # BB#2: # %else
+; KNL_32-NEXT: # %bb.2: # %else
; KNL_32-NEXT: retl
; KNL_32-NEXT: .LBB43_1: # %cond.store
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -2509,20 +2509,20 @@ define void @v1_scatter(<1 x i32>%a1, <1 x i32*> %ptr, <1 x i1> %mask) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: v1_scatter:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: testb $1, %dl
; SKX-NEXT: jne .LBB43_1
-; SKX-NEXT: # BB#2: # %else
+; SKX-NEXT: # %bb.2: # %else
; SKX-NEXT: retq
; SKX-NEXT: .LBB43_1: # %cond.store
; SKX-NEXT: movl %edi, (%rsi)
; SKX-NEXT: retq
;
; SKX_32-LABEL: v1_scatter:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: testb $1, {{[0-9]+}}(%esp)
; SKX_32-NEXT: jne .LBB43_1
-; SKX_32-NEXT: # BB#2: # %else
+; SKX_32-NEXT: # %bb.2: # %else
; SKX_32-NEXT: retl
; SKX_32-NEXT: .LBB43_1: # %cond.store
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -2536,23 +2536,23 @@ declare void @llvm.masked.scatter.v1i32.v1p0i32(<1 x i32>, <1 x i32*>, i32, <1 x
define <1 x i32> @v1_gather(<1 x i32*> %ptr, <1 x i1> %mask, <1 x i32> %src0) {
; KNL_64-LABEL: v1_gather:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: movl (%rdi), %eax
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: v1_gather:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: movl (%eax), %eax
; KNL_32-NEXT: retl
;
; SKX-LABEL: v1_gather:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl (%rdi), %eax
; SKX-NEXT: retq
;
; SKX_32-LABEL: v1_gather:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: movl (%eax), %eax
; SKX_32-NEXT: retl
@@ -2565,7 +2565,7 @@ declare <1 x i32> @llvm.masked.gather.v1i32.v1p0i32(<1 x i32*>, i32, <1 x i1>, <
; This experienced a bad interaction when we widened and then tried to split.
define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: large_index:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; KNL_64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm0, %xmm0
@@ -2580,7 +2580,7 @@ define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: large_index:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; KNL_32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm0, %xmm0
@@ -2597,7 +2597,7 @@ define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <
; KNL_32-NEXT: retl
;
; SKX-LABEL: large_index:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vmovq %rcx, %xmm0
@@ -2608,7 +2608,7 @@ define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <
; SKX-NEXT: retq
;
; SKX_32-LABEL: large_index:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX_32-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -2627,7 +2627,7 @@ define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <
; Make sure we allow the index to be sign-extended from an element size smaller than i32.
define <16 x float> @sext_i8_index(float* %base, <16 x i8> %ind) {
; KNL_64-LABEL: sext_i8_index:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovsxbw %xmm0, %ymm0
; KNL_64-NEXT: vpmovsxwq %xmm0, %zmm1
; KNL_64-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -2640,7 +2640,7 @@ define <16 x float> @sext_i8_index(float* %base, <16 x i8> %ind) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: sext_i8_index:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpmovsxbw %xmm0, %ymm0
; KNL_32-NEXT: vpmovsxwq %xmm0, %zmm1
@@ -2654,7 +2654,7 @@ define <16 x float> @sext_i8_index(float* %base, <16 x i8> %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: sext_i8_index:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbw %xmm0, %ymm0
; SKX-NEXT: vpmovsxwq %xmm0, %zmm1
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -2667,7 +2667,7 @@ define <16 x float> @sext_i8_index(float* %base, <16 x i8> %ind) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: sext_i8_index:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: vpmovsxbw %xmm0, %ymm0
; SKX_32-NEXT: vpmovsxwq %xmm0, %zmm1
@@ -2690,7 +2690,7 @@ define <16 x float> @sext_i8_index(float* %base, <16 x i8> %ind) {
; Make sure we allow the index to be sign-extended from an element size smaller than i32.
define <8 x float> @sext_v8i8_index(float* %base, <8 x i8> %ind) {
; KNL_64-LABEL: sext_v8i8_index:
-; KNL_64: # BB#0:
+; KNL_64: # %bb.0:
; KNL_64-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; KNL_64-NEXT: vpsllq $56, %zmm0, %zmm0
; KNL_64-NEXT: vpsraq $56, %zmm0, %zmm1
@@ -2699,7 +2699,7 @@ define <8 x float> @sext_v8i8_index(float* %base, <8 x i8> %ind) {
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: sext_v8i8_index:
-; KNL_32: # BB#0:
+; KNL_32: # %bb.0:
; KNL_32-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpsllq $56, %zmm0, %zmm0
@@ -2709,7 +2709,7 @@ define <8 x float> @sext_v8i8_index(float* %base, <8 x i8> %ind) {
; KNL_32-NEXT: retl
;
; SKX-LABEL: sext_v8i8_index:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; SKX-NEXT: vpsllq $56, %zmm0, %zmm0
; SKX-NEXT: vpsraq $56, %zmm0, %zmm1
@@ -2718,7 +2718,7 @@ define <8 x float> @sext_v8i8_index(float* %base, <8 x i8> %ind) {
; SKX-NEXT: retq
;
; SKX_32-LABEL: sext_v8i8_index:
-; SKX_32: # BB#0:
+; SKX_32: # %bb.0:
; SKX_32-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: vpsllq $56, %zmm0, %zmm0
diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
index 495983ac573..26f8255ce97 100644
--- a/test/CodeGen/X86/masked_memop.ll
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -10,11 +10,11 @@
define <1 x double> @loadv1(<1 x i64> %trigger, <1 x double>* %addr, <1 x double> %dst) {
; AVX-LABEL: loadv1:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: testq %rdi, %rdi
; AVX-NEXT: ## implicit-def: %xmm1
; AVX-NEXT: je LBB0_1
-; AVX-NEXT: ## BB#2: ## %else
+; AVX-NEXT: ## %bb.2: ## %else
; AVX-NEXT: testq %rdi, %rdi
; AVX-NEXT: jne LBB0_3
; AVX-NEXT: LBB0_4: ## %else
@@ -30,11 +30,11 @@ define <1 x double> @loadv1(<1 x i64> %trigger, <1 x double>* %addr, <1 x double
; AVX-NEXT: retq
;
; AVX512F-LABEL: loadv1:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: testq %rdi, %rdi
; AVX512F-NEXT: ## implicit-def: %xmm1
; AVX512F-NEXT: jne LBB0_2
-; AVX512F-NEXT: ## BB#1: ## %cond.load
+; AVX512F-NEXT: ## %bb.1: ## %cond.load
; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: LBB0_2: ## %else
; AVX512F-NEXT: testq %rdi, %rdi
@@ -44,11 +44,11 @@ define <1 x double> @loadv1(<1 x i64> %trigger, <1 x double>* %addr, <1 x double
; AVX512F-NEXT: retq
;
; SKX-LABEL: loadv1:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: testq %rdi, %rdi
; SKX-NEXT: ## implicit-def: %xmm1
; SKX-NEXT: jne LBB0_2
-; SKX-NEXT: ## BB#1: ## %cond.load
+; SKX-NEXT: ## %bb.1: ## %cond.load
; SKX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; SKX-NEXT: LBB0_2: ## %else
; SKX-NEXT: testq %rdi, %rdi
@@ -64,20 +64,20 @@ declare <1 x double> @llvm.masked.load.v1f64.p0v1f64(<1 x double>*, i32, <1 x i1
define void @storev1(<1 x i32> %trigger, <1 x i32>* %addr, <1 x i32> %val) {
; AVX-LABEL: storev1:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: testl %edi, %edi
; AVX-NEXT: je LBB1_1
-; AVX-NEXT: ## BB#2: ## %else
+; AVX-NEXT: ## %bb.2: ## %else
; AVX-NEXT: retq
; AVX-NEXT: LBB1_1: ## %cond.store
; AVX-NEXT: movl %edx, (%rsi)
; AVX-NEXT: retq
;
; AVX512-LABEL: storev1:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: testl %edi, %edi
; AVX512-NEXT: je LBB1_1
-; AVX512-NEXT: ## BB#2: ## %else
+; AVX512-NEXT: ## %bb.2: ## %else
; AVX512-NEXT: retq
; AVX512-NEXT: LBB1_1: ## %cond.store
; AVX512-NEXT: movl %edx, (%rsi)
@@ -90,7 +90,7 @@ declare void @llvm.masked.store.v1i32.p0v1i32(<1 x i32>, <1 x i32>*, i32, <1 x i
define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double> %dst) {
; AVX-LABEL: test6:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2
@@ -98,7 +98,7 @@ define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double>
; AVX-NEXT: retq
;
; AVX512F-LABEL: test6:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2
@@ -106,7 +106,7 @@ define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double>
; AVX512F-NEXT: retq
;
; SKX-LABEL: test6:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmpd (%rdi), %xmm1, %xmm0 {%k1}
@@ -118,7 +118,7 @@ define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double>
define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %dst) {
; AVX-LABEL: test7:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
@@ -126,7 +126,7 @@ define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %d
; AVX-NEXT: retq
;
; AVX512F-LABEL: test7:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
@@ -134,7 +134,7 @@ define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %d
; AVX512F-NEXT: retq
;
; SKX-LABEL: test7:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1}
@@ -146,7 +146,7 @@ define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %d
define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
; AVX1-LABEL: test8:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
@@ -154,7 +154,7 @@ define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test8:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2
@@ -162,7 +162,7 @@ define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test8:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2
@@ -170,7 +170,7 @@ define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %xmm2, %xmm0, %k1
; SKX-NEXT: vpblendmd (%rdi), %xmm1, %xmm0 {%k1}
@@ -182,28 +182,28 @@ define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
; AVX1-LABEL: test9:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: test9:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test9:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX512F-NEXT: retq
;
; SKX-LABEL: test9:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %xmm2, %xmm0, %k1
; SKX-NEXT: vmovdqu32 %xmm1, (%rdi) {%k1}
@@ -215,7 +215,7 @@ define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
; AVX1-LABEL: test10:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
@@ -227,7 +227,7 @@ define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double
; AVX1-NEXT: retq
;
; AVX2-LABEL: test10:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -236,7 +236,7 @@ define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test10:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -245,7 +245,7 @@ define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double
; AVX512F-NEXT: retq
;
; SKX-LABEL: test10:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmpd (%rdi), %ymm1, %ymm0 {%k1}
@@ -257,7 +257,7 @@ define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double
define <4 x double> @test10b(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
; AVX1-LABEL: test10b:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
@@ -268,7 +268,7 @@ define <4 x double> @test10b(<4 x i32> %trigger, <4 x double>* %addr, <4 x doubl
; AVX1-NEXT: retq
;
; AVX2-LABEL: test10b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -276,7 +276,7 @@ define <4 x double> @test10b(<4 x i32> %trigger, <4 x double>* %addr, <4 x doubl
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test10b:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -284,7 +284,7 @@ define <4 x double> @test10b(<4 x i32> %trigger, <4 x double>* %addr, <4 x doubl
; AVX512F-NEXT: retq
;
; SKX-LABEL: test10b:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; SKX-NEXT: vmovapd (%rdi), %ymm0 {%k1} {z}
@@ -296,7 +296,7 @@ define <4 x double> @test10b(<4 x i32> %trigger, <4 x double>* %addr, <4 x doubl
define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float> %dst) {
; AVX1-LABEL: test11a:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
@@ -307,7 +307,7 @@ define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float>
; AVX1-NEXT: retq
;
; AVX2-LABEL: test11a:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2
@@ -315,7 +315,7 @@ define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float>
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test11a:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -327,7 +327,7 @@ define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float>
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11a:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
; SKX-NEXT: vblendmps (%rdi), %ymm1, %ymm0 {%k1}
@@ -339,7 +339,7 @@ define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float>
define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
; AVX1-LABEL: test11b:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
@@ -352,7 +352,7 @@ define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test11b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
@@ -361,7 +361,7 @@ define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test11b:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -371,7 +371,7 @@ define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11b:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpblendmd (%rdi), %ymm1, %ymm0 {%k1}
@@ -382,7 +382,7 @@ define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
define <8 x float> @test11c(<8 x i1> %mask, <8 x float>* %addr) {
; AVX1-LABEL: test11c:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
@@ -394,7 +394,7 @@ define <8 x float> @test11c(<8 x i1> %mask, <8 x float>* %addr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test11c:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
@@ -402,7 +402,7 @@ define <8 x float> @test11c(<8 x i1> %mask, <8 x float>* %addr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test11c:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -411,7 +411,7 @@ define <8 x float> @test11c(<8 x i1> %mask, <8 x float>* %addr) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11c:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vmovaps (%rdi), %ymm0 {%k1} {z}
@@ -422,7 +422,7 @@ define <8 x float> @test11c(<8 x i1> %mask, <8 x float>* %addr) {
define <8 x i32> @test11d(<8 x i1> %mask, <8 x i32>* %addr) {
; AVX1-LABEL: test11d:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
@@ -434,7 +434,7 @@ define <8 x i32> @test11d(<8 x i1> %mask, <8 x i32>* %addr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test11d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
@@ -442,7 +442,7 @@ define <8 x i32> @test11d(<8 x i1> %mask, <8 x i32>* %addr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test11d:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -451,7 +451,7 @@ define <8 x i32> @test11d(<8 x i1> %mask, <8 x i32>* %addr) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11d:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z}
@@ -462,7 +462,7 @@ define <8 x i32> @test11d(<8 x i1> %mask, <8 x i32>* %addr) {
define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
; AVX1-LABEL: test12:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
@@ -473,7 +473,7 @@ define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test12:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi)
@@ -481,7 +481,7 @@ define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test12:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -493,7 +493,7 @@ define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test12:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
; SKX-NEXT: vmovdqu32 %ymm1, (%rdi) {%k1}
@@ -506,7 +506,7 @@ define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
; AVX1-LABEL: test14:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -515,7 +515,7 @@ define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test14:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -524,7 +524,7 @@ define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test14:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -533,7 +533,7 @@ define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test14:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
@@ -546,7 +546,7 @@ define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
; AVX1-LABEL: test15:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -556,7 +556,7 @@ define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test15:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -566,7 +566,7 @@ define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test15:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -576,7 +576,7 @@ define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test15:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
@@ -589,7 +589,7 @@ define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %dst) {
; AVX1-LABEL: test16:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -599,7 +599,7 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
; AVX1-NEXT: retq
;
; AVX2-LABEL: test16:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -609,7 +609,7 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test16:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -619,7 +619,7 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
; AVX512F-NEXT: retq
;
; SKX-LABEL: test16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
@@ -632,7 +632,7 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
; AVX1-LABEL: test17:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -644,7 +644,7 @@ define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test17:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -656,7 +656,7 @@ define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test17:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -668,7 +668,7 @@ define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test17:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
@@ -683,7 +683,7 @@ define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
; AVX1-LABEL: test18:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
@@ -692,7 +692,7 @@ define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test18:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
@@ -701,7 +701,7 @@ define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test18:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX512F-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
@@ -710,7 +710,7 @@ define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: test18:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
@@ -723,18 +723,18 @@ define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
define <4 x float> @load_all(<4 x i32> %trigger, <4 x float>* %addr) {
; AVX-LABEL: load_all:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: load_all:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vmaskmovps (%rdi), %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: load_all:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -749,19 +749,19 @@ define <4 x float> @load_all(<4 x i32> %trigger, <4 x float>* %addr) {
define <4 x float> @mload_constmask_v4f32(<4 x float>* %addr, <4 x float> %dst) {
; AVX-LABEL: mload_constmask_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = mem[0],xmm0[1],mem[2,3]
; AVX-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v4f32:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} xmm1 = [4294967295,0,4294967295,4294967295]
; AVX512F-NEXT: vmaskmovps (%rdi), %xmm1, %xmm2
; AVX512F-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4f32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $13, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1}
@@ -774,28 +774,28 @@ define <4 x float> @mload_constmask_v4f32(<4 x float>* %addr, <4 x float> %dst)
define <4 x i32> @mload_constmask_v4i32(<4 x i32>* %addr, <4 x i32> %dst) {
; AVX1-LABEL: mload_constmask_v4i32:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [0,4294967295,4294967295,4294967295]
; AVX1-NEXT: vmaskmovps (%rdi), %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: mload_constmask_v4i32:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [0,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpmaskmovd (%rdi), %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v4i32:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [0,4294967295,4294967295,4294967295]
; AVX512F-NEXT: vpmaskmovd (%rdi), %xmm1, %xmm2
; AVX512F-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4i32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $14, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1}
@@ -808,14 +808,14 @@ define <4 x i32> @mload_constmask_v4i32(<4 x i32>* %addr, <4 x i32> %dst) {
define <8 x float> @mload_constmask_v8f32(<8 x float>* %addr, <8 x float> %dst) {
; AVX-LABEL: mload_constmask_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,0,0,0,0,0]
; AVX-NEXT: vmaskmovps (%rdi), %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v8f32:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: movw $7, %ax
; AVX512F-NEXT: kmovw %eax, %k1
@@ -824,7 +824,7 @@ define <8 x float> @mload_constmask_v8f32(<8 x float>* %addr, <8 x float> %dst)
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8f32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $7, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovups (%rdi), %ymm0 {%k1}
@@ -835,21 +835,21 @@ define <8 x float> @mload_constmask_v8f32(<8 x float>* %addr, <8 x float> %dst)
define <4 x double> @mload_constmask_v4f64(<4 x double>* %addr, <4 x double> %dst) {
; AVX-LABEL: mload_constmask_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovapd {{.*#+}} ymm1 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
; AVX-NEXT: vmaskmovpd (%rdi), %ymm1, %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
; AVX-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v4f64:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} ymm1 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
; AVX512F-NEXT: vmaskmovpd (%rdi), %ymm1, %ymm2
; AVX512F-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4f64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $7, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovupd (%rdi), %ymm0 {%k1}
@@ -862,12 +862,12 @@ define <4 x double> @mload_constmask_v4f64(<4 x double>* %addr, <4 x double> %ds
define <8 x i32> @mload_constmask_v8i32(<8 x i32>* %addr, <8 x i32> %dst) {
; AVX-LABEL: mload_constmask_v8i32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2],ymm0[3,4,5,6],mem[7]
; AVX-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v8i32:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: movw $135, %ax
; AVX512F-NEXT: kmovw %eax, %k1
@@ -876,7 +876,7 @@ define <8 x i32> @mload_constmask_v8i32(<8 x i32>* %addr, <8 x i32> %dst) {
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8i32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $-121, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1}
@@ -887,24 +887,24 @@ define <8 x i32> @mload_constmask_v8i32(<8 x i32>* %addr, <8 x i32> %dst) {
define <4 x i64> @mload_constmask_v4i64(<4 x i64>* %addr, <4 x i64> %dst) {
; AVX1-LABEL: mload_constmask_v4i64:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = mem[0],ymm0[1,2],mem[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: mload_constmask_v4i64:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1],ymm0[2,3,4,5],mem[6,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v4i64:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [18446744073709551615,0,0,18446744073709551615]
; AVX512F-NEXT: vpmaskmovq (%rdi), %ymm1, %ymm2
; AVX512F-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4i64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $9, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1}
@@ -917,20 +917,20 @@ define <4 x i64> @mload_constmask_v4i64(<4 x i64>* %addr, <4 x i64> %dst) {
define <8 x double> @mload_constmask_v8f64(<8 x double>* %addr, <8 x double> %dst) {
; AVX-LABEL: mload_constmask_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],mem[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1,2],ymm0[3]
; AVX-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v8f64:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: movb $-121, %al
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovupd (%rdi), %zmm0 {%k1}
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8f64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $-121, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovupd (%rdi), %zmm0 {%k1}
@@ -943,19 +943,19 @@ define <8 x double> @mload_constmask_v8f64(<8 x double>* %addr, <8 x double> %ds
define <4 x double> @mload_constmask_v4f64_undef_passthrough(<4 x double>* %addr) {
; AVX-LABEL: mload_constmask_v4f64_undef_passthrough:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
; AVX-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v4f64_undef_passthrough:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
; AVX512F-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4f64_undef_passthrough:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $7, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovupd (%rdi), %ymm0 {%k1} {z}
@@ -966,25 +966,25 @@ define <4 x double> @mload_constmask_v4f64_undef_passthrough(<4 x double>* %addr
define <4 x i64> @mload_constmask_v4i64_undef_passthrough(<4 x i64>* %addr) {
; AVX1-LABEL: mload_constmask_v4i64_undef_passthrough:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551615,0]
; AVX1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: mload_constmask_v4i64_undef_passthrough:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551615,0]
; AVX2-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mload_constmask_v4i64_undef_passthrough:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551615,0]
; AVX512F-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4i64_undef_passthrough:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movb $6, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z}
@@ -995,25 +995,25 @@ define <4 x i64> @mload_constmask_v4i64_undef_passthrough(<4 x i64>* %addr) {
define void @test21(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
; AVX1-LABEL: test21:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: test21:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test21:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX512F-NEXT: retq
;
; SKX-LABEL: test21:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: vmovdqu32 %xmm1, (%rdi) {%k1}
; SKX-NEXT: retq
@@ -1026,12 +1026,12 @@ define void @test21(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
define void @one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
; AVX-LABEL: one_mask_bit_set1:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set1:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovss %xmm0, (%rdi)
; AVX512-NEXT: retq
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %addr, i32 4, <4 x i1><i1 true, i1 false, i1 false, i1 false>)
@@ -1042,12 +1042,12 @@ define void @one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
define void @one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
; AVX-LABEL: one_mask_bit_set2:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vextractps $2, %xmm0, 8(%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set2:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextractps $2, %xmm0, 8(%rdi)
; AVX512-NEXT: retq
call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>)
@@ -1058,14 +1058,14 @@ define void @one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
define void @one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
; AVX-LABEL: one_mask_bit_set3:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovlps %xmm0, 16(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set3:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovlps %xmm0, 16(%rdi)
; AVX512-NEXT: vzeroupper
@@ -1078,14 +1078,14 @@ define void @one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
define void @one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
; AVX-LABEL: one_mask_bit_set4:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovhpd %xmm0, 24(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set4:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovhpd %xmm0, 24(%rdi)
; AVX512-NEXT: vzeroupper
@@ -1098,14 +1098,14 @@ define void @one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
; AVX-LABEL: one_mask_bit_set5:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX-NEXT: vmovlps %xmm0, 48(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set5:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovlps %xmm0, 48(%rdi)
; AVX512-NEXT: vzeroupper
@@ -1118,12 +1118,12 @@ define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
define <4 x i32> @load_one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
; AVX-LABEL: load_one_mask_bit_set1:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpinsrd $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: load_one_mask_bit_set1:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vpinsrd $0, (%rdi), %xmm0, %xmm0
; AVX512-NEXT: retq
%res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1><i1 true, i1 false, i1 false, i1 false>, <4 x i32> %val)
@@ -1134,12 +1134,12 @@ define <4 x i32> @load_one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
define <4 x float> @load_one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
; AVX-LABEL: load_one_mask_bit_set2:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: retq
;
; AVX512-LABEL: load_one_mask_bit_set2:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX512-NEXT: retq
%res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>, <4 x float> %val)
@@ -1150,21 +1150,21 @@ define <4 x float> @load_one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val)
define <4 x i64> @load_one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
; AVX1-LABEL: load_one_mask_bit_set3:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpinsrq $0, 16(%rdi), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_one_mask_bit_set3:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpinsrq $0, 16(%rdi), %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_one_mask_bit_set3:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpinsrq $0, 16(%rdi), %xmm1, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -1177,14 +1177,14 @@ define <4 x i64> @load_one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
define <4 x double> @load_one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
; AVX-LABEL: load_one_mask_bit_set4:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: load_one_mask_bit_set4:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1197,7 +1197,7 @@ define <4 x double> @load_one_mask_bit_set4(<4 x double>* %addr, <4 x double> %v
define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
; AVX-LABEL: load_one_mask_bit_set5:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
@@ -1205,7 +1205,7 @@ define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %v
; AVX-NEXT: retq
;
; AVX512-LABEL: load_one_mask_bit_set5:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm1
; AVX512-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; AVX512-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
@@ -1219,17 +1219,17 @@ define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %v
define void @trunc_mask(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <4 x i32> %mask) {
; AVX-LABEL: trunc_mask:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmaskmovps %xmm0, %xmm2, (%rdi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_mask:
-; AVX512F: ## BB#0:
+; AVX512F: ## %bb.0:
; AVX512F-NEXT: vmaskmovps %xmm0, %xmm2, (%rdi)
; AVX512F-NEXT: retq
;
; SKX-LABEL: trunc_mask:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpgtd %xmm2, %xmm1, %k1
; SKX-NEXT: vmovups %xmm0, (%rdi) {%k1}
diff --git a/test/CodeGen/X86/memcmp-minsize.ll b/test/CodeGen/X86/memcmp-minsize.ll
index 9c196b13d2e..a1ab4e13006 100644
--- a/test/CodeGen/X86/memcmp-minsize.ll
+++ b/test/CodeGen/X86/memcmp-minsize.ll
@@ -13,7 +13,7 @@ declare i32 @memcmp(i8*, i8*, i64)
define i32 @length2(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $2
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -23,7 +23,7 @@ define i32 @length2(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $2
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -33,7 +33,7 @@ define i32 @length2(i8* %X, i8* %Y) nounwind minsize {
define i1 @length2_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length2_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
@@ -42,7 +42,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length2_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: cmpw (%rsi), %ax
; X64-NEXT: sete %al
@@ -54,14 +54,14 @@ define i1 @length2_eq(i8* %X, i8* %Y) nounwind minsize {
define i1 @length2_eq_const(i8* %X) nounwind minsize {
; X86-LABEL: length2_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpw $12849, (%eax) # imm = 0x3231
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: length2_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpw $12849, (%rdi) # imm = 0x3231
; X64-NEXT: setne %al
; X64-NEXT: retq
@@ -72,7 +72,7 @@ define i1 @length2_eq_const(i8* %X) nounwind minsize {
define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length2_eq_nobuiltin_attr:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $2
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -84,7 +84,7 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length2_eq_nobuiltin_attr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: pushq $2
; X64-NEXT: popq %rdx
@@ -100,7 +100,7 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind minsize {
define i32 @length3(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $3
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -110,7 +110,7 @@ define i32 @length3(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $3
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -120,7 +120,7 @@ define i32 @length3(i8* %X, i8* %Y) nounwind minsize {
define i1 @length3_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length3_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $3
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -132,7 +132,7 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length3_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: pushq $3
; X64-NEXT: popq %rdx
@@ -148,7 +148,7 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind minsize {
define i32 @length4(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $4
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -158,7 +158,7 @@ define i32 @length4(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $4
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -168,7 +168,7 @@ define i32 @length4(i8* %X, i8* %Y) nounwind minsize {
define i1 @length4_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length4_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
@@ -177,7 +177,7 @@ define i1 @length4_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length4_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: cmpl (%rsi), %eax
; X64-NEXT: setne %al
@@ -189,14 +189,14 @@ define i1 @length4_eq(i8* %X, i8* %Y) nounwind minsize {
define i1 @length4_eq_const(i8* %X) nounwind minsize {
; X86-LABEL: length4_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
; X86-NEXT: sete %al
; X86-NEXT: retl
;
; X64-LABEL: length4_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
; X64-NEXT: sete %al
; X64-NEXT: retq
@@ -207,7 +207,7 @@ define i1 @length4_eq_const(i8* %X) nounwind minsize {
define i32 @length5(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length5:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $5
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -217,7 +217,7 @@ define i32 @length5(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $5
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -227,7 +227,7 @@ define i32 @length5(i8* %X, i8* %Y) nounwind minsize {
define i1 @length5_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length5_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $5
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -239,7 +239,7 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length5_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: pushq $5
; X64-NEXT: popq %rdx
@@ -255,7 +255,7 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind minsize {
define i32 @length8(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $8
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -265,7 +265,7 @@ define i32 @length8(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $8
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -275,7 +275,7 @@ define i32 @length8(i8* %X, i8* %Y) nounwind minsize {
define i1 @length8_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length8_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $8
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -287,7 +287,7 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length8_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: cmpq (%rsi), %rax
; X64-NEXT: sete %al
@@ -299,7 +299,7 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind minsize {
define i1 @length8_eq_const(i8* %X) nounwind minsize {
; X86-LABEL: length8_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $8
; X86-NEXT: pushl $.L.str
@@ -311,7 +311,7 @@ define i1 @length8_eq_const(i8* %X) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length8_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
; X64-NEXT: cmpq %rax, (%rdi)
; X64-NEXT: setne %al
@@ -323,7 +323,7 @@ define i1 @length8_eq_const(i8* %X) nounwind minsize {
define i1 @length12_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length12_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $12
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -335,7 +335,7 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length12_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: pushq $12
; X64-NEXT: popq %rdx
@@ -351,7 +351,7 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind minsize {
define i32 @length12(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length12:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $12
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -361,7 +361,7 @@ define i32 @length12(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length12:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $12
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -373,7 +373,7 @@ define i32 @length12(i8* %X, i8* %Y) nounwind minsize {
define i32 @length16(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $16
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -383,7 +383,7 @@ define i32 @length16(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $16
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -393,7 +393,7 @@ define i32 @length16(i8* %X, i8* %Y) nounwind minsize {
define i1 @length16_eq(i8* %x, i8* %y) nounwind minsize {
; X86-NOSSE-LABEL: length16_eq:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $16
; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
@@ -405,7 +405,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind minsize {
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: length16_eq:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
@@ -417,7 +417,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind minsize {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length16_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rsi), %xmm0
; X64-SSE2-NEXT: movdqu (%rdi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
@@ -427,7 +427,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind minsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length16_eq:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
@@ -441,7 +441,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind minsize {
define i1 @length16_eq_const(i8* %X) nounwind minsize {
; X86-NOSSE-LABEL: length16_eq_const:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $16
; X86-NOSSE-NEXT: pushl $.L.str
@@ -453,7 +453,7 @@ define i1 @length16_eq_const(i8* %X) nounwind minsize {
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: length16_eq_const:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
@@ -463,7 +463,7 @@ define i1 @length16_eq_const(i8* %X) nounwind minsize {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length16_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
@@ -472,7 +472,7 @@ define i1 @length16_eq_const(i8* %X) nounwind minsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length16_eq_const:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
@@ -488,7 +488,7 @@ define i1 @length16_eq_const(i8* %X) nounwind minsize {
define i32 @length24(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length24:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -498,7 +498,7 @@ define i32 @length24(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length24:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $24
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -508,7 +508,7 @@ define i32 @length24(i8* %X, i8* %Y) nounwind minsize {
define i1 @length24_eq(i8* %x, i8* %y) nounwind minsize {
; X86-LABEL: length24_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -520,7 +520,7 @@ define i1 @length24_eq(i8* %x, i8* %y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length24_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: pushq $24
; X64-NEXT: popq %rdx
@@ -536,7 +536,7 @@ define i1 @length24_eq(i8* %x, i8* %y) nounwind minsize {
define i1 @length24_eq_const(i8* %X) nounwind minsize {
; X86-LABEL: length24_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl $.L.str
@@ -548,7 +548,7 @@ define i1 @length24_eq_const(i8* %X) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length24_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: pushq $24
; X64-NEXT: popq %rdx
@@ -565,7 +565,7 @@ define i1 @length24_eq_const(i8* %X) nounwind minsize {
define i32 @length32(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $32
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -575,7 +575,7 @@ define i32 @length32(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $32
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -587,7 +587,7 @@ define i32 @length32(i8* %X, i8* %Y) nounwind minsize {
define i1 @length32_eq(i8* %x, i8* %y) nounwind minsize {
; X86-LABEL: length32_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $32
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -599,7 +599,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind minsize {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rax
; X64-SSE2-NEXT: pushq $32
; X64-SSE2-NEXT: popq %rdx
@@ -610,7 +610,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind minsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length32_eq:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
@@ -625,7 +625,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind minsize {
define i1 @length32_eq_const(i8* %X) nounwind minsize {
; X86-LABEL: length32_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $32
; X86-NEXT: pushl $.L.str
@@ -637,7 +637,7 @@ define i1 @length32_eq_const(i8* %X) nounwind minsize {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rax
; X64-SSE2-NEXT: pushq $32
; X64-SSE2-NEXT: popq %rdx
@@ -649,7 +649,7 @@ define i1 @length32_eq_const(i8* %X) nounwind minsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length32_eq_const:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
@@ -664,7 +664,7 @@ define i1 @length32_eq_const(i8* %X) nounwind minsize {
define i32 @length64(i8* %X, i8* %Y) nounwind minsize {
; X86-LABEL: length64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -674,7 +674,7 @@ define i32 @length64(i8* %X, i8* %Y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq $64
; X64-NEXT: popq %rdx
; X64-NEXT: jmp memcmp # TAILCALL
@@ -684,7 +684,7 @@ define i32 @length64(i8* %X, i8* %Y) nounwind minsize {
define i1 @length64_eq(i8* %x, i8* %y) nounwind minsize {
; X86-LABEL: length64_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -696,7 +696,7 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length64_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: pushq $64
; X64-NEXT: popq %rdx
@@ -712,7 +712,7 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind minsize {
define i1 @length64_eq_const(i8* %X) nounwind minsize {
; X86-LABEL: length64_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl $.L.str
@@ -724,7 +724,7 @@ define i1 @length64_eq_const(i8* %X) nounwind minsize {
; X86-NEXT: retl
;
; X64-LABEL: length64_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: pushq $64
; X64-NEXT: popq %rdx
diff --git a/test/CodeGen/X86/memcmp-optsize.ll b/test/CodeGen/X86/memcmp-optsize.ll
index 3f5eeba7055..a5fb85fae5e 100644
--- a/test/CodeGen/X86/memcmp-optsize.ll
+++ b/test/CodeGen/X86/memcmp-optsize.ll
@@ -13,7 +13,7 @@ declare i32 @memcmp(i8*, i8*, i64)
define i32 @length2(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
@@ -26,7 +26,7 @@ define i32 @length2(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzwl (%rsi), %ecx
; X64-NEXT: rolw $8, %ax
@@ -41,7 +41,7 @@ define i32 @length2(i8* %X, i8* %Y) nounwind optsize {
define i1 @length2_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length2_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
@@ -50,7 +50,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length2_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: cmpw (%rsi), %ax
; X64-NEXT: sete %al
@@ -62,7 +62,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) nounwind optsize {
define i1 @length2_eq_const(i8* %X) nounwind optsize {
; X86-LABEL: length2_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzwl (%eax), %eax
; X86-NEXT: cmpl $12849, %eax # imm = 0x3231
@@ -70,7 +70,7 @@ define i1 @length2_eq_const(i8* %X) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length2_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: cmpl $12849, %eax # imm = 0x3231
; X64-NEXT: setne %al
@@ -82,7 +82,7 @@ define i1 @length2_eq_const(i8* %X) nounwind optsize {
define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length2_eq_nobuiltin_attr:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $2
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -94,7 +94,7 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length2_eq_nobuiltin_attr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: movl $2, %edx
; X64-NEXT: callq memcmp
@@ -109,7 +109,7 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind optsize {
define i32 @length3(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length3:
-; X86: # BB#0: # %loadbb
+; X86: # %bb.0: # %loadbb
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -119,7 +119,7 @@ define i32 @length3(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: rolw $8, %si
; X86-NEXT: cmpw %si, %dx
; X86-NEXT: jne .LBB4_1
-; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: # %bb.2: # %loadbb1
; X86-NEXT: movzbl 2(%eax), %eax
; X86-NEXT: movzbl 2(%ecx), %ecx
; X86-NEXT: subl %ecx, %eax
@@ -133,14 +133,14 @@ define i32 @length3(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length3:
-; X64: # BB#0: # %loadbb
+; X64: # %bb.0: # %loadbb
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzwl (%rsi), %ecx
; X64-NEXT: rolw $8, %ax
; X64-NEXT: rolw $8, %cx
; X64-NEXT: cmpw %cx, %ax
; X64-NEXT: jne .LBB4_1
-; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: # %bb.2: # %loadbb1
; X64-NEXT: movzbl 2(%rdi), %eax
; X64-NEXT: movzbl 2(%rsi), %ecx
; X64-NEXT: subl %ecx, %eax
@@ -156,13 +156,13 @@ define i32 @length3(i8* %X, i8* %Y) nounwind optsize {
define i1 @length3_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length3_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: cmpw (%eax), %dx
; X86-NEXT: jne .LBB5_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: movb 2(%ecx), %dl
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: cmpb 2(%eax), %dl
@@ -176,11 +176,11 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length3_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: cmpw (%rsi), %ax
; X64-NEXT: jne .LBB5_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movb 2(%rdi), %cl
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpb 2(%rsi), %cl
@@ -198,7 +198,7 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind optsize {
define i32 @length4(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
@@ -212,7 +212,7 @@ define i32 @length4(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %ecx
; X64-NEXT: movl (%rsi), %edx
; X64-NEXT: bswapl %ecx
@@ -228,7 +228,7 @@ define i32 @length4(i8* %X, i8* %Y) nounwind optsize {
define i1 @length4_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length4_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
@@ -237,7 +237,7 @@ define i1 @length4_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length4_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: cmpl (%rsi), %eax
; X64-NEXT: setne %al
@@ -249,14 +249,14 @@ define i1 @length4_eq(i8* %X, i8* %Y) nounwind optsize {
define i1 @length4_eq_const(i8* %X) nounwind optsize {
; X86-LABEL: length4_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
; X86-NEXT: sete %al
; X86-NEXT: retl
;
; X64-LABEL: length4_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
; X64-NEXT: sete %al
; X64-NEXT: retq
@@ -267,7 +267,7 @@ define i1 @length4_eq_const(i8* %X) nounwind optsize {
define i32 @length5(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length5:
-; X86: # BB#0: # %loadbb
+; X86: # %bb.0: # %loadbb
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -277,7 +277,7 @@ define i32 @length5(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: bswapl %esi
; X86-NEXT: cmpl %esi, %edx
; X86-NEXT: jne .LBB9_1
-; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: # %bb.2: # %loadbb1
; X86-NEXT: movzbl 4(%eax), %eax
; X86-NEXT: movzbl 4(%ecx), %ecx
; X86-NEXT: subl %ecx, %eax
@@ -291,14 +291,14 @@ define i32 @length5(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length5:
-; X64: # BB#0: # %loadbb
+; X64: # %bb.0: # %loadbb
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl (%rsi), %ecx
; X64-NEXT: bswapl %eax
; X64-NEXT: bswapl %ecx
; X64-NEXT: cmpl %ecx, %eax
; X64-NEXT: jne .LBB9_1
-; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: # %bb.2: # %loadbb1
; X64-NEXT: movzbl 4(%rdi), %eax
; X64-NEXT: movzbl 4(%rsi), %ecx
; X64-NEXT: subl %ecx, %eax
@@ -314,13 +314,13 @@ define i32 @length5(i8* %X, i8* %Y) nounwind optsize {
define i1 @length5_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length5_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: cmpl (%eax), %edx
; X86-NEXT: jne .LBB10_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: movb 4(%ecx), %dl
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: cmpb 4(%eax), %dl
@@ -334,11 +334,11 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length5_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: cmpl (%rsi), %eax
; X64-NEXT: jne .LBB10_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movb 4(%rdi), %cl
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpb 4(%rsi), %cl
@@ -356,7 +356,7 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind optsize {
define i32 @length8(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -366,7 +366,7 @@ define i32 @length8(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: bswapl %edx
; X86-NEXT: cmpl %edx, %ecx
; X86-NEXT: jne .LBB11_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: movl 4(%esi), %ecx
; X86-NEXT: movl 4(%eax), %edx
; X86-NEXT: bswapl %ecx
@@ -384,7 +384,7 @@ define i32 @length8(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: bswapq %rcx
@@ -400,13 +400,13 @@ define i32 @length8(i8* %X, i8* %Y) nounwind optsize {
define i1 @length8_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length8_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: cmpl (%eax), %edx
; X86-NEXT: jne .LBB12_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: movl 4(%ecx), %edx
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: cmpl 4(%eax), %edx
@@ -420,7 +420,7 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length8_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: cmpq (%rsi), %rax
; X64-NEXT: sete %al
@@ -432,11 +432,11 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind optsize {
define i1 @length8_eq_const(i8* %X) nounwind optsize {
; X86-LABEL: length8_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl $858927408, (%ecx) # imm = 0x33323130
; X86-NEXT: jne .LBB13_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $926299444, 4(%ecx) # imm = 0x37363534
; X86-NEXT: je .LBB13_3
@@ -449,7 +449,7 @@ define i1 @length8_eq_const(i8* %X) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length8_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
; X64-NEXT: cmpq %rax, (%rdi)
; X64-NEXT: setne %al
@@ -461,7 +461,7 @@ define i1 @length8_eq_const(i8* %X) nounwind optsize {
define i1 @length12_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length12_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $12
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -473,11 +473,11 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length12_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: cmpq (%rsi), %rax
; X64-NEXT: jne .LBB14_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movl 8(%rdi), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl 8(%rsi), %ecx
@@ -495,7 +495,7 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind optsize {
define i32 @length12(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length12:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $12
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -505,14 +505,14 @@ define i32 @length12(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length12:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: jne .LBB15_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movl 8(%rdi), %ecx
; X64-NEXT: movl 8(%rsi), %edx
; X64-NEXT: bswapl %ecx
@@ -535,7 +535,7 @@ define i32 @length12(i8* %X, i8* %Y) nounwind optsize {
define i32 @length16(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $16
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -545,14 +545,14 @@ define i32 @length16(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: jne .LBB16_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movq 8(%rdi), %rcx
; X64-NEXT: movq 8(%rsi), %rdx
; X64-NEXT: bswapq %rcx
@@ -573,7 +573,7 @@ define i32 @length16(i8* %X, i8* %Y) nounwind optsize {
define i1 @length16_eq(i8* %x, i8* %y) nounwind optsize {
; X86-NOSSE-LABEL: length16_eq:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $16
; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
@@ -585,7 +585,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind optsize {
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: length16_eq:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
@@ -597,7 +597,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind optsize {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length16_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
@@ -607,7 +607,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind optsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length16_eq:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
@@ -621,7 +621,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind optsize {
define i1 @length16_eq_const(i8* %X) nounwind optsize {
; X86-NOSSE-LABEL: length16_eq_const:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $16
; X86-NOSSE-NEXT: pushl $.L.str
@@ -633,7 +633,7 @@ define i1 @length16_eq_const(i8* %X) nounwind optsize {
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: length16_eq_const:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
@@ -643,7 +643,7 @@ define i1 @length16_eq_const(i8* %X) nounwind optsize {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length16_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
@@ -652,7 +652,7 @@ define i1 @length16_eq_const(i8* %X) nounwind optsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length16_eq_const:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
@@ -668,7 +668,7 @@ define i1 @length16_eq_const(i8* %X) nounwind optsize {
define i32 @length24(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length24:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -678,7 +678,7 @@ define i32 @length24(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length24:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $24, %edx
; X64-NEXT: jmp memcmp # TAILCALL
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind
@@ -687,7 +687,7 @@ define i32 @length24(i8* %X, i8* %Y) nounwind optsize {
define i1 @length24_eq(i8* %x, i8* %y) nounwind optsize {
; X86-LABEL: length24_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -699,14 +699,14 @@ define i1 @length24_eq(i8* %x, i8* %y) nounwind optsize {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length24_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-SSE2-NEXT: jne .LBB20_2
-; X64-SSE2-NEXT: # BB#1: # %loadbb1
+; X64-SSE2-NEXT: # %bb.1: # %loadbb1
; X64-SSE2-NEXT: movq 16(%rdi), %rcx
; X64-SSE2-NEXT: xorl %eax, %eax
; X64-SSE2-NEXT: cmpq 16(%rsi), %rcx
@@ -719,13 +719,13 @@ define i1 @length24_eq(i8* %x, i8* %y) nounwind optsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length24_eq:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-AVX2-NEXT: jne .LBB20_2
-; X64-AVX2-NEXT: # BB#1: # %loadbb1
+; X64-AVX2-NEXT: # %bb.1: # %loadbb1
; X64-AVX2-NEXT: movq 16(%rdi), %rcx
; X64-AVX2-NEXT: xorl %eax, %eax
; X64-AVX2-NEXT: cmpq 16(%rsi), %rcx
@@ -743,7 +743,7 @@ define i1 @length24_eq(i8* %x, i8* %y) nounwind optsize {
define i1 @length24_eq_const(i8* %X) nounwind optsize {
; X86-LABEL: length24_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl $.L.str
@@ -755,13 +755,13 @@ define i1 @length24_eq_const(i8* %X) nounwind optsize {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length24_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-SSE2-NEXT: jne .LBB21_2
-; X64-SSE2-NEXT: # BB#1: # %loadbb1
+; X64-SSE2-NEXT: # %bb.1: # %loadbb1
; X64-SSE2-NEXT: xorl %eax, %eax
; X64-SSE2-NEXT: movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
; X64-SSE2-NEXT: cmpq %rcx, 16(%rdi)
@@ -774,13 +774,13 @@ define i1 @length24_eq_const(i8* %X) nounwind optsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length24_eq_const:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-AVX2-NEXT: jne .LBB21_2
-; X64-AVX2-NEXT: # BB#1: # %loadbb1
+; X64-AVX2-NEXT: # %bb.1: # %loadbb1
; X64-AVX2-NEXT: xorl %eax, %eax
; X64-AVX2-NEXT: movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
; X64-AVX2-NEXT: cmpq %rcx, 16(%rdi)
@@ -798,7 +798,7 @@ define i1 @length24_eq_const(i8* %X) nounwind optsize {
define i32 @length32(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $32
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -808,7 +808,7 @@ define i32 @length32(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $32, %edx
; X64-NEXT: jmp memcmp # TAILCALL
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
@@ -819,7 +819,7 @@ define i32 @length32(i8* %X, i8* %Y) nounwind optsize {
define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
; X86-NOSSE-LABEL: length32_eq:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $32
; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
@@ -831,7 +831,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: length32_eq:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
@@ -840,7 +840,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
; X86-SSE2-NEXT: pmovmskb %xmm1, %edx
; X86-SSE2-NEXT: cmpl $65535, %edx # imm = 0xFFFF
; X86-SSE2-NEXT: jne .LBB23_2
-; X86-SSE2-NEXT: # BB#1: # %loadbb1
+; X86-SSE2-NEXT: # %bb.1: # %loadbb1
; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
@@ -857,14 +857,14 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-SSE2-NEXT: jne .LBB23_2
-; X64-SSE2-NEXT: # BB#1: # %loadbb1
+; X64-SSE2-NEXT: # %bb.1: # %loadbb1
; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm0
; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
@@ -880,7 +880,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length32_eq:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
@@ -895,7 +895,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
define i1 @length32_eq_const(i8* %X) nounwind optsize {
; X86-NOSSE-LABEL: length32_eq_const:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $32
; X86-NOSSE-NEXT: pushl $.L.str
@@ -907,14 +907,14 @@ define i1 @length32_eq_const(i8* %X) nounwind optsize {
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: length32_eq_const:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %ecx
; X86-SSE2-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
; X86-SSE2-NEXT: jne .LBB24_2
-; X86-SSE2-NEXT: # BB#1: # %loadbb1
+; X86-SSE2-NEXT: # %bb.1: # %loadbb1
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %ecx
@@ -930,13 +930,13 @@ define i1 @length32_eq_const(i8* %X) nounwind optsize {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-SSE2-NEXT: jne .LBB24_2
-; X64-SSE2-NEXT: # BB#1: # %loadbb1
+; X64-SSE2-NEXT: # %bb.1: # %loadbb1
; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %ecx
@@ -951,7 +951,7 @@ define i1 @length32_eq_const(i8* %X) nounwind optsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length32_eq_const:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
@@ -966,7 +966,7 @@ define i1 @length32_eq_const(i8* %X) nounwind optsize {
define i32 @length64(i8* %X, i8* %Y) nounwind optsize {
; X86-LABEL: length64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -976,7 +976,7 @@ define i32 @length64(i8* %X, i8* %Y) nounwind optsize {
; X86-NEXT: retl
;
; X64-LABEL: length64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $64, %edx
; X64-NEXT: jmp memcmp # TAILCALL
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
@@ -985,7 +985,7 @@ define i32 @length64(i8* %X, i8* %Y) nounwind optsize {
define i1 @length64_eq(i8* %x, i8* %y) nounwind optsize {
; X86-LABEL: length64_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -997,7 +997,7 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind optsize {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length64_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rax
; X64-SSE2-NEXT: movl $64, %edx
; X64-SSE2-NEXT: callq memcmp
@@ -1007,13 +1007,13 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind optsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length64_eq:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
; X64-AVX2-NEXT: cmpl $-1, %eax
; X64-AVX2-NEXT: jne .LBB26_2
-; X64-AVX2-NEXT: # BB#1: # %loadbb1
+; X64-AVX2-NEXT: # %bb.1: # %loadbb1
; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb 32(%rsi), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %ecx
@@ -1034,7 +1034,7 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind optsize {
define i1 @length64_eq_const(i8* %X) nounwind optsize {
; X86-LABEL: length64_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl $.L.str
@@ -1046,7 +1046,7 @@ define i1 @length64_eq_const(i8* %X) nounwind optsize {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length64_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rax
; X64-SSE2-NEXT: movl $.L.str, %esi
; X64-SSE2-NEXT: movl $64, %edx
@@ -1057,13 +1057,13 @@ define i1 @length64_eq_const(i8* %X) nounwind optsize {
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: length64_eq_const:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
; X64-AVX2-NEXT: cmpl $-1, %eax
; X64-AVX2-NEXT: jne .LBB27_2
-; X64-AVX2-NEXT: # BB#1: # %loadbb1
+; X64-AVX2-NEXT: # %bb.1: # %loadbb1
; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %ecx
diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index 84fd45b0a08..ed7f496ee34 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -15,12 +15,12 @@ declare i32 @memcmp(i8*, i8*, i64)
define i32 @length0(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length0:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: length0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
@@ -29,12 +29,12 @@ define i32 @length0(i8* %X, i8* %Y) nounwind {
define i1 @length0_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length0_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb $1, %al
; X86-NEXT: retl
;
; X64-LABEL: length0_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movb $1, %al
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
@@ -44,7 +44,7 @@ define i1 @length0_eq(i8* %X, i8* %Y) nounwind {
define i32 @length2(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
@@ -57,7 +57,7 @@ define i32 @length2(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzwl (%rsi), %ecx
; X64-NEXT: rolw $8, %ax
@@ -72,7 +72,7 @@ define i32 @length2(i8* %X, i8* %Y) nounwind {
define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length2_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
@@ -81,7 +81,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length2_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: cmpw (%rsi), %ax
; X64-NEXT: sete %al
@@ -93,7 +93,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
define i1 @length2_eq_const(i8* %X) nounwind {
; X86-LABEL: length2_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzwl (%eax), %eax
; X86-NEXT: cmpl $12849, %eax # imm = 0x3231
@@ -101,7 +101,7 @@ define i1 @length2_eq_const(i8* %X) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length2_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: cmpl $12849, %eax # imm = 0x3231
; X64-NEXT: setne %al
@@ -113,7 +113,7 @@ define i1 @length2_eq_const(i8* %X) nounwind {
define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length2_eq_nobuiltin_attr:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $2
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -125,7 +125,7 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length2_eq_nobuiltin_attr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: movl $2, %edx
; X64-NEXT: callq memcmp
@@ -140,7 +140,7 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
define i32 @length3(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length3:
-; X86: # BB#0: # %loadbb
+; X86: # %bb.0: # %loadbb
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -150,7 +150,7 @@ define i32 @length3(i8* %X, i8* %Y) nounwind {
; X86-NEXT: rolw $8, %si
; X86-NEXT: cmpw %si, %dx
; X86-NEXT: jne .LBB6_1
-; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: # %bb.2: # %loadbb1
; X86-NEXT: movzbl 2(%eax), %eax
; X86-NEXT: movzbl 2(%ecx), %ecx
; X86-NEXT: subl %ecx, %eax
@@ -164,14 +164,14 @@ define i32 @length3(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length3:
-; X64: # BB#0: # %loadbb
+; X64: # %bb.0: # %loadbb
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzwl (%rsi), %ecx
; X64-NEXT: rolw $8, %ax
; X64-NEXT: rolw $8, %cx
; X64-NEXT: cmpw %cx, %ax
; X64-NEXT: jne .LBB6_1
-; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: # %bb.2: # %loadbb1
; X64-NEXT: movzbl 2(%rdi), %eax
; X64-NEXT: movzbl 2(%rsi), %ecx
; X64-NEXT: subl %ecx, %eax
@@ -187,13 +187,13 @@ define i32 @length3(i8* %X, i8* %Y) nounwind {
define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length3_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: cmpw (%eax), %dx
; X86-NEXT: jne .LBB7_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: movb 2(%ecx), %dl
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: cmpb 2(%eax), %dl
@@ -206,11 +206,11 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length3_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: cmpw (%rsi), %ax
; X64-NEXT: jne .LBB7_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movb 2(%rdi), %cl
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpb 2(%rsi), %cl
@@ -228,7 +228,7 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
define i32 @length4(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
@@ -242,7 +242,7 @@ define i32 @length4(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %ecx
; X64-NEXT: movl (%rsi), %edx
; X64-NEXT: bswapl %ecx
@@ -258,7 +258,7 @@ define i32 @length4(i8* %X, i8* %Y) nounwind {
define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length4_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
@@ -267,7 +267,7 @@ define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length4_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: cmpl (%rsi), %eax
; X64-NEXT: setne %al
@@ -279,14 +279,14 @@ define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
define i1 @length4_eq_const(i8* %X) nounwind {
; X86-LABEL: length4_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
; X86-NEXT: sete %al
; X86-NEXT: retl
;
; X64-LABEL: length4_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
; X64-NEXT: sete %al
; X64-NEXT: retq
@@ -297,7 +297,7 @@ define i1 @length4_eq_const(i8* %X) nounwind {
define i32 @length5(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length5:
-; X86: # BB#0: # %loadbb
+; X86: # %bb.0: # %loadbb
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -307,7 +307,7 @@ define i32 @length5(i8* %X, i8* %Y) nounwind {
; X86-NEXT: bswapl %esi
; X86-NEXT: cmpl %esi, %edx
; X86-NEXT: jne .LBB11_1
-; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: # %bb.2: # %loadbb1
; X86-NEXT: movzbl 4(%eax), %eax
; X86-NEXT: movzbl 4(%ecx), %ecx
; X86-NEXT: subl %ecx, %eax
@@ -321,14 +321,14 @@ define i32 @length5(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length5:
-; X64: # BB#0: # %loadbb
+; X64: # %bb.0: # %loadbb
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl (%rsi), %ecx
; X64-NEXT: bswapl %eax
; X64-NEXT: bswapl %ecx
; X64-NEXT: cmpl %ecx, %eax
; X64-NEXT: jne .LBB11_1
-; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: # %bb.2: # %loadbb1
; X64-NEXT: movzbl 4(%rdi), %eax
; X64-NEXT: movzbl 4(%rsi), %ecx
; X64-NEXT: subl %ecx, %eax
@@ -344,13 +344,13 @@ define i32 @length5(i8* %X, i8* %Y) nounwind {
define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length5_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: cmpl (%eax), %edx
; X86-NEXT: jne .LBB12_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: movb 4(%ecx), %dl
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: cmpb 4(%eax), %dl
@@ -363,11 +363,11 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length5_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: cmpl (%rsi), %eax
; X64-NEXT: jne .LBB12_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movb 4(%rdi), %cl
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpb 4(%rsi), %cl
@@ -385,7 +385,7 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
define i32 @length8(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -395,7 +395,7 @@ define i32 @length8(i8* %X, i8* %Y) nounwind {
; X86-NEXT: bswapl %edx
; X86-NEXT: cmpl %edx, %ecx
; X86-NEXT: jne .LBB13_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: movl 4(%esi), %ecx
; X86-NEXT: movl 4(%eax), %edx
; X86-NEXT: bswapl %ecx
@@ -413,7 +413,7 @@ define i32 @length8(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: bswapq %rcx
@@ -429,13 +429,13 @@ define i32 @length8(i8* %X, i8* %Y) nounwind {
define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length8_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: cmpl (%eax), %edx
; X86-NEXT: jne .LBB14_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: movl 4(%ecx), %edx
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: cmpl 4(%eax), %edx
@@ -448,7 +448,7 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length8_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: cmpq (%rsi), %rax
; X64-NEXT: sete %al
@@ -460,11 +460,11 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
define i1 @length8_eq_const(i8* %X) nounwind {
; X86-LABEL: length8_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl $858927408, (%ecx) # imm = 0x33323130
; X86-NEXT: jne .LBB15_2
-; X86-NEXT: # BB#1: # %loadbb1
+; X86-NEXT: # %bb.1: # %loadbb1
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $926299444, 4(%ecx) # imm = 0x37363534
; X86-NEXT: je .LBB15_3
@@ -476,7 +476,7 @@ define i1 @length8_eq_const(i8* %X) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length8_eq_const:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
; X64-NEXT: cmpq %rax, (%rdi)
; X64-NEXT: setne %al
@@ -488,7 +488,7 @@ define i1 @length8_eq_const(i8* %X) nounwind {
define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length12_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $12
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -500,11 +500,11 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length12_eq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: cmpq (%rsi), %rax
; X64-NEXT: jne .LBB16_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movl 8(%rdi), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl 8(%rsi), %ecx
@@ -522,7 +522,7 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
define i32 @length12(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length12:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $12
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -532,14 +532,14 @@ define i32 @length12(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length12:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: jne .LBB17_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movl 8(%rdi), %ecx
; X64-NEXT: movl 8(%rsi), %edx
; X64-NEXT: bswapl %ecx
@@ -562,7 +562,7 @@ define i32 @length12(i8* %X, i8* %Y) nounwind {
define i32 @length16(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $16
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -572,14 +572,14 @@ define i32 @length16(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: jne .LBB18_2
-; X64-NEXT: # BB#1: # %loadbb1
+; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movq 8(%rdi), %rcx
; X64-NEXT: movq 8(%rsi), %rdx
; X64-NEXT: bswapq %rcx
@@ -600,7 +600,7 @@ define i32 @length16(i8* %X, i8* %Y) nounwind {
define i1 @length16_eq(i8* %x, i8* %y) nounwind {
; X86-NOSSE-LABEL: length16_eq:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $16
; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
@@ -612,7 +612,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind {
; X86-NOSSE-NEXT: retl
;
; X86-SSE1-LABEL: length16_eq:
-; X86-SSE1: # BB#0:
+; X86-SSE1: # %bb.0:
; X86-SSE1-NEXT: pushl $0
; X86-SSE1-NEXT: pushl $16
; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
@@ -624,7 +624,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind {
; X86-SSE1-NEXT: retl
;
; X86-SSE2-LABEL: length16_eq:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
@@ -636,7 +636,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length16_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
@@ -646,7 +646,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind {
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: length16_eq:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
@@ -660,7 +660,7 @@ define i1 @length16_eq(i8* %x, i8* %y) nounwind {
define i1 @length16_eq_const(i8* %X) nounwind {
; X86-NOSSE-LABEL: length16_eq_const:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $16
; X86-NOSSE-NEXT: pushl $.L.str
@@ -672,7 +672,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-NOSSE-NEXT: retl
;
; X86-SSE1-LABEL: length16_eq_const:
-; X86-SSE1: # BB#0:
+; X86-SSE1: # %bb.0:
; X86-SSE1-NEXT: pushl $0
; X86-SSE1-NEXT: pushl $16
; X86-SSE1-NEXT: pushl $.L.str
@@ -684,7 +684,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE1-NEXT: retl
;
; X86-SSE2-LABEL: length16_eq_const:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
@@ -694,7 +694,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length16_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
@@ -703,7 +703,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: length16_eq_const:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
@@ -719,7 +719,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
define i32 @length24(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length24:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -729,7 +729,7 @@ define i32 @length24(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length24:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $24, %edx
; X64-NEXT: jmp memcmp # TAILCALL
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind
@@ -738,7 +738,7 @@ define i32 @length24(i8* %X, i8* %Y) nounwind {
define i1 @length24_eq(i8* %x, i8* %y) nounwind {
; X86-LABEL: length24_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -750,14 +750,14 @@ define i1 @length24_eq(i8* %x, i8* %y) nounwind {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length24_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-SSE2-NEXT: jne .LBB22_2
-; X64-SSE2-NEXT: # BB#1: # %loadbb1
+; X64-SSE2-NEXT: # %bb.1: # %loadbb1
; X64-SSE2-NEXT: movq 16(%rdi), %rcx
; X64-SSE2-NEXT: xorl %eax, %eax
; X64-SSE2-NEXT: cmpq 16(%rsi), %rcx
@@ -770,13 +770,13 @@ define i1 @length24_eq(i8* %x, i8* %y) nounwind {
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: length24_eq:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
; X64-AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-AVX-NEXT: jne .LBB22_2
-; X64-AVX-NEXT: # BB#1: # %loadbb1
+; X64-AVX-NEXT: # %bb.1: # %loadbb1
; X64-AVX-NEXT: movq 16(%rdi), %rcx
; X64-AVX-NEXT: xorl %eax, %eax
; X64-AVX-NEXT: cmpq 16(%rsi), %rcx
@@ -794,7 +794,7 @@ define i1 @length24_eq(i8* %x, i8* %y) nounwind {
define i1 @length24_eq_const(i8* %X) nounwind {
; X86-LABEL: length24_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $24
; X86-NEXT: pushl $.L.str
@@ -806,13 +806,13 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length24_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-SSE2-NEXT: jne .LBB23_2
-; X64-SSE2-NEXT: # BB#1: # %loadbb1
+; X64-SSE2-NEXT: # %bb.1: # %loadbb1
; X64-SSE2-NEXT: xorl %eax, %eax
; X64-SSE2-NEXT: movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
; X64-SSE2-NEXT: cmpq %rcx, 16(%rdi)
@@ -825,13 +825,13 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: length24_eq_const:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
; X64-AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-AVX-NEXT: jne .LBB23_2
-; X64-AVX-NEXT: # BB#1: # %loadbb1
+; X64-AVX-NEXT: # %bb.1: # %loadbb1
; X64-AVX-NEXT: xorl %eax, %eax
; X64-AVX-NEXT: movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
; X64-AVX-NEXT: cmpq %rcx, 16(%rdi)
@@ -849,7 +849,7 @@ define i1 @length24_eq_const(i8* %X) nounwind {
define i32 @length32(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $32
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -859,7 +859,7 @@ define i32 @length32(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $32, %edx
; X64-NEXT: jmp memcmp # TAILCALL
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
@@ -870,7 +870,7 @@ define i32 @length32(i8* %X, i8* %Y) nounwind {
define i1 @length32_eq(i8* %x, i8* %y) nounwind {
; X86-NOSSE-LABEL: length32_eq:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $32
; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
@@ -882,7 +882,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
; X86-NOSSE-NEXT: retl
;
; X86-SSE1-LABEL: length32_eq:
-; X86-SSE1: # BB#0:
+; X86-SSE1: # %bb.0:
; X86-SSE1-NEXT: pushl $0
; X86-SSE1-NEXT: pushl $32
; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
@@ -894,7 +894,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
; X86-SSE1-NEXT: retl
;
; X86-SSE2-LABEL: length32_eq:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
@@ -903,7 +903,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
; X86-SSE2-NEXT: pmovmskb %xmm1, %edx
; X86-SSE2-NEXT: cmpl $65535, %edx # imm = 0xFFFF
; X86-SSE2-NEXT: jne .LBB25_2
-; X86-SSE2-NEXT: # BB#1: # %loadbb1
+; X86-SSE2-NEXT: # %bb.1: # %loadbb1
; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
@@ -919,14 +919,14 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-SSE2-NEXT: jne .LBB25_2
-; X64-SSE2-NEXT: # BB#1: # %loadbb1
+; X64-SSE2-NEXT: # %bb.1: # %loadbb1
; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm0
; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm1
; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
@@ -942,13 +942,13 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: length32_eq:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX1-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
; X64-AVX1-NEXT: vpmovmskb %xmm0, %eax
; X64-AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-AVX1-NEXT: jne .LBB25_2
-; X64-AVX1-NEXT: # BB#1: # %loadbb1
+; X64-AVX1-NEXT: # %bb.1: # %loadbb1
; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm0
; X64-AVX1-NEXT: vpcmpeqb 16(%rsi), %xmm0, %xmm0
; X64-AVX1-NEXT: vpmovmskb %xmm0, %ecx
@@ -963,7 +963,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: length32_eq:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
@@ -978,7 +978,7 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
define i1 @length32_eq_const(i8* %X) nounwind {
; X86-NOSSE-LABEL: length32_eq_const:
-; X86-NOSSE: # BB#0:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl $0
; X86-NOSSE-NEXT: pushl $32
; X86-NOSSE-NEXT: pushl $.L.str
@@ -990,7 +990,7 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-NOSSE-NEXT: retl
;
; X86-SSE1-LABEL: length32_eq_const:
-; X86-SSE1: # BB#0:
+; X86-SSE1: # %bb.0:
; X86-SSE1-NEXT: pushl $0
; X86-SSE1-NEXT: pushl $32
; X86-SSE1-NEXT: pushl $.L.str
@@ -1002,14 +1002,14 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE1-NEXT: retl
;
; X86-SSE2-LABEL: length32_eq_const:
-; X86-SSE2: # BB#0:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %ecx
; X86-SSE2-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
; X86-SSE2-NEXT: jne .LBB26_2
-; X86-SSE2-NEXT: # BB#1: # %loadbb1
+; X86-SSE2-NEXT: # %bb.1: # %loadbb1
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %ecx
@@ -1024,13 +1024,13 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-SSE2-NEXT: jne .LBB26_2
-; X64-SSE2-NEXT: # BB#1: # %loadbb1
+; X64-SSE2-NEXT: # %bb.1: # %loadbb1
; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm0
; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
; X64-SSE2-NEXT: pmovmskb %xmm0, %ecx
@@ -1045,13 +1045,13 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: length32_eq_const:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
; X64-AVX1-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpmovmskb %xmm0, %eax
; X64-AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-AVX1-NEXT: jne .LBB26_2
-; X64-AVX1-NEXT: # BB#1: # %loadbb1
+; X64-AVX1-NEXT: # %bb.1: # %loadbb1
; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm0
; X64-AVX1-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpmovmskb %xmm0, %ecx
@@ -1066,7 +1066,7 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: length32_eq_const:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
@@ -1081,7 +1081,7 @@ define i1 @length32_eq_const(i8* %X) nounwind {
define i32 @length64(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -1091,7 +1091,7 @@ define i32 @length64(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $64, %edx
; X64-NEXT: jmp memcmp # TAILCALL
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
@@ -1100,7 +1100,7 @@ define i32 @length64(i8* %X, i8* %Y) nounwind {
define i1 @length64_eq(i8* %x, i8* %y) nounwind {
; X86-LABEL: length64_eq:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -1112,7 +1112,7 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length64_eq:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rax
; X64-SSE2-NEXT: movl $64, %edx
; X64-SSE2-NEXT: callq memcmp
@@ -1122,7 +1122,7 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind {
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: length64_eq:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: pushq %rax
; X64-AVX1-NEXT: movl $64, %edx
; X64-AVX1-NEXT: callq memcmp
@@ -1132,13 +1132,13 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: length64_eq:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
; X64-AVX2-NEXT: cmpl $-1, %eax
; X64-AVX2-NEXT: jne .LBB28_2
-; X64-AVX2-NEXT: # BB#1: # %loadbb1
+; X64-AVX2-NEXT: # %bb.1: # %loadbb1
; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb 32(%rsi), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %ecx
@@ -1159,7 +1159,7 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind {
define i1 @length64_eq_const(i8* %X) nounwind {
; X86-LABEL: length64_eq_const:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $0
; X86-NEXT: pushl $64
; X86-NEXT: pushl $.L.str
@@ -1171,7 +1171,7 @@ define i1 @length64_eq_const(i8* %X) nounwind {
; X86-NEXT: retl
;
; X64-SSE2-LABEL: length64_eq_const:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rax
; X64-SSE2-NEXT: movl $.L.str, %esi
; X64-SSE2-NEXT: movl $64, %edx
@@ -1182,7 +1182,7 @@ define i1 @length64_eq_const(i8* %X) nounwind {
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: length64_eq_const:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: pushq %rax
; X64-AVX1-NEXT: movl $.L.str, %esi
; X64-AVX1-NEXT: movl $64, %edx
@@ -1193,13 +1193,13 @@ define i1 @length64_eq_const(i8* %X) nounwind {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: length64_eq_const:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
; X64-AVX2-NEXT: cmpl $-1, %eax
; X64-AVX2-NEXT: jne .LBB29_2
-; X64-AVX2-NEXT: # BB#1: # %loadbb1
+; X64-AVX2-NEXT: # %bb.1: # %loadbb1
; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm0
; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vpmovmskb %ymm0, %ecx
@@ -1221,7 +1221,7 @@ define i1 @length64_eq_const(i8* %X) nounwind {
; This checks that we do not do stupid things with huge sizes.
define i32 @huge_length(i8* %X, i8* %Y) nounwind {
; X86-LABEL: huge_length:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl $2147483647 # imm = 0x7FFFFFFF
; X86-NEXT: pushl $-1
; X86-NEXT: pushl {{[0-9]+}}(%esp)
@@ -1231,7 +1231,7 @@ define i32 @huge_length(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: huge_length:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF
; X64-NEXT: jmp memcmp # TAILCALL
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind
diff --git a/test/CodeGen/X86/memset-2.ll b/test/CodeGen/X86/memset-2.ll
index 1ac972048f1..e94432884b1 100644
--- a/test/CodeGen/X86/memset-2.ll
+++ b/test/CodeGen/X86/memset-2.ll
@@ -3,7 +3,7 @@
define fastcc void @t1() nounwind {
; CHECK-LABEL: t1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $16, %esp
; CHECK-NEXT: pushl $188
; CHECK-NEXT: pushl $0
@@ -17,7 +17,7 @@ entry:
define fastcc void @t2(i8 signext %c) nounwind {
; CHECK-LABEL: t2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $76, {{[0-9]+}}(%esp)
@@ -31,7 +31,7 @@ declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
define void @t3(i8* nocapture %s, i8 %a) nounwind {
; CHECK-LABEL: t3:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: imull $16843009, %ecx, %ecx ## imm = 0x1010101
@@ -45,7 +45,7 @@ entry:
define void @t4(i8* nocapture %s, i8 %a) nounwind {
; CHECK-LABEL: t4:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: imull $16843009, %ecx, %ecx ## imm = 0x1010101
diff --git a/test/CodeGen/X86/memset-nonzero.ll b/test/CodeGen/X86/memset-nonzero.ll
index f0a957c9417..1c97e8c768c 100644
--- a/test/CodeGen/X86/memset-nonzero.ll
+++ b/test/CodeGen/X86/memset-nonzero.ll
@@ -9,20 +9,20 @@
define void @memset_16_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_16_nonzero_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
; SSE-NEXT: movq %rax, 8(%rdi)
; SSE-NEXT: movq %rax, (%rdi)
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_16_nonzero_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT: movups %xmm0, (%rdi)
; SSE2FAST-NEXT: retq
;
; AVX-LABEL: memset_16_nonzero_bytes:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT: vmovups %xmm0, (%rdi)
; AVX-NEXT: retq
@@ -32,7 +32,7 @@ define void @memset_16_nonzero_bytes(i8* %x) {
define void @memset_32_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_32_nonzero_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
; SSE-NEXT: movq %rax, 24(%rdi)
; SSE-NEXT: movq %rax, 16(%rdi)
@@ -41,14 +41,14 @@ define void @memset_32_nonzero_bytes(i8* %x) {
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_32_nonzero_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT: movups %xmm0, 16(%rdi)
; SSE2FAST-NEXT: movups %xmm0, (%rdi)
; SSE2FAST-NEXT: retq
;
; AVX-LABEL: memset_32_nonzero_bytes:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT: vmovups %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
@@ -59,7 +59,7 @@ define void @memset_32_nonzero_bytes(i8* %x) {
define void @memset_64_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_64_nonzero_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
; SSE-NEXT: movq %rax, 56(%rdi)
; SSE-NEXT: movq %rax, 48(%rdi)
@@ -72,7 +72,7 @@ define void @memset_64_nonzero_bytes(i8* %x) {
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_64_nonzero_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT: movups %xmm0, 48(%rdi)
; SSE2FAST-NEXT: movups %xmm0, 32(%rdi)
@@ -81,7 +81,7 @@ define void @memset_64_nonzero_bytes(i8* %x) {
; SSE2FAST-NEXT: retq
;
; AVX-LABEL: memset_64_nonzero_bytes:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT: vmovups %ymm0, 32(%rdi)
; AVX-NEXT: vmovups %ymm0, (%rdi)
@@ -93,7 +93,7 @@ define void @memset_64_nonzero_bytes(i8* %x) {
define void @memset_128_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_128_nonzero_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
; SSE-NEXT: movq %rax, 120(%rdi)
; SSE-NEXT: movq %rax, 112(%rdi)
@@ -114,7 +114,7 @@ define void @memset_128_nonzero_bytes(i8* %x) {
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_128_nonzero_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT: movups %xmm0, 112(%rdi)
; SSE2FAST-NEXT: movups %xmm0, 96(%rdi)
@@ -127,7 +127,7 @@ define void @memset_128_nonzero_bytes(i8* %x) {
; SSE2FAST-NEXT: retq
;
; AVX-LABEL: memset_128_nonzero_bytes:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT: vmovups %ymm0, 96(%rdi)
; AVX-NEXT: vmovups %ymm0, 64(%rdi)
@@ -141,7 +141,7 @@ define void @memset_128_nonzero_bytes(i8* %x) {
define void @memset_256_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_256_nonzero_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movl $42, %esi
@@ -151,7 +151,7 @@ define void @memset_256_nonzero_bytes(i8* %x) {
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_256_nonzero_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT: movups %xmm0, 240(%rdi)
; SSE2FAST-NEXT: movups %xmm0, 224(%rdi)
@@ -172,7 +172,7 @@ define void @memset_256_nonzero_bytes(i8* %x) {
; SSE2FAST-NEXT: retq
;
; AVX-LABEL: memset_256_nonzero_bytes:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT: vmovups %ymm0, 224(%rdi)
; AVX-NEXT: vmovups %ymm0, 192(%rdi)
@@ -194,7 +194,7 @@ declare i8* @__memset_chk(i8*, i32, i64, i64)
define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_16_nonconst_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzbl %sil, %eax
; SSE-NEXT: movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE-NEXT: imulq %rax, %rcx
@@ -203,7 +203,7 @@ define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_16_nonconst_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movd %esi, %xmm0
; SSE2FAST-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -212,7 +212,7 @@ define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
; SSE2FAST-NEXT: retq
;
; AVX1-LABEL: memset_16_nonconst_bytes:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -220,7 +220,7 @@ define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: memset_16_nonconst_bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
@@ -231,7 +231,7 @@ define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_32_nonconst_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzbl %sil, %eax
; SSE-NEXT: movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE-NEXT: imulq %rax, %rcx
@@ -242,7 +242,7 @@ define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_32_nonconst_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movd %esi, %xmm0
; SSE2FAST-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -252,7 +252,7 @@ define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
; SSE2FAST-NEXT: retq
;
; AVX1-LABEL: memset_32_nonconst_bytes:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -262,7 +262,7 @@ define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: memset_32_nonconst_bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
@@ -274,7 +274,7 @@ define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_64_nonconst_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzbl %sil, %eax
; SSE-NEXT: movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE-NEXT: imulq %rax, %rcx
@@ -289,7 +289,7 @@ define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_64_nonconst_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movd %esi, %xmm0
; SSE2FAST-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -301,7 +301,7 @@ define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
; SSE2FAST-NEXT: retq
;
; AVX1-LABEL: memset_64_nonconst_bytes:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -312,7 +312,7 @@ define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: memset_64_nonconst_bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, 32(%rdi)
@@ -325,7 +325,7 @@ define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_128_nonconst_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzbl %sil, %eax
; SSE-NEXT: movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE-NEXT: imulq %rax, %rcx
@@ -348,7 +348,7 @@ define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_128_nonconst_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movd %esi, %xmm0
; SSE2FAST-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -364,7 +364,7 @@ define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
; SSE2FAST-NEXT: retq
;
; AVX1-LABEL: memset_128_nonconst_bytes:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -377,7 +377,7 @@ define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: memset_128_nonconst_bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, 96(%rdi)
@@ -392,12 +392,12 @@ define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_256_nonconst_bytes:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $256, %edx # imm = 0x100
; SSE-NEXT: jmp memset # TAILCALL
;
; SSE2FAST-LABEL: memset_256_nonconst_bytes:
-; SSE2FAST: # BB#0:
+; SSE2FAST: # %bb.0:
; SSE2FAST-NEXT: movd %esi, %xmm0
; SSE2FAST-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -421,7 +421,7 @@ define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
; SSE2FAST-NEXT: retq
;
; AVX1-LABEL: memset_256_nonconst_bytes:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -438,7 +438,7 @@ define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: memset_256_nonconst_bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, 224(%rdi)
diff --git a/test/CodeGen/X86/memset.ll b/test/CodeGen/X86/memset.ll
index 21cf30d35ec..c9d8fbd58aa 100644
--- a/test/CodeGen/X86/memset.ll
+++ b/test/CodeGen/X86/memset.ll
@@ -7,7 +7,7 @@
define void @t() nounwind {
; X86-LABEL: t:
-; X86: ## BB#0: ## %entry
+; X86: ## %bb.0: ## %entry
; X86-NEXT: subl $44, %esp
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -25,7 +25,7 @@ define void @t() nounwind {
; X86-NEXT: ## -- End function
;
; XMM-LABEL: t:
-; XMM: ## BB#0: ## %entry
+; XMM: ## %bb.0: ## %entry
; XMM-NEXT: subl $60, %esp
; XMM-NEXT: xorps %xmm0, %xmm0
; XMM-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
@@ -38,7 +38,7 @@ define void @t() nounwind {
; XMM-NEXT: ## -- End function
;
; YMM-LABEL: t:
-; YMM: ## BB#0: ## %entry
+; YMM: ## %bb.0: ## %entry
; YMM-NEXT: pushl %ebp
; YMM-NEXT: movl %esp, %ebp
; YMM-NEXT: andl $-32, %esp
@@ -71,7 +71,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
; unaligned loads and stores.
define void @PR15348(i8* %a) {
; X86-LABEL: PR15348:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movb $0, 16(%eax)
; X86-NEXT: movl $0, 12(%eax)
@@ -81,7 +81,7 @@ define void @PR15348(i8* %a) {
; X86-NEXT: retl
;
; XMM-LABEL: PR15348:
-; XMM: ## BB#0:
+; XMM: ## %bb.0:
; XMM-NEXT: movl {{[0-9]+}}(%esp), %eax
; XMM-NEXT: movb $0, 16(%eax)
; XMM-NEXT: movl $0, 12(%eax)
@@ -91,7 +91,7 @@ define void @PR15348(i8* %a) {
; XMM-NEXT: retl
;
; YMM-LABEL: PR15348:
-; YMM: ## BB#0:
+; YMM: ## %bb.0:
; YMM-NEXT: movl {{[0-9]+}}(%esp), %eax
; YMM-NEXT: vxorps %xmm0, %xmm0, %xmm0
; YMM-NEXT: vmovups %xmm0, (%eax)
diff --git a/test/CodeGen/X86/memset64-on-x86-32.ll b/test/CodeGen/X86/memset64-on-x86-32.ll
index a7a3c61b139..0fc21920409 100644
--- a/test/CodeGen/X86/memset64-on-x86-32.ll
+++ b/test/CodeGen/X86/memset64-on-x86-32.ll
@@ -5,7 +5,7 @@
define void @bork() nounwind {
; FAST-LABEL: bork:
-; FAST: # BB#0:
+; FAST: # %bb.0:
; FAST-NEXT: xorps %xmm0, %xmm0
; FAST-NEXT: movups %xmm0, 64
; FAST-NEXT: movups %xmm0, 48
@@ -15,7 +15,7 @@ define void @bork() nounwind {
; FAST-NEXT: retl
;
; SLOW_32-LABEL: bork:
-; SLOW_32: # BB#0:
+; SLOW_32: # %bb.0:
; SLOW_32-NEXT: movl $0, 4
; SLOW_32-NEXT: movl $0, 0
; SLOW_32-NEXT: movl $0, 12
@@ -39,7 +39,7 @@ define void @bork() nounwind {
; SLOW_32-NEXT: retl
;
; SLOW_64-LABEL: bork:
-; SLOW_64: # BB#0:
+; SLOW_64: # %bb.0:
; SLOW_64-NEXT: movq $0, 72
; SLOW_64-NEXT: movq $0, 64
; SLOW_64-NEXT: movq $0, 56
diff --git a/test/CodeGen/X86/merge-consecutive-loads-128.ll b/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 38bb07da229..8c96b2bec8a 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -11,17 +11,17 @@
define <2 x double> @merge_2f64_f64_23(double* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_2f64_f64_23:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups 16(%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: merge_2f64_f64_23:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 16(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_2f64_f64_23:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: fldl 16(%eax)
; X32-SSE1-NEXT: fldl 24(%eax)
@@ -29,7 +29,7 @@ define <2 x double> @merge_2f64_f64_23(double* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_2f64_f64_23:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movups 16(%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -44,17 +44,17 @@ define <2 x double> @merge_2f64_f64_23(double* %ptr) nounwind uwtable noinline s
define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_2i64_i64_12:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups 8(%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: merge_2i64_i64_12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 8(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_2i64_i64_12:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %edi
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %esi
@@ -76,7 +76,7 @@ define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp {
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_2i64_i64_12:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movups 8(%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -91,17 +91,17 @@ define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp {
define <4 x float> @merge_4f32_f32_2345(float* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4f32_f32_2345:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups 8(%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_2345:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 8(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE-LABEL: merge_4f32_f32_2345:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movups 8(%eax), %xmm0
; X32-SSE-NEXT: retl
@@ -122,17 +122,17 @@ define <4 x float> @merge_4f32_f32_2345(float* %ptr) nounwind uwtable noinline s
define <4 x float> @merge_4f32_f32_3zuu(float* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4f32_f32_3zuu:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_3zuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-SSE-LABEL: merge_4f32_f32_3zuu:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: retl
@@ -145,17 +145,17 @@ define <4 x float> @merge_4f32_f32_3zuu(float* %ptr) nounwind uwtable noinline s
define <4 x float> @merge_4f32_f32_34uu(float* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4f32_f32_34uu:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_34uu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4f32_f32_34uu:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -163,7 +163,7 @@ define <4 x float> @merge_4f32_f32_34uu(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_34uu:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: retl
@@ -178,7 +178,7 @@ define <4 x float> @merge_4f32_f32_34uu(float* %ptr) nounwind uwtable noinline s
define <4 x float> @merge_4f32_f32_34z6(float* %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_34z6:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movups 12(%rdi), %xmm0
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
@@ -186,20 +186,20 @@ define <4 x float> @merge_4f32_f32_34z6(float* %ptr) nounwind uwtable noinline s
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_34z6:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movups 12(%rdi), %xmm1
; SSE41-NEXT: xorps %xmm0, %xmm0
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_34z6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = mem[0,1],xmm0[2],mem[3]
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4f32_f32_34z6:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movups 12(%eax), %xmm0
; X32-SSE1-NEXT: xorps %xmm1, %xmm1
@@ -208,7 +208,7 @@ define <4 x float> @merge_4f32_f32_34z6(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_34z6:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movups 12(%eax), %xmm1
; X32-SSE41-NEXT: xorps %xmm0, %xmm0
@@ -228,17 +228,17 @@ define <4 x float> @merge_4f32_f32_34z6(float* %ptr) nounwind uwtable noinline s
define <4 x float> @merge_4f32_f32_45zz(float* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4f32_f32_45zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_45zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4f32_f32_45zz:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -248,7 +248,7 @@ define <4 x float> @merge_4f32_f32_45zz(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_45zz:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: retl
@@ -263,26 +263,26 @@ define <4 x float> @merge_4f32_f32_45zz(float* %ptr) nounwind uwtable noinline s
define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_012u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_012u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_012u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4f32_f32_012u:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -292,7 +292,7 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_012u:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -312,26 +312,26 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s
define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_019u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_019u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_019u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4f32_f32_019u:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -341,7 +341,7 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_019u:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -361,17 +361,17 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s
define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4i32_i32_23u5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups 8(%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_23u5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 8(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_23u5:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %esi
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: .cfi_offset %esi, -8
@@ -387,7 +387,7 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp {
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_23u5:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movups 8(%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -405,19 +405,19 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp {
define <4 x i32> @merge_4i32_i32_23u5_inc2(i32* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4i32_i32_23u5_inc2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups 8(%rdi), %xmm0
; SSE-NEXT: incl 8(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_23u5_inc2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 8(%rdi), %xmm0
; AVX-NEXT: incl 8(%rdi)
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_23u5_inc2:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %edi
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %esi
@@ -439,7 +439,7 @@ define <4 x i32> @merge_4i32_i32_23u5_inc2(i32* %ptr) nounwind uwtable noinline
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc2:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movups 8(%eax), %xmm0
; X32-SSE41-NEXT: incl 8(%eax)
@@ -460,19 +460,19 @@ define <4 x i32> @merge_4i32_i32_23u5_inc2(i32* %ptr) nounwind uwtable noinline
define <4 x i32> @merge_4i32_i32_23u5_inc3(i32* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4i32_i32_23u5_inc3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups 8(%rdi), %xmm0
; SSE-NEXT: incl 12(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_23u5_inc3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 8(%rdi), %xmm0
; AVX-NEXT: incl 12(%rdi)
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_23u5_inc3:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %edi
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %esi
@@ -494,7 +494,7 @@ define <4 x i32> @merge_4i32_i32_23u5_inc3(i32* %ptr) nounwind uwtable noinline
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc3:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movups 8(%eax), %xmm0
; X32-SSE41-NEXT: incl 12(%eax)
@@ -515,17 +515,17 @@ define <4 x i32> @merge_4i32_i32_23u5_inc3(i32* %ptr) nounwind uwtable noinline
define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4i32_i32_3zuu:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_3zuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_3zuu:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movl 12(%ecx), %ecx
@@ -534,7 +534,7 @@ define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_3zuu:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE41-NEXT: retl
@@ -547,17 +547,17 @@ define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4i32_i32_34uu:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_34uu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_34uu:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movl 12(%ecx), %edx
@@ -567,7 +567,7 @@ define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_34uu:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: retl
@@ -582,17 +582,17 @@ define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
define <4 x i32> @merge_4i32_i32_45zz(i32* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4i32_i32_45zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_45zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_45zz:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movl 16(%ecx), %edx
@@ -604,7 +604,7 @@ define <4 x i32> @merge_4i32_i32_45zz(i32* %ptr) nounwind uwtable noinline ssp {
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_45zz:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: retl
@@ -619,19 +619,19 @@ define <4 x i32> @merge_4i32_i32_45zz(i32* %ptr) nounwind uwtable noinline ssp {
define <4 x i32> @merge_4i32_i32_45zz_inc4(i32* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4i32_i32_45zz_inc4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: incl 16(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_45zz_inc4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: incl 16(%rdi)
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_45zz_inc4:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %edi
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %esi
@@ -653,7 +653,7 @@ define <4 x i32> @merge_4i32_i32_45zz_inc4(i32* %ptr) nounwind uwtable noinline
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc4:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: incl 16(%eax)
@@ -671,19 +671,19 @@ define <4 x i32> @merge_4i32_i32_45zz_inc4(i32* %ptr) nounwind uwtable noinline
define <4 x i32> @merge_4i32_i32_45zz_inc5(i32* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4i32_i32_45zz_inc5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: incl 20(%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_45zz_inc5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: incl 20(%rdi)
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_45zz_inc5:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %edi
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %esi
@@ -705,7 +705,7 @@ define <4 x i32> @merge_4i32_i32_45zz_inc5(i32* %ptr) nounwind uwtable noinline
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc5:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: incl 20(%eax)
@@ -723,17 +723,17 @@ define <4 x i32> @merge_4i32_i32_45zz_inc5(i32* %ptr) nounwind uwtable noinline
define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_8i16_i16_23u567u9:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups 4(%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: merge_8i16_i16_23u567u9:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 4(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_8i16_i16_23u567u9:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %edi
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %esi
@@ -755,7 +755,7 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_8i16_i16_23u567u9:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movups 4(%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -782,17 +782,17 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s
define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_8i16_i16_34uuuuuu:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_8i16_i16_34uuuuuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_8i16_i16_34uuuuuu:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movl 6(%ecx), %ecx
@@ -800,7 +800,7 @@ define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_8i16_i16_34uuuuuu:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE41-NEXT: retl
@@ -815,17 +815,17 @@ define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline s
define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_8i16_i16_45u7zzzz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_8i16_i16_45u7zzzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_8i16_i16_45u7zzzz:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movl 8(%ecx), %edx
@@ -837,7 +837,7 @@ define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_8i16_i16_45u7zzzz:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: retl
@@ -859,17 +859,17 @@ define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline s
define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %ebp
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %ebx
@@ -903,7 +903,7 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movups (%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -954,17 +954,17 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin
define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movzwl (%ecx), %edx
@@ -977,7 +977,7 @@ define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noin
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE41-NEXT: retl
@@ -1000,17 +1000,17 @@ define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noin
define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movl (%ecx), %edx
@@ -1022,7 +1022,7 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE41-NEXT: retl
@@ -1052,19 +1052,19 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin
define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
; SSE-LABEL: merge_4i32_i32_combine:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4i32_i32_combine:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovaps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4i32_i32_combine:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1074,7 +1074,7 @@ define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4i32_i32_combine:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1096,21 +1096,21 @@ define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_2i64_i64_12_volatile:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: merge_2i64_i64_12_volatile:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_2i64_i64_12_volatile:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %edi
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %esi
@@ -1132,7 +1132,7 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_2i64_i64_12_volatile:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE41-NEXT: pinsrd $1, 12(%eax), %xmm0
@@ -1150,7 +1150,7 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin
define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_2345_volatile:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -1159,7 +1159,7 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable n
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_2345_volatile:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -1167,7 +1167,7 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable n
; SSE41-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_2345_volatile:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -1175,7 +1175,7 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable n
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: merge_4f32_f32_2345_volatile:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -1187,7 +1187,7 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable n
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_2345_volatile:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
@@ -1215,21 +1215,21 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable n
define <4 x float> @merge_4f32_f32_X0YY(float* %ptr0, float* %ptr1) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_4f32_f32_X0YY:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_X0YY:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0,0]
; AVX-NEXT: retq
;
; X32-SSE-LABEL: merge_4f32_f32_X0YY:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -1252,17 +1252,17 @@ define <4 x float> @merge_4f32_f32_X0YY(float* %ptr0, float* %ptr1) nounwind uwt
; PR31309
define <4 x i32> @load_i32_zext_i128_v4i32(i32* %ptr) {
; SSE-LABEL: load_i32_zext_i128_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: load_i32_zext_i128_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-SSE1-LABEL: load_i32_zext_i128_v4i32:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE1-NEXT: movl (%ecx), %ecx
@@ -1273,7 +1273,7 @@ define <4 x i32> @load_i32_zext_i128_v4i32(i32* %ptr) {
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: load_i32_zext_i128_v4i32:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE41-NEXT: retl
diff --git a/test/CodeGen/X86/merge-consecutive-loads-256.ll b/test/CodeGen/X86/merge-consecutive-loads-256.ll
index 618e316bd07..5693149b592 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-256.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-256.ll
@@ -8,12 +8,12 @@
define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_2f64_23:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 32(%rdi), %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_2f64_23:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 32(%eax), %ymm0
; X32-AVX-NEXT: retl
@@ -27,12 +27,12 @@ define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noi
define <4 x double> @merge_4f64_2f64_2z(<2 x double>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_2f64_2z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps 32(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_2f64_2z:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps 32(%eax), %xmm0
; X32-AVX-NEXT: retl
@@ -44,12 +44,12 @@ define <4 x double> @merge_4f64_2f64_2z(<2 x double>* %ptr) nounwind uwtable noi
define <4 x double> @merge_4f64_f64_2345(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_2345:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 16(%rdi), %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_2345:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 16(%eax), %ymm0
; X32-AVX-NEXT: retl
@@ -70,12 +70,12 @@ define <4 x double> @merge_4f64_f64_2345(double* %ptr) nounwind uwtable noinline
define <4 x double> @merge_4f64_f64_3zuu(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_3zuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_3zuu:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: retl
@@ -88,12 +88,12 @@ define <4 x double> @merge_4f64_f64_3zuu(double* %ptr) nounwind uwtable noinline
define <4 x double> @merge_4f64_f64_34uu(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_34uu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 24(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_34uu:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 24(%eax), %xmm0
; X32-AVX-NEXT: retl
@@ -108,12 +108,12 @@ define <4 x double> @merge_4f64_f64_34uu(double* %ptr) nounwind uwtable noinline
define <4 x double> @merge_4f64_f64_45zz(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_45zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps 32(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_45zz:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps 32(%eax), %xmm0
; X32-AVX-NEXT: retl
@@ -128,13 +128,13 @@ define <4 x double> @merge_4f64_f64_45zz(double* %ptr) nounwind uwtable noinline
define <4 x double> @merge_4f64_f64_34z6(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_34z6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_34z6:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
@@ -154,12 +154,12 @@ define <4 x double> @merge_4f64_f64_34z6(double* %ptr) nounwind uwtable noinline
define <4 x i64> @merge_4i64_2i64_3z(<2 x i64>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4i64_2i64_3z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps 48(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4i64_2i64_3z:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps 48(%eax), %xmm0
; X32-AVX-NEXT: retl
@@ -171,12 +171,12 @@ define <4 x i64> @merge_4i64_2i64_3z(<2 x i64>* %ptr) nounwind uwtable noinline
define <4 x i64> @merge_4i64_i64_1234(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4i64_i64_1234:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups 8(%rdi), %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4i64_i64_1234:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 8(%eax), %ymm0
; X32-AVX-NEXT: retl
@@ -197,12 +197,12 @@ define <4 x i64> @merge_4i64_i64_1234(i64* %ptr) nounwind uwtable noinline ssp {
define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4i64_i64_1zzu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4i64_i64_1zzu:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: retl
@@ -216,12 +216,12 @@ define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
define <4 x i64> @merge_4i64_i64_23zz(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4i64_i64_23zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps 16(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4i64_i64_23zz:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps 16(%eax), %xmm0
; X32-AVX-NEXT: retl
@@ -236,7 +236,7 @@ define <4 x i64> @merge_4i64_i64_23zz(i64* %ptr) nounwind uwtable noinline ssp {
define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noinline ssp {
; AVX1-LABEL: merge_8f32_2f32_23z5:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovups 16(%rdi), %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
@@ -244,7 +244,7 @@ define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noi
; AVX1-NEXT: retq
;
; AVX2-LABEL: merge_8f32_2f32_23z5:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
@@ -252,7 +252,7 @@ define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noi
; AVX2-NEXT: retq
;
; AVX512F-LABEL: merge_8f32_2f32_23z5:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX512F-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
@@ -260,7 +260,7 @@ define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noi
; AVX512F-NEXT: retq
;
; X32-AVX-LABEL: merge_8f32_2f32_23z5:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
@@ -279,13 +279,13 @@ define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noi
define <8 x float> @merge_8f32_4f32_z2(<4 x float>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8f32_4f32_z2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8f32_4f32_z2:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vinsertf128 $1, 32(%eax), %ymm0, %ymm0
@@ -298,12 +298,12 @@ define <8 x float> @merge_8f32_4f32_z2(<4 x float>* %ptr) nounwind uwtable noinl
define <8 x float> @merge_8f32_f32_12zzuuzz(float* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8f32_f32_12zzuuzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8f32_f32_12zzuuzz:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: retl
@@ -322,13 +322,13 @@ define <8 x float> @merge_8f32_f32_12zzuuzz(float* %ptr) nounwind uwtable noinli
define <8 x float> @merge_8f32_f32_1u3u5zu8(float* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8f32_f32_1u3u5zu8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8f32_f32_1u3u5zu8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
@@ -351,13 +351,13 @@ define <8 x float> @merge_8f32_f32_1u3u5zu8(float* %ptr) nounwind uwtable noinli
define <8 x i32> @merge_8i32_4i32_z3(<4 x i32>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8i32_4i32_z3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8i32_4i32_z3:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vinsertf128 $1, 48(%eax), %ymm0, %ymm0
@@ -370,14 +370,14 @@ define <8 x i32> @merge_8i32_4i32_z3(<4 x i32>* %ptr) nounwind uwtable noinline
define <8 x i32> @merge_8i32_i32_56zz9uzz(i32* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8i32_i32_56zz9uzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8i32_i32_56zz9uzz:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -401,13 +401,13 @@ define <8 x i32> @merge_8i32_i32_56zz9uzz(i32* %ptr) nounwind uwtable noinline s
define <8 x i32> @merge_8i32_i32_1u3u5zu8(i32* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_8i32_i32_1u3u5zu8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8i32_i32_1u3u5zu8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
@@ -430,12 +430,12 @@ define <8 x i32> @merge_8i32_i32_1u3u5zu8(i32* %ptr) nounwind uwtable noinline s
define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX-NEXT: retl
@@ -454,12 +454,12 @@ define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable
define <16 x i16> @merge_16i16_i16_45u7uuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: retl
@@ -477,12 +477,12 @@ define <16 x i16> @merge_16i16_i16_45u7uuuuuuuuuuuu(i16* %ptr) nounwind uwtable
define <16 x i16> @merge_16i16_i16_0uu3uuuuuuuuCuEF(i16* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups (%eax), %ymm0
; X32-AVX-NEXT: retl
@@ -506,13 +506,13 @@ define <16 x i16> @merge_16i16_i16_0uu3uuuuuuuuCuEF(i16* %ptr) nounwind uwtable
define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups (%eax), %ymm0
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
@@ -540,12 +540,12 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable
define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX-NEXT: retl
@@ -563,12 +563,12 @@ define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounw
define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX-NEXT: retl
@@ -594,14 +594,14 @@ define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounw
define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_f64_34uz_volatile:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovapd %xmm0, %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_34uz_volatile:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
@@ -619,7 +619,7 @@ define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable
define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind uwtable noinline ssp {
; AVX1-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
; AVX1-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
@@ -630,7 +630,7 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
; AVX2-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
@@ -641,7 +641,7 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
; AVX2-NEXT: retq
;
; AVX512F-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
; AVX512F-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
@@ -652,7 +652,7 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
; AVX512F-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $0, (%eax), %xmm0, %xmm1
diff --git a/test/CodeGen/X86/merge-consecutive-loads-512.ll b/test/CodeGen/X86/merge-consecutive-loads-512.ll
index 716f7767935..62102eb382c 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-512.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-512.ll
@@ -7,14 +7,14 @@
define <8 x double> @merge_8f64_2f64_12u4(<2 x double>* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8f64_2f64_12u4:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups 16(%rdi), %ymm0
; ALL-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm1
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_2f64_12u4:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 16(%eax), %ymm0
; X32-AVX512F-NEXT: vinsertf128 $1, 64(%eax), %ymm0, %ymm1
@@ -34,7 +34,7 @@ define <8 x double> @merge_8f64_2f64_12u4(<2 x double>* %ptr) nounwind uwtable n
define <8 x double> @merge_8f64_2f64_23z5(<2 x double>* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8f64_2f64_23z5:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups 32(%rdi), %ymm0
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vinsertf128 $1, 80(%rdi), %ymm1, %ymm1
@@ -42,7 +42,7 @@ define <8 x double> @merge_8f64_2f64_23z5(<2 x double>* %ptr) nounwind uwtable n
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_2f64_23z5:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 32(%eax), %ymm0
; X32-AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -63,13 +63,13 @@ define <8 x double> @merge_8f64_2f64_23z5(<2 x double>* %ptr) nounwind uwtable n
define <8 x double> @merge_8f64_4f64_z2(<4 x double>* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8f64_4f64_z2:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; ALL-NEXT: vinsertf64x4 $1, 64(%rdi), %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_4f64_z2:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX512F-NEXT: vinsertf64x4 $1, 64(%eax), %zmm0, %zmm0
@@ -82,12 +82,12 @@ define <8 x double> @merge_8f64_4f64_z2(<4 x double>* %ptr) nounwind uwtable noi
define <8 x double> @merge_8f64_f64_23uuuuu9(double* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8f64_f64_23uuuuu9:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups 16(%rdi), %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 16(%eax), %zmm0
; X32-AVX512F-NEXT: retl
@@ -105,12 +105,12 @@ define <8 x double> @merge_8f64_f64_23uuuuu9(double* %ptr) nounwind uwtable noin
define <8 x double> @merge_8f64_f64_12zzuuzz(double* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8f64_f64_12zzuuzz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps 8(%rdi), %xmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_12zzuuzz:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovaps 8(%eax), %xmm0
; X32-AVX512F-NEXT: retl
@@ -129,7 +129,7 @@ define <8 x double> @merge_8f64_f64_12zzuuzz(double* %ptr) nounwind uwtable noin
define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noinline ssp {
; AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movb $32, %al
; AVX512F-NEXT: kmovw %eax, %k0
; AVX512F-NEXT: knotw %k0, %k1
@@ -137,7 +137,7 @@ define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noin
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: merge_8f64_f64_1u3u5zu8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movb $32, %al
; AVX512BW-NEXT: kmovd %eax, %k0
; AVX512BW-NEXT: knotw %k0, %k1
@@ -145,7 +145,7 @@ define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noin
; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: movb $32, %cl
; X32-AVX512F-NEXT: kmovw %ecx, %k0
@@ -170,13 +170,13 @@ define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noin
define <8 x i64> @merge_8i64_4i64_z3(<4 x i64>* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8i64_4i64_z3:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; ALL-NEXT: vinsertf64x4 $1, 96(%rdi), %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8i64_4i64_z3:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX512F-NEXT: vinsertf64x4 $1, 96(%eax), %zmm0, %zmm0
@@ -189,14 +189,14 @@ define <8 x i64> @merge_8i64_4i64_z3(<4 x i64>* %ptr) nounwind uwtable noinline
define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8i64_i64_56zz9uzz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps 40(%rdi), %xmm0
; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8i64_i64_56zz9uzz:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovaps 40(%eax), %xmm0
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
@@ -220,7 +220,7 @@ define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline s
define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline ssp {
; AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movb $32, %al
; AVX512F-NEXT: kmovw %eax, %k0
; AVX512F-NEXT: knotw %k0, %k1
@@ -228,7 +228,7 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: merge_8i64_i64_1u3u5zu8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movb $32, %al
; AVX512BW-NEXT: kmovd %eax, %k0
; AVX512BW-NEXT: knotw %k0, %k1
@@ -236,7 +236,7 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: movb $32, %cl
; X32-AVX512F-NEXT: kmovw %ecx, %k0
@@ -261,12 +261,12 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
define <16 x float> @merge_16f32_f32_89zzzuuuuuuuuuuuz(float* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: retl
@@ -285,12 +285,12 @@ define <16 x float> @merge_16f32_f32_89zzzuuuuuuuuuuuz(float* %ptr) nounwind uwt
define <16 x float> @merge_16f32_f32_45u7uuuuuuuuuuuu(float* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups 16(%rdi), %xmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 16(%eax), %xmm0
; X32-AVX512F-NEXT: retl
@@ -308,12 +308,12 @@ define <16 x float> @merge_16f32_f32_45u7uuuuuuuuuuuu(float* %ptr) nounwind uwta
define <16 x float> @merge_16f32_f32_0uu3uuuuuuuuCuEF(float* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups (%rdi), %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups (%eax), %zmm0
; X32-AVX512F-NEXT: retl
@@ -337,7 +337,7 @@ define <16 x float> @merge_16f32_f32_0uu3uuuuuuuuCuEF(float* %ptr) nounwind uwta
define <16 x float> @merge_16f32_f32_0uu3zzuuuuuzCuEF(float* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups (%rdi), %zmm1
; ALL-NEXT: vxorps %xmm2, %xmm2, %xmm2
; ALL-NEXT: vmovaps {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
@@ -345,7 +345,7 @@ define <16 x float> @merge_16f32_f32_0uu3zzuuuuuzCuEF(float* %ptr) nounwind uwta
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups (%eax), %zmm1
; X32-AVX512F-NEXT: vxorps %xmm2, %xmm2, %xmm2
@@ -375,12 +375,12 @@ define <16 x float> @merge_16f32_f32_0uu3zzuuuuuzCuEF(float* %ptr) nounwind uwta
define <16 x i32> @merge_16i32_i32_12zzzuuuuuuuuuuuz(i32* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: retl
@@ -399,12 +399,12 @@ define <16 x i32> @merge_16i32_i32_12zzzuuuuuuuuuuuz(i32* %ptr) nounwind uwtable
define <16 x i32> @merge_16i32_i32_23u5uuuuuuuuuuuu(i32* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups 8(%rdi), %xmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 8(%eax), %xmm0
; X32-AVX512F-NEXT: retl
@@ -422,12 +422,12 @@ define <16 x i32> @merge_16i32_i32_23u5uuuuuuuuuuuu(i32* %ptr) nounwind uwtable
define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF(i32* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups (%rdi), %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups (%eax), %zmm0
; X32-AVX512F-NEXT: retl
@@ -451,7 +451,7 @@ define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF(i32* %ptr) nounwind uwtable
define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable noinline ssp {
; AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movw $8240, %ax # imm = 0x2030
; AVX512F-NEXT: kmovw %eax, %k0
; AVX512F-NEXT: knotw %k0, %k1
@@ -459,7 +459,7 @@ define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movw $8240, %ax # imm = 0x2030
; AVX512BW-NEXT: kmovd %eax, %k0
; AVX512BW-NEXT: knotw %k0, %k1
@@ -467,7 +467,7 @@ define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable
; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: movw $8240, %cx # imm = 0x2030
; X32-AVX512F-NEXT: kmovw %ecx, %k0
@@ -497,18 +497,18 @@ define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable
define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) nounwind uwtable noinline ssp {
; AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -529,12 +529,12 @@ define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) n
define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: retl
@@ -552,18 +552,18 @@ define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) n
define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
; AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -584,18 +584,18 @@ define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) n
define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
; AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -622,18 +622,18 @@ define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
; AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -661,7 +661,7 @@ define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
define <8 x double> @merge_8f64_f64_23uuuuu9_volatile(double* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8f64_f64_23uuuuu9_volatile:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: vbroadcastsd 72(%rdi), %ymm1
@@ -669,7 +669,7 @@ define <8 x double> @merge_8f64_f64_23uuuuu9_volatile(double* %ptr) nounwind uwt
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9_volatile:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
@@ -690,7 +690,7 @@ define <8 x double> @merge_8f64_f64_23uuuuu9_volatile(double* %ptr) nounwind uwt
define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile(i32* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: vpinsrd $3, 12(%rdi), %xmm0, %xmm0
; ALL-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -701,7 +701,7 @@ define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile(i32* %ptr) nounwind
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: vpinsrd $3, 12(%eax), %xmm0, %xmm0
diff --git a/test/CodeGen/X86/merge-consecutive-stores.ll b/test/CodeGen/X86/merge-consecutive-stores.ll
index 8cb6f3ae1ee..af5fb478e52 100644
--- a/test/CodeGen/X86/merge-consecutive-stores.ll
+++ b/test/CodeGen/X86/merge-consecutive-stores.ll
@@ -6,7 +6,7 @@
define i32 @foo (i64* %so) nounwind uwtable ssp {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl $0, 28(%eax)
; CHECK-NEXT: movl $0, 24(%eax)
diff --git a/test/CodeGen/X86/merge-store-constants.ll b/test/CodeGen/X86/merge-store-constants.ll
index f5c36ca4c2f..b38019f860a 100644
--- a/test/CodeGen/X86/merge-store-constants.ll
+++ b/test/CodeGen/X86/merge-store-constants.ll
@@ -4,14 +4,14 @@
define void @big_nonzero_16_bytes(i32* nocapture %a) {
; X32-LABEL: big_nonzero_16_bytes:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [1,2,3,4]
; X32-NEXT: vmovups %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: big_nonzero_16_bytes:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [1,2,3,4]
; X64-NEXT: vmovups %xmm0, (%rdi)
; X64-NEXT: retq
@@ -32,14 +32,14 @@ define void @big_nonzero_16_bytes(i32* nocapture %a) {
define void @big_nonzero_16_bytes_big64bit_constants(i64* nocapture %a) {
; X32-LABEL: big_nonzero_16_bytes_big64bit_constants:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [1,1,1,3]
; X32-NEXT: vmovups %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: big_nonzero_16_bytes_big64bit_constants:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movabsq $4294967297, %rax # imm = 0x100000001
; X64-NEXT: movq %rax, (%rdi)
; X64-NEXT: movabsq $12884901889, %rax # imm = 0x300000001
@@ -56,7 +56,7 @@ define void @big_nonzero_16_bytes_big64bit_constants(i64* nocapture %a) {
define void @big_nonzero_32_bytes_splat(i32* nocapture %a) {
; X32-LABEL: big_nonzero_32_bytes_splat:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42]
; X32-NEXT: vmovups %ymm0, (%eax)
@@ -64,7 +64,7 @@ define void @big_nonzero_32_bytes_splat(i32* nocapture %a) {
; X32-NEXT: retl
;
; X64-LABEL: big_nonzero_32_bytes_splat:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42]
; X64-NEXT: vmovups %ymm0, (%rdi)
; X64-NEXT: vzeroupper
@@ -92,7 +92,7 @@ define void @big_nonzero_32_bytes_splat(i32* nocapture %a) {
define void @big_nonzero_63_bytes(i8* nocapture %a) {
; X32-LABEL: big_nonzero_63_bytes:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [1,0,2,0,3,0,4,0]
; X32-NEXT: vmovups %ymm0, (%eax)
@@ -107,7 +107,7 @@ define void @big_nonzero_63_bytes(i8* nocapture %a) {
; X32-NEXT: retl
;
; X64-LABEL: big_nonzero_63_bytes:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,3,4]
; X64-NEXT: vmovups %ymm0, (%rdi)
; X64-NEXT: movq $5, 32(%rdi)
diff --git a/test/CodeGen/X86/merge-store-partially-alias-loads.ll b/test/CodeGen/X86/merge-store-partially-alias-loads.ll
index ec1f03100f5..8e3c4305d50 100644
--- a/test/CodeGen/X86/merge-store-partially-alias-loads.ll
+++ b/test/CodeGen/X86/merge-store-partially-alias-loads.ll
@@ -13,7 +13,7 @@
; X86-NEXT: movb [[HI1]], 3([[BASEREG]])
; X86-NEXT: retq
-; DBGDAG-LABEL: Optimized legalized selection DAG: BB#0 'merge_store_partial_overlap_load:'
+; DBGDAG-LABEL: Optimized legalized selection DAG: %bb.0 'merge_store_partial_overlap_load:'
; DBGDAG: [[ENTRYTOKEN:t[0-9]+]]: ch = EntryToken
; DBGDAG-DAG: [[BASEPTR:t[0-9]+]]: i64,ch = CopyFromReg [[ENTRYTOKEN]],
; DBGDAG-DAG: [[ADDPTR:t[0-9]+]]: i64 = add {{(nuw )?}}[[BASEPTR]], Constant:i64<2>
diff --git a/test/CodeGen/X86/merge_store.ll b/test/CodeGen/X86/merge_store.ll
index 1e4ea4cb944..f03175057fd 100644
--- a/test/CodeGen/X86/merge_store.ll
+++ b/test/CodeGen/X86/merge_store.ll
@@ -3,7 +3,7 @@
define void @merge_store(i32* nocapture %a) {
; CHECK-LABEL: merge_store:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: movabsq $4294967297, %rcx # imm = 0x100000001
; CHECK-NEXT: .p2align 4, 0x90
@@ -14,7 +14,7 @@ define void @merge_store(i32* nocapture %a) {
; CHECK-NEXT: addq $4, %rax
; CHECK-NEXT: cmpl $1000, %eax # imm = 0x3E8
; CHECK-NEXT: jl .LBB0_1
-; CHECK-NEXT: # BB#2: # %for.end
+; CHECK-NEXT: # %bb.2: # %for.end
; CHECK-NEXT: retq
entry:
br label %for.body
@@ -43,7 +43,7 @@ entry:
define void @indexed_store_merge(i64 %p, i8* %v) {
; CHECK-LABEL: indexed_store_merge:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $0, 2(%rsi,%rdi)
; CHECK-NEXT: movb $0, (%rsi)
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/merge_store_duplicated_loads.ll b/test/CodeGen/X86/merge_store_duplicated_loads.ll
index cfc39035e40..9ef3255123c 100644
--- a/test/CodeGen/X86/merge_store_duplicated_loads.ll
+++ b/test/CodeGen/X86/merge_store_duplicated_loads.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @merge_double(double* noalias nocapture %st, double* noalias nocapture readonly %ld) #0 {
; CHECK-LABEL: merge_double:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: movsd %xmm0, (%rdi)
@@ -31,7 +31,7 @@ define void @merge_double(double* noalias nocapture %st, double* noalias nocaptu
define void @merge_loadstore_int(i64* noalias nocapture readonly %p, i64* noalias nocapture %q) local_unnamed_addr #0 {
; CHECK-LABEL: merge_loadstore_int:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: movq 8(%rdi), %rcx
; CHECK-NEXT: movq %rax, (%rsi)
@@ -55,7 +55,7 @@ entry:
define i64 @merge_loadstore_int_with_extra_use(i64* noalias nocapture readonly %p, i64* noalias nocapture %q) local_unnamed_addr #0 {
; CHECK-LABEL: merge_loadstore_int_with_extra_use:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: movq 8(%rdi), %rcx
; CHECK-NEXT: movq %rax, (%rsi)
diff --git a/test/CodeGen/X86/mfence.ll b/test/CodeGen/X86/mfence.ll
index b67a5c35504..93d99076d82 100644
--- a/test/CodeGen/X86/mfence.ll
+++ b/test/CodeGen/X86/mfence.ll
@@ -6,12 +6,12 @@
define void @test() {
; X32-LABEL: test:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: mfence
; X32-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: mfence
; X64-NEXT: retq
fence seq_cst
@@ -20,14 +20,14 @@ define void @test() {
define i32 @fence(i32* %ptr) {
; X32-LABEL: fence:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: mfence
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: retl
;
; X64-LABEL: fence:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: mfence
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/misched-copy.ll b/test/CodeGen/X86/misched-copy.ll
index 98890c66ba5..fa1d6d8801c 100644
--- a/test/CodeGen/X86/misched-copy.ll
+++ b/test/CodeGen/X86/misched-copy.ll
@@ -8,7 +8,7 @@
; MUL_HiLo PhysReg use copies should be just above the mul.
; MUL_HiLo PhysReg def copies should be just below the mul.
;
-; CHECK: *** Final schedule for BB#1 ***
+; CHECK: *** Final schedule for %bb.1 ***
; CHECK: %eax<def> = COPY
; CHECK-NEXT: MUL32r %{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
; CHECK-NEXT: COPY %e{{[ad]}}x
diff --git a/test/CodeGen/X86/mmx-arg-passing-x86-64.ll b/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
index 41f9a7822b2..b88916053be 100644
--- a/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
+++ b/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
@@ -8,7 +8,7 @@
define void @t3() nounwind {
; X86-64-LABEL: t3:
-; X86-64: ## BB#0:
+; X86-64: ## %bb.0:
; X86-64-NEXT: movq _g_v8qi@{{.*}}(%rip), %rax
; X86-64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-64-NEXT: movb $1, %al
@@ -21,7 +21,7 @@ define void @t3() nounwind {
define void @t4(x86_mmx %v1, x86_mmx %v2) nounwind {
; X86-64-LABEL: t4:
-; X86-64: ## BB#0:
+; X86-64: ## %bb.0:
; X86-64-NEXT: movdq2q %xmm1, %mm0
; X86-64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X86-64-NEXT: movdq2q %xmm0, %mm0
@@ -41,7 +41,7 @@ define void @t4(x86_mmx %v1, x86_mmx %v2) nounwind {
define void @t5() nounwind {
; X86-64-LABEL: t5:
-; X86-64: ## BB#0:
+; X86-64: ## %bb.0:
; X86-64-NEXT: pushq %rax
; X86-64-NEXT: xorl %edi, %edi
; X86-64-NEXT: callq _pass_v1di
diff --git a/test/CodeGen/X86/mmx-arg-passing.ll b/test/CodeGen/X86/mmx-arg-passing.ll
index 67ccb9e32dd..4ea00b2e9ac 100644
--- a/test/CodeGen/X86/mmx-arg-passing.ll
+++ b/test/CodeGen/X86/mmx-arg-passing.ll
@@ -12,13 +12,13 @@
define void @t1(x86_mmx %v1) nounwind {
; X86-32-LABEL: t1:
-; X86-32: ## BB#0:
+; X86-32: ## %bb.0:
; X86-32-NEXT: movl L_u1$non_lazy_ptr, %eax
; X86-32-NEXT: movq %mm0, (%eax)
; X86-32-NEXT: retl
;
; X86-64-LABEL: t1:
-; X86-64: ## BB#0:
+; X86-64: ## %bb.0:
; X86-64-NEXT: movdq2q %xmm0, %mm0
; X86-64-NEXT: movq _u1@{{.*}}(%rip), %rax
; X86-64-NEXT: movq %mm0, (%rax)
@@ -31,7 +31,7 @@ define void @t1(x86_mmx %v1) nounwind {
define void @t2(<1 x i64> %v1) nounwind {
; X86-32-LABEL: t2:
-; X86-32: ## BB#0:
+; X86-32: ## %bb.0:
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-32-NEXT: movl L_u2$non_lazy_ptr, %edx
@@ -40,7 +40,7 @@ define void @t2(<1 x i64> %v1) nounwind {
; X86-32-NEXT: retl
;
; X86-64-LABEL: t2:
-; X86-64: ## BB#0:
+; X86-64: ## %bb.0:
; X86-64-NEXT: movq _u2@{{.*}}(%rip), %rax
; X86-64-NEXT: movq %rdi, (%rax)
; X86-64-NEXT: retq
diff --git a/test/CodeGen/X86/mmx-bitcast.ll b/test/CodeGen/X86/mmx-bitcast.ll
index 30cf474dc38..d3befdaeff8 100644
--- a/test/CodeGen/X86/mmx-bitcast.ll
+++ b/test/CodeGen/X86/mmx-bitcast.ll
@@ -3,7 +3,7 @@
define i64 @t0(x86_mmx* %p) {
; CHECK-LABEL: t0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: paddq %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
@@ -16,7 +16,7 @@ define i64 @t0(x86_mmx* %p) {
define i64 @t1(x86_mmx* %p) {
; CHECK-LABEL: t1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: paddd %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
@@ -29,7 +29,7 @@ define i64 @t1(x86_mmx* %p) {
define i64 @t2(x86_mmx* %p) {
; CHECK-LABEL: t2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: paddw %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
@@ -42,7 +42,7 @@ define i64 @t2(x86_mmx* %p) {
define i64 @t3(x86_mmx* %p) {
; CHECK-LABEL: t3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq (%rdi), %mm0
; CHECK-NEXT: paddb %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
@@ -57,7 +57,7 @@ define i64 @t3(x86_mmx* %p) {
define void @t4(<1 x i64> %A, <1 x i64> %B) {
; CHECK-LABEL: t4:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movd %rdi, %mm0
; CHECK-NEXT: movd %rsi, %mm1
; CHECK-NEXT: paddusw %mm0, %mm1
@@ -76,7 +76,7 @@ entry:
define i64 @t5(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: t5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movd %esi, %xmm0
; CHECK-NEXT: movd %edi, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -92,7 +92,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
define <1 x i64> @t6(i64 %t) {
; CHECK-LABEL: t6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movd %rdi, %mm0
; CHECK-NEXT: psllq $48, %mm0
; CHECK-NEXT: movd %mm0, %rax
diff --git a/test/CodeGen/X86/mmx-coalescing.ll b/test/CodeGen/X86/mmx-coalescing.ll
index a515e5ee375..c23e732d9bf 100644
--- a/test/CodeGen/X86/mmx-coalescing.ll
+++ b/test/CodeGen/X86/mmx-coalescing.ll
@@ -8,7 +8,7 @@
define i32 @test(%SA* %pSA, i16* %A, i32 %B, i32 %C, i32 %D, i8* %E) {
entry:
; CHECK-LABEL: test
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufw
; CHECK-NEXT: movd
; CHECK-NOT: movd
diff --git a/test/CodeGen/X86/mmx-cvt.ll b/test/CodeGen/X86/mmx-cvt.ll
index fd6c5081b5a..ff4edcc82ae 100644
--- a/test/CodeGen/X86/mmx-cvt.ll
+++ b/test/CodeGen/X86/mmx-cvt.ll
@@ -7,7 +7,7 @@
define void @cvt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
; X86-LABEL: cvt_v2f64_v2i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -25,7 +25,7 @@ define void @cvt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: cvt_v2f64_v2i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtpd2pi %xmm0, %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq %mm0, (%rdi)
@@ -43,7 +43,7 @@ define void @cvt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
define void @cvtt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
; X86-LABEL: cvtt_v2f64_v2i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -61,7 +61,7 @@ define void @cvtt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: cvtt_v2f64_v2i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttpd2pi %xmm0, %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq %mm0, (%rdi)
@@ -79,7 +79,7 @@ define void @cvtt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
define void @fptosi_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
; X86-LABEL: fptosi_v2f64_v2i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -97,7 +97,7 @@ define void @fptosi_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: fptosi_v2f64_v2i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttpd2pi %xmm0, %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq %mm0, (%rdi)
@@ -113,7 +113,7 @@ define void @fptosi_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
define void @cvt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
; X86-LABEL: cvt_v2f32_v2i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -131,7 +131,7 @@ define void @cvt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: cvt_v2f32_v2i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtps2pi %xmm0, %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq %mm0, (%rdi)
@@ -149,7 +149,7 @@ define void @cvt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
define void @cvtt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
; X86-LABEL: cvtt_v2f32_v2i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -167,7 +167,7 @@ define void @cvtt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: cvtt_v2f32_v2i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttps2pi %xmm0, %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq %mm0, (%rdi)
@@ -185,7 +185,7 @@ define void @cvtt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
define void @fptosi_v4f32_v4i32(<4 x float>, <1 x i64>*) nounwind {
; X86-LABEL: fptosi_v4f32_v4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -203,7 +203,7 @@ define void @fptosi_v4f32_v4i32(<4 x float>, <1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: fptosi_v4f32_v4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttps2pi %xmm0, %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq %mm0, (%rdi)
@@ -220,7 +220,7 @@ define void @fptosi_v4f32_v4i32(<4 x float>, <1 x i64>*) nounwind {
define void @fptosi_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
; X86-LABEL: fptosi_v2f32_v2i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -238,7 +238,7 @@ define void @fptosi_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: fptosi_v2f32_v2i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttps2pi %xmm0, %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq %mm0, (%rdi)
@@ -259,7 +259,7 @@ define void @fptosi_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
define <2 x double> @sitofp_v2i32_v2f64(<1 x i64>*) nounwind {
; X86-LABEL: sitofp_v2i32_v2f64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -274,7 +274,7 @@ define <2 x double> @sitofp_v2i32_v2f64(<1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: sitofp_v2i32_v2f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq2dq %mm0, %xmm0
@@ -293,7 +293,7 @@ define <2 x double> @sitofp_v2i32_v2f64(<1 x i64>*) nounwind {
define <4 x float> @sitofp_v2i32_v2f32(<1 x i64>*) nounwind {
; X86-LABEL: sitofp_v2i32_v2f32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -309,7 +309,7 @@ define <4 x float> @sitofp_v2i32_v2f32(<1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: sitofp_v2i32_v2f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
@@ -327,7 +327,7 @@ define <4 x float> @sitofp_v2i32_v2f32(<1 x i64>*) nounwind {
define <4 x float> @cvt_v2i32_v2f32(<1 x i64>*) nounwind {
; X86-LABEL: cvt_v2i32_v2f32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -343,7 +343,7 @@ define <4 x float> @cvt_v2i32_v2f32(<1 x i64>*) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: cvt_v2i32_v2f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movd %mm0, %rax
diff --git a/test/CodeGen/X86/mmx-fold-load.ll b/test/CodeGen/X86/mmx-fold-load.ll
index 832743870fb..601d72c0d08 100644
--- a/test/CodeGen/X86/mmx-fold-load.ll
+++ b/test/CodeGen/X86/mmx-fold-load.ll
@@ -4,7 +4,7 @@
define i64 @t0(<1 x i64>* %a, i32* %b) nounwind {
; X86-LABEL: t0:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -22,7 +22,7 @@ define i64 @t0(<1 x i64>* %a, i32* %b) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t0:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: psllq %mm1, %mm0
@@ -40,7 +40,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
define i64 @t1(<1 x i64>* %a, i32* %b) nounwind {
; X86-LABEL: t1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -58,7 +58,7 @@ define i64 @t1(<1 x i64>* %a, i32* %b) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: psrlq %mm1, %mm0
@@ -76,7 +76,7 @@ declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32)
define i64 @t2(<1 x i64>* %a, i32* %b) nounwind {
; X86-LABEL: t2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -94,7 +94,7 @@ define i64 @t2(<1 x i64>* %a, i32* %b) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: psllw %mm1, %mm0
@@ -112,7 +112,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx, i32)
define i64 @t3(<1 x i64>* %a, i32* %b) nounwind {
; X86-LABEL: t3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -130,7 +130,7 @@ define i64 @t3(<1 x i64>* %a, i32* %b) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: psrlw %mm1, %mm0
@@ -148,7 +148,7 @@ declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32)
define i64 @t4(<1 x i64>* %a, i32* %b) nounwind {
; X86-LABEL: t4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -166,7 +166,7 @@ define i64 @t4(<1 x i64>* %a, i32* %b) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: pslld %mm1, %mm0
@@ -184,7 +184,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx, i32)
define i64 @t5(<1 x i64>* %a, i32* %b) nounwind {
; X86-LABEL: t5:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -202,7 +202,7 @@ define i64 @t5(<1 x i64>* %a, i32* %b) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t5:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: psrld %mm1, %mm0
@@ -220,7 +220,7 @@ declare x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx, i32)
define i64 @t6(<1 x i64>* %a, i32* %b) nounwind {
; X86-LABEL: t6:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -238,7 +238,7 @@ define i64 @t6(<1 x i64>* %a, i32* %b) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t6:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: psraw %mm1, %mm0
@@ -256,7 +256,7 @@ declare x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx, i32)
define i64 @t7(<1 x i64>* %a, i32* %b) nounwind {
; X86-LABEL: t7:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -274,7 +274,7 @@ define i64 @t7(<1 x i64>* %a, i32* %b) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t7:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: movd (%rsi), %mm1
; X64-NEXT: psrad %mm1, %mm0
@@ -292,7 +292,7 @@ declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32)
define i64 @tt0(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt0:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -308,7 +308,7 @@ define i64 @tt0(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt0:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: paddb (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -325,7 +325,7 @@ declare void @llvm.x86.mmx.emms()
define i64 @tt1(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -341,7 +341,7 @@ define i64 @tt1(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: paddw (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -357,7 +357,7 @@ declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
define i64 @tt2(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -373,7 +373,7 @@ define i64 @tt2(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: paddd (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -389,7 +389,7 @@ declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
define i64 @tt3(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -405,7 +405,7 @@ define i64 @tt3(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: paddq (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -421,7 +421,7 @@ declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
define i64 @tt4(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -437,7 +437,7 @@ define i64 @tt4(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: paddusb (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -453,7 +453,7 @@ declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
define i64 @tt5(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt5:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -469,7 +469,7 @@ define i64 @tt5(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt5:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: paddusw (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -485,7 +485,7 @@ declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
define i64 @tt6(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt6:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -501,7 +501,7 @@ define i64 @tt6(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt6:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrlw (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -517,7 +517,7 @@ declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx)
define i64 @tt7(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt7:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -533,7 +533,7 @@ define i64 @tt7(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt7:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrld (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -549,7 +549,7 @@ declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx)
define i64 @tt8(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-LABEL: tt8:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -565,7 +565,7 @@ define i64 @tt8(x86_mmx %t, x86_mmx* %q) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: tt8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrlq (%rdi), %mm0
; X64-NEXT: movd %mm0, %rax
; X64-NEXT: emms
@@ -581,7 +581,7 @@ declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx)
define void @test_psrlq_by_volatile_shift_amount(x86_mmx* %t) nounwind {
; X86-LABEL: test_psrlq_by_volatile_shift_amount:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -599,7 +599,7 @@ define void @test_psrlq_by_volatile_shift_amount(x86_mmx* %t) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_psrlq_by_volatile_shift_amount:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl $1, -{{[0-9]+}}(%rsp)
; X64-NEXT: movd -{{[0-9]+}}(%rsp), %mm0
; X64-NEXT: movl $255, %eax
diff --git a/test/CodeGen/X86/mmx-schedule.ll b/test/CodeGen/X86/mmx-schedule.ll
index 6b99559d380..a188e4b0ac7 100644
--- a/test/CodeGen/X86/mmx-schedule.ll
+++ b/test/CodeGen/X86/mmx-schedule.ll
@@ -13,7 +13,7 @@
define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; GENERIC-LABEL: test_cvtpd2pi:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtpd2pi (%rdi), %mm0 # sched: [10:1.00]
; GENERIC-NEXT: cvtpd2pi %xmm0, %mm1 # sched: [4:1.00]
; GENERIC-NEXT: por %mm1, %mm0 # sched: [1:1.00]
@@ -21,7 +21,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtpd2pi:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtpd2pi (%rdi), %mm0 # sched: [8:4.00]
; ATOM-NEXT: cvtpd2pi %xmm0, %mm1 # sched: [7:3.50]
; ATOM-NEXT: por %mm1, %mm0 # sched: [1:0.50]
@@ -29,7 +29,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtpd2pi:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtpd2pi (%rdi), %mm1 # sched: [7:1.00]
; SLM-NEXT: cvtpd2pi %xmm0, %mm0 # sched: [4:0.50]
; SLM-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -37,7 +37,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtpd2pi:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: cvtpd2pi (%rdi), %mm0 # sched: [10:1.00]
; SANDY-NEXT: cvtpd2pi %xmm0, %mm1 # sched: [4:1.00]
; SANDY-NEXT: por %mm1, %mm0 # sched: [1:1.00]
@@ -45,7 +45,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2pi:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: cvtpd2pi (%rdi), %mm0 # sched: [4:1.00]
; HASWELL-NEXT: cvtpd2pi %xmm0, %mm1 # sched: [4:1.00]
; HASWELL-NEXT: por %mm1, %mm0 # sched: [1:0.33]
@@ -53,7 +53,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtpd2pi:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: cvtpd2pi (%rdi), %mm0 # sched: [9:1.00]
; BROADWELL-NEXT: cvtpd2pi %xmm0, %mm1 # sched: [4:1.00]
; BROADWELL-NEXT: por %mm1, %mm0 # sched: [1:0.33]
@@ -61,7 +61,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtpd2pi:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: cvtpd2pi %xmm0, %mm0 # sched: [5:1.00]
; SKYLAKE-NEXT: cvtpd2pi (%rdi), %mm1 # sched: [11:1.00]
; SKYLAKE-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -69,7 +69,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtpd2pi:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cvtpd2pi %xmm0, %mm0 # sched: [5:1.00]
; SKX-NEXT: cvtpd2pi (%rdi), %mm1 # sched: [11:1.00]
; SKX-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -77,7 +77,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtpd2pi:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: cvtpd2pi (%rdi), %mm1 # sched: [8:1.00]
; BTVER2-NEXT: cvtpd2pi %xmm0, %mm0 # sched: [3:1.00]
; BTVER2-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -85,7 +85,7 @@ define i64 @test_cvtpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtpd2pi:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: cvtpd2pi (%rdi), %mm1 # sched: [12:1.00]
; ZNVER1-NEXT: cvtpd2pi %xmm0, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: por %mm0, %mm1 # sched: [1:0.25]
@@ -102,70 +102,70 @@ declare x86_mmx @llvm.x86.sse.cvtpd2pi(<2 x double>) nounwind readnone
define <2 x double> @test_cvtpi2pd(x86_mmx %a0, x86_mmx* %a1) optsize {
; GENERIC-LABEL: test_cvtpi2pd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtpi2pd %mm0, %xmm1 # sched: [4:1.00]
; GENERIC-NEXT: cvtpi2pd (%rdi), %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtpi2pd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtpi2pd (%rdi), %xmm0 # sched: [8:4.00]
; ATOM-NEXT: cvtpi2pd %mm0, %xmm1 # sched: [7:3.50]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtpi2pd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtpi2pd (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: cvtpi2pd %mm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtpi2pd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: cvtpi2pd %mm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: cvtpi2pd (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpi2pd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: cvtpi2pd %mm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: cvtpi2pd (%rdi), %xmm1 # sched: [4:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtpi2pd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: cvtpi2pd %mm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: cvtpi2pd (%rdi), %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtpi2pd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: cvtpi2pd %mm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: cvtpi2pd (%rdi), %xmm1 # sched: [10:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtpi2pd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cvtpi2pd %mm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: cvtpi2pd (%rdi), %xmm1 # sched: [10:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtpi2pd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: cvtpi2pd (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: cvtpi2pd %mm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtpi2pd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: cvtpi2pd (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: cvtpi2pd %mm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -180,70 +180,70 @@ declare <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx) nounwind readnone
define <4 x float> @test_cvtpi2ps(x86_mmx %a0, x86_mmx* %a1, <4 x float> %a2, <4 x float> %a3) optsize {
; GENERIC-LABEL: test_cvtpi2ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [9:1.00]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtpi2ps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtpi2ps (%rdi), %xmm1
; ATOM-NEXT: cvtpi2ps %mm0, %xmm0
; ATOM-NEXT: addps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtpi2ps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [7:1.00]
; SLM-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [4:0.50]
; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtpi2ps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpi2ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [3:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtpi2ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtpi2ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [6:2.00]
; SKYLAKE-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtpi2ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [6:2.00]
; SKX-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [9:1.00]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtpi2ps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtpi2ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: cvtpi2ps (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: cvtpi2ps %mm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -258,7 +258,7 @@ declare <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float>, x86_mmx) nounwind readno
define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; GENERIC-LABEL: test_cvtps2pi:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtps2pi %xmm0, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: cvtps2pi (%rdi), %mm1 # sched: [9:1.00]
; GENERIC-NEXT: por %mm0, %mm1 # sched: [1:1.00]
@@ -266,7 +266,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtps2pi:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtps2pi %xmm0, %mm0 # sched: [5:5.00]
; ATOM-NEXT: cvtps2pi (%rdi), %mm1 # sched: [5:5.00]
; ATOM-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -274,7 +274,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtps2pi:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtps2pi (%rdi), %mm1 # sched: [7:1.00]
; SLM-NEXT: cvtps2pi %xmm0, %mm0 # sched: [4:0.50]
; SLM-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -282,7 +282,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtps2pi:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: cvtps2pi %xmm0, %mm0 # sched: [3:1.00]
; SANDY-NEXT: cvtps2pi (%rdi), %mm1 # sched: [9:1.00]
; SANDY-NEXT: por %mm0, %mm1 # sched: [1:1.00]
@@ -290,7 +290,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtps2pi:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: cvtps2pi %xmm0, %mm0 # sched: [4:1.00]
; HASWELL-NEXT: cvtps2pi (%rdi), %mm1 # sched: [3:1.00]
; HASWELL-NEXT: por %mm0, %mm1 # sched: [1:0.33]
@@ -298,7 +298,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtps2pi:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: cvtps2pi %xmm0, %mm0 # sched: [4:1.00]
; BROADWELL-NEXT: cvtps2pi (%rdi), %mm1 # sched: [8:1.00]
; BROADWELL-NEXT: por %mm0, %mm1 # sched: [1:0.33]
@@ -306,7 +306,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtps2pi:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: cvtps2pi %xmm0, %mm0 # sched: [5:1.00]
; SKYLAKE-NEXT: cvtps2pi (%rdi), %mm1 # sched: [9:0.50]
; SKYLAKE-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -314,7 +314,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtps2pi:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cvtps2pi %xmm0, %mm0 # sched: [5:1.00]
; SKX-NEXT: cvtps2pi (%rdi), %mm1 # sched: [9:0.50]
; SKX-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -322,7 +322,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtps2pi:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: cvtps2pi (%rdi), %mm1 # sched: [8:1.00]
; BTVER2-NEXT: cvtps2pi %xmm0, %mm0 # sched: [3:1.00]
; BTVER2-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -330,7 +330,7 @@ define i64 @test_cvtps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtps2pi:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: cvtps2pi (%rdi), %mm1 # sched: [12:1.00]
; ZNVER1-NEXT: cvtps2pi %xmm0, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: por %mm0, %mm1 # sched: [1:0.25]
@@ -347,7 +347,7 @@ declare x86_mmx @llvm.x86.sse.cvtps2pi(<4 x float>) nounwind readnone
define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; GENERIC-LABEL: test_cvttpd2pi:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvttpd2pi (%rdi), %mm0 # sched: [10:1.00]
; GENERIC-NEXT: cvttpd2pi %xmm0, %mm1 # sched: [4:1.00]
; GENERIC-NEXT: por %mm1, %mm0 # sched: [1:1.00]
@@ -355,7 +355,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvttpd2pi:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvttpd2pi (%rdi), %mm0 # sched: [8:4.00]
; ATOM-NEXT: cvttpd2pi %xmm0, %mm1 # sched: [7:3.50]
; ATOM-NEXT: por %mm1, %mm0 # sched: [1:0.50]
@@ -363,7 +363,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvttpd2pi:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvttpd2pi (%rdi), %mm1 # sched: [7:1.00]
; SLM-NEXT: cvttpd2pi %xmm0, %mm0 # sched: [4:0.50]
; SLM-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -371,7 +371,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvttpd2pi:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: cvttpd2pi (%rdi), %mm0 # sched: [10:1.00]
; SANDY-NEXT: cvttpd2pi %xmm0, %mm1 # sched: [4:1.00]
; SANDY-NEXT: por %mm1, %mm0 # sched: [1:1.00]
@@ -379,7 +379,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttpd2pi:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: cvttpd2pi (%rdi), %mm0 # sched: [4:1.00]
; HASWELL-NEXT: cvttpd2pi %xmm0, %mm1 # sched: [4:1.00]
; HASWELL-NEXT: por %mm1, %mm0 # sched: [1:0.33]
@@ -387,7 +387,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttpd2pi:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: cvttpd2pi (%rdi), %mm0 # sched: [9:1.00]
; BROADWELL-NEXT: cvttpd2pi %xmm0, %mm1 # sched: [4:1.00]
; BROADWELL-NEXT: por %mm1, %mm0 # sched: [1:0.33]
@@ -395,7 +395,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttpd2pi:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: cvttpd2pi %xmm0, %mm0 # sched: [5:1.00]
; SKYLAKE-NEXT: cvttpd2pi (%rdi), %mm1 # sched: [11:1.00]
; SKYLAKE-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -403,7 +403,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttpd2pi:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cvttpd2pi %xmm0, %mm0 # sched: [5:1.00]
; SKX-NEXT: cvttpd2pi (%rdi), %mm1 # sched: [11:1.00]
; SKX-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -411,7 +411,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttpd2pi:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: cvttpd2pi (%rdi), %mm1 # sched: [8:1.00]
; BTVER2-NEXT: cvttpd2pi %xmm0, %mm0 # sched: [3:1.00]
; BTVER2-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -419,7 +419,7 @@ define i64 @test_cvttpd2pi(<2 x double> %a0, <2 x double>* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttpd2pi:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: cvttpd2pi (%rdi), %mm1 # sched: [12:1.00]
; ZNVER1-NEXT: cvttpd2pi %xmm0, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: por %mm0, %mm1 # sched: [1:0.25]
@@ -436,7 +436,7 @@ declare x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double>) nounwind readnone
define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; GENERIC-LABEL: test_cvttps2pi:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvttps2pi %xmm0, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: cvttps2pi (%rdi), %mm1 # sched: [9:1.00]
; GENERIC-NEXT: por %mm0, %mm1 # sched: [1:1.00]
@@ -444,7 +444,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvttps2pi:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvttps2pi %xmm0, %mm0 # sched: [5:5.00]
; ATOM-NEXT: cvttps2pi (%rdi), %mm1 # sched: [5:5.00]
; ATOM-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -452,7 +452,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvttps2pi:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvttps2pi (%rdi), %mm1 # sched: [7:1.00]
; SLM-NEXT: cvttps2pi %xmm0, %mm0 # sched: [4:0.50]
; SLM-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -460,7 +460,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvttps2pi:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: cvttps2pi %xmm0, %mm0 # sched: [3:1.00]
; SANDY-NEXT: cvttps2pi (%rdi), %mm1 # sched: [9:1.00]
; SANDY-NEXT: por %mm0, %mm1 # sched: [1:1.00]
@@ -468,7 +468,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttps2pi:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: cvttps2pi %xmm0, %mm0 # sched: [4:1.00]
; HASWELL-NEXT: cvttps2pi (%rdi), %mm1 # sched: [3:1.00]
; HASWELL-NEXT: por %mm0, %mm1 # sched: [1:0.33]
@@ -476,7 +476,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttps2pi:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: cvttps2pi %xmm0, %mm0 # sched: [4:1.00]
; BROADWELL-NEXT: cvttps2pi (%rdi), %mm1 # sched: [8:1.00]
; BROADWELL-NEXT: por %mm0, %mm1 # sched: [1:0.33]
@@ -484,7 +484,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttps2pi:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: cvttps2pi %xmm0, %mm0 # sched: [5:1.00]
; SKYLAKE-NEXT: cvttps2pi (%rdi), %mm1 # sched: [9:0.50]
; SKYLAKE-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -492,7 +492,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttps2pi:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: cvttps2pi %xmm0, %mm0 # sched: [5:1.00]
; SKX-NEXT: cvttps2pi (%rdi), %mm1 # sched: [9:0.50]
; SKX-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -500,7 +500,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttps2pi:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: cvttps2pi (%rdi), %mm1 # sched: [8:1.00]
; BTVER2-NEXT: cvttps2pi %xmm0, %mm0 # sched: [3:1.00]
; BTVER2-NEXT: por %mm0, %mm1 # sched: [1:0.50]
@@ -508,7 +508,7 @@ define i64 @test_cvttps2pi(<4 x float> %a0, <4 x float>* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttps2pi:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: cvttps2pi (%rdi), %mm1 # sched: [12:1.00]
; ZNVER1-NEXT: cvttps2pi %xmm0, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: por %mm0, %mm1 # sched: [1:0.25]
@@ -525,52 +525,52 @@ declare x86_mmx @llvm.x86.sse.cvttps2pi(<4 x float>) nounwind readnone
define void @test_emms() optsize {
; GENERIC-LABEL: test_emms:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: emms
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_emms:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: emms # sched: [5:2.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_emms:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: emms
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_emms:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: emms
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_emms:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: emms # sched: [31:10.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_emms:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: emms # sched: [31:10.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_emms:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: emms # sched: [10:4.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_emms:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: emms # sched: [10:4.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_emms:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: emms
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_emms:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: emms
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.mmx.emms()
@@ -580,52 +580,52 @@ declare void @llvm.x86.mmx.emms()
define void @test_maskmovq(x86_mmx %a0, x86_mmx %a1, i8* %a2) optsize {
; GENERIC-LABEL: test_maskmovq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: maskmovq %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_maskmovq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: maskmovq %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_maskmovq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: maskmovq %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_maskmovq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: maskmovq %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: maskmovq %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maskmovq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: maskmovq %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maskmovq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: maskmovq %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maskmovq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: maskmovq %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maskmovq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: maskmovq %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maskmovq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: maskmovq %mm1, %mm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.mmx.maskmovq(x86_mmx %a0, x86_mmx %a1, i8* %a2)
@@ -635,7 +635,7 @@ declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*) nounwind
define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_movd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movd %edi, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; GENERIC-NEXT: movq -{{[0-9]+}}(%rsp), %mm1 # sched: [4:0.50]
@@ -649,7 +649,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movd %edi, %xmm0 # sched: [1:1.00]
; ATOM-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; ATOM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [1:1.00]
@@ -663,7 +663,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movd %edi, %xmm0 # sched: [1:0.50]
; SLM-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; SLM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [3:1.00]
@@ -677,7 +677,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; SANDY-NEXT: movq -{{[0-9]+}}(%rsp), %mm1 # sched: [4:0.50]
@@ -691,7 +691,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; HASWELL-NEXT: movq -{{[0-9]+}}(%rsp), %mm1 # sched: [1:0.50]
@@ -705,7 +705,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; BROADWELL-NEXT: movq -{{[0-9]+}}(%rsp), %mm1 # sched: [5:0.50]
@@ -719,7 +719,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; SKYLAKE-NEXT: movq -{{[0-9]+}}(%rsp), %mm1 # sched: [5:0.50]
@@ -733,7 +733,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovd %edi, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpmovqd %xmm0, -{{[0-9]+}}(%rsp) # sched: [4:1.00]
; SKX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
@@ -747,7 +747,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovd %edi, %xmm0 # sched: [1:0.17]
; BTVER2-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; BTVER2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:1.00]
@@ -761,7 +761,7 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovd %edi, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:0.50]
; ZNVER1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50]
@@ -790,70 +790,70 @@ define i32 @test_movd(x86_mmx %a0, i32 %a1, i32 *%a2) {
define i64 @test_movdq2q(<2 x i64> %a0) optsize {
; GENERIC-LABEL: test_movdq2q:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movdq2q %xmm0, %mm0 # sched: [2:1.00]
; GENERIC-NEXT: paddd %mm0, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movdq2q:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movdq2q %xmm0, %mm0 # sched: [1:0.50]
; ATOM-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movdq2q:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movdq2q %xmm0, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movdq2q:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movdq2q %xmm0, %mm0 # sched: [2:1.00]
; SANDY-NEXT: paddd %mm0, %mm0 # sched: [3:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movdq2q:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movdq2q %xmm0, %mm0 # sched: [2:0.67]
; HASWELL-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movdq2q:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movdq2q %xmm0, %mm0 # sched: [2:0.67]
; BROADWELL-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movdq2q:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movdq2q %xmm0, %mm0 # sched: [2:1.00]
; SKYLAKE-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movdq2q:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movdq2q %xmm0, %mm0 # sched: [2:1.00]
; SKX-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movdq2q:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movdq2q %xmm0, %mm0 # sched: [1:0.17]
; BTVER2-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movdq2q:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movdq2q %xmm0, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddd %mm0, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -867,52 +867,52 @@ define i64 @test_movdq2q(<2 x i64> %a0) optsize {
define void @test_movntq(x86_mmx* %a0, x86_mmx %a1) optsize {
; GENERIC-LABEL: test_movntq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movntq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movntq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movntq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movntq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movntq %mm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movntq %mm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.mmx.movnt.dq(x86_mmx* %a0, x86_mmx %a1)
@@ -922,14 +922,14 @@ declare void @llvm.x86.mmx.movnt.dq(x86_mmx*, x86_mmx) nounwind
define void @test_movq(i64 *%a0) {
; GENERIC-LABEL: test_movq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movq (%rdi), %mm0 # sched: [4:0.50]
; GENERIC-NEXT: paddd %mm0, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movq (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; ATOM-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
@@ -938,56 +938,56 @@ define void @test_movq(i64 *%a0) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movq (%rdi), %mm0 # sched: [3:1.00]
; SLM-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; SLM-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movq (%rdi), %mm0 # sched: [4:0.50]
; SANDY-NEXT: paddd %mm0, %mm0 # sched: [3:1.00]
; SANDY-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movq (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movq (%rdi), %mm0 # sched: [5:0.50]
; BROADWELL-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movq (%rdi), %mm0 # sched: [5:0.50]
; SKYLAKE-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movq (%rdi), %mm0 # sched: [5:0.50]
; SKX-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; SKX-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movq (%rdi), %mm0 # sched: [5:1.00]
; BTVER2-NEXT: paddd %mm0, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: movq %mm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movq (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: paddd %mm0, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: movq %mm0, (%rdi) # sched: [1:0.50]
@@ -1002,52 +1002,52 @@ define void @test_movq(i64 *%a0) {
define <2 x i64> @test_movq2dq(x86_mmx %a0) optsize {
; GENERIC-LABEL: test_movq2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movq2dq %mm0, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movq2dq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movq2dq %mm0, %xmm0
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movq2dq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movq2dq %mm0, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movq2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movq2dq %mm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movq2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movq2dq %mm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movq2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movq2dq %mm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movq2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movq2dq %mm0, %xmm0 # sched: [2:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movq2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movq2dq %mm0, %xmm0 # sched: [2:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movq2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movq2dq %mm0, %xmm0 # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movq2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movq2dq %mm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast x86_mmx %a0 to i64
@@ -1057,70 +1057,70 @@ define <2 x i64> @test_movq2dq(x86_mmx %a0) optsize {
define i64 @test_pabsb(x86_mmx *%a0) optsize {
; GENERIC-LABEL: test_pabsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pabsb (%rdi), %mm0 # sched: [6:0.50]
; GENERIC-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pabsb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pabsb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pabsb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pabsb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pabsb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pabsb (%rdi), %mm0 # sched: [6:0.50]
; SANDY-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pabsb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pabsb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pabsb (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pabsb (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pabsb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pabsb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: pabsb %mm0, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pabsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pabsb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: pabsb %mm0, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1135,70 +1135,70 @@ declare x86_mmx @llvm.x86.ssse3.pabs.b(x86_mmx) nounwind readnone
define i64 @test_pabsd(x86_mmx *%a0) optsize {
; GENERIC-LABEL: test_pabsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pabsd (%rdi), %mm0 # sched: [6:0.50]
; GENERIC-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pabsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pabsd (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pabsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pabsd (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pabsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pabsd (%rdi), %mm0 # sched: [6:0.50]
; SANDY-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pabsd (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pabsd (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pabsd (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pabsd (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pabsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pabsd (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: pabsd %mm0, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pabsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pabsd (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: pabsd %mm0, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1213,70 +1213,70 @@ declare x86_mmx @llvm.x86.ssse3.pabs.d(x86_mmx) nounwind readnone
define i64 @test_pabsw(x86_mmx *%a0) optsize {
; GENERIC-LABEL: test_pabsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pabsw (%rdi), %mm0 # sched: [6:0.50]
; GENERIC-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pabsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pabsw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pabsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pabsw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pabsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pabsw (%rdi), %mm0 # sched: [6:0.50]
; SANDY-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pabsw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pabsw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pabsw (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pabsw (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pabsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pabsw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: pabsw %mm0, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pabsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pabsw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: pabsw %mm0, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1291,70 +1291,70 @@ declare x86_mmx @llvm.x86.ssse3.pabs.w(x86_mmx) nounwind readnone
define i64 @test_packssdw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_packssdw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: packssdw %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: packssdw (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_packssdw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: packssdw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: packssdw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_packssdw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: packssdw %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: packssdw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_packssdw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: packssdw %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: packssdw (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packssdw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: packssdw %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: packssdw (%rdi), %mm0 # sched: [2:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packssdw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: packssdw %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: packssdw (%rdi), %mm0 # sched: [7:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packssdw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: packssdw %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: packssdw (%rdi), %mm0 # sched: [7:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packssdw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: packssdw %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: packssdw (%rdi), %mm0 # sched: [7:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_packssdw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: packssdw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: packssdw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_packssdw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: packssdw %mm1, %mm0 # sched: [1:0.50]
; ZNVER1-NEXT: packssdw (%rdi), %mm0 # sched: [1:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1369,70 +1369,70 @@ declare x86_mmx @llvm.x86.mmx.packssdw(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_packsswb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_packsswb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: packsswb %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: packsswb (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_packsswb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: packsswb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: packsswb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_packsswb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: packsswb %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: packsswb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_packsswb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: packsswb %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: packsswb (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packsswb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: packsswb %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: packsswb (%rdi), %mm0 # sched: [2:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packsswb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: packsswb %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: packsswb (%rdi), %mm0 # sched: [7:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packsswb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: packsswb %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: packsswb (%rdi), %mm0 # sched: [7:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packsswb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: packsswb %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: packsswb (%rdi), %mm0 # sched: [7:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_packsswb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: packsswb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: packsswb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_packsswb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: packsswb %mm1, %mm0 # sched: [1:0.50]
; ZNVER1-NEXT: packsswb (%rdi), %mm0 # sched: [1:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1447,70 +1447,70 @@ declare x86_mmx @llvm.x86.mmx.packsswb(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_packuswb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_packuswb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: packuswb %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: packuswb (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_packuswb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: packuswb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: packuswb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_packuswb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: packuswb %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: packuswb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_packuswb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: packuswb %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: packuswb (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packuswb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: packuswb %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: packuswb (%rdi), %mm0 # sched: [2:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packuswb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: packuswb %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: packuswb (%rdi), %mm0 # sched: [7:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packuswb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: packuswb %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: packuswb (%rdi), %mm0 # sched: [7:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packuswb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: packuswb %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: packuswb (%rdi), %mm0 # sched: [7:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_packuswb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: packuswb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: packuswb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_packuswb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: packuswb %mm1, %mm0 # sched: [1:0.50]
; ZNVER1-NEXT: packuswb (%rdi), %mm0 # sched: [1:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1525,70 +1525,70 @@ declare x86_mmx @llvm.x86.mmx.packuswb(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_paddb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_paddb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddb %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: paddb (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: paddb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddb %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: paddb %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: paddb (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: paddb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: paddb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: paddb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: paddb %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: paddb (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: paddb %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: paddb (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: paddb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: paddb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: paddb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1603,70 +1603,70 @@ declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_paddd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_paddd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddd %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: paddd (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddd %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: paddd (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddd %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddd (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: paddd %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: paddd (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: paddd %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddd (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: paddd %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: paddd (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: paddd %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: paddd (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: paddd %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: paddd (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: paddd %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: paddd (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: paddd %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddd (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1681,70 +1681,70 @@ declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_paddq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_paddq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddq %mm1, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: paddq (%rdi), %mm0 # sched: [7:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddq %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: paddq (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddq %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddq (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: paddq %mm1, %mm0 # sched: [1:0.50]
; SANDY-NEXT: paddq (%rdi), %mm0 # sched: [7:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: paddq %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddq (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: paddq %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: paddq (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: paddq %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: paddq (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: paddq %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: paddq (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: paddq %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: paddq (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: paddq %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddq (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1759,70 +1759,70 @@ declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_paddsb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_paddsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddsb %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: paddsb (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddsb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddsb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: paddsb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddsb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddsb %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddsb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddsb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: paddsb %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: paddsb (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: paddsb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddsb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: paddsb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: paddsb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: paddsb %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: paddsb (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: paddsb %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: paddsb (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddsb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: paddsb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: paddsb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: paddsb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddsb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1837,70 +1837,70 @@ declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_paddsw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_paddsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddsw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: paddsw (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddsw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: paddsw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddsw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddsw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: paddsw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: paddsw (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: paddsw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddsw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: paddsw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: paddsw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: paddsw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: paddsw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: paddsw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: paddsw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: paddsw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: paddsw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: paddsw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddsw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1915,70 +1915,70 @@ declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_paddusb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_paddusb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddusb %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: paddusb (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddusb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddusb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: paddusb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddusb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddusb %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddusb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddusb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: paddusb %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: paddusb (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: paddusb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddusb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddusb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: paddusb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: paddusb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddusb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: paddusb %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: paddusb (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddusb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: paddusb %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: paddusb (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddusb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: paddusb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: paddusb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddusb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: paddusb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddusb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -1993,70 +1993,70 @@ declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_paddusw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_paddusw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddusw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: paddusw (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddusw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddusw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: paddusw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddusw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddusw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddusw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddusw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: paddusw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: paddusw (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: paddusw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddusw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddusw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: paddusw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: paddusw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddusw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: paddusw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: paddusw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddusw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: paddusw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: paddusw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddusw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: paddusw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: paddusw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddusw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: paddusw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddusw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2071,70 +2071,70 @@ declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_paddw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_paddw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: paddw (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: paddw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: paddw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: paddw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: paddw (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: paddw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: paddw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: paddw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: paddw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: paddw %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: paddw (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: paddw %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: paddw (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: paddw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: paddw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: paddw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: paddw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2149,70 +2149,70 @@ declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_palignr(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_palignr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: palignr $1, %mm1, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: palignr $1, (%rdi), %mm0 # sched: [6:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_palignr:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: palignr $1, %mm1, %mm0
; ATOM-NEXT: palignr $1, (%rdi), %mm0
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_palignr:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: palignr $1, %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: palignr $1, (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_palignr:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: palignr $1, %mm1, %mm0 # sched: [1:0.50]
; SANDY-NEXT: palignr $1, (%rdi), %mm0 # sched: [6:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_palignr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: palignr $1, %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: palignr $1, (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_palignr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: palignr $1, %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: palignr $1, (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_palignr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: palignr $1, %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: palignr $1, (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_palignr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: palignr $1, %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: palignr $1, (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_palignr:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: palignr $1, %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: palignr $1, (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_palignr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: palignr $1, %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: palignr $1, (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2227,70 +2227,70 @@ declare x86_mmx @llvm.x86.mmx.palignr.b(x86_mmx, x86_mmx, i8) nounwind readnone
define i64 @test_pand(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pand:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pand %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: pand (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pand:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pand %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pand (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pand:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pand %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pand (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pand:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pand %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: pand (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pand:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pand %mm1, %mm0 # sched: [1:0.33]
; HASWELL-NEXT: pand (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pand:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pand %mm1, %mm0 # sched: [1:0.33]
; BROADWELL-NEXT: pand (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pand:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pand %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: pand (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pand:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pand %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: pand (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pand:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pand %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pand (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pand:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pand %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pand (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2305,70 +2305,70 @@ declare x86_mmx @llvm.x86.mmx.pand(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pandn(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pandn:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pandn %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: pandn (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pandn:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pandn %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pandn (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pandn:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pandn %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pandn (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pandn:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pandn %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: pandn (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pandn:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pandn %mm1, %mm0 # sched: [1:0.33]
; HASWELL-NEXT: pandn (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pandn:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pandn %mm1, %mm0 # sched: [1:0.33]
; BROADWELL-NEXT: pandn (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pandn:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pandn %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: pandn (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pandn:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pandn %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: pandn (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pandn:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pandn %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pandn (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pandn:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pandn %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pandn (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2383,70 +2383,70 @@ declare x86_mmx @llvm.x86.mmx.pandn(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pavgb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pavgb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pavgb %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pavgb (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pavgb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pavgb %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: pavgb (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pavgb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pavgb %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pavgb (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pavgb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pavgb %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pavgb (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pavgb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pavgb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pavgb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pavgb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pavgb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pavgb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pavgb %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pavgb (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pavgb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pavgb %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pavgb (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pavgb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pavgb %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pavgb (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pavgb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pavgb %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pavgb (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2461,70 +2461,70 @@ declare x86_mmx @llvm.x86.mmx.pavg.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pavgw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pavgw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pavgw %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pavgw (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pavgw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pavgw %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: pavgw (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pavgw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pavgw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pavgw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pavgw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pavgw %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pavgw (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pavgw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pavgw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pavgw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pavgw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pavgw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pavgw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pavgw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pavgw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pavgw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pavgw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pavgw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pavgw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pavgw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pavgw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pavgw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pavgw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pavgw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2539,70 +2539,70 @@ declare x86_mmx @llvm.x86.mmx.pavg.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pcmpeqb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pcmpeqb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpeqb %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pcmpeqb (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpeqb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpeqb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pcmpeqb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpeqb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpeqb %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pcmpeqb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpeqb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pcmpeqb %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pcmpeqb (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pcmpeqb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pcmpeqb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pcmpeqb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pcmpeqb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pcmpeqb %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pcmpeqb (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pcmpeqb %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pcmpeqb (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpeqb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pcmpeqb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pcmpeqb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpeqb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pcmpeqb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pcmpeqb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2617,70 +2617,70 @@ declare x86_mmx @llvm.x86.mmx.pcmpeq.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pcmpeqd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pcmpeqd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpeqd %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pcmpeqd (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpeqd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpeqd %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pcmpeqd (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpeqd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpeqd %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pcmpeqd (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpeqd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pcmpeqd %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pcmpeqd (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pcmpeqd %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pcmpeqd (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pcmpeqd %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pcmpeqd (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pcmpeqd %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pcmpeqd (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pcmpeqd %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pcmpeqd (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpeqd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pcmpeqd %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pcmpeqd (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpeqd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pcmpeqd %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pcmpeqd (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2695,70 +2695,70 @@ declare x86_mmx @llvm.x86.mmx.pcmpeq.d(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pcmpeqw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pcmpeqw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpeqw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pcmpeqw (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpeqw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpeqw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pcmpeqw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpeqw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpeqw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pcmpeqw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpeqw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pcmpeqw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pcmpeqw (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pcmpeqw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pcmpeqw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pcmpeqw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pcmpeqw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pcmpeqw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pcmpeqw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pcmpeqw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pcmpeqw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpeqw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pcmpeqw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pcmpeqw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpeqw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pcmpeqw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pcmpeqw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2773,70 +2773,70 @@ declare x86_mmx @llvm.x86.mmx.pcmpeq.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pcmpgtb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pcmpgtb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpgtb %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pcmpgtb (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpgtb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpgtb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pcmpgtb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpgtb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpgtb %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pcmpgtb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpgtb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pcmpgtb %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pcmpgtb (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pcmpgtb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pcmpgtb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pcmpgtb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pcmpgtb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pcmpgtb %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pcmpgtb (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pcmpgtb %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pcmpgtb (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpgtb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pcmpgtb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pcmpgtb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpgtb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pcmpgtb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pcmpgtb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2851,70 +2851,70 @@ declare x86_mmx @llvm.x86.mmx.pcmpgt.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pcmpgtd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pcmpgtd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpgtd %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pcmpgtd (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpgtd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpgtd %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pcmpgtd (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpgtd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpgtd %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pcmpgtd (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpgtd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pcmpgtd %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pcmpgtd (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pcmpgtd %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pcmpgtd (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pcmpgtd %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pcmpgtd (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pcmpgtd %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pcmpgtd (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pcmpgtd %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pcmpgtd (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpgtd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pcmpgtd %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pcmpgtd (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpgtd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pcmpgtd %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pcmpgtd (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -2929,70 +2929,70 @@ declare x86_mmx @llvm.x86.mmx.pcmpgt.d(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pcmpgtw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pcmpgtw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpgtw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pcmpgtw (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpgtw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpgtw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pcmpgtw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpgtw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpgtw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pcmpgtw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpgtw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pcmpgtw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pcmpgtw (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pcmpgtw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pcmpgtw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pcmpgtw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pcmpgtw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pcmpgtw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pcmpgtw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pcmpgtw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pcmpgtw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpgtw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pcmpgtw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pcmpgtw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpgtw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pcmpgtw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pcmpgtw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3007,52 +3007,52 @@ declare x86_mmx @llvm.x86.mmx.pcmpgt.w(x86_mmx, x86_mmx) nounwind readnone
define i32 @test_pextrw(x86_mmx %a0) optsize {
; GENERIC-LABEL: test_pextrw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pextrw $0, %mm0, %eax # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pextrw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pextrw $0, %mm0, %eax # sched: [4:2.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pextrw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pextrw $0, %mm0, %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pextrw $0, %mm0, %eax # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pextrw $0, %mm0, %eax # sched: [2:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pextrw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pextrw $0, %mm0, %eax # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pextrw $0, %mm0, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pextrw $0, %mm0, %eax # sched: [3:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pextrw $0, %mm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pextrw $0, %mm0, %eax # sched: [2:2.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.mmx.pextr.w(x86_mmx %a0, i32 0)
@@ -3062,70 +3062,70 @@ declare i32 @llvm.x86.mmx.pextr.w(x86_mmx, i32) nounwind readnone
define i64 @test_phaddd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_phaddd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phaddd %mm1, %mm0 # sched: [3:1.50]
; GENERIC-NEXT: phaddd (%rdi), %mm0 # sched: [8:1.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phaddd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phaddd %mm1, %mm0 # sched: [3:1.50]
; ATOM-NEXT: phaddd (%rdi), %mm0 # sched: [4:2.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phaddd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phaddd %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: phaddd (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phaddd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: phaddd %mm1, %mm0 # sched: [3:1.50]
; SANDY-NEXT: phaddd (%rdi), %mm0 # sched: [8:1.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: phaddd %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: phaddd (%rdi), %mm0 # sched: [3:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: phaddd %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: phaddd (%rdi), %mm0 # sched: [8:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: phaddd %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: phaddd (%rdi), %mm0 # sched: [8:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: phaddd %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: phaddd (%rdi), %mm0 # sched: [8:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phaddd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: phaddd %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: phaddd (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phaddd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: phaddd %mm1, %mm0 # sched: [100:?]
; ZNVER1-NEXT: phaddd (%rdi), %mm0 # sched: [100:?]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3140,70 +3140,70 @@ declare x86_mmx @llvm.x86.ssse3.phadd.d(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_phaddsw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_phaddsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phaddsw %mm1, %mm0 # sched: [3:1.50]
; GENERIC-NEXT: phaddsw (%rdi), %mm0 # sched: [8:1.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phaddsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phaddsw %mm1, %mm0 # sched: [5:2.50]
; ATOM-NEXT: phaddsw (%rdi), %mm0 # sched: [6:3.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phaddsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phaddsw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: phaddsw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phaddsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: phaddsw %mm1, %mm0 # sched: [3:1.50]
; SANDY-NEXT: phaddsw (%rdi), %mm0 # sched: [8:1.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: phaddsw %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: phaddsw (%rdi), %mm0 # sched: [3:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: phaddsw %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: phaddsw (%rdi), %mm0 # sched: [8:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: phaddsw %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: phaddsw (%rdi), %mm0 # sched: [8:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: phaddsw %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: phaddsw (%rdi), %mm0 # sched: [8:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phaddsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: phaddsw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: phaddsw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phaddsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: phaddsw %mm1, %mm0 # sched: [100:?]
; ZNVER1-NEXT: phaddsw (%rdi), %mm0 # sched: [100:?]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3218,70 +3218,70 @@ declare x86_mmx @llvm.x86.ssse3.phadd.sw(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_phaddw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_phaddw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phaddw %mm1, %mm0 # sched: [3:1.50]
; GENERIC-NEXT: phaddw (%rdi), %mm0 # sched: [8:1.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phaddw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phaddw %mm1, %mm0 # sched: [5:2.50]
; ATOM-NEXT: phaddw (%rdi), %mm0 # sched: [6:3.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phaddw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phaddw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: phaddw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phaddw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: phaddw %mm1, %mm0 # sched: [3:1.50]
; SANDY-NEXT: phaddw (%rdi), %mm0 # sched: [8:1.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: phaddw %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: phaddw (%rdi), %mm0 # sched: [3:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: phaddw %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: phaddw (%rdi), %mm0 # sched: [8:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: phaddw %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: phaddw (%rdi), %mm0 # sched: [8:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: phaddw %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: phaddw (%rdi), %mm0 # sched: [8:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phaddw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: phaddw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: phaddw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phaddw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: phaddw %mm1, %mm0 # sched: [100:?]
; ZNVER1-NEXT: phaddw (%rdi), %mm0 # sched: [100:?]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3296,70 +3296,70 @@ declare x86_mmx @llvm.x86.ssse3.phadd.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_phsubd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_phsubd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phsubd %mm1, %mm0 # sched: [3:1.50]
; GENERIC-NEXT: phsubd (%rdi), %mm0 # sched: [8:1.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phsubd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phsubd %mm1, %mm0 # sched: [3:1.50]
; ATOM-NEXT: phsubd (%rdi), %mm0 # sched: [4:2.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phsubd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phsubd %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: phsubd (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phsubd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: phsubd %mm1, %mm0 # sched: [3:1.50]
; SANDY-NEXT: phsubd (%rdi), %mm0 # sched: [8:1.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: phsubd %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: phsubd (%rdi), %mm0 # sched: [3:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: phsubd %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: phsubd (%rdi), %mm0 # sched: [8:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: phsubd %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: phsubd (%rdi), %mm0 # sched: [8:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: phsubd %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: phsubd (%rdi), %mm0 # sched: [8:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phsubd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: phsubd %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: phsubd (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phsubd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: phsubd %mm1, %mm0 # sched: [100:?]
; ZNVER1-NEXT: phsubd (%rdi), %mm0 # sched: [100:?]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3374,70 +3374,70 @@ declare x86_mmx @llvm.x86.ssse3.phsub.d(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_phsubsw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_phsubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phsubsw %mm1, %mm0 # sched: [3:1.50]
; GENERIC-NEXT: phsubsw (%rdi), %mm0 # sched: [8:1.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phsubsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phsubsw %mm1, %mm0 # sched: [5:2.50]
; ATOM-NEXT: phsubsw (%rdi), %mm0 # sched: [6:3.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phsubsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phsubsw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: phsubsw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phsubsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: phsubsw %mm1, %mm0 # sched: [3:1.50]
; SANDY-NEXT: phsubsw (%rdi), %mm0 # sched: [8:1.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: phsubsw %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: phsubsw (%rdi), %mm0 # sched: [3:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: phsubsw %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: phsubsw (%rdi), %mm0 # sched: [8:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: phsubsw %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: phsubsw (%rdi), %mm0 # sched: [8:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: phsubsw %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: phsubsw (%rdi), %mm0 # sched: [8:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phsubsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: phsubsw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: phsubsw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phsubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: phsubsw %mm1, %mm0 # sched: [100:?]
; ZNVER1-NEXT: phsubsw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3452,70 +3452,70 @@ declare x86_mmx @llvm.x86.ssse3.phsub.sw(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_phsubw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_phsubw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phsubw %mm1, %mm0 # sched: [3:1.50]
; GENERIC-NEXT: phsubw (%rdi), %mm0 # sched: [8:1.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phsubw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phsubw %mm1, %mm0 # sched: [5:2.50]
; ATOM-NEXT: phsubw (%rdi), %mm0 # sched: [6:3.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phsubw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phsubw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: phsubw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phsubw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: phsubw %mm1, %mm0 # sched: [3:1.50]
; SANDY-NEXT: phsubw (%rdi), %mm0 # sched: [8:1.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: phsubw %mm1, %mm0 # sched: [3:2.00]
; HASWELL-NEXT: phsubw (%rdi), %mm0 # sched: [3:2.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: phsubw %mm1, %mm0 # sched: [3:2.00]
; BROADWELL-NEXT: phsubw (%rdi), %mm0 # sched: [8:2.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: phsubw %mm1, %mm0 # sched: [3:2.00]
; SKYLAKE-NEXT: phsubw (%rdi), %mm0 # sched: [8:2.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: phsubw %mm1, %mm0 # sched: [3:2.00]
; SKX-NEXT: phsubw (%rdi), %mm0 # sched: [8:2.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phsubw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: phsubw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: phsubw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phsubw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: phsubw %mm1, %mm0 # sched: [100:?]
; ZNVER1-NEXT: phsubw (%rdi), %mm0 # sched: [100:?]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3530,7 +3530,7 @@ declare x86_mmx @llvm.x86.ssse3.phsub.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; GENERIC-LABEL: test_pinsrw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pinsrw $0, %edi, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: movswl (%rsi), %eax # sched: [5:0.50]
; GENERIC-NEXT: pinsrw $1, %eax, %mm0 # sched: [1:1.00]
@@ -3538,7 +3538,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pinsrw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movswl (%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: pinsrw $0, %edi, %mm0 # sched: [1:1.00]
; ATOM-NEXT: pinsrw $1, %eax, %mm0 # sched: [1:1.00]
@@ -3546,7 +3546,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pinsrw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movswl (%rsi), %eax # sched: [4:1.00]
; SLM-NEXT: pinsrw $0, %edi, %mm0 # sched: [1:1.00]
; SLM-NEXT: pinsrw $1, %eax, %mm0 # sched: [1:1.00]
@@ -3554,7 +3554,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pinsrw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pinsrw $0, %edi, %mm0 # sched: [1:1.00]
; SANDY-NEXT: movswl (%rsi), %eax # sched: [5:0.50]
; SANDY-NEXT: pinsrw $1, %eax, %mm0 # sched: [1:1.00]
@@ -3562,7 +3562,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pinsrw $0, %edi, %mm0 # sched: [2:2.00]
; HASWELL-NEXT: movswl (%rsi), %eax # sched: [4:0.50]
; HASWELL-NEXT: pinsrw $1, %eax, %mm0 # sched: [2:2.00]
@@ -3570,7 +3570,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pinsrw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pinsrw $0, %edi, %mm0 # sched: [2:2.00]
; BROADWELL-NEXT: movswl (%rsi), %eax # sched: [5:0.50]
; BROADWELL-NEXT: pinsrw $1, %eax, %mm0 # sched: [2:2.00]
@@ -3578,7 +3578,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pinsrw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pinsrw $0, %edi, %mm0 # sched: [2:2.00]
; SKYLAKE-NEXT: movswl (%rsi), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: pinsrw $1, %eax, %mm0 # sched: [2:2.00]
@@ -3586,7 +3586,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pinsrw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pinsrw $0, %edi, %mm0 # sched: [2:2.00]
; SKX-NEXT: movswl (%rsi), %eax # sched: [5:0.50]
; SKX-NEXT: pinsrw $1, %eax, %mm0 # sched: [2:2.00]
@@ -3594,7 +3594,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pinsrw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movswl (%rsi), %eax # sched: [4:1.00]
; BTVER2-NEXT: pinsrw $0, %edi, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pinsrw $1, %eax, %mm0 # sched: [1:0.50]
@@ -3602,7 +3602,7 @@ define i64 @test_pinsrw(x86_mmx %a0, i32 %a1, i16* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pinsrw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movswl (%rsi), %eax # sched: [8:0.50]
; ZNVER1-NEXT: pinsrw $0, %edi, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pinsrw $1, %eax, %mm0 # sched: [1:0.25]
@@ -3619,70 +3619,70 @@ declare x86_mmx @llvm.x86.mmx.pinsr.w(x86_mmx, i32, i32) nounwind readnone
define i64 @test_pmaddwd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmaddwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaddwd %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pmaddwd (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmaddwd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmaddwd %mm1, %mm0 # sched: [4:4.00]
; ATOM-NEXT: pmaddwd (%rdi), %mm0 # sched: [4:4.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmaddwd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaddwd %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmaddwd (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaddwd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmaddwd %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pmaddwd (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmaddwd %mm1, %mm0 # sched: [5:1.00]
; HASWELL-NEXT: pmaddwd (%rdi), %mm0 # sched: [5:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaddwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmaddwd %mm1, %mm0 # sched: [5:1.00]
; BROADWELL-NEXT: pmaddwd (%rdi), %mm0 # sched: [10:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaddwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmaddwd %mm1, %mm0 # sched: [4:1.00]
; SKYLAKE-NEXT: pmaddwd (%rdi), %mm0 # sched: [9:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaddwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmaddwd %mm1, %mm0 # sched: [4:1.00]
; SKX-NEXT: pmaddwd (%rdi), %mm0 # sched: [9:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaddwd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmaddwd %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmaddwd (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaddwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmaddwd %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmaddwd (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3697,70 +3697,70 @@ declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pmaddubsw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmaddubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaddubsw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pmaddubsw (%rdi), %mm0 # sched: [8:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmaddubsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmaddubsw %mm1, %mm0 # sched: [4:4.00]
; ATOM-NEXT: pmaddubsw (%rdi), %mm0 # sched: [4:4.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmaddubsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaddubsw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmaddubsw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaddubsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmaddubsw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pmaddubsw (%rdi), %mm0 # sched: [8:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmaddubsw %mm1, %mm0 # sched: [5:1.00]
; HASWELL-NEXT: pmaddubsw (%rdi), %mm0 # sched: [5:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaddubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmaddubsw %mm1, %mm0 # sched: [5:1.00]
; BROADWELL-NEXT: pmaddubsw (%rdi), %mm0 # sched: [10:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaddubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmaddubsw %mm1, %mm0 # sched: [4:1.00]
; SKYLAKE-NEXT: pmaddubsw (%rdi), %mm0 # sched: [9:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaddubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmaddubsw %mm1, %mm0 # sched: [4:1.00]
; SKX-NEXT: pmaddubsw (%rdi), %mm0 # sched: [9:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaddubsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmaddubsw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmaddubsw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaddubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmaddubsw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmaddubsw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3775,70 +3775,70 @@ declare x86_mmx @llvm.x86.ssse3.pmadd.ub.sw(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pmaxsw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmaxsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaxsw %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pmaxsw (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmaxsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmaxsw %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: pmaxsw (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmaxsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaxsw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmaxsw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaxsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmaxsw %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pmaxsw (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmaxsw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pmaxsw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmaxsw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pmaxsw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmaxsw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pmaxsw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmaxsw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pmaxsw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaxsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmaxsw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmaxsw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaxsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmaxsw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmaxsw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3853,70 +3853,70 @@ declare x86_mmx @llvm.x86.mmx.pmaxs.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pmaxub(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmaxub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaxub %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pmaxub (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmaxub:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmaxub %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: pmaxub (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmaxub:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaxub %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmaxub (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaxub:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmaxub %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pmaxub (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmaxub %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pmaxub (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmaxub %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pmaxub (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmaxub %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pmaxub (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmaxub %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pmaxub (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaxub:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmaxub %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmaxub (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaxub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmaxub %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmaxub (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -3931,70 +3931,70 @@ declare x86_mmx @llvm.x86.mmx.pmaxu.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pminsw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pminsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pminsw %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pminsw (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pminsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pminsw %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: pminsw (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pminsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pminsw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pminsw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pminsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pminsw %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pminsw (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pminsw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pminsw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pminsw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pminsw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pminsw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pminsw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pminsw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pminsw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pminsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pminsw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pminsw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pminsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pminsw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pminsw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4009,70 +4009,70 @@ declare x86_mmx @llvm.x86.mmx.pmins.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pminub(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pminub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pminub %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pminub (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pminub:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pminub %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: pminub (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pminub:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pminub %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pminub (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pminub:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pminub %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pminub (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pminub %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: pminub (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pminub %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: pminub (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pminub %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pminub (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pminub %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pminub (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pminub:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pminub %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pminub (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pminub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pminub %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pminub (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4087,52 +4087,52 @@ declare x86_mmx @llvm.x86.mmx.pminu.b(x86_mmx, x86_mmx) nounwind readnone
define i32 @test_pmovmskb(x86_mmx %a0) optsize {
; GENERIC-LABEL: test_pmovmskb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovmskb %mm0, %eax # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmovmskb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmovmskb %mm0, %eax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmovmskb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovmskb %mm0, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovmskb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmovmskb %mm0, %eax # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovmskb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmovmskb %mm0, %eax # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovmskb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmovmskb %mm0, %eax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovmskb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmovmskb %mm0, %eax # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovmskb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmovmskb %mm0, %eax # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovmskb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmovmskb %mm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovmskb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmovmskb %mm0, %eax # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %a0)
@@ -4142,70 +4142,70 @@ declare i32 @llvm.x86.mmx.pmovmskb(x86_mmx) nounwind readnone
define i64 @test_pmulhrsw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmulhrsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmulhrsw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pmulhrsw (%rdi), %mm0 # sched: [8:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmulhrsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmulhrsw %mm1, %mm0 # sched: [4:4.00]
; ATOM-NEXT: pmulhrsw (%rdi), %mm0 # sched: [4:4.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmulhrsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmulhrsw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmulhrsw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmulhrsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmulhrsw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pmulhrsw (%rdi), %mm0 # sched: [8:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhrsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmulhrsw %mm1, %mm0 # sched: [5:1.00]
; HASWELL-NEXT: pmulhrsw (%rdi), %mm0 # sched: [5:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhrsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmulhrsw %mm1, %mm0 # sched: [5:1.00]
; BROADWELL-NEXT: pmulhrsw (%rdi), %mm0 # sched: [10:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhrsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmulhrsw %mm1, %mm0 # sched: [4:1.00]
; SKYLAKE-NEXT: pmulhrsw (%rdi), %mm0 # sched: [9:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhrsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmulhrsw %mm1, %mm0 # sched: [4:1.00]
; SKX-NEXT: pmulhrsw (%rdi), %mm0 # sched: [9:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmulhrsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmulhrsw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmulhrsw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmulhrsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmulhrsw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmulhrsw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4220,70 +4220,70 @@ declare x86_mmx @llvm.x86.ssse3.pmul.hr.sw(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pmulhw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmulhw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmulhw %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pmulhw (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmulhw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmulhw %mm1, %mm0 # sched: [4:4.00]
; ATOM-NEXT: pmulhw (%rdi), %mm0 # sched: [4:4.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmulhw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmulhw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmulhw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmulhw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmulhw %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pmulhw (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmulhw %mm1, %mm0 # sched: [5:1.00]
; HASWELL-NEXT: pmulhw (%rdi), %mm0 # sched: [5:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmulhw %mm1, %mm0 # sched: [5:1.00]
; BROADWELL-NEXT: pmulhw (%rdi), %mm0 # sched: [10:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmulhw %mm1, %mm0 # sched: [4:1.00]
; SKYLAKE-NEXT: pmulhw (%rdi), %mm0 # sched: [9:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmulhw %mm1, %mm0 # sched: [4:1.00]
; SKX-NEXT: pmulhw (%rdi), %mm0 # sched: [9:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmulhw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmulhw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmulhw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmulhw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmulhw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmulhw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4298,70 +4298,70 @@ declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pmulhuw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmulhuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmulhuw %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pmulhuw (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmulhuw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmulhuw %mm1, %mm0 # sched: [4:4.00]
; ATOM-NEXT: pmulhuw (%rdi), %mm0 # sched: [4:4.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmulhuw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmulhuw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmulhuw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmulhuw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmulhuw %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pmulhuw (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmulhuw %mm1, %mm0 # sched: [5:1.00]
; HASWELL-NEXT: pmulhuw (%rdi), %mm0 # sched: [5:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmulhuw %mm1, %mm0 # sched: [5:1.00]
; BROADWELL-NEXT: pmulhuw (%rdi), %mm0 # sched: [10:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmulhuw %mm1, %mm0 # sched: [4:1.00]
; SKYLAKE-NEXT: pmulhuw (%rdi), %mm0 # sched: [9:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmulhuw %mm1, %mm0 # sched: [4:1.00]
; SKX-NEXT: pmulhuw (%rdi), %mm0 # sched: [9:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmulhuw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmulhuw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmulhuw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmulhuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmulhuw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmulhuw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4376,70 +4376,70 @@ declare x86_mmx @llvm.x86.mmx.pmulhu.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pmullw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmullw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmullw %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pmullw (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmullw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmullw %mm1, %mm0 # sched: [4:4.00]
; ATOM-NEXT: pmullw (%rdi), %mm0 # sched: [4:4.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmullw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmullw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmullw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmullw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmullw %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: pmullw (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmullw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmullw %mm1, %mm0 # sched: [5:1.00]
; HASWELL-NEXT: pmullw (%rdi), %mm0 # sched: [5:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmullw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmullw %mm1, %mm0 # sched: [5:1.00]
; BROADWELL-NEXT: pmullw (%rdi), %mm0 # sched: [10:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmullw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmullw %mm1, %mm0 # sched: [4:1.00]
; SKYLAKE-NEXT: pmullw (%rdi), %mm0 # sched: [9:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmullw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmullw %mm1, %mm0 # sched: [4:1.00]
; SKX-NEXT: pmullw (%rdi), %mm0 # sched: [9:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmullw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmullw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmullw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmullw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmullw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmullw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4454,70 +4454,70 @@ declare x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pmuludq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pmuludq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmuludq %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: pmuludq (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmuludq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmuludq %mm1, %mm0 # sched: [4:4.00]
; ATOM-NEXT: pmuludq (%rdi), %mm0 # sched: [4:4.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmuludq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmuludq %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: pmuludq (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmuludq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pmuludq %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: pmuludq (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuludq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pmuludq %mm1, %mm0 # sched: [5:1.00]
; HASWELL-NEXT: pmuludq (%rdi), %mm0 # sched: [5:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmuludq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pmuludq %mm1, %mm0 # sched: [5:1.00]
; BROADWELL-NEXT: pmuludq (%rdi), %mm0 # sched: [10:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmuludq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pmuludq %mm1, %mm0 # sched: [4:1.00]
; SKYLAKE-NEXT: pmuludq (%rdi), %mm0 # sched: [9:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmuludq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pmuludq %mm1, %mm0 # sched: [4:1.00]
; SKX-NEXT: pmuludq (%rdi), %mm0 # sched: [9:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmuludq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pmuludq %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: pmuludq (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmuludq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pmuludq %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: pmuludq (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4532,70 +4532,70 @@ declare x86_mmx @llvm.x86.mmx.pmulu.dq(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_por(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_por:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: por %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: por (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_por:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: por %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: por (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_por:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: por %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: por (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_por:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: por %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: por (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_por:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: por %mm1, %mm0 # sched: [1:0.33]
; HASWELL-NEXT: por (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_por:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: por %mm1, %mm0 # sched: [1:0.33]
; BROADWELL-NEXT: por (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_por:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: por %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: por (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_por:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: por %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: por (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_por:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: por %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: por (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_por:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: por %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: por (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4610,70 +4610,70 @@ declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psadbw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psadbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psadbw %mm1, %mm0 # sched: [5:1.00]
; GENERIC-NEXT: psadbw (%rdi), %mm0 # sched: [9:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psadbw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psadbw %mm1, %mm0 # sched: [4:2.00]
; ATOM-NEXT: psadbw (%rdi), %mm0 # sched: [4:2.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psadbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psadbw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: psadbw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psadbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psadbw %mm1, %mm0 # sched: [5:1.00]
; SANDY-NEXT: psadbw (%rdi), %mm0 # sched: [9:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psadbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psadbw %mm1, %mm0 # sched: [5:1.00]
; HASWELL-NEXT: psadbw (%rdi), %mm0 # sched: [5:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psadbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psadbw %mm1, %mm0 # sched: [5:1.00]
; BROADWELL-NEXT: psadbw (%rdi), %mm0 # sched: [10:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psadbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psadbw %mm1, %mm0 # sched: [3:1.00]
; SKYLAKE-NEXT: psadbw (%rdi), %mm0 # sched: [8:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psadbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psadbw %mm1, %mm0 # sched: [3:1.00]
; SKX-NEXT: psadbw (%rdi), %mm0 # sched: [8:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psadbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psadbw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: psadbw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psadbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psadbw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: psadbw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4688,70 +4688,70 @@ declare x86_mmx @llvm.x86.mmx.psad.bw(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pshufb(x86_mmx %a0, x86_mmx %a1, x86_mmx *%a2) optsize {
; GENERIC-LABEL: test_pshufb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pshufb %mm1, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: pshufb (%rdi), %mm0 # sched: [6:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pshufb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pshufb %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: pshufb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pshufb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pshufb %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: pshufb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pshufb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pshufb %mm1, %mm0 # sched: [1:0.50]
; SANDY-NEXT: pshufb (%rdi), %mm0 # sched: [6:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pshufb %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: pshufb (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pshufb %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: pshufb (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pshufb %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pshufb (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pshufb %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pshufb (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pshufb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pshufb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pshufb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pshufb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pshufb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pshufb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4766,70 +4766,70 @@ declare x86_mmx @llvm.x86.ssse3.pshuf.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pshufw(x86_mmx *%a0) optsize {
; GENERIC-LABEL: test_pshufw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [5:1.00]
; GENERIC-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pshufw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [1:1.00]
; ATOM-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pshufw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [4:1.00]
; SLM-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pshufw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [5:1.00]
; SANDY-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [1:1.00]
; HASWELL-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [6:1.00]
; BROADWELL-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [6:1.00]
; SKYLAKE-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [6:1.00]
; SKX-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pshufw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [6:1.00]
; BTVER2-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:0.50]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pshufw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pshufw $0, (%rdi), %mm0 # mm0 = mem[0,0,0,0] sched: [8:0.50]
; ZNVER1-NEXT: pshufw $0, %mm0, %mm0 # mm0 = mm0[0,0,0,0] sched: [1:0.25]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4844,70 +4844,70 @@ declare x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx, i8) nounwind readnone
define i64 @test_psignb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psignb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psignb %mm1, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: psignb (%rdi), %mm0 # sched: [6:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psignb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psignb %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: psignb (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psignb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psignb %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: psignb (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psignb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psignb %mm1, %mm0 # sched: [1:0.50]
; SANDY-NEXT: psignb (%rdi), %mm0 # sched: [6:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psignb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psignb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psignb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psignb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psignb %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: psignb (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psignb %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: psignb (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psignb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psignb %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: psignb (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psignb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psignb %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: psignb (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -4922,70 +4922,70 @@ declare x86_mmx @llvm.x86.ssse3.psign.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psignd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psignd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psignd %mm1, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: psignd (%rdi), %mm0 # sched: [6:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psignd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psignd %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: psignd (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psignd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psignd %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: psignd (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psignd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psignd %mm1, %mm0 # sched: [1:0.50]
; SANDY-NEXT: psignd (%rdi), %mm0 # sched: [6:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psignd %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psignd (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psignd %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psignd (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psignd %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: psignd (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psignd %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: psignd (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psignd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psignd %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: psignd (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psignd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psignd %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: psignd (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -5000,70 +5000,70 @@ declare x86_mmx @llvm.x86.ssse3.psign.d(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psignw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psignw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psignw %mm1, %mm0 # sched: [1:0.50]
; GENERIC-NEXT: psignw (%rdi), %mm0 # sched: [6:0.50]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psignw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psignw %mm1, %mm0 # sched: [1:1.00]
; ATOM-NEXT: psignw (%rdi), %mm0 # sched: [1:0.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psignw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psignw %mm1, %mm0 # sched: [4:1.00]
; SLM-NEXT: psignw (%rdi), %mm0 # sched: [7:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psignw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psignw %mm1, %mm0 # sched: [1:0.50]
; SANDY-NEXT: psignw (%rdi), %mm0 # sched: [6:0.50]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psignw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psignw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psignw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psignw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psignw %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: psignw (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psignw %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: psignw (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psignw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psignw %mm1, %mm0 # sched: [2:1.00]
; BTVER2-NEXT: psignw (%rdi), %mm0 # sched: [7:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psignw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psignw %mm1, %mm0 # sched: [4:1.00]
; ZNVER1-NEXT: psignw (%rdi), %mm0 # sched: [11:1.00]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -5078,7 +5078,7 @@ declare x86_mmx @llvm.x86.ssse3.psign.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pslld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pslld %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: pslld (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: pslld $7, %mm0 # sched: [1:1.00]
@@ -5086,7 +5086,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pslld:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pslld %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: pslld (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: pslld $7, %mm0 # sched: [1:0.50]
@@ -5094,7 +5094,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pslld:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pslld %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: pslld (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: pslld $7, %mm0 # sched: [1:1.00]
@@ -5102,7 +5102,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pslld:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pslld %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: pslld (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: pslld $7, %mm0 # sched: [1:1.00]
@@ -5110,7 +5110,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pslld %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: pslld (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: pslld $7, %mm0 # sched: [1:1.00]
@@ -5118,7 +5118,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pslld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pslld %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: pslld (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: pslld $7, %mm0 # sched: [1:1.00]
@@ -5126,7 +5126,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pslld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pslld %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: pslld (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: pslld $7, %mm0 # sched: [1:1.00]
@@ -5134,7 +5134,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pslld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pslld %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: pslld (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: pslld $7, %mm0 # sched: [1:1.00]
@@ -5142,7 +5142,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pslld:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pslld %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pslld (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: pslld $7, %mm0 # sched: [1:0.50]
@@ -5150,7 +5150,7 @@ define i64 @test_pslld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pslld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pslld %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pslld (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: pslld $7, %mm0 # sched: [1:0.25]
@@ -5168,7 +5168,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx, i32) nounwind readnone
define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psllq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psllq %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: psllq (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: psllq $7, %mm0 # sched: [1:1.00]
@@ -5176,7 +5176,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psllq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psllq %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: psllq (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: psllq $7, %mm0 # sched: [1:0.50]
@@ -5184,7 +5184,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psllq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psllq %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: psllq (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: psllq $7, %mm0 # sched: [1:1.00]
@@ -5192,7 +5192,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psllq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psllq %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: psllq (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: psllq $7, %mm0 # sched: [1:1.00]
@@ -5200,7 +5200,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psllq %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psllq (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psllq $7, %mm0 # sched: [1:1.00]
@@ -5208,7 +5208,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psllq %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: psllq (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: psllq $7, %mm0 # sched: [1:1.00]
@@ -5216,7 +5216,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psllq %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psllq (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: psllq $7, %mm0 # sched: [1:1.00]
@@ -5224,7 +5224,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psllq %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psllq (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: psllq $7, %mm0 # sched: [1:1.00]
@@ -5232,7 +5232,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psllq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psllq %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psllq (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: psllq $7, %mm0 # sched: [1:0.50]
@@ -5240,7 +5240,7 @@ define i64 @test_psllq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psllq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psllq %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psllq (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: psllq $7, %mm0 # sched: [1:0.25]
@@ -5258,7 +5258,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone
define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psllw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psllw %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: psllw (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: psllw $7, %mm0 # sched: [1:1.00]
@@ -5266,7 +5266,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psllw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psllw %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: psllw (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: psllw $7, %mm0 # sched: [1:0.50]
@@ -5274,7 +5274,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psllw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psllw %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: psllw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: psllw $7, %mm0 # sched: [1:1.00]
@@ -5282,7 +5282,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psllw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psllw %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: psllw (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: psllw $7, %mm0 # sched: [1:1.00]
@@ -5290,7 +5290,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psllw %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psllw (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psllw $7, %mm0 # sched: [1:1.00]
@@ -5298,7 +5298,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psllw %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: psllw (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: psllw $7, %mm0 # sched: [1:1.00]
@@ -5306,7 +5306,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psllw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psllw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: psllw $7, %mm0 # sched: [1:1.00]
@@ -5314,7 +5314,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psllw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psllw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: psllw $7, %mm0 # sched: [1:1.00]
@@ -5322,7 +5322,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psllw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psllw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psllw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: psllw $7, %mm0 # sched: [1:0.50]
@@ -5330,7 +5330,7 @@ define i64 @test_psllw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psllw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psllw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psllw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: psllw $7, %mm0 # sched: [1:0.25]
@@ -5348,7 +5348,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx, i32) nounwind readnone
define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psrad:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrad %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: psrad (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: psrad $7, %mm0 # sched: [1:1.00]
@@ -5356,7 +5356,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrad:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrad %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: psrad (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: psrad $7, %mm0 # sched: [1:0.50]
@@ -5364,7 +5364,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrad:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrad %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: psrad (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: psrad $7, %mm0 # sched: [1:1.00]
@@ -5372,7 +5372,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrad:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psrad %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: psrad (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: psrad $7, %mm0 # sched: [1:1.00]
@@ -5380,7 +5380,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrad:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psrad %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psrad (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psrad $7, %mm0 # sched: [1:1.00]
@@ -5388,7 +5388,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrad:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psrad %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: psrad (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: psrad $7, %mm0 # sched: [1:1.00]
@@ -5396,7 +5396,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrad:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psrad %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psrad (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: psrad $7, %mm0 # sched: [1:1.00]
@@ -5404,7 +5404,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrad:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psrad %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psrad (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: psrad $7, %mm0 # sched: [1:1.00]
@@ -5412,7 +5412,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrad:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psrad %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psrad (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: psrad $7, %mm0 # sched: [1:0.50]
@@ -5420,7 +5420,7 @@ define i64 @test_psrad(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrad:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psrad %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psrad (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: psrad $7, %mm0 # sched: [1:0.25]
@@ -5438,7 +5438,7 @@ declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32) nounwind readnone
define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psraw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psraw %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: psraw (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: psraw $7, %mm0 # sched: [1:1.00]
@@ -5446,7 +5446,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psraw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psraw %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: psraw (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: psraw $7, %mm0 # sched: [1:0.50]
@@ -5454,7 +5454,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psraw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psraw %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: psraw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: psraw $7, %mm0 # sched: [1:1.00]
@@ -5462,7 +5462,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psraw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psraw %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: psraw (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: psraw $7, %mm0 # sched: [1:1.00]
@@ -5470,7 +5470,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psraw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psraw %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psraw (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psraw $7, %mm0 # sched: [1:1.00]
@@ -5478,7 +5478,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psraw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psraw %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: psraw (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: psraw $7, %mm0 # sched: [1:1.00]
@@ -5486,7 +5486,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psraw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psraw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psraw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: psraw $7, %mm0 # sched: [1:1.00]
@@ -5494,7 +5494,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psraw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psraw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psraw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: psraw $7, %mm0 # sched: [1:1.00]
@@ -5502,7 +5502,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psraw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psraw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psraw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: psraw $7, %mm0 # sched: [1:0.50]
@@ -5510,7 +5510,7 @@ define i64 @test_psraw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psraw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psraw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psraw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: psraw $7, %mm0 # sched: [1:0.25]
@@ -5528,7 +5528,7 @@ declare x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx, i32) nounwind readnone
define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psrld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrld %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: psrld (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: psrld $7, %mm0 # sched: [1:1.00]
@@ -5536,7 +5536,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrld:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrld %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: psrld (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: psrld $7, %mm0 # sched: [1:0.50]
@@ -5544,7 +5544,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrld:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrld %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: psrld (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: psrld $7, %mm0 # sched: [1:1.00]
@@ -5552,7 +5552,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrld:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psrld %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: psrld (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: psrld $7, %mm0 # sched: [1:1.00]
@@ -5560,7 +5560,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psrld %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psrld (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psrld $7, %mm0 # sched: [1:1.00]
@@ -5568,7 +5568,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psrld %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: psrld (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: psrld $7, %mm0 # sched: [1:1.00]
@@ -5576,7 +5576,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psrld %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psrld (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: psrld $7, %mm0 # sched: [1:1.00]
@@ -5584,7 +5584,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psrld %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psrld (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: psrld $7, %mm0 # sched: [1:1.00]
@@ -5592,7 +5592,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrld:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psrld %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psrld (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: psrld $7, %mm0 # sched: [1:0.50]
@@ -5600,7 +5600,7 @@ define i64 @test_psrld(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psrld %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psrld (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: psrld $7, %mm0 # sched: [1:0.25]
@@ -5618,7 +5618,7 @@ declare x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx, i32) nounwind readnone
define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psrlq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrlq %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: psrlq (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: psrlq $7, %mm0 # sched: [1:1.00]
@@ -5626,7 +5626,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrlq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrlq %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: psrlq (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: psrlq $7, %mm0 # sched: [1:0.50]
@@ -5634,7 +5634,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrlq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrlq %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: psrlq (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: psrlq $7, %mm0 # sched: [1:1.00]
@@ -5642,7 +5642,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrlq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psrlq %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: psrlq (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: psrlq $7, %mm0 # sched: [1:1.00]
@@ -5650,7 +5650,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psrlq %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psrlq (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psrlq $7, %mm0 # sched: [1:1.00]
@@ -5658,7 +5658,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psrlq %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: psrlq (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: psrlq $7, %mm0 # sched: [1:1.00]
@@ -5666,7 +5666,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psrlq %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psrlq (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: psrlq $7, %mm0 # sched: [1:1.00]
@@ -5674,7 +5674,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psrlq %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psrlq (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: psrlq $7, %mm0 # sched: [1:1.00]
@@ -5682,7 +5682,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrlq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psrlq %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psrlq (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: psrlq $7, %mm0 # sched: [1:0.50]
@@ -5690,7 +5690,7 @@ define i64 @test_psrlq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrlq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psrlq %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psrlq (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: psrlq $7, %mm0 # sched: [1:0.25]
@@ -5708,7 +5708,7 @@ declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32) nounwind readnone
define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psrlw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrlw %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: psrlw (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: psrlw $7, %mm0 # sched: [1:1.00]
@@ -5716,7 +5716,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrlw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrlw %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: psrlw (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: psrlw $7, %mm0 # sched: [1:0.50]
@@ -5724,7 +5724,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrlw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrlw %mm1, %mm0 # sched: [1:1.00]
; SLM-NEXT: psrlw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: psrlw $7, %mm0 # sched: [1:1.00]
@@ -5732,7 +5732,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrlw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psrlw %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: psrlw (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: psrlw $7, %mm0 # sched: [1:1.00]
@@ -5740,7 +5740,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psrlw %mm1, %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psrlw (%rdi), %mm0 # sched: [1:1.00]
; HASWELL-NEXT: psrlw $7, %mm0 # sched: [1:1.00]
@@ -5748,7 +5748,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psrlw %mm1, %mm0 # sched: [1:1.00]
; BROADWELL-NEXT: psrlw (%rdi), %mm0 # sched: [6:1.00]
; BROADWELL-NEXT: psrlw $7, %mm0 # sched: [1:1.00]
@@ -5756,7 +5756,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psrlw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psrlw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: psrlw $7, %mm0 # sched: [1:1.00]
@@ -5764,7 +5764,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psrlw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psrlw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: psrlw $7, %mm0 # sched: [1:1.00]
@@ -5772,7 +5772,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrlw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psrlw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psrlw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: psrlw $7, %mm0 # sched: [1:0.50]
@@ -5780,7 +5780,7 @@ define i64 @test_psrlw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrlw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psrlw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psrlw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: psrlw $7, %mm0 # sched: [1:0.25]
@@ -5798,70 +5798,70 @@ declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32) nounwind readnone
define i64 @test_psubb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psubb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubb %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: psubb (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: psubb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubb %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: psubb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psubb %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: psubb (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psubb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psubb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psubb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psubb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psubb %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: psubb (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psubb %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: psubb (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psubb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psubb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psubb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psubb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -5876,70 +5876,70 @@ declare x86_mmx @llvm.x86.mmx.psub.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psubd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psubd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubd %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: psubd (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubd %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: psubd (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubd %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: psubd (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psubd %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: psubd (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psubd %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psubd (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psubd %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psubd (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psubd %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: psubd (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psubd %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: psubd (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psubd %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psubd (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psubd %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psubd (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -5954,70 +5954,70 @@ declare x86_mmx @llvm.x86.mmx.psub.d(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psubq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psubq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubq %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: psubq (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubq %mm1, %mm0 # sched: [2:1.00]
; ATOM-NEXT: psubq (%rdi), %mm0 # sched: [3:1.50]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubq %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: psubq (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psubq %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: psubq (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psubq %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psubq (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psubq %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psubq (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psubq %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: psubq (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psubq %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: psubq (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psubq %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psubq (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psubq %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psubq (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6032,70 +6032,70 @@ declare x86_mmx @llvm.x86.mmx.psub.q(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psubsb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psubsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubsb %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: psubsb (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubsb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubsb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: psubsb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubsb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubsb %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: psubsb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubsb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psubsb %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: psubsb (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psubsb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psubsb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psubsb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psubsb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psubsb %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psubsb (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psubsb %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psubsb (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubsb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psubsb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psubsb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psubsb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psubsb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6110,70 +6110,70 @@ declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psubsw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubsw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: psubsw (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubsw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: psubsw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubsw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: psubsw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psubsw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: psubsw (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psubsw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psubsw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psubsw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psubsw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psubsw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psubsw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psubsw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psubsw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psubsw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psubsw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psubsw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psubsw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6188,70 +6188,70 @@ declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psubusb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psubusb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubusb %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: psubusb (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubusb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubusb %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: psubusb (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubusb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubusb %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: psubusb (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubusb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psubusb %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: psubusb (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psubusb %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psubusb (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubusb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psubusb %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psubusb (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubusb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psubusb %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psubusb (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubusb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psubusb %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psubusb (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubusb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psubusb %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psubusb (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubusb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psubusb %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psubusb (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6266,70 +6266,70 @@ declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psubusw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psubusw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubusw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: psubusw (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubusw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubusw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: psubusw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubusw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubusw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: psubusw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubusw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psubusw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: psubusw (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psubusw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psubusw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubusw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psubusw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psubusw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubusw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psubusw %mm1, %mm0 # sched: [1:1.00]
; SKYLAKE-NEXT: psubusw (%rdi), %mm0 # sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubusw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psubusw %mm1, %mm0 # sched: [1:1.00]
; SKX-NEXT: psubusw (%rdi), %mm0 # sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubusw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psubusw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psubusw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubusw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psubusw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psubusw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6344,70 +6344,70 @@ declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_psubw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_psubw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubw %mm1, %mm0 # sched: [3:1.00]
; GENERIC-NEXT: psubw (%rdi), %mm0 # sched: [7:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubw %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: psubw (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubw %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: psubw (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: psubw %mm1, %mm0 # sched: [3:1.00]
; SANDY-NEXT: psubw (%rdi), %mm0 # sched: [7:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: psubw %mm1, %mm0 # sched: [1:0.50]
; HASWELL-NEXT: psubw (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: psubw %mm1, %mm0 # sched: [1:0.50]
; BROADWELL-NEXT: psubw (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: psubw %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: psubw (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: psubw %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: psubw (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: psubw %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: psubw (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: psubw %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: psubw (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6422,70 +6422,70 @@ declare x86_mmx @llvm.x86.mmx.psub.w(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_punpckhbw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_punpckhbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:1.00]
; GENERIC-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckhbw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:0.50]
; ATOM-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckhbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:1.00]
; SLM-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckhbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:1.00]
; SANDY-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:1.00]
; HASWELL-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:1.00]
; BROADWELL-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [6:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:1.00]
; SKYLAKE-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:1.00]
; SKX-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckhbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:0.50]
; BTVER2-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckhbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: punpckhbw %mm1, %mm0 # mm0 = mm0[4],mm1[4],mm0[5],mm1[5],mm0[6],mm1[6],mm0[7],mm1[7] sched: [1:0.25]
; ZNVER1-NEXT: punpckhbw (%rdi), %mm0 # mm0 = mm0[4],mem[4],mm0[5],mem[5],mm0[6],mem[6],mm0[7],mem[7] sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6500,70 +6500,70 @@ declare x86_mmx @llvm.x86.mmx.punpckhbw(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_punpckhdq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_punpckhdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:1.00]
; GENERIC-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckhdq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:0.50]
; ATOM-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckhdq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:1.00]
; SLM-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckhdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:1.00]
; SANDY-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:1.00]
; HASWELL-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:1.00]
; BROADWELL-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [6:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:1.00]
; SKYLAKE-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:1.00]
; SKX-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckhdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:0.50]
; BTVER2-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckhdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: punpckhdq %mm1, %mm0 # mm0 = mm0[1],mm1[1] sched: [1:0.25]
; ZNVER1-NEXT: punpckhdq (%rdi), %mm0 # mm0 = mm0[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6578,70 +6578,70 @@ declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_punpckhwd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_punpckhwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; GENERIC-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckhwd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:0.50]
; ATOM-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckhwd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; SLM-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckhwd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; SANDY-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; HASWELL-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; BROADWELL-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [6:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; SKX-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckhwd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:0.50]
; BTVER2-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckhwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: punpckhwd %mm1, %mm0 # mm0 = mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:0.25]
; ZNVER1-NEXT: punpckhwd (%rdi), %mm0 # mm0 = mm0[2],mem[2],mm0[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6656,70 +6656,70 @@ declare x86_mmx @llvm.x86.mmx.punpckhwd(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_punpcklbw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_punpcklbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; GENERIC-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpcklbw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; ATOM-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpcklbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; SLM-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpcklbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; SANDY-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; HASWELL-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; BROADWELL-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [6:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:1.00]
; SKX-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpcklbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:0.50]
; BTVER2-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpcklbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: punpcklbw %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3] sched: [1:0.25]
; ZNVER1-NEXT: punpcklbw (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1],mm0[2],mem[2],mm0[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6734,70 +6734,70 @@ declare x86_mmx @llvm.x86.mmx.punpcklbw(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_punpckldq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_punpckldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:1.00]
; GENERIC-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckldq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:1.00]
; ATOM-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckldq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:1.00]
; SLM-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckldq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:1.00]
; SANDY-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:1.00]
; HASWELL-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:1.00]
; BROADWELL-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [6:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:1.00]
; SKYLAKE-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:1.00]
; SKX-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckldq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:0.50]
; BTVER2-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: punpckldq %mm1, %mm0 # mm0 = mm0[0],mm1[0] sched: [1:0.25]
; ZNVER1-NEXT: punpckldq (%rdi), %mm0 # mm0 = mm0[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6812,70 +6812,70 @@ declare x86_mmx @llvm.x86.mmx.punpckldq(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_punpcklwd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_punpcklwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:1.00]
; GENERIC-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpcklwd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:1.00]
; ATOM-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpcklwd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:1.00]
; SLM-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpcklwd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:1.00]
; SANDY-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:1.00]
; HASWELL-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [1:1.00]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:1.00]
; BROADWELL-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [6:1.00]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:1.00]
; SKYLAKE-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [6:1.00]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:1.00]
; SKX-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [6:1.00]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpcklwd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:0.50]
; BTVER2-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpcklwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1] sched: [1:0.25]
; ZNVER1-NEXT: punpcklwd (%rdi), %mm0 # mm0 = mm0[0],mem[0],mm0[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
@@ -6890,70 +6890,70 @@ declare x86_mmx @llvm.x86.mmx.punpcklwd(x86_mmx, x86_mmx) nounwind readnone
define i64 @test_pxor(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
; GENERIC-LABEL: test_pxor:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pxor %mm1, %mm0 # sched: [1:1.00]
; GENERIC-NEXT: pxor (%rdi), %mm0 # sched: [5:1.00]
; GENERIC-NEXT: movd %mm0, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pxor:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pxor %mm1, %mm0 # sched: [1:0.50]
; ATOM-NEXT: pxor (%rdi), %mm0 # sched: [1:1.00]
; ATOM-NEXT: movd %mm0, %rax # sched: [3:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pxor:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pxor %mm1, %mm0 # sched: [1:0.50]
; SLM-NEXT: pxor (%rdi), %mm0 # sched: [4:1.00]
; SLM-NEXT: movd %mm0, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pxor:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: pxor %mm1, %mm0 # sched: [1:1.00]
; SANDY-NEXT: pxor (%rdi), %mm0 # sched: [5:1.00]
; SANDY-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pxor:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: pxor %mm1, %mm0 # sched: [1:0.33]
; HASWELL-NEXT: pxor (%rdi), %mm0 # sched: [1:0.50]
; HASWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pxor:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: pxor %mm1, %mm0 # sched: [1:0.33]
; BROADWELL-NEXT: pxor (%rdi), %mm0 # sched: [6:0.50]
; BROADWELL-NEXT: movd %mm0, %rax # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pxor:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: pxor %mm1, %mm0 # sched: [1:0.50]
; SKYLAKE-NEXT: pxor (%rdi), %mm0 # sched: [6:0.50]
; SKYLAKE-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pxor:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: pxor %mm1, %mm0 # sched: [1:0.50]
; SKX-NEXT: pxor (%rdi), %mm0 # sched: [6:0.50]
; SKX-NEXT: movd %mm0, %rax # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pxor:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: pxor %mm1, %mm0 # sched: [1:0.50]
; BTVER2-NEXT: pxor (%rdi), %mm0 # sched: [6:1.00]
; BTVER2-NEXT: movd %mm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pxor:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: pxor %mm1, %mm0 # sched: [1:0.25]
; ZNVER1-NEXT: pxor (%rdi), %mm0 # sched: [8:0.50]
; ZNVER1-NEXT: movd %mm0, %rax # sched: [2:1.00]
diff --git a/test/CodeGen/X86/movbe-schedule.ll b/test/CodeGen/X86/movbe-schedule.ll
index 868a5c6080e..56e78219f91 100644
--- a/test/CodeGen/X86/movbe-schedule.ll
+++ b/test/CodeGen/X86/movbe-schedule.ll
@@ -11,13 +11,13 @@
define i16 @test_movbe_i16(i16 *%a0, i16 %a1, i16 *%a2) {
; GENERIC-LABEL: test_movbe_i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movbew (%rdi), %ax # sched: [5:0.50]
; GENERIC-NEXT: movbew %si, (%rdx) # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movbe_i16:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movbew (%rdi), %ax # sched: [1:1.00]
; ATOM-NEXT: movbew %si, (%rdx) # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -27,37 +27,37 @@ define i16 @test_movbe_i16(i16 *%a0, i16 %a1, i16 *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movbe_i16:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movbew (%rdi), %ax # sched: [4:1.00]
; SLM-NEXT: movbew %si, (%rdx) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; HASWELL-LABEL: test_movbe_i16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movbew (%rdi), %ax # sched: [1:0.50]
; HASWELL-NEXT: movbew %si, (%rdx) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movbe_i16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movbew (%rdi), %ax # sched: [6:0.50]
; BROADWELL-NEXT: movbew %si, (%rdx) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movbe_i16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movbew (%rdi), %ax # sched: [6:0.50]
; SKYLAKE-NEXT: movbew %si, (%rdx) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movbe_i16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movbew (%rdi), %ax # sched: [4:1.00]
; BTVER2-NEXT: movbew %si, (%rdx) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movbe_i16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movbew (%rdi), %ax # sched: [5:0.50]
; ZNVER1-NEXT: movbew %si, (%rdx) # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -71,13 +71,13 @@ declare i16 @llvm.bswap.i16(i16)
define i32 @test_movbe_i32(i32 *%a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_movbe_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movbel (%rdi), %eax # sched: [5:0.50]
; GENERIC-NEXT: movbel %esi, (%rdx) # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movbe_i32:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movbel (%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: movbel %esi, (%rdx) # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -87,37 +87,37 @@ define i32 @test_movbe_i32(i32 *%a0, i32 %a1, i32 *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movbe_i32:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movbel (%rdi), %eax # sched: [4:1.00]
; SLM-NEXT: movbel %esi, (%rdx) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; HASWELL-LABEL: test_movbe_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movbel (%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: movbel %esi, (%rdx) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movbe_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movbel (%rdi), %eax # sched: [6:0.50]
; BROADWELL-NEXT: movbel %esi, (%rdx) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movbe_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movbel (%rdi), %eax # sched: [6:0.50]
; SKYLAKE-NEXT: movbel %esi, (%rdx) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movbe_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movbel (%rdi), %eax # sched: [4:1.00]
; BTVER2-NEXT: movbel %esi, (%rdx) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movbe_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movbel (%rdi), %eax # sched: [5:0.50]
; ZNVER1-NEXT: movbel %esi, (%rdx) # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -131,13 +131,13 @@ declare i32 @llvm.bswap.i32(i32)
define i64 @test_movbe_i64(i64 *%a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_movbe_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movbeq (%rdi), %rax # sched: [5:0.50]
; GENERIC-NEXT: movbeq %rsi, (%rdx) # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movbe_i64:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movbeq (%rdi), %rax # sched: [1:1.00]
; ATOM-NEXT: movbeq %rsi, (%rdx) # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -147,37 +147,37 @@ define i64 @test_movbe_i64(i64 *%a0, i64 %a1, i64 *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movbe_i64:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movbeq (%rdi), %rax # sched: [4:1.00]
; SLM-NEXT: movbeq %rsi, (%rdx) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; HASWELL-LABEL: test_movbe_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movbeq (%rdi), %rax # sched: [1:0.50]
; HASWELL-NEXT: movbeq %rsi, (%rdx) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movbe_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movbeq (%rdi), %rax # sched: [6:0.50]
; BROADWELL-NEXT: movbeq %rsi, (%rdx) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movbe_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movbeq (%rdi), %rax # sched: [6:0.50]
; SKYLAKE-NEXT: movbeq %rsi, (%rdx) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movbe_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movbeq (%rdi), %rax # sched: [4:1.00]
; BTVER2-NEXT: movbeq %rsi, (%rdx) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movbe_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movbeq (%rdi), %rax # sched: [5:0.50]
; ZNVER1-NEXT: movbeq %rsi, (%rdx) # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
diff --git a/test/CodeGen/X86/movgs.ll b/test/CodeGen/X86/movgs.ll
index f7426188977..00fc598ec65 100644
--- a/test/CodeGen/X86/movgs.ll
+++ b/test/CodeGen/X86/movgs.ll
@@ -4,13 +4,13 @@
define i32 @test1() nounwind readonly {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl %gs:196, %eax
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %gs:320, %rax
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: retq
@@ -22,7 +22,7 @@ entry:
define i64 @test2(void (i8*)* addrspace(256)* %tmp8) nounwind {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: subl $12, %esp
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: calll *%gs:(%eax)
@@ -32,7 +32,7 @@ define i64 @test2(void (i8*)* addrspace(256)* %tmp8) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: {{(subq.*%rsp|pushq)}}
; X64-NEXT: callq *%gs:(%{{(rcx|rdi)}})
; X64-NEXT: xorl %eax, %eax
@@ -46,13 +46,13 @@ entry:
define <2 x i64> @pmovsxwd_1(i64 addrspace(256)* %p) nounwind readonly {
; X32-LABEL: pmovsxwd_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pmovsxwd %gs:(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: pmovsxwd_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pmovsxwd %gs:(%{{(rcx|rdi)}}), %xmm0
; X64-NEXT: retq
entry:
@@ -69,7 +69,7 @@ entry:
; address spaces. Make sure they aren't CSE'd.
define i32 @test_no_cse() nounwind readonly {
; X32-LABEL: test_no_cse:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl %gs:196, %eax
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: movl %fs:196, %ecx
@@ -77,7 +77,7 @@ define i32 @test_no_cse() nounwind readonly {
; X32-NEXT: retl
;
; X64-LABEL: test_no_cse:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %gs:320, %rax
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: movq %fs:320, %rcx
diff --git a/test/CodeGen/X86/movmsk.ll b/test/CodeGen/X86/movmsk.ll
index c2010d09d0e..b670d33b98d 100644
--- a/test/CodeGen/X86/movmsk.ll
+++ b/test/CodeGen/X86/movmsk.ll
@@ -6,7 +6,7 @@
define i32 @double_signbit(double %d1) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_signbit:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movmskpd %xmm0, %eax
@@ -28,7 +28,7 @@ entry:
define i32 @double_add_signbit(double %d1, double %d2) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_add_signbit:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
@@ -52,7 +52,7 @@ entry:
define i32 @float_signbit(float %f1) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_signbit:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movmskps %xmm0, %eax
@@ -73,7 +73,7 @@ entry:
define i32 @float_add_signbit(float %f1, float %f2) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_add_signbit:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: addss %xmm1, %xmm0
; CHECK-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
@@ -99,7 +99,7 @@ entry:
; in this case, though.
define void @float_call_signbit(double %n) {
; CHECK-LABEL: float_call_signbit:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movq %xmm0, %rdi
; CHECK-NEXT: shrq $63, %rdi
; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<kill>
@@ -118,7 +118,7 @@ declare void @float_call_signbit_callee(i1 zeroext)
define i32 @t1(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movmskps %xmm0, %eax
; CHECK-NEXT: movl (%rdi,%rax,4), %eax
; CHECK-NEXT: retq
@@ -132,7 +132,7 @@ entry:
define i32 @t2(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movmskpd %xmm0, %eax
; CHECK-NEXT: movl (%rdi,%rax,4), %eax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/mul-constant-i16.ll b/test/CodeGen/X86/mul-constant-i16.ll
index c3b822ac214..cde94c9a095 100644
--- a/test/CodeGen/X86/mul-constant-i16.ll
+++ b/test/CodeGen/X86/mul-constant-i16.ll
@@ -4,12 +4,12 @@
define i16 @test_mul_by_1(i16 %x) {
; X86-LABEL: test_mul_by_1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 1
@@ -18,14 +18,14 @@ define i16 @test_mul_by_1(i16 %x) {
define i16 @test_mul_by_2(i16 %x) {
; X86-LABEL: test_mul_by_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi), %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -36,14 +36,14 @@ define i16 @test_mul_by_2(i16 %x) {
define i16 @test_mul_by_3(i16 %x) {
; X86-LABEL: test_mul_by_3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -54,14 +54,14 @@ define i16 @test_mul_by_3(i16 %x) {
define i16 @test_mul_by_4(i16 %x) {
; X86-LABEL: test_mul_by_4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (,%rdi,4), %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -72,14 +72,14 @@ define i16 @test_mul_by_4(i16 %x) {
define i16 @test_mul_by_5(i16 %x) {
; X86-LABEL: test_mul_by_5:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -90,7 +90,7 @@ define i16 @test_mul_by_5(i16 %x) {
define i16 @test_mul_by_6(i16 %x) {
; X86-LABEL: test_mul_by_6:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -98,7 +98,7 @@ define i16 @test_mul_by_6(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
@@ -110,7 +110,7 @@ define i16 @test_mul_by_6(i16 %x) {
define i16 @test_mul_by_7(i16 %x) {
; X86-LABEL: test_mul_by_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (,%ecx,8), %eax
; X86-NEXT: subl %ecx, %eax
@@ -118,7 +118,7 @@ define i16 @test_mul_by_7(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (,%rdi,8), %eax
; X64-NEXT: subl %edi, %eax
@@ -130,14 +130,14 @@ define i16 @test_mul_by_7(i16 %x) {
define i16 @test_mul_by_8(i16 %x) {
; X86-LABEL: test_mul_by_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (,%rdi,8), %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -148,14 +148,14 @@ define i16 @test_mul_by_8(i16 %x) {
define i16 @test_mul_by_9(i16 %x) {
; X86-LABEL: test_mul_by_9:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -166,7 +166,7 @@ define i16 @test_mul_by_9(i16 %x) {
define i16 @test_mul_by_10(i16 %x) {
; X86-LABEL: test_mul_by_10:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
@@ -174,7 +174,7 @@ define i16 @test_mul_by_10(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_10:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
@@ -186,7 +186,7 @@ define i16 @test_mul_by_10(i16 %x) {
define i16 @test_mul_by_11(i16 %x) {
; X86-LABEL: test_mul_by_11:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,2), %eax
@@ -194,7 +194,7 @@ define i16 @test_mul_by_11(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_11:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,2), %eax
@@ -206,7 +206,7 @@ define i16 @test_mul_by_11(i16 %x) {
define i16 @test_mul_by_12(i16 %x) {
; X86-LABEL: test_mul_by_12:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -214,7 +214,7 @@ define i16 @test_mul_by_12(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_12:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
@@ -226,7 +226,7 @@ define i16 @test_mul_by_12(i16 %x) {
define i16 @test_mul_by_13(i16 %x) {
; X86-LABEL: test_mul_by_13:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
@@ -234,7 +234,7 @@ define i16 @test_mul_by_13(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_13:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
@@ -246,7 +246,7 @@ define i16 @test_mul_by_13(i16 %x) {
define i16 @test_mul_by_14(i16 %x) {
; X86-LABEL: test_mul_by_14:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
@@ -255,7 +255,7 @@ define i16 @test_mul_by_14(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_14:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
@@ -268,7 +268,7 @@ define i16 @test_mul_by_14(i16 %x) {
define i16 @test_mul_by_15(i16 %x) {
; X86-LABEL: test_mul_by_15:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -276,7 +276,7 @@ define i16 @test_mul_by_15(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_15:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
@@ -288,14 +288,14 @@ define i16 @test_mul_by_15(i16 %x) {
define i16 @test_mul_by_16(i16 %x) {
; X86-LABEL: test_mul_by_16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $4, %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shll $4, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -305,7 +305,7 @@ define i16 @test_mul_by_16(i16 %x) {
define i16 @test_mul_by_17(i16 %x) {
; X86-LABEL: test_mul_by_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $4, %eax
@@ -314,7 +314,7 @@ define i16 @test_mul_by_17(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $4, %eax
@@ -327,7 +327,7 @@ define i16 @test_mul_by_17(i16 %x) {
define i16 @test_mul_by_18(i16 %x) {
; X86-LABEL: test_mul_by_18:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
@@ -335,7 +335,7 @@ define i16 @test_mul_by_18(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_18:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
@@ -347,7 +347,7 @@ define i16 @test_mul_by_18(i16 %x) {
define i16 @test_mul_by_19(i16 %x) {
; X86-LABEL: test_mul_by_19:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: shll $2, %eax
@@ -356,7 +356,7 @@ define i16 @test_mul_by_19(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_19:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: shll $2, %eax
@@ -369,7 +369,7 @@ define i16 @test_mul_by_19(i16 %x) {
define i16 @test_mul_by_20(i16 %x) {
; X86-LABEL: test_mul_by_20:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
@@ -377,7 +377,7 @@ define i16 @test_mul_by_20(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_20:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
@@ -389,7 +389,7 @@ define i16 @test_mul_by_20(i16 %x) {
define i16 @test_mul_by_21(i16 %x) {
; X86-LABEL: test_mul_by_21:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
@@ -397,7 +397,7 @@ define i16 @test_mul_by_21(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_21:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
@@ -409,7 +409,7 @@ define i16 @test_mul_by_21(i16 %x) {
define i16 @test_mul_by_22(i16 %x) {
; X86-LABEL: test_mul_by_22:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
@@ -418,7 +418,7 @@ define i16 @test_mul_by_22(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_22:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
@@ -431,7 +431,7 @@ define i16 @test_mul_by_22(i16 %x) {
define i16 @test_mul_by_23(i16 %x) {
; X86-LABEL: test_mul_by_23:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: shll $3, %eax
@@ -440,7 +440,7 @@ define i16 @test_mul_by_23(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_23:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: shll $3, %eax
@@ -453,7 +453,7 @@ define i16 @test_mul_by_23(i16 %x) {
define i16 @test_mul_by_24(i16 %x) {
; X86-LABEL: test_mul_by_24:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -461,7 +461,7 @@ define i16 @test_mul_by_24(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_24:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $3, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
@@ -473,7 +473,7 @@ define i16 @test_mul_by_24(i16 %x) {
define i16 @test_mul_by_25(i16 %x) {
; X86-LABEL: test_mul_by_25:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
@@ -481,7 +481,7 @@ define i16 @test_mul_by_25(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_25:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,4), %eax
@@ -493,7 +493,7 @@ define i16 @test_mul_by_25(i16 %x) {
define i16 @test_mul_by_26(i16 %x) {
; X86-LABEL: test_mul_by_26:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -502,7 +502,7 @@ define i16 @test_mul_by_26(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_26:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
@@ -515,7 +515,7 @@ define i16 @test_mul_by_26(i16 %x) {
define i16 @test_mul_by_27(i16 %x) {
; X86-LABEL: test_mul_by_27:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -523,7 +523,7 @@ define i16 @test_mul_by_27(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_27:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
@@ -535,7 +535,7 @@ define i16 @test_mul_by_27(i16 %x) {
define i16 @test_mul_by_28(i16 %x) {
; X86-LABEL: test_mul_by_28:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -544,7 +544,7 @@ define i16 @test_mul_by_28(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_28:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
@@ -557,7 +557,7 @@ define i16 @test_mul_by_28(i16 %x) {
define i16 @test_mul_by_29(i16 %x) {
; X86-LABEL: test_mul_by_29:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -567,7 +567,7 @@ define i16 @test_mul_by_29(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_29:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
@@ -581,7 +581,7 @@ define i16 @test_mul_by_29(i16 %x) {
define i16 @test_mul_by_30(i16 %x) {
; X86-LABEL: test_mul_by_30:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
@@ -591,7 +591,7 @@ define i16 @test_mul_by_30(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_30:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
@@ -604,7 +604,7 @@ define i16 @test_mul_by_30(i16 %x) {
define i16 @test_mul_by_31(i16 %x) {
; X86-LABEL: test_mul_by_31:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
@@ -613,7 +613,7 @@ define i16 @test_mul_by_31(i16 %x) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_31:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
@@ -625,14 +625,14 @@ define i16 @test_mul_by_31(i16 %x) {
define i16 @test_mul_by_32(i16 %x) {
; X86-LABEL: test_mul_by_32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $5, %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shll $5, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -643,7 +643,7 @@ define i16 @test_mul_by_32(i16 %x) {
; (x*9+42)*(x*5+2)
define i16 @test_mul_spec(i16 %x) nounwind {
; X86-LABEL: test_mul_spec:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal 42(%eax,%eax,8), %ecx
; X86-NEXT: leal 2(%eax,%eax,4), %eax
@@ -652,7 +652,7 @@ define i16 @test_mul_spec(i16 %x) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test_mul_spec:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 42(%rdi,%rdi,8), %ecx
; X64-NEXT: leal 2(%rdi,%rdi,4), %eax
diff --git a/test/CodeGen/X86/mul-constant-i32.ll b/test/CodeGen/X86/mul-constant-i32.ll
index 228dd5e5f37..f862b175351 100644
--- a/test/CodeGen/X86/mul-constant-i32.ll
+++ b/test/CodeGen/X86/mul-constant-i32.ll
@@ -10,42 +10,42 @@
define i32 @test_mul_by_1(i32 %x) {
; X86-LABEL: test_mul_by_1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_1:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_1:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.17]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_1:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_1:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.25]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_1:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.17]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_1:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_1:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.50]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 1
@@ -54,49 +54,49 @@ define i32 @test_mul_by_1(i32 %x) {
define i32 @test_mul_by_2(i32 %x) {
; X86-LABEL: test_mul_by_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_2:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_2:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_2:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: addl %eax, %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_2:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_2:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_2:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_2:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -106,47 +106,47 @@ define i32 @test_mul_by_2(i32 %x) {
define i32 @test_mul_by_3(i32 %x) {
; X86-LABEL: test_mul_by_3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: imull $3, {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_3:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_3:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_3:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $3, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_3:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_3:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_3:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_3:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -156,49 +156,49 @@ define i32 @test_mul_by_3(i32 %x) {
define i32 @test_mul_by_4(i32 %x) {
; X86-LABEL: test_mul_by_4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_4:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_4:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_4:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: shll $2, %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_4:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_4:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_4:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_4:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -208,47 +208,47 @@ define i32 @test_mul_by_4(i32 %x) {
define i32 @test_mul_by_5(i32 %x) {
; X86-LABEL: test_mul_by_5:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: imull $5, {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_5:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_5:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_5:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $5, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_5:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_5:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_5:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_5:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -258,50 +258,50 @@ define i32 @test_mul_by_5(i32 %x) {
define i32 @test_mul_by_6(i32 %x) {
; X86-LABEL: test_mul_by_6:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_6:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_6:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_6:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $6, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_6:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $6, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_6:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $6, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_6:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_6:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $6, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 6
@@ -310,50 +310,50 @@ define i32 @test_mul_by_6(i32 %x) {
define i32 @test_mul_by_7(i32 %x) {
; X86-LABEL: test_mul_by_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (,%ecx,8), %eax
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_7:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_7:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_7:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $7, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_7:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $7, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_7:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $7, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_7:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_7:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $7, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 7
@@ -362,49 +362,49 @@ define i32 @test_mul_by_7(i32 %x) {
define i32 @test_mul_by_8(i32 %x) {
; X86-LABEL: test_mul_by_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_8:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_8:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_8:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: shll $3, %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_8:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_8:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_8:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_8:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -414,47 +414,47 @@ define i32 @test_mul_by_8(i32 %x) {
define i32 @test_mul_by_9(i32 %x) {
; X86-LABEL: test_mul_by_9:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: imull $9, {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_9:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_9:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_9:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $9, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_9:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_9:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_9:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_9:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -464,50 +464,50 @@ define i32 @test_mul_by_9(i32 %x) {
define i32 @test_mul_by_10(i32 %x) {
; X86-LABEL: test_mul_by_10:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_10:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_10:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_10:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $10, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_10:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $10, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_10:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $10, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_10:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_10:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $10, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 10
@@ -516,48 +516,48 @@ define i32 @test_mul_by_10(i32 %x) {
define i32 @test_mul_by_11(i32 %x) {
; X86-LABEL: test_mul_by_11:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,2), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_11:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_11:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_11:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $11, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_11:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $11, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_11:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $11, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_11:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $11, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_11:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $11, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 11
@@ -566,50 +566,50 @@ define i32 @test_mul_by_11(i32 %x) {
define i32 @test_mul_by_12(i32 %x) {
; X86-LABEL: test_mul_by_12:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_12:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_12:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_12:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $12, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_12:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $12, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_12:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $12, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_12:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_12:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $12, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 12
@@ -618,48 +618,48 @@ define i32 @test_mul_by_12(i32 %x) {
define i32 @test_mul_by_13(i32 %x) {
; X86-LABEL: test_mul_by_13:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_13:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_13:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_13:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $13, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_13:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $13, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_13:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $13, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_13:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $13, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_13:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $13, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 13
@@ -668,7 +668,7 @@ define i32 @test_mul_by_13(i32 %x) {
define i32 @test_mul_by_14(i32 %x) {
; X86-LABEL: test_mul_by_14:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
@@ -676,7 +676,7 @@ define i32 @test_mul_by_14(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_14:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
@@ -684,7 +684,7 @@ define i32 @test_mul_by_14(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_14:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
@@ -692,27 +692,27 @@ define i32 @test_mul_by_14(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_14:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $14, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_14:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $14, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_14:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $14, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_14:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $14, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_14:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $14, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 14
@@ -721,50 +721,50 @@ define i32 @test_mul_by_14(i32 %x) {
define i32 @test_mul_by_15(i32 %x) {
; X86-LABEL: test_mul_by_15:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_15:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_15:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_15:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $15, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_15:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $15, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_15:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $15, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_15:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_15:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $15, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 15
@@ -773,49 +773,49 @@ define i32 @test_mul_by_15(i32 %x) {
define i32 @test_mul_by_16(i32 %x) {
; X86-LABEL: test_mul_by_16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $4, %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_16:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: shll $4, %edi # sched: [1:0.50]
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_16:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: shll $4, %edi # sched: [1:0.50]
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.17]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_16:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: shll $4, %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_16:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: shll $4, %edi # sched: [1:0.50]
; HSW-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.25]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_16:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: shll $4, %edi # sched: [1:0.50]
; JAG-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.17]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_16:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: shll $4, %edi # sched: [1:1.00]
; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_16:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: shll $4, %edi # sched: [1:1.00]
; SLM-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.50]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -825,7 +825,7 @@ define i32 @test_mul_by_16(i32 %x) {
define i32 @test_mul_by_17(i32 %x) {
; X86-LABEL: test_mul_by_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $4, %eax
@@ -833,7 +833,7 @@ define i32 @test_mul_by_17(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_17:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: shll $4, %eax # sched: [1:0.50]
@@ -841,7 +841,7 @@ define i32 @test_mul_by_17(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_17:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.17]
; X64-JAG-NEXT: shll $4, %eax # sched: [1:0.50]
@@ -849,22 +849,22 @@ define i32 @test_mul_by_17(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_17:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $17, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_17:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $17, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_17:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $17, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_17:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: shll $4, %eax # sched: [1:1.00]
@@ -872,7 +872,7 @@ define i32 @test_mul_by_17(i32 %x) {
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_17:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $17, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 17
@@ -881,50 +881,50 @@ define i32 @test_mul_by_17(i32 %x) {
define i32 @test_mul_by_18(i32 %x) {
; X86-LABEL: test_mul_by_18:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_18:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_18:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_18:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $18, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_18:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $18, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_18:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $18, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_18:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_18:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $18, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 18
@@ -933,7 +933,7 @@ define i32 @test_mul_by_18(i32 %x) {
define i32 @test_mul_by_19(i32 %x) {
; X86-LABEL: test_mul_by_19:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: shll $2, %eax
@@ -941,7 +941,7 @@ define i32 @test_mul_by_19(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_19:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: shll $2, %eax # sched: [1:0.50]
@@ -949,7 +949,7 @@ define i32 @test_mul_by_19(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_19:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $2, %eax # sched: [1:0.50]
@@ -957,27 +957,27 @@ define i32 @test_mul_by_19(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_19:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $19, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_19:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $19, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_19:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $19, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_19:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $19, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_19:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $19, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 19
@@ -986,50 +986,50 @@ define i32 @test_mul_by_19(i32 %x) {
define i32 @test_mul_by_20(i32 %x) {
; X86-LABEL: test_mul_by_20:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_20:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_20:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_20:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $20, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_20:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $20, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_20:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $20, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_20:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_20:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $20, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 20
@@ -1038,48 +1038,48 @@ define i32 @test_mul_by_20(i32 %x) {
define i32 @test_mul_by_21(i32 %x) {
; X86-LABEL: test_mul_by_21:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_21:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_21:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_21:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $21, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_21:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $21, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_21:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $21, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_21:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $21, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_21:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $21, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 21
@@ -1088,7 +1088,7 @@ define i32 @test_mul_by_21(i32 %x) {
define i32 @test_mul_by_22(i32 %x) {
; X86-LABEL: test_mul_by_22:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
@@ -1096,7 +1096,7 @@ define i32 @test_mul_by_22(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_22:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
@@ -1104,7 +1104,7 @@ define i32 @test_mul_by_22(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_22:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
@@ -1112,27 +1112,27 @@ define i32 @test_mul_by_22(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_22:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $22, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_22:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $22, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_22:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $22, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_22:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $22, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_22:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $22, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 22
@@ -1141,7 +1141,7 @@ define i32 @test_mul_by_22(i32 %x) {
define i32 @test_mul_by_23(i32 %x) {
; X86-LABEL: test_mul_by_23:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: shll $3, %eax
@@ -1149,7 +1149,7 @@ define i32 @test_mul_by_23(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_23:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: shll $3, %eax # sched: [1:0.50]
@@ -1157,7 +1157,7 @@ define i32 @test_mul_by_23(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_23:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $3, %eax # sched: [1:0.50]
@@ -1165,27 +1165,27 @@ define i32 @test_mul_by_23(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_23:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $23, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_23:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $23, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_23:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $23, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_23:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $23, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_23:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $23, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 23
@@ -1194,50 +1194,50 @@ define i32 @test_mul_by_23(i32 %x) {
define i32 @test_mul_by_24(i32 %x) {
; X86-LABEL: test_mul_by_24:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_24:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: shll $3, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_24:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: shll $3, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_24:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $24, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_24:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $24, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_24:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $24, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_24:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: shll $3, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_24:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $24, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 24
@@ -1246,50 +1246,50 @@ define i32 @test_mul_by_24(i32 %x) {
define i32 @test_mul_by_25(i32 %x) {
; X86-LABEL: test_mul_by_25:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_25:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_25:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_25:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $25, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_25:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $25, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_25:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $25, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_25:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_25:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $25, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 25
@@ -1298,7 +1298,7 @@ define i32 @test_mul_by_25(i32 %x) {
define i32 @test_mul_by_26(i32 %x) {
; X86-LABEL: test_mul_by_26:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -1306,7 +1306,7 @@ define i32 @test_mul_by_26(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_26:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
@@ -1314,7 +1314,7 @@ define i32 @test_mul_by_26(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_26:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
@@ -1322,27 +1322,27 @@ define i32 @test_mul_by_26(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_26:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $26, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_26:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $26, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_26:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $26, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_26:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $26, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_26:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $26, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 26
@@ -1351,50 +1351,50 @@ define i32 @test_mul_by_26(i32 %x) {
define i32 @test_mul_by_27(i32 %x) {
; X86-LABEL: test_mul_by_27:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_27:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_27:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_27:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $27, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_27:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $27, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_27:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $27, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_27:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_27:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $27, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 27
@@ -1403,7 +1403,7 @@ define i32 @test_mul_by_27(i32 %x) {
define i32 @test_mul_by_28(i32 %x) {
; X86-LABEL: test_mul_by_28:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -1411,7 +1411,7 @@ define i32 @test_mul_by_28(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_28:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
@@ -1419,7 +1419,7 @@ define i32 @test_mul_by_28(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_28:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
@@ -1427,27 +1427,27 @@ define i32 @test_mul_by_28(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_28:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $28, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_28:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $28, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_28:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $28, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_28:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $28, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_28:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $28, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 28
@@ -1456,7 +1456,7 @@ define i32 @test_mul_by_28(i32 %x) {
define i32 @test_mul_by_29(i32 %x) {
; X86-LABEL: test_mul_by_29:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
@@ -1465,7 +1465,7 @@ define i32 @test_mul_by_29(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_29:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
@@ -1474,7 +1474,7 @@ define i32 @test_mul_by_29(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_29:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
@@ -1483,27 +1483,27 @@ define i32 @test_mul_by_29(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_29:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $29, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_29:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $29, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_29:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $29, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_29:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $29, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_29:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $29, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 29
@@ -1512,7 +1512,7 @@ define i32 @test_mul_by_29(i32 %x) {
define i32 @test_mul_by_30(i32 %x) {
; X86-LABEL: test_mul_by_30:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
@@ -1521,7 +1521,7 @@ define i32 @test_mul_by_30(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_30:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: shll $5, %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
@@ -1529,7 +1529,7 @@ define i32 @test_mul_by_30(i32 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_30:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.17]
; X64-JAG-NEXT: shll $5, %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
@@ -1537,27 +1537,27 @@ define i32 @test_mul_by_30(i32 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_30:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $30, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_30:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $30, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_30:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $30, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_30:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imull $30, %edi, %eax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_30:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $30, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 30
@@ -1566,7 +1566,7 @@ define i32 @test_mul_by_30(i32 %x) {
define i32 @test_mul_by_31(i32 %x) {
; X86-LABEL: test_mul_by_31:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
@@ -1574,43 +1574,43 @@ define i32 @test_mul_by_31(i32 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_31:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: shll $5, %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_31:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.17]
; X64-JAG-NEXT: shll $5, %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_31:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: imull $31, {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_31:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imull $31, %edi, %eax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_31:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imull $31, %edi, %eax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_31:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: shll $5, %eax # sched: [1:1.00]
; X64-SLM-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_31:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imull $31, %edi, %eax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 31
@@ -1619,49 +1619,49 @@ define i32 @test_mul_by_31(i32 %x) {
define i32 @test_mul_by_32(i32 %x) {
; X86-LABEL: test_mul_by_32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $5, %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_32:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: shll $5, %edi # sched: [1:0.50]
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_32:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: shll $5, %edi # sched: [1:0.50]
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.17]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_32:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: shll $5, %eax
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_32:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: shll $5, %edi # sched: [1:0.50]
; HSW-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.25]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_32:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: shll $5, %edi # sched: [1:0.50]
; JAG-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.17]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_32:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: shll $5, %edi # sched: [1:1.00]
; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_32:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: shll $5, %edi # sched: [1:1.00]
; SLM-NOOPT-NEXT: movl %edi, %eax # sched: [1:0.50]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -1672,7 +1672,7 @@ define i32 @test_mul_by_32(i32 %x) {
; (x*9+42)*(x*5+2)
define i32 @test_mul_spec(i32 %x) nounwind {
; X86-LABEL: test_mul_spec:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal 42(%eax,%eax,8), %ecx
; X86-NEXT: leal 2(%eax,%eax,4), %eax
@@ -1680,7 +1680,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_spec:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50]
; X64-HSW-NEXT: addl $42, %ecx # sched: [1:0.25]
@@ -1690,7 +1690,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_spec:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50]
; X64-JAG-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50]
@@ -1698,7 +1698,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_spec:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: leal 42(%eax,%eax,8), %ecx
; X86-NOOPT-NEXT: leal 2(%eax,%eax,4), %eax
@@ -1706,7 +1706,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_spec:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50]
; HSW-NOOPT-NEXT: addl $42, %ecx # sched: [1:0.25]
@@ -1716,7 +1716,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_spec:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50]
; JAG-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50]
@@ -1724,7 +1724,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_spec:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00]
; X64-SLM-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00]
@@ -1732,7 +1732,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_spec:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00]
; SLM-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00]
diff --git a/test/CodeGen/X86/mul-constant-i64.ll b/test/CodeGen/X86/mul-constant-i64.ll
index 98568a6fc8e..e37000790e7 100644
--- a/test/CodeGen/X86/mul-constant-i64.ll
+++ b/test/CodeGen/X86/mul-constant-i64.ll
@@ -10,44 +10,44 @@
define i64 @test_mul_by_1(i64 %x) nounwind {
; X86-LABEL: test_mul_by_1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_1:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: movq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_1:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: movq %rdi, %rax # sched: [1:0.17]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_1:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_1:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.25]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_1:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.17]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_1:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_1:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.50]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 1
@@ -56,7 +56,7 @@ define i64 @test_mul_by_1(i64 %x) nounwind {
define i64 @test_mul_by_2(i64 %x) {
; X86-LABEL: test_mul_by_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shldl $1, %eax, %edx
@@ -64,17 +64,17 @@ define i64 @test_mul_by_2(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_2:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_2:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_2:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOOPT-NEXT: shldl $1, %eax, %edx
@@ -82,22 +82,22 @@ define i64 @test_mul_by_2(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_2:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: leaq (%rdi,%rdi), %rax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_2:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: leaq (%rdi,%rdi), %rax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_2:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (%rdi,%rdi), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_2:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: leaq (%rdi,%rdi), %rax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 2
@@ -106,7 +106,7 @@ define i64 @test_mul_by_2(i64 %x) {
define i64 @test_mul_by_3(i64 %x) {
; X86-LABEL: test_mul_by_3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $3, %eax
; X86-NEXT: mull {{[0-9]+}}(%esp)
; X86-NEXT: imull $3, {{[0-9]+}}(%esp), %ecx
@@ -114,17 +114,17 @@ define i64 @test_mul_by_3(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_3:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_3:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_3:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $3, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $3, {{[0-9]+}}(%esp), %ecx
@@ -132,22 +132,22 @@ define i64 @test_mul_by_3(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_3:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_3:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_3:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_3:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 3
@@ -156,7 +156,7 @@ define i64 @test_mul_by_3(i64 %x) {
define i64 @test_mul_by_4(i64 %x) {
; X86-LABEL: test_mul_by_4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shldl $2, %eax, %edx
@@ -164,17 +164,17 @@ define i64 @test_mul_by_4(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_4:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_4:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_4:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOOPT-NEXT: shldl $2, %eax, %edx
@@ -182,22 +182,22 @@ define i64 @test_mul_by_4(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_4:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: leaq (,%rdi,4), %rax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_4:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: leaq (,%rdi,4), %rax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_4:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (,%rdi,4), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_4:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: leaq (,%rdi,4), %rax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 4
@@ -206,7 +206,7 @@ define i64 @test_mul_by_4(i64 %x) {
define i64 @test_mul_by_5(i64 %x) {
; X86-LABEL: test_mul_by_5:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $5, %eax
; X86-NEXT: mull {{[0-9]+}}(%esp)
; X86-NEXT: imull $5, {{[0-9]+}}(%esp), %ecx
@@ -214,17 +214,17 @@ define i64 @test_mul_by_5(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_5:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_5:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_5:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $5, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $5, {{[0-9]+}}(%esp), %ecx
@@ -232,22 +232,22 @@ define i64 @test_mul_by_5(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_5:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_5:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_5:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_5:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 5
@@ -256,7 +256,7 @@ define i64 @test_mul_by_5(i64 %x) {
define i64 @test_mul_by_6(i64 %x) {
; X86-LABEL: test_mul_by_6:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: movl $6, %eax
@@ -265,19 +265,19 @@ define i64 @test_mul_by_6(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_6:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: addq %rdi, %rdi # sched: [1:0.25]
; X64-HSW-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_6:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: addq %rdi, %rdi # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_6:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $6, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $6, {{[0-9]+}}(%esp), %ecx
@@ -285,23 +285,23 @@ define i64 @test_mul_by_6(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_6:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $6, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_6:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $6, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_6:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: addq %rdi, %rdi # sched: [1:0.50]
; X64-SLM-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_6:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $6, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 6
@@ -310,7 +310,7 @@ define i64 @test_mul_by_6(i64 %x) {
define i64 @test_mul_by_7(i64 %x) {
; X86-LABEL: test_mul_by_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (,%eax,8), %ecx
; X86-NEXT: subl %eax, %ecx
@@ -320,19 +320,19 @@ define i64 @test_mul_by_7(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_7:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: subq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_7:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: subq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_7:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $7, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $7, {{[0-9]+}}(%esp), %ecx
@@ -340,23 +340,23 @@ define i64 @test_mul_by_7(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_7:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $7, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_7:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $7, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_7:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (,%rdi,8), %rax # sched: [1:1.00]
; X64-SLM-NEXT: subq %rdi, %rax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_7:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $7, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 7
@@ -365,7 +365,7 @@ define i64 @test_mul_by_7(i64 %x) {
define i64 @test_mul_by_8(i64 %x) {
; X86-LABEL: test_mul_by_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shldl $3, %eax, %edx
@@ -373,17 +373,17 @@ define i64 @test_mul_by_8(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_8:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_8:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_8:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOOPT-NEXT: shldl $3, %eax, %edx
@@ -391,22 +391,22 @@ define i64 @test_mul_by_8(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_8:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: leaq (,%rdi,8), %rax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_8:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: leaq (,%rdi,8), %rax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_8:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (,%rdi,8), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_8:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: leaq (,%rdi,8), %rax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 8
@@ -415,7 +415,7 @@ define i64 @test_mul_by_8(i64 %x) {
define i64 @test_mul_by_9(i64 %x) {
; X86-LABEL: test_mul_by_9:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $9, %eax
; X86-NEXT: mull {{[0-9]+}}(%esp)
; X86-NEXT: imull $9, {{[0-9]+}}(%esp), %ecx
@@ -423,17 +423,17 @@ define i64 @test_mul_by_9(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_9:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_9:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_9:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $9, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $9, {{[0-9]+}}(%esp), %ecx
@@ -441,22 +441,22 @@ define i64 @test_mul_by_9(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_9:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_9:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_9:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_9:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 9
@@ -465,7 +465,7 @@ define i64 @test_mul_by_9(i64 %x) {
define i64 @test_mul_by_10(i64 %x) {
; X86-LABEL: test_mul_by_10:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: movl $10, %eax
@@ -474,19 +474,19 @@ define i64 @test_mul_by_10(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_10:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: addq %rdi, %rdi # sched: [1:0.25]
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_10:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: addq %rdi, %rdi # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_10:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $10, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $10, {{[0-9]+}}(%esp), %ecx
@@ -494,23 +494,23 @@ define i64 @test_mul_by_10(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_10:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $10, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_10:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $10, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_10:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: addq %rdi, %rdi # sched: [1:0.50]
; X64-SLM-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_10:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $10, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 10
@@ -519,7 +519,7 @@ define i64 @test_mul_by_10(i64 %x) {
define i64 @test_mul_by_11(i64 %x) {
; X86-LABEL: test_mul_by_11:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,2), %ecx
@@ -529,19 +529,19 @@ define i64 @test_mul_by_11(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_11:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rdi,%rax,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_11:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rax,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_11:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $11, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $11, {{[0-9]+}}(%esp), %ecx
@@ -549,22 +549,22 @@ define i64 @test_mul_by_11(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_11:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $11, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_11:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $11, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_11:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $11, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_11:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $11, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 11
@@ -573,7 +573,7 @@ define i64 @test_mul_by_11(i64 %x) {
define i64 @test_mul_by_12(i64 %x) {
; X86-LABEL: test_mul_by_12:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: movl $12, %eax
@@ -582,19 +582,19 @@ define i64 @test_mul_by_12(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_12:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: shlq $2, %rdi # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_12:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: shlq $2, %rdi # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_12:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $12, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $12, {{[0-9]+}}(%esp), %ecx
@@ -602,23 +602,23 @@ define i64 @test_mul_by_12(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_12:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $12, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_12:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $12, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_12:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: shlq $2, %rdi # sched: [1:1.00]
; X64-SLM-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_12:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $12, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 12
@@ -627,7 +627,7 @@ define i64 @test_mul_by_12(i64 %x) {
define i64 @test_mul_by_13(i64 %x) {
; X86-LABEL: test_mul_by_13:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %ecx
@@ -637,19 +637,19 @@ define i64 @test_mul_by_13(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_13:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rdi,%rax,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_13:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rax,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_13:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $13, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $13, {{[0-9]+}}(%esp), %ecx
@@ -657,22 +657,22 @@ define i64 @test_mul_by_13(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_13:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $13, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_13:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $13, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_13:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $13, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_13:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $13, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 13
@@ -681,7 +681,7 @@ define i64 @test_mul_by_13(i64 %x) {
define i64 @test_mul_by_14(i64 %x) {
; X86-LABEL: test_mul_by_14:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %ecx
@@ -692,21 +692,21 @@ define i64 @test_mul_by_14(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_14:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rdi,%rax,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: addq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_14:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rax,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: addq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_14:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $14, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $14, {{[0-9]+}}(%esp), %ecx
@@ -714,22 +714,22 @@ define i64 @test_mul_by_14(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_14:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $14, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_14:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $14, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_14:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $14, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_14:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $14, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 14
@@ -738,7 +738,7 @@ define i64 @test_mul_by_14(i64 %x) {
define i64 @test_mul_by_15(i64 %x) {
; X86-LABEL: test_mul_by_15:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $15, %eax
; X86-NEXT: mull {{[0-9]+}}(%esp)
@@ -748,19 +748,19 @@ define i64 @test_mul_by_15(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_15:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_15:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_15:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $15, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $15, {{[0-9]+}}(%esp), %ecx
@@ -768,23 +768,23 @@ define i64 @test_mul_by_15(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_15:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $15, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_15:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $15, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_15:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:1.00]
; X64-SLM-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_15:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $15, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 15
@@ -793,7 +793,7 @@ define i64 @test_mul_by_15(i64 %x) {
define i64 @test_mul_by_16(i64 %x) {
; X86-LABEL: test_mul_by_16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shldl $4, %eax, %edx
@@ -801,19 +801,19 @@ define i64 @test_mul_by_16(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_16:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: shlq $4, %rdi # sched: [1:0.50]
; X64-HSW-NEXT: movq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_16:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: shlq $4, %rdi # sched: [1:0.50]
; X64-JAG-NEXT: movq %rdi, %rax # sched: [1:0.17]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_16:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOOPT-NEXT: shldl $4, %eax, %edx
@@ -821,25 +821,25 @@ define i64 @test_mul_by_16(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_16:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: shlq $4, %rdi # sched: [1:0.50]
; HSW-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.25]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_16:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: shlq $4, %rdi # sched: [1:0.50]
; JAG-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.17]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_16:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: shlq $4, %rdi # sched: [1:1.00]
; X64-SLM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_16:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: shlq $4, %rdi # sched: [1:1.00]
; SLM-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.50]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -849,7 +849,7 @@ define i64 @test_mul_by_16(i64 %x) {
define i64 @test_mul_by_17(i64 %x) {
; X86-LABEL: test_mul_by_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll $4, %ecx
@@ -860,21 +860,21 @@ define i64 @test_mul_by_17(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_17:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: movq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: shlq $4, %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rdi), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_17:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: movq %rdi, %rax # sched: [1:0.17]
; X64-JAG-NEXT: shlq $4, %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rdi), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_17:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $17, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $17, {{[0-9]+}}(%esp), %ecx
@@ -882,24 +882,24 @@ define i64 @test_mul_by_17(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_17:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $17, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_17:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $17, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_17:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; X64-SLM-NEXT: shlq $4, %rax # sched: [1:1.00]
; X64-SLM-NEXT: addq %rdi, %rax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_17:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $17, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 17
@@ -908,7 +908,7 @@ define i64 @test_mul_by_17(i64 %x) {
define i64 @test_mul_by_18(i64 %x) {
; X86-LABEL: test_mul_by_18:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: movl $18, %eax
@@ -917,19 +917,19 @@ define i64 @test_mul_by_18(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_18:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: addq %rdi, %rdi # sched: [1:0.25]
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_18:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: addq %rdi, %rdi # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_18:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $18, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $18, {{[0-9]+}}(%esp), %ecx
@@ -937,23 +937,23 @@ define i64 @test_mul_by_18(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_18:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $18, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_18:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $18, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_18:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: addq %rdi, %rdi # sched: [1:0.50]
; X64-SLM-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_18:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $18, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 18
@@ -962,7 +962,7 @@ define i64 @test_mul_by_18(i64 %x) {
define i64 @test_mul_by_19(i64 %x) {
; X86-LABEL: test_mul_by_19:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: shll $2, %ecx
@@ -973,21 +973,21 @@ define i64 @test_mul_by_19(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_19:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: shlq $2, %rax # sched: [1:0.50]
; X64-HSW-NEXT: subq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_19:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: shlq $2, %rax # sched: [1:0.50]
; X64-JAG-NEXT: subq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_19:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $19, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $19, {{[0-9]+}}(%esp), %ecx
@@ -995,22 +995,22 @@ define i64 @test_mul_by_19(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_19:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $19, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_19:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $19, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_19:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $19, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_19:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $19, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 19
@@ -1019,7 +1019,7 @@ define i64 @test_mul_by_19(i64 %x) {
define i64 @test_mul_by_20(i64 %x) {
; X86-LABEL: test_mul_by_20:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: movl $20, %eax
@@ -1028,19 +1028,19 @@ define i64 @test_mul_by_20(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_20:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: shlq $2, %rdi # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_20:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: shlq $2, %rdi # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_20:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $20, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $20, {{[0-9]+}}(%esp), %ecx
@@ -1048,23 +1048,23 @@ define i64 @test_mul_by_20(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_20:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $20, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_20:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $20, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_20:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: shlq $2, %rdi # sched: [1:1.00]
; X64-SLM-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_20:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $20, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 20
@@ -1073,7 +1073,7 @@ define i64 @test_mul_by_20(i64 %x) {
define i64 @test_mul_by_21(i64 %x) {
; X86-LABEL: test_mul_by_21:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %ecx
@@ -1083,19 +1083,19 @@ define i64 @test_mul_by_21(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_21:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rdi,%rax,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_21:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rax,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_21:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $21, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $21, {{[0-9]+}}(%esp), %ecx
@@ -1103,22 +1103,22 @@ define i64 @test_mul_by_21(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_21:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $21, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_21:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $21, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_21:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $21, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_21:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $21, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 21
@@ -1127,7 +1127,7 @@ define i64 @test_mul_by_21(i64 %x) {
define i64 @test_mul_by_22(i64 %x) {
; X86-LABEL: test_mul_by_22:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %ecx
@@ -1138,21 +1138,21 @@ define i64 @test_mul_by_22(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_22:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rdi,%rax,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: addq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_22:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rax,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: addq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_22:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $22, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $22, {{[0-9]+}}(%esp), %ecx
@@ -1160,22 +1160,22 @@ define i64 @test_mul_by_22(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_22:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $22, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_22:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $22, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_22:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $22, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_22:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $22, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 22
@@ -1184,7 +1184,7 @@ define i64 @test_mul_by_22(i64 %x) {
define i64 @test_mul_by_23(i64 %x) {
; X86-LABEL: test_mul_by_23:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: shll $3, %ecx
@@ -1195,21 +1195,21 @@ define i64 @test_mul_by_23(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_23:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: shlq $3, %rax # sched: [1:0.50]
; X64-HSW-NEXT: subq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_23:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: shlq $3, %rax # sched: [1:0.50]
; X64-JAG-NEXT: subq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_23:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $23, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $23, {{[0-9]+}}(%esp), %ecx
@@ -1217,22 +1217,22 @@ define i64 @test_mul_by_23(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_23:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $23, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_23:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $23, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_23:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $23, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_23:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $23, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 23
@@ -1241,7 +1241,7 @@ define i64 @test_mul_by_23(i64 %x) {
define i64 @test_mul_by_24(i64 %x) {
; X86-LABEL: test_mul_by_24:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: movl $24, %eax
@@ -1250,19 +1250,19 @@ define i64 @test_mul_by_24(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_24:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: shlq $3, %rdi # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_24:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: shlq $3, %rdi # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_24:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $24, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $24, {{[0-9]+}}(%esp), %ecx
@@ -1270,23 +1270,23 @@ define i64 @test_mul_by_24(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_24:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $24, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_24:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $24, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_24:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: shlq $3, %rdi # sched: [1:1.00]
; X64-SLM-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_24:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $24, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 24
@@ -1295,7 +1295,7 @@ define i64 @test_mul_by_24(i64 %x) {
define i64 @test_mul_by_25(i64 %x) {
; X86-LABEL: test_mul_by_25:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $25, %eax
; X86-NEXT: mull {{[0-9]+}}(%esp)
@@ -1305,19 +1305,19 @@ define i64 @test_mul_by_25(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_25:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rax,4), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_25:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rax,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_25:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $25, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $25, {{[0-9]+}}(%esp), %ecx
@@ -1325,23 +1325,23 @@ define i64 @test_mul_by_25(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_25:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $25, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_25:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $25, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_25:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:1.00]
; X64-SLM-NEXT: leaq (%rax,%rax,4), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_25:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $25, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 25
@@ -1350,7 +1350,7 @@ define i64 @test_mul_by_25(i64 %x) {
define i64 @test_mul_by_26(i64 %x) {
; X86-LABEL: test_mul_by_26:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
@@ -1361,21 +1361,21 @@ define i64 @test_mul_by_26(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_26:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: subq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_26:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: subq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_26:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $26, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $26, {{[0-9]+}}(%esp), %ecx
@@ -1383,22 +1383,22 @@ define i64 @test_mul_by_26(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_26:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $26, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_26:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $26, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_26:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $26, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_26:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $26, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 26
@@ -1407,7 +1407,7 @@ define i64 @test_mul_by_26(i64 %x) {
define i64 @test_mul_by_27(i64 %x) {
; X86-LABEL: test_mul_by_27:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $27, %eax
; X86-NEXT: mull {{[0-9]+}}(%esp)
@@ -1417,19 +1417,19 @@ define i64 @test_mul_by_27(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_27:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_27:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_27:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $27, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $27, {{[0-9]+}}(%esp), %ecx
@@ -1437,23 +1437,23 @@ define i64 @test_mul_by_27(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_27:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $27, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_27:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $27, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_27:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:1.00]
; X64-SLM-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_27:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $27, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 27
@@ -1462,7 +1462,7 @@ define i64 @test_mul_by_27(i64 %x) {
define i64 @test_mul_by_28(i64 %x) {
; X86-LABEL: test_mul_by_28:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
@@ -1473,21 +1473,21 @@ define i64 @test_mul_by_28(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_28:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: addq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_28:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: addq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_28:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $28, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $28, {{[0-9]+}}(%esp), %ecx
@@ -1495,22 +1495,22 @@ define i64 @test_mul_by_28(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_28:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $28, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_28:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $28, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_28:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $28, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_28:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $28, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 28
@@ -1519,7 +1519,7 @@ define i64 @test_mul_by_28(i64 %x) {
define i64 @test_mul_by_29(i64 %x) {
; X86-LABEL: test_mul_by_29:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
@@ -1531,7 +1531,7 @@ define i64 @test_mul_by_29(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_29:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: addq %rdi, %rax # sched: [1:0.25]
@@ -1539,7 +1539,7 @@ define i64 @test_mul_by_29(i64 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_29:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: addq %rdi, %rax # sched: [1:0.50]
@@ -1547,7 +1547,7 @@ define i64 @test_mul_by_29(i64 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_29:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $29, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $29, {{[0-9]+}}(%esp), %ecx
@@ -1555,22 +1555,22 @@ define i64 @test_mul_by_29(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_29:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $29, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_29:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $29, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_29:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $29, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_29:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $29, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 29
@@ -1579,7 +1579,7 @@ define i64 @test_mul_by_29(i64 %x) {
define i64 @test_mul_by_30(i64 %x) {
; X86-LABEL: test_mul_by_30:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll $5, %ecx
@@ -1591,7 +1591,7 @@ define i64 @test_mul_by_30(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_30:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: movq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: shlq $5, %rax # sched: [1:0.50]
; X64-HSW-NEXT: subq %rdi, %rax # sched: [1:0.25]
@@ -1599,7 +1599,7 @@ define i64 @test_mul_by_30(i64 %x) {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_30:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: movq %rdi, %rax # sched: [1:0.17]
; X64-JAG-NEXT: shlq $5, %rax # sched: [1:0.50]
; X64-JAG-NEXT: subq %rdi, %rax # sched: [1:0.50]
@@ -1607,7 +1607,7 @@ define i64 @test_mul_by_30(i64 %x) {
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_30:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $30, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $30, {{[0-9]+}}(%esp), %ecx
@@ -1615,22 +1615,22 @@ define i64 @test_mul_by_30(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_30:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $30, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_30:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $30, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_30:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: imulq $30, %rdi, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_30:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $30, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 30
@@ -1639,7 +1639,7 @@ define i64 @test_mul_by_30(i64 %x) {
define i64 @test_mul_by_31(i64 %x) {
; X86-LABEL: test_mul_by_31:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll $5, %ecx
@@ -1650,21 +1650,21 @@ define i64 @test_mul_by_31(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_31:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: movq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: shlq $5, %rax # sched: [1:0.50]
; X64-HSW-NEXT: subq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_31:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: movq %rdi, %rax # sched: [1:0.17]
; X64-JAG-NEXT: shlq $5, %rax # sched: [1:0.50]
; X64-JAG-NEXT: subq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_31:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl $31, %eax
; X86-NOOPT-NEXT: mull {{[0-9]+}}(%esp)
; X86-NOOPT-NEXT: imull $31, {{[0-9]+}}(%esp), %ecx
@@ -1672,24 +1672,24 @@ define i64 @test_mul_by_31(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_31:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: imulq $31, %rdi, %rax # sched: [3:1.00]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_31:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: imulq $31, %rdi, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_31:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; X64-SLM-NEXT: shlq $5, %rax # sched: [1:1.00]
; X64-SLM-NEXT: subq %rdi, %rax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_31:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: imulq $31, %rdi, %rax # sched: [3:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i64 %x, 31
@@ -1698,7 +1698,7 @@ define i64 @test_mul_by_31(i64 %x) {
define i64 @test_mul_by_32(i64 %x) {
; X86-LABEL: test_mul_by_32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shldl $5, %eax, %edx
@@ -1706,19 +1706,19 @@ define i64 @test_mul_by_32(i64 %x) {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_32:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: shlq $5, %rdi # sched: [1:0.50]
; X64-HSW-NEXT: movq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_32:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: shlq $5, %rdi # sched: [1:0.50]
; X64-JAG-NEXT: movq %rdi, %rax # sched: [1:0.17]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_32:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOOPT-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOOPT-NEXT: shldl $5, %eax, %edx
@@ -1726,25 +1726,25 @@ define i64 @test_mul_by_32(i64 %x) {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_by_32:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: shlq $5, %rdi # sched: [1:0.50]
; HSW-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.25]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_32:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: shlq $5, %rdi # sched: [1:0.50]
; JAG-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.17]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_32:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: shlq $5, %rdi # sched: [1:1.00]
; X64-SLM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_32:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: shlq $5, %rdi # sched: [1:1.00]
; SLM-NOOPT-NEXT: movq %rdi, %rax # sched: [1:0.50]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
@@ -1755,7 +1755,7 @@ define i64 @test_mul_by_32(i64 %x) {
; (x*9+42)*(x*5+2)
define i64 @test_mul_spec(i64 %x) nounwind {
; X86-LABEL: test_mul_spec:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
@@ -1787,7 +1787,7 @@ define i64 @test_mul_spec(i64 %x) nounwind {
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_spec:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rcx # sched: [1:0.50]
; X64-HSW-NEXT: addq $42, %rcx # sched: [1:0.25]
; X64-HSW-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
@@ -1796,14 +1796,14 @@ define i64 @test_mul_spec(i64 %x) nounwind {
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_spec:
-; X64-JAG: # BB#0:
+; X64-JAG: # %bb.0:
; X64-JAG-NEXT: leaq 42(%rdi,%rdi,8), %rcx # sched: [1:0.50]
; X64-JAG-NEXT: leaq 2(%rdi,%rdi,4), %rax # sched: [1:0.50]
; X64-JAG-NEXT: imulq %rcx, %rax # sched: [3:1.00]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_spec:
-; X86-NOOPT: # BB#0:
+; X86-NOOPT: # %bb.0:
; X86-NOOPT-NEXT: pushl %ebx
; X86-NOOPT-NEXT: pushl %edi
; X86-NOOPT-NEXT: pushl %esi
@@ -1835,7 +1835,7 @@ define i64 @test_mul_spec(i64 %x) nounwind {
; X86-NOOPT-NEXT: retl
;
; HSW-NOOPT-LABEL: test_mul_spec:
-; HSW-NOOPT: # BB#0:
+; HSW-NOOPT: # %bb.0:
; HSW-NOOPT-NEXT: leaq (%rdi,%rdi,8), %rcx # sched: [1:0.50]
; HSW-NOOPT-NEXT: addq $42, %rcx # sched: [1:0.25]
; HSW-NOOPT-NEXT: leaq (%rdi,%rdi,4), %rax # sched: [1:0.50]
@@ -1844,21 +1844,21 @@ define i64 @test_mul_spec(i64 %x) nounwind {
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_spec:
-; JAG-NOOPT: # BB#0:
+; JAG-NOOPT: # %bb.0:
; JAG-NOOPT-NEXT: leaq 42(%rdi,%rdi,8), %rcx # sched: [1:0.50]
; JAG-NOOPT-NEXT: leaq 2(%rdi,%rdi,4), %rax # sched: [1:0.50]
; JAG-NOOPT-NEXT: imulq %rcx, %rax # sched: [3:1.00]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_spec:
-; X64-SLM: # BB#0:
+; X64-SLM: # %bb.0:
; X64-SLM-NEXT: leaq 42(%rdi,%rdi,8), %rcx # sched: [1:1.00]
; X64-SLM-NEXT: leaq 2(%rdi,%rdi,4), %rax # sched: [1:1.00]
; X64-SLM-NEXT: imulq %rcx, %rax # sched: [3:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_spec:
-; SLM-NOOPT: # BB#0:
+; SLM-NOOPT: # %bb.0:
; SLM-NOOPT-NEXT: leaq 42(%rdi,%rdi,8), %rcx # sched: [1:1.00]
; SLM-NOOPT-NEXT: leaq 2(%rdi,%rdi,4), %rax # sched: [1:1.00]
; SLM-NOOPT-NEXT: imulq %rcx, %rax # sched: [3:1.00]
diff --git a/test/CodeGen/X86/mul-constant-result.ll b/test/CodeGen/X86/mul-constant-result.ll
index 6e74c1d4e9e..0e7b877d431 100644
--- a/test/CodeGen/X86/mul-constant-result.ll
+++ b/test/CodeGen/X86/mul-constant-result.ll
@@ -8,7 +8,7 @@
; Function Attrs: norecurse nounwind readnone uwtable
define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-LABEL: mult:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -17,19 +17,19 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: movl $1, %eax
; X86-NEXT: movl $1, %esi
; X86-NEXT: jg .LBB0_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: movl %edx, %esi
; X86-NEXT: .LBB0_2:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testl %edx, %edx
; X86-NEXT: je .LBB0_4
-; X86-NEXT: # BB#3:
+; X86-NEXT: # %bb.3:
; X86-NEXT: movl %esi, %eax
; X86-NEXT: .LBB0_4:
; X86-NEXT: decl %ecx
; X86-NEXT: cmpl $31, %ecx
; X86-NEXT: ja .LBB0_39
-; X86-NEXT: # BB#5:
+; X86-NEXT: # %bb.5:
; X86-NEXT: jmpl *.LJTI0_0(,%ecx,4)
; X86-NEXT: .LBB0_6:
; X86-NEXT: addl %eax, %eax
@@ -187,7 +187,7 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: retl
;
; X64-HSW-LABEL: mult:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: cmpl $1, %esi
; X64-HSW-NEXT: movl $1, %ecx
@@ -198,7 +198,7 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: addl $-1, %edi
; X64-HSW-NEXT: cmpl $31, %edi
; X64-HSW-NEXT: ja .LBB0_36
-; X64-HSW-NEXT: # BB#1:
+; X64-HSW-NEXT: # %bb.1:
; X64-HSW-NEXT: jmpq *.LJTI0_0(,%rdi,8)
; X64-HSW-NEXT: .LBB0_2:
; X64-HSW-NEXT: addl %eax, %eax
@@ -524,7 +524,7 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; Function Attrs: norecurse nounwind readnone uwtable
define i32 @foo() local_unnamed_addr #0 {
; X86-LABEL: foo:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %edi
@@ -862,7 +862,7 @@ define i32 @foo() local_unnamed_addr #0 {
; X86-NEXT: retl
;
; X64-HSW-LABEL: foo:
-; X64-HSW: # BB#0:
+; X64-HSW: # %bb.0:
; X64-HSW-NEXT: pushq %rbp
; X64-HSW-NEXT: .cfi_def_cfa_offset 16
; X64-HSW-NEXT: pushq %r15
diff --git a/test/CodeGen/X86/mul-i1024.ll b/test/CodeGen/X86/mul-i1024.ll
index dd8bdcad830..9980042a4cc 100644
--- a/test/CodeGen/X86/mul-i1024.ll
+++ b/test/CodeGen/X86/mul-i1024.ll
@@ -4,7 +4,7 @@
define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-LABEL: test_1024:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: pushl %ebx
@@ -6726,7 +6726,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_1024:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rbp
; X64-NEXT: pushq %r15
; X64-NEXT: pushq %r14
diff --git a/test/CodeGen/X86/mul-i256.ll b/test/CodeGen/X86/mul-i256.ll
index 0a48ae761ec..c79685aecd0 100644
--- a/test/CodeGen/X86/mul-i256.ll
+++ b/test/CodeGen/X86/mul-i256.ll
@@ -6,7 +6,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X32-LABEL: test:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: pushl %ebx
@@ -356,7 +356,7 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X32-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pushq %r15
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: pushq %r14
diff --git a/test/CodeGen/X86/mul-i512.ll b/test/CodeGen/X86/mul-i512.ll
index 5e165fe1871..d846729096e 100644
--- a/test/CodeGen/X86/mul-i512.ll
+++ b/test/CodeGen/X86/mul-i512.ll
@@ -4,7 +4,7 @@
define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-LABEL: test_512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
@@ -1530,7 +1530,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rbp
; X64-NEXT: pushq %r15
; X64-NEXT: pushq %r14
diff --git a/test/CodeGen/X86/mul128.ll b/test/CodeGen/X86/mul128.ll
index 70a6173a19f..e851c3a3d5b 100644
--- a/test/CodeGen/X86/mul128.ll
+++ b/test/CodeGen/X86/mul128.ll
@@ -4,7 +4,7 @@
define i128 @foo(i128 %t, i128 %u) {
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: imulq %rdi, %rcx
; X64-NEXT: movq %rdi, %rax
@@ -15,7 +15,7 @@ define i128 @foo(i128 %t, i128 %u) {
; X64-NEXT: retq
;
; X86-LABEL: foo:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %ebx
diff --git a/test/CodeGen/X86/mul64.ll b/test/CodeGen/X86/mul64.ll
index f5ca52a93b5..f8a7aaade6c 100644
--- a/test/CodeGen/X86/mul64.ll
+++ b/test/CodeGen/X86/mul64.ll
@@ -4,7 +4,7 @@
define i64 @foo(i64 %t, i64 %u) nounwind {
; X32-LABEL: foo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -18,7 +18,7 @@ define i64 @foo(i64 %t, i64 %u) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: imulq %rsi, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/mulvi32.ll b/test/CodeGen/X86/mulvi32.ll
index cb557fef2da..570299fed5b 100644
--- a/test/CodeGen/X86/mulvi32.ll
+++ b/test/CodeGen/X86/mulvi32.ll
@@ -8,7 +8,7 @@
define <2 x i32> @_mul2xi32a(<2 x i32>, <2 x i32>) {
; SSE-LABEL: _mul2xi32a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
@@ -22,7 +22,7 @@ define <2 x i32> @_mul2xi32a(<2 x i32>, <2 x i32>) {
; SSE-NEXT: retq
;
; AVX-LABEL: _mul2xi32a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $32, %xmm1, %xmm3
@@ -38,7 +38,7 @@ define <2 x i32> @_mul2xi32a(<2 x i32>, <2 x i32>) {
define <2 x i32> @_mul2xi32b(<2 x i32>, <2 x i32>) {
; SSE2-LABEL: _mul2xi32b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
@@ -46,7 +46,7 @@ define <2 x i32> @_mul2xi32b(<2 x i32>, <2 x i32>) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _mul2xi32b:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE42-NEXT: pmuludq %xmm0, %xmm1
@@ -54,7 +54,7 @@ define <2 x i32> @_mul2xi32b(<2 x i32>, <2 x i32>) {
; SSE42-NEXT: retq
;
; AVX-LABEL: _mul2xi32b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
@@ -70,7 +70,7 @@ define <2 x i32> @_mul2xi32b(<2 x i32>, <2 x i32>) {
define <4 x i32> @_mul4xi32a(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -81,12 +81,12 @@ define <4 x i32> @_mul4xi32a(<4 x i32>, <4 x i32>) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _mul4xi32a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmulld %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: _mul4xi32a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%r = mul <4 x i32> %0, %1
@@ -95,7 +95,7 @@ define <4 x i32> @_mul4xi32a(<4 x i32>, <4 x i32>) {
define <4 x i32> @_mul4xi32b(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -106,7 +106,7 @@ define <4 x i32> @_mul4xi32b(<4 x i32>, <4 x i32>) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _mul4xi32b:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE42-NEXT: pmuludq %xmm1, %xmm0
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -116,7 +116,7 @@ define <4 x i32> @_mul4xi32b(<4 x i32>, <4 x i32>) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: _mul4xi32b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -126,7 +126,7 @@ define <4 x i32> @_mul4xi32b(<4 x i32>, <4 x i32>) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: _mul4xi32b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -152,7 +152,7 @@ define <4 x i32> @_mul4xi32b(<4 x i32>, <4 x i32>) {
; %ext1 = zext <4 x i32> %1 to <4 x i64>
define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32toi64a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm1, %rax
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: movq %xmm1, %rcx
@@ -180,7 +180,7 @@ define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _mul4xi32toi64a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movq %xmm1, %rax
; SSE42-NEXT: pextrq $1, %xmm1, %rcx
; SSE42-NEXT: movd %ecx, %xmm1
@@ -206,7 +206,7 @@ define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: _mul4xi32toi64a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vmovd %eax, %xmm2
; AVX1-NEXT: shrq $32, %rax
@@ -233,7 +233,7 @@ define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: _mul4xi32toi64a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm1, %rax
; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: shrq $32, %rax
@@ -290,7 +290,7 @@ define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
; there is no bitcast and the final shuffle is a little different
define <4 x i64> @_mul4xi32toi64b(<4 x i32>, <4 x i32>) {
; SSE-LABEL: _mul4xi32toi64b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE-NEXT: pmuludq %xmm1, %xmm2
@@ -303,7 +303,7 @@ define <4 x i64> @_mul4xi32toi64b(<4 x i32>, <4 x i32>) {
; SSE-NEXT: retq
;
; AVX1-LABEL: _mul4xi32toi64b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -314,7 +314,7 @@ define <4 x i64> @_mul4xi32toi64b(<4 x i32>, <4 x i32>) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: _mul4xi32toi64b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -338,7 +338,7 @@ define <4 x i64> @_mul4xi32toi64b(<4 x i32>, <4 x i32>) {
; but the final shuffle is a no-op.
define <4 x i64> @_mul4xi32toi64c(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32toi64c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,1,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
@@ -349,7 +349,7 @@ define <4 x i64> @_mul4xi32toi64c(<4 x i32>, <4 x i32>) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _mul4xi32toi64c:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE42-NEXT: pmuludq %xmm3, %xmm2
@@ -360,7 +360,7 @@ define <4 x i64> @_mul4xi32toi64c(<4 x i32>, <4 x i32>) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: _mul4xi32toi64c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
@@ -371,7 +371,7 @@ define <4 x i64> @_mul4xi32toi64c(<4 x i32>, <4 x i32>) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: _mul4xi32toi64c:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
@@ -403,7 +403,7 @@ define <4 x i64> @_mul4xi32toi64c(<4 x i32>, <4 x i32>) {
; %ext1 = zext <2 x i32> %1 to <2 x i64>
define <2 x i64> @_mul2xi64toi64a(<2 x i64>, <2 x i64>) {
; SSE2-LABEL: _mul2xi64toi64a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm1
@@ -411,7 +411,7 @@ define <2 x i64> @_mul2xi64toi64a(<2 x i64>, <2 x i64>) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: _mul2xi64toi64a:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
@@ -419,7 +419,7 @@ define <2 x i64> @_mul2xi64toi64a(<2 x i64>, <2 x i64>) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: _mul2xi64toi64a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
@@ -427,7 +427,7 @@ define <2 x i64> @_mul2xi64toi64a(<2 x i64>, <2 x i64>) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: _mul2xi64toi64a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -455,12 +455,12 @@ define <2 x i64> @_mul2xi64toi64a(<2 x i64>, <2 x i64>) {
define <2 x i64> @_mul2xi64toi64b(<2 x i64>, <2 x i64>) {
; SSE-LABEL: _mul2xi64toi64b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _mul2xi64toi64b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%f0 = bitcast <2 x i64> %0 to <4 x i32>
diff --git a/test/CodeGen/X86/mulx32.ll b/test/CodeGen/X86/mulx32.ll
index 9ebd380170d..d099f31189c 100644
--- a/test/CodeGen/X86/mulx32.ll
+++ b/test/CodeGen/X86/mulx32.ll
@@ -4,7 +4,7 @@
define i64 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: mulxl {{[0-9]+}}(%esp), %eax, %edx
; CHECK-NEXT: retl
@@ -16,7 +16,7 @@ define i64 @f1(i32 %a, i32 %b) {
define i64 @f2(i32 %a, i32* %p) {
; CHECK-LABEL: f2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: mulxl (%eax), %eax, %edx
diff --git a/test/CodeGen/X86/mulx64.ll b/test/CodeGen/X86/mulx64.ll
index 7cc10e017fc..e038f330009 100644
--- a/test/CodeGen/X86/mulx64.ll
+++ b/test/CodeGen/X86/mulx64.ll
@@ -4,7 +4,7 @@
define i128 @f1(i64 %a, i64 %b) {
; CHECK-LABEL: f1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rdx
; CHECK-NEXT: mulxq %rsi, %rax, %rdx
; CHECK-NEXT: retq
@@ -16,7 +16,7 @@ define i128 @f1(i64 %a, i64 %b) {
define i128 @f2(i64 %a, i64* %p) {
; CHECK-LABEL: f2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rdx
; CHECK-NEXT: mulxq (%rsi), %rax, %rdx
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/neg_cmp.ll b/test/CodeGen/X86/neg_cmp.ll
index cc82857706c..47fa7fbb88f 100644
--- a/test/CodeGen/X86/neg_cmp.ll
+++ b/test/CodeGen/X86/neg_cmp.ll
@@ -8,10 +8,10 @@ declare void @g()
define void @neg_cmp(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: neg_cmp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addl %esi, %edi
; CHECK-NEXT: jne .LBB0_1
-; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: # %bb.2: # %if.then
; CHECK-NEXT: jmp g # TAILCALL
; CHECK-NEXT: .LBB0_1: # %if.end
; CHECK-NEXT: retq
@@ -29,10 +29,10 @@ if.end:
define void @neg_cmp_commuted(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: neg_cmp_commuted:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addl %esi, %edi
; CHECK-NEXT: jne .LBB1_1
-; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: # %bb.2: # %if.then
; CHECK-NEXT: jmp g # TAILCALL
; CHECK-NEXT: .LBB1_1: # %if.end
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/negate-i1.ll b/test/CodeGen/X86/negate-i1.ll
index 0050fdc773f..2c1b0bba9dc 100644
--- a/test/CodeGen/X86/negate-i1.ll
+++ b/test/CodeGen/X86/negate-i1.ll
@@ -4,14 +4,14 @@
define i8 @select_i8_neg1_or_0(i1 %a) {
; X64-LABEL: select_i8_neg1_or_0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: negb %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: select_i8_neg1_or_0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
@@ -22,13 +22,13 @@ define i8 @select_i8_neg1_or_0(i1 %a) {
define i8 @select_i8_neg1_or_0_zeroext(i1 zeroext %a) {
; X64-LABEL: select_i8_neg1_or_0_zeroext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: negb %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: select_i8_neg1_or_0_zeroext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: negb %al
; X32-NEXT: retl
@@ -38,14 +38,14 @@ define i8 @select_i8_neg1_or_0_zeroext(i1 zeroext %a) {
define i16 @select_i16_neg1_or_0(i1 %a) {
; X64-LABEL: select_i16_neg1_or_0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $1, %edi
; X64-NEXT: negl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: select_i16_neg1_or_0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
@@ -57,13 +57,13 @@ define i16 @select_i16_neg1_or_0(i1 %a) {
define i16 @select_i16_neg1_or_0_zeroext(i1 zeroext %a) {
; X64-LABEL: select_i16_neg1_or_0_zeroext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: negl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: select_i16_neg1_or_0_zeroext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -74,14 +74,14 @@ define i16 @select_i16_neg1_or_0_zeroext(i1 zeroext %a) {
define i32 @select_i32_neg1_or_0(i1 %a) {
; X64-LABEL: select_i32_neg1_or_0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $1, %edi
; X64-NEXT: negl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: select_i32_neg1_or_0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
@@ -92,13 +92,13 @@ define i32 @select_i32_neg1_or_0(i1 %a) {
define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
; X64-LABEL: select_i32_neg1_or_0_zeroext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: negl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
;
; X32-LABEL: select_i32_neg1_or_0_zeroext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
; X32-NEXT: retl
@@ -108,7 +108,7 @@ define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
define i64 @select_i64_neg1_or_0(i1 %a) {
; X64-LABEL: select_i64_neg1_or_0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: andl $1, %edi
; X64-NEXT: negq %rdi
@@ -116,7 +116,7 @@ define i64 @select_i64_neg1_or_0(i1 %a) {
; X64-NEXT: retq
;
; X32-LABEL: select_i64_neg1_or_0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
@@ -128,13 +128,13 @@ define i64 @select_i64_neg1_or_0(i1 %a) {
define i64 @select_i64_neg1_or_0_zeroext(i1 zeroext %a) {
; X64-LABEL: select_i64_neg1_or_0_zeroext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negq %rax
; X64-NEXT: retq
;
; X32-LABEL: select_i64_neg1_or_0_zeroext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
; X32-NEXT: movl %eax, %edx
diff --git a/test/CodeGen/X86/negate-shift.ll b/test/CodeGen/X86/negate-shift.ll
index cbe2f9456fa..8804460f380 100644
--- a/test/CodeGen/X86/negate-shift.ll
+++ b/test/CodeGen/X86/negate-shift.ll
@@ -3,7 +3,7 @@
define i32 @neg_lshr_signbit(i32 %x) {
; X64-LABEL: neg_lshr_signbit:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sarl $31, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -14,7 +14,7 @@ define i32 @neg_lshr_signbit(i32 %x) {
define i64 @neg_ashr_signbit(i64 %x) {
; X64-LABEL: neg_ashr_signbit:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shrq $63, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
@@ -25,7 +25,7 @@ define i64 @neg_ashr_signbit(i64 %x) {
define <4 x i32> @neg_ashr_signbit_vec(<4 x i32> %x) {
; X64-LABEL: neg_ashr_signbit_vec:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrld $31, %xmm0
; X64-NEXT: retq
%sh = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
@@ -35,7 +35,7 @@ define <4 x i32> @neg_ashr_signbit_vec(<4 x i32> %x) {
define <8 x i16> @neg_lshr_signbit_vec(<8 x i16> %x) {
; X64-LABEL: neg_lshr_signbit_vec:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psraw $15, %xmm0
; X64-NEXT: retq
%sh = lshr <8 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
diff --git a/test/CodeGen/X86/negate.ll b/test/CodeGen/X86/negate.ll
index 5bdb11479af..62e4dff4593 100644
--- a/test/CodeGen/X86/negate.ll
+++ b/test/CodeGen/X86/negate.ll
@@ -3,7 +3,7 @@
define i32 @negate_nuw(i32 %x) {
; CHECK-LABEL: negate_nuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: retq
%neg = sub nuw i32 0, %x
@@ -12,7 +12,7 @@ define i32 @negate_nuw(i32 %x) {
define <4 x i32> @negate_nuw_vec(<4 x i32> %x) {
; CHECK-LABEL: negate_nuw_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
%neg = sub nuw <4 x i32> zeroinitializer, %x
@@ -21,7 +21,7 @@ define <4 x i32> @negate_nuw_vec(<4 x i32> %x) {
define i8 @negate_zero_or_minsigned_nsw(i8 %x) {
; CHECK-LABEL: negate_zero_or_minsigned_nsw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: retq
%signbit = and i8 %x, 128
@@ -31,7 +31,7 @@ define i8 @negate_zero_or_minsigned_nsw(i8 %x) {
define <4 x i32> @negate_zero_or_minsigned_nsw_vec(<4 x i32> %x) {
; CHECK-LABEL: negate_zero_or_minsigned_nsw_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
%signbit = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
@@ -41,7 +41,7 @@ define <4 x i32> @negate_zero_or_minsigned_nsw_vec(<4 x i32> %x) {
define i8 @negate_zero_or_minsigned(i8 %x) {
; CHECK-LABEL: negate_zero_or_minsigned:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shlb $7, %dil
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -52,7 +52,7 @@ define i8 @negate_zero_or_minsigned(i8 %x) {
define <4 x i32> @negate_zero_or_minsigned_vec(<4 x i32> %x) {
; CHECK-LABEL: negate_zero_or_minsigned_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%signbit = and <4 x i32> %x, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
diff --git a/test/CodeGen/X86/negative-sin.ll b/test/CodeGen/X86/negative-sin.ll
index 94369e3e8d0..c30cd2741e6 100644
--- a/test/CodeGen/X86/negative-sin.ll
+++ b/test/CodeGen/X86/negative-sin.ll
@@ -7,7 +7,7 @@ declare double @sin(double %f)
define double @strict(double %e) nounwind {
; CHECK-LABEL: strict:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vsubsd %xmm0, %xmm1, %xmm0
@@ -27,7 +27,7 @@ define double @strict(double %e) nounwind {
define double @fast(double %e) nounwind {
; CHECK-LABEL: fast:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: jmp sin # TAILCALL
%f = fsub fast double 0.0, %e
%g = call double @sin(double %f) readonly
@@ -39,7 +39,7 @@ define double @fast(double %e) nounwind {
define double @nsz(double %e) nounwind {
; CHECK-LABEL: nsz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: jmp sin # TAILCALL
%f = fsub nsz double 0.0, %e
%g = call double @sin(double %f) readonly
@@ -51,7 +51,7 @@ define double @nsz(double %e) nounwind {
define double @semi_strict1(double %e) nounwind {
; CHECK-LABEL: semi_strict1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vsubsd %xmm0, %xmm1, %xmm0
@@ -69,7 +69,7 @@ define double @semi_strict1(double %e) nounwind {
define double @semi_strict2(double %e) nounwind {
; CHECK-LABEL: semi_strict2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq sin
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -87,7 +87,7 @@ define double @semi_strict2(double %e) nounwind {
define double @fn_attr(double %e) nounwind #0 {
; CHECK-LABEL: fn_attr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: jmp sin # TAILCALL
%f = fsub double 0.0, %e
%g = call double @sin(double %f) readonly
diff --git a/test/CodeGen/X86/no-sse2-avg.ll b/test/CodeGen/X86/no-sse2-avg.ll
index 39653fdeeb3..0472cc27d84 100644
--- a/test/CodeGen/X86/no-sse2-avg.ll
+++ b/test/CodeGen/X86/no-sse2-avg.ll
@@ -4,7 +4,7 @@
define <16 x i8> @PR27973() {
; CHECK-LABEL: PR27973:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq $0, 8(%rdi)
; CHECK-NEXT: movq $0, (%rdi)
; CHECK-NEXT: movq %rdi, %rax
diff --git a/test/CodeGen/X86/nontemporal-2.ll b/test/CodeGen/X86/nontemporal-2.ll
index b6f2314b31e..47c1f7c0fbf 100644
--- a/test/CodeGen/X86/nontemporal-2.ll
+++ b/test/CodeGen/X86/nontemporal-2.ll
@@ -13,19 +13,19 @@
define void @test_zero_f32(float* %dst) {
; SSE-LABEL: test_zero_f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: movntil %eax, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: movntil %eax, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: xorl %eax, %eax
; VLX-NEXT: movntil %eax, (%rdi)
; VLX-NEXT: retq
@@ -35,19 +35,19 @@ define void @test_zero_f32(float* %dst) {
define void @test_zero_i32(i32* %dst) {
; SSE-LABEL: test_zero_i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: movntil %eax, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: movntil %eax, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: xorl %eax, %eax
; VLX-NEXT: movntil %eax, (%rdi)
; VLX-NEXT: retq
@@ -57,19 +57,19 @@ define void @test_zero_i32(i32* %dst) {
define void @test_zero_f64(double* %dst) {
; SSE-LABEL: test_zero_f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: movntiq %rax, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: movntiq %rax, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: xorl %eax, %eax
; VLX-NEXT: movntiq %rax, (%rdi)
; VLX-NEXT: retq
@@ -79,19 +79,19 @@ define void @test_zero_f64(double* %dst) {
define void @test_zero_i64(i64* %dst) {
; SSE-LABEL: test_zero_i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: movntiq %rax, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: movntiq %rax, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: xorl %eax, %eax
; VLX-NEXT: movntiq %rax, (%rdi)
; VLX-NEXT: retq
@@ -103,19 +103,19 @@ define void @test_zero_i64(i64* %dst) {
define void @test_zero_v4f32(<4 x float>* %dst) {
; SSE-LABEL: test_zero_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v4f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -125,19 +125,19 @@ define void @test_zero_v4f32(<4 x float>* %dst) {
define void @test_zero_v4i32(<4 x i32>* %dst) {
; SSE-LABEL: test_zero_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v4i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -148,19 +148,19 @@ define void @test_zero_v4i32(<4 x i32>* %dst) {
define void @test_zero_v2f64(<2 x double>* %dst) {
; SSE-LABEL: test_zero_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v2f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -170,19 +170,19 @@ define void @test_zero_v2f64(<2 x double>* %dst) {
define void @test_zero_v2i64(<2 x i64>* %dst) {
; SSE-LABEL: test_zero_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v2i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -192,19 +192,19 @@ define void @test_zero_v2i64(<2 x i64>* %dst) {
define void @test_zero_v8i16(<8 x i16>* %dst) {
; SSE-LABEL: test_zero_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v8i16:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -214,19 +214,19 @@ define void @test_zero_v8i16(<8 x i16>* %dst) {
define void @test_zero_v16i8(<16 x i8>* %dst) {
; SSE-LABEL: test_zero_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v16i8:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -238,21 +238,21 @@ define void @test_zero_v16i8(<16 x i8>* %dst) {
define void @test_zero_v8f32(<8 x float>* %dst) {
; SSE-LABEL: test_zero_v8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v8f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -263,21 +263,21 @@ define void @test_zero_v8f32(<8 x float>* %dst) {
define void @test_zero_v8i32(<8 x i32>* %dst) {
; SSE-LABEL: test_zero_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v8i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -288,21 +288,21 @@ define void @test_zero_v8i32(<8 x i32>* %dst) {
define void @test_zero_v4f64(<4 x double>* %dst) {
; SSE-LABEL: test_zero_v4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v4f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -313,21 +313,21 @@ define void @test_zero_v4f64(<4 x double>* %dst) {
define void @test_zero_v4i64(<4 x i64>* %dst) {
; SSE-LABEL: test_zero_v4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v4i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v4i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -338,21 +338,21 @@ define void @test_zero_v4i64(<4 x i64>* %dst) {
define void @test_zero_v16i16(<16 x i16>* %dst) {
; SSE-LABEL: test_zero_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v16i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v16i16:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -363,21 +363,21 @@ define void @test_zero_v16i16(<16 x i16>* %dst) {
define void @test_zero_v32i8(<32 x i8>* %dst) {
; SSE-LABEL: test_zero_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movntps %xmm0, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_zero_v32i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_zero_v32i8:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -393,27 +393,27 @@ define void @test_zero_v32i8(<32 x i8>* %dst) {
define void @test_arg_f32(float %arg, float* %dst) {
; SSE2-LABEL: test_arg_f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_arg_f32:
-; SSE4A: # BB#0:
+; SSE4A: # %bb.0:
; SSE4A-NEXT: movntss %xmm0, (%rdi)
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_arg_f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movss %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovss %xmm0, (%rdi)
; VLX-NEXT: retq
store float %arg, float* %dst, align 1, !nontemporal !1
@@ -422,17 +422,17 @@ define void @test_arg_f32(float %arg, float* %dst) {
define void @test_arg_i32(i32 %arg, i32* %dst) {
; SSE-LABEL: test_arg_i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntil %edi, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movntil %edi, (%rsi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: movntil %edi, (%rsi)
; VLX-NEXT: retq
store i32 %arg, i32* %dst, align 1, !nontemporal !1
@@ -441,27 +441,27 @@ define void @test_arg_i32(i32 %arg, i32* %dst) {
define void @test_arg_f64(double %arg, double* %dst) {
; SSE2-LABEL: test_arg_f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_arg_f64:
-; SSE4A: # BB#0:
+; SSE4A: # %bb.0:
; SSE4A-NEXT: movntsd %xmm0, (%rdi)
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_arg_f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movsd %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovsd %xmm0, (%rdi)
; VLX-NEXT: retq
store double %arg, double* %dst, align 1, !nontemporal !1
@@ -470,17 +470,17 @@ define void @test_arg_f64(double %arg, double* %dst) {
define void @test_arg_i64(i64 %arg, i64* %dst) {
; SSE-LABEL: test_arg_i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntiq %rdi, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movntiq %rdi, (%rsi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: movntiq %rdi, (%rsi)
; VLX-NEXT: retq
store i64 %arg, i64* %dst, align 1, !nontemporal !1
@@ -491,31 +491,31 @@ define void @test_arg_i64(i64 %arg, i64* %dst) {
define void @test_extract_f32(<4 x float> %arg, float* %dst) {
; SSE2-LABEL: test_extract_f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: movss %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_extract_f32:
-; SSE4A: # BB#0:
+; SSE4A: # %bb.0:
; SSE4A-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE4A-NEXT: movntss %xmm0, (%rdi)
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_extract_f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: extractps $1, %xmm0, %eax
; SSE41-NEXT: movntil %eax, (%rdi)
; SSE41-NEXT: retq
;
; AVX-LABEL: test_extract_f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractps $1, %xmm0, %eax
; AVX-NEXT: movntil %eax, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_extract_f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vextractps $1, %xmm0, %eax
; VLX-NEXT: movntil %eax, (%rdi)
; VLX-NEXT: retq
@@ -526,33 +526,33 @@ define void @test_extract_f32(<4 x float> %arg, float* %dst) {
define void @test_extract_i32(<4 x i32> %arg, i32* %dst) {
; SSE2-LABEL: test_extract_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: movntil %eax, (%rdi)
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_extract_i32:
-; SSE4A: # BB#0:
+; SSE4A: # %bb.0:
; SSE4A-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE4A-NEXT: movd %xmm0, %eax
; SSE4A-NEXT: movntil %eax, (%rdi)
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_extract_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: extractps $1, %xmm0, %eax
; SSE41-NEXT: movntil %eax, (%rdi)
; SSE41-NEXT: retq
;
; AVX-LABEL: test_extract_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractps $1, %xmm0, %eax
; AVX-NEXT: movntil %eax, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_extract_i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vextractps $1, %xmm0, %eax
; VLX-NEXT: movntil %eax, (%rdi)
; VLX-NEXT: retq
@@ -563,28 +563,28 @@ define void @test_extract_i32(<4 x i32> %arg, i32* %dst) {
define void @test_extract_f64(<2 x double> %arg, double* %dst) {
; SSE2-LABEL: test_extract_f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movhpd %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_extract_f64:
-; SSE4A: # BB#0:
+; SSE4A: # %bb.0:
; SSE4A-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE4A-NEXT: movntsd %xmm0, (%rdi)
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_extract_f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movhpd %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX-LABEL: test_extract_f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovhpd %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_extract_f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovhpd %xmm0, (%rdi)
; VLX-NEXT: retq
%1 = extractelement <2 x double> %arg, i32 1
@@ -594,33 +594,33 @@ define void @test_extract_f64(<2 x double> %arg, double* %dst) {
define void @test_extract_i64(<2 x i64> %arg, i64* %dst) {
; SSE2-LABEL: test_extract_i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movntiq %rax, (%rdi)
; SSE2-NEXT: retq
;
; SSE4A-LABEL: test_extract_i64:
-; SSE4A: # BB#0:
+; SSE4A: # %bb.0:
; SSE4A-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE4A-NEXT: movq %xmm0, %rax
; SSE4A-NEXT: movntiq %rax, (%rdi)
; SSE4A-NEXT: retq
;
; SSE41-LABEL: test_extract_i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: movntiq %rax, (%rdi)
; SSE41-NEXT: retq
;
; AVX-LABEL: test_extract_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rax
; AVX-NEXT: movntiq %rax, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_extract_i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpextrq $1, %xmm0, %rax
; VLX-NEXT: movntiq %rax, (%rdi)
; VLX-NEXT: retq
@@ -633,17 +633,17 @@ define void @test_extract_i64(<2 x i64> %arg, i64* %dst) {
define void @test_arg_v4f32(<4 x float> %arg, <4 x float>* %dst) {
; SSE-LABEL: test_arg_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v4f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %xmm0, (%rdi)
; VLX-NEXT: retq
store <4 x float> %arg, <4 x float>* %dst, align 16, !nontemporal !1
@@ -652,17 +652,17 @@ define void @test_arg_v4f32(<4 x float> %arg, <4 x float>* %dst) {
define void @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %dst) {
; SSE-LABEL: test_arg_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v4i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %xmm0, (%rdi)
; VLX-NEXT: retq
store <4 x i32> %arg, <4 x i32>* %dst, align 16, !nontemporal !1
@@ -671,17 +671,17 @@ define void @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %dst) {
define void @test_arg_v2f64(<2 x double> %arg, <2 x double>* %dst) {
; SSE-LABEL: test_arg_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v2f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %xmm0, (%rdi)
; VLX-NEXT: retq
store <2 x double> %arg, <2 x double>* %dst, align 16, !nontemporal !1
@@ -690,17 +690,17 @@ define void @test_arg_v2f64(<2 x double> %arg, <2 x double>* %dst) {
define void @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %dst) {
; SSE-LABEL: test_arg_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v2i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %xmm0, (%rdi)
; VLX-NEXT: retq
store <2 x i64> %arg, <2 x i64>* %dst, align 16, !nontemporal !1
@@ -709,17 +709,17 @@ define void @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %dst) {
define void @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %dst) {
; SSE-LABEL: test_arg_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v8i16:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %xmm0, (%rdi)
; VLX-NEXT: retq
store <8 x i16> %arg, <8 x i16>* %dst, align 16, !nontemporal !1
@@ -728,17 +728,17 @@ define void @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %dst) {
define void @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %dst) {
; SSE-LABEL: test_arg_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v16i8:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %xmm0, (%rdi)
; VLX-NEXT: retq
store <16 x i8> %arg, <16 x i8>* %dst, align 16, !nontemporal !1
@@ -749,19 +749,19 @@ define void @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %dst) {
define void @test_arg_v8f32(<8 x float> %arg, <8 x float>* %dst) {
; SSE-LABEL: test_arg_v8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm1, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v8f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
@@ -771,19 +771,19 @@ define void @test_arg_v8f32(<8 x float> %arg, <8 x float>* %dst) {
define void @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %dst) {
; SSE-LABEL: test_arg_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm1, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v8i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
@@ -793,19 +793,19 @@ define void @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %dst) {
define void @test_arg_v4f64(<4 x double> %arg, <4 x double>* %dst) {
; SSE-LABEL: test_arg_v4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm1, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v4f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
@@ -815,19 +815,19 @@ define void @test_arg_v4f64(<4 x double> %arg, <4 x double>* %dst) {
define void @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %dst) {
; SSE-LABEL: test_arg_v4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm1, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v4i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v4i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
@@ -837,19 +837,19 @@ define void @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %dst) {
define void @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %dst) {
; SSE-LABEL: test_arg_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm1, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v16i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v16i16:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
@@ -859,19 +859,19 @@ define void @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %dst) {
define void @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %dst) {
; SSE-LABEL: test_arg_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movntps %xmm1, 16(%rdi)
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v32i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v32i8:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vmovntps %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
@@ -885,19 +885,19 @@ define void @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %dst) {
define void @test_op_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>* %dst) {
; SSE-LABEL: test_op_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: movntps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_op_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovntps %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_op_v4f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; VLX-NEXT: vmovntps %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -908,19 +908,19 @@ define void @test_op_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>* %dst) {
define void @test_op_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32>* %dst) {
; SSE-LABEL: test_op_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_op_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovntdq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_op_v4i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -931,19 +931,19 @@ define void @test_op_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32>* %dst) {
define void @test_op_v2f64(<2 x double> %a, <2 x double> %b, <2 x double>* %dst) {
; SSE-LABEL: test_op_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movntpd %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_op_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovntpd %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_op_v2f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; VLX-NEXT: vmovntpd %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -954,19 +954,19 @@ define void @test_op_v2f64(<2 x double> %a, <2 x double> %b, <2 x double>* %dst)
define void @test_op_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64>* %dst) {
; SSE-LABEL: test_op_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddq %xmm1, %xmm0
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_op_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovntdq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_op_v2i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -977,19 +977,19 @@ define void @test_op_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64>* %dst) {
define void @test_op_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16>* %dst) {
; SSE-LABEL: test_op_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_op_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovntdq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_op_v8i16:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -1000,19 +1000,19 @@ define void @test_op_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16>* %dst) {
define void @test_op_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8>* %dst) {
; SSE-LABEL: test_op_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddb %xmm1, %xmm0
; SSE-NEXT: movntdq %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: test_op_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovntdq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; VLX-LABEL: test_op_v16i8:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; VLX-NEXT: vmovntdq %xmm0, (%rdi)
; VLX-NEXT: retq
@@ -1025,7 +1025,7 @@ define void @test_op_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8>* %dst) {
define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
; SSE-LABEL: test_op_v8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addps %xmm2, %xmm0
; SSE-NEXT: addps %xmm3, %xmm1
; SSE-NEXT: movntps %xmm1, 16(%rdi)
@@ -1033,14 +1033,14 @@ define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_op_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmovntps %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_op_v8f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntps %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -1052,7 +1052,7 @@ define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
; SSE-LABEL: test_op_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: paddd %xmm3, %xmm1
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
@@ -1060,7 +1060,7 @@ define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_op_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
@@ -1071,14 +1071,14 @@ define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_op_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovntdq %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; VLX-LABEL: test_op_v8i32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -1090,7 +1090,7 @@ define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst) {
; SSE-LABEL: test_op_v4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: movntpd %xmm1, 16(%rdi)
@@ -1098,14 +1098,14 @@ define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst)
; SSE-NEXT: retq
;
; AVX-LABEL: test_op_v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmovntpd %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_op_v4f64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntpd %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -1117,7 +1117,7 @@ define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst)
define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
; SSE-LABEL: test_op_v4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddq %xmm2, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm1
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
@@ -1125,7 +1125,7 @@ define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_op_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
@@ -1136,14 +1136,14 @@ define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_op_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovntdq %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; VLX-LABEL: test_op_v4i64:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -1155,7 +1155,7 @@ define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
; SSE-LABEL: test_op_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddw %xmm2, %xmm0
; SSE-NEXT: paddw %xmm3, %xmm1
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
@@ -1163,7 +1163,7 @@ define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_op_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
@@ -1174,14 +1174,14 @@ define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_op_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovntdq %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; VLX-LABEL: test_op_v16i16:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -1193,7 +1193,7 @@ define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
; SSE-LABEL: test_op_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddb %xmm2, %xmm0
; SSE-NEXT: paddb %xmm3, %xmm1
; SSE-NEXT: movntdq %xmm1, 16(%rdi)
@@ -1201,7 +1201,7 @@ define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_op_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
@@ -1212,14 +1212,14 @@ define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_op_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovntdq %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; VLX-LABEL: test_op_v32i8:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
@@ -1235,7 +1235,7 @@ define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
; probably always worth even some 20 instruction scalarization.
define void @test_unaligned_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
; SSE-LABEL: test_unaligned_v8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addps %xmm2, %xmm0
; SSE-NEXT: addps %xmm3, %xmm1
; SSE-NEXT: movntps %xmm1, 16(%rdi)
@@ -1243,14 +1243,14 @@ define void @test_unaligned_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmovups %ymm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v8f32:
-; VLX: # BB#0:
+; VLX: # %bb.0:
; VLX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovups %ymm0, (%rdi)
; VLX-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/nontemporal-loads.ll b/test/CodeGen/X86/nontemporal-loads.ll
index 1687df5446b..308395d365c 100644
--- a/test/CodeGen/X86/nontemporal-loads.ll
+++ b/test/CodeGen/X86/nontemporal-loads.ll
@@ -9,22 +9,22 @@
define <4 x float> @test_v4f32(<4 x float>* %src) {
; SSE2-LABEL: test_v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
@@ -33,22 +33,22 @@ define <4 x float> @test_v4f32(<4 x float>* %src) {
define <4 x i32> @test_v4i32(<4 x i32>* %src) {
; SSE2-LABEL: test_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
@@ -57,22 +57,22 @@ define <4 x i32> @test_v4i32(<4 x i32>* %src) {
define <2 x double> @test_v2f64(<2 x double>* %src) {
; SSE2-LABEL: test_v2f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
@@ -81,22 +81,22 @@ define <2 x double> @test_v2f64(<2 x double>* %src) {
define <2 x i64> @test_v2i64(<2 x i64>* %src) {
; SSE2-LABEL: test_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
@@ -105,22 +105,22 @@ define <2 x i64> @test_v2i64(<2 x i64>* %src) {
define <8 x i16> @test_v8i16(<8 x i16>* %src) {
; SSE2-LABEL: test_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
@@ -129,22 +129,22 @@ define <8 x i16> @test_v8i16(<8 x i16>* %src) {
define <16 x i8> @test_v16i8(<16 x i8>* %src) {
; SSE2-LABEL: test_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
@@ -155,31 +155,31 @@ define <16 x i8> @test_v16i8(<16 x i8>* %src) {
define <8 x float> @test_v8f32(<8 x float>* %src) {
; SSE2-LABEL: test_v8f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
@@ -188,31 +188,31 @@ define <8 x float> @test_v8f32(<8 x float>* %src) {
define <8 x i32> @test_v8i32(<8 x i32>* %src) {
; SSE2-LABEL: test_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
@@ -221,31 +221,31 @@ define <8 x i32> @test_v8i32(<8 x i32>* %src) {
define <4 x double> @test_v4f64(<4 x double>* %src) {
; SSE2-LABEL: test_v4f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
@@ -254,31 +254,31 @@ define <4 x double> @test_v4f64(<4 x double>* %src) {
define <4 x i64> @test_v4i64(<4 x i64>* %src) {
; SSE2-LABEL: test_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
@@ -287,31 +287,31 @@ define <4 x i64> @test_v4i64(<4 x i64>* %src) {
define <16 x i16> @test_v16i16(<16 x i16>* %src) {
; SSE2-LABEL: test_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
@@ -320,31 +320,31 @@ define <16 x i16> @test_v16i16(<16 x i16>* %src) {
define <32 x i8> @test_v32i8(<32 x i8>* %src) {
; SSE2-LABEL: test_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
@@ -355,7 +355,7 @@ define <32 x i8> @test_v32i8(<32 x i8>* %src) {
define <16 x float> @test_v16f32(<16 x float>* %src) {
; SSE2-LABEL: test_v16f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -363,7 +363,7 @@ define <16 x float> @test_v16f32(<16 x float>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -371,7 +371,7 @@ define <16 x float> @test_v16f32(<16 x float>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -381,13 +381,13 @@ define <16 x float> @test_v16f32(<16 x float>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
%1 = load <16 x float>, <16 x float>* %src, align 64, !nontemporal !1
@@ -396,7 +396,7 @@ define <16 x float> @test_v16f32(<16 x float>* %src) {
define <16 x i32> @test_v16i32(<16 x i32>* %src) {
; SSE2-LABEL: test_v16i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -404,7 +404,7 @@ define <16 x i32> @test_v16i32(<16 x i32>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -412,7 +412,7 @@ define <16 x i32> @test_v16i32(<16 x i32>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -422,13 +422,13 @@ define <16 x i32> @test_v16i32(<16 x i32>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
%1 = load <16 x i32>, <16 x i32>* %src, align 64, !nontemporal !1
@@ -437,7 +437,7 @@ define <16 x i32> @test_v16i32(<16 x i32>* %src) {
define <8 x double> @test_v8f64(<8 x double>* %src) {
; SSE2-LABEL: test_v8f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -445,7 +445,7 @@ define <8 x double> @test_v8f64(<8 x double>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -453,7 +453,7 @@ define <8 x double> @test_v8f64(<8 x double>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -463,13 +463,13 @@ define <8 x double> @test_v8f64(<8 x double>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
%1 = load <8 x double>, <8 x double>* %src, align 64, !nontemporal !1
@@ -478,7 +478,7 @@ define <8 x double> @test_v8f64(<8 x double>* %src) {
define <8 x i64> @test_v8i64(<8 x i64>* %src) {
; SSE2-LABEL: test_v8i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -486,7 +486,7 @@ define <8 x i64> @test_v8i64(<8 x i64>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -494,7 +494,7 @@ define <8 x i64> @test_v8i64(<8 x i64>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -504,13 +504,13 @@ define <8 x i64> @test_v8i64(<8 x i64>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
%1 = load <8 x i64>, <8 x i64>* %src, align 64, !nontemporal !1
@@ -519,7 +519,7 @@ define <8 x i64> @test_v8i64(<8 x i64>* %src) {
define <32 x i16> @test_v32i16(<32 x i16>* %src) {
; SSE2-LABEL: test_v32i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -527,7 +527,7 @@ define <32 x i16> @test_v32i16(<32 x i16>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -535,7 +535,7 @@ define <32 x i16> @test_v32i16(<32 x i16>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -545,24 +545,24 @@ define <32 x i16> @test_v32i16(<32 x i16>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v32i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
@@ -572,7 +572,7 @@ define <32 x i16> @test_v32i16(<32 x i16>* %src) {
define <64 x i8> @test_v64i8(<64 x i8>* %src) {
; SSE2-LABEL: test_v64i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
@@ -580,7 +580,7 @@ define <64 x i8> @test_v64i8(<64 x i8>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v64i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
@@ -588,7 +588,7 @@ define <64 x i8> @test_v64i8(<64 x i8>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -598,24 +598,24 @@ define <64 x i8> @test_v64i8(<64 x i8>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v64i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
@@ -628,24 +628,24 @@ define <64 x i8> @test_v64i8(<64 x i8>* %src) {
define <4 x float> @test_arg_v4f32(<4 x float> %arg, <4 x float>* %src) {
; SSE2-LABEL: test_arg_v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: addps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -656,24 +656,24 @@ define <4 x float> @test_arg_v4f32(<4 x float> %arg, <4 x float>* %src) {
define <4 x i32> @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %src) {
; SSE2-LABEL: test_arg_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -684,24 +684,24 @@ define <4 x i32> @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %src) {
define <2 x double> @test_arg_v2f64(<2 x double> %arg, <2 x double>* %src) {
; SSE2-LABEL: test_arg_v2f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v2f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: addpd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -712,24 +712,24 @@ define <2 x double> @test_arg_v2f64(<2 x double> %arg, <2 x double>* %src) {
define <2 x i64> @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %src) {
; SSE2-LABEL: test_arg_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -740,24 +740,24 @@ define <2 x i64> @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %src) {
define <8 x i16> @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %src) {
; SSE2-LABEL: test_arg_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -768,24 +768,24 @@ define <8 x i16> @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %src) {
define <16 x i8> @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %src) {
; SSE2-LABEL: test_arg_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -798,13 +798,13 @@ define <16 x i8> @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %src) {
define <8 x float> @test_arg_v8f32(<8 x float> %arg, <8 x float>* %src) {
; SSE2-LABEL: test_arg_v8f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: addps (%rdi), %xmm0
; SSE2-NEXT: addps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: addps %xmm3, %xmm0
@@ -812,7 +812,7 @@ define <8 x float> @test_arg_v8f32(<8 x float> %arg, <8 x float>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -820,13 +820,13 @@ define <8 x float> @test_arg_v8f32(<8 x float> %arg, <8 x float>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -837,13 +837,13 @@ define <8 x float> @test_arg_v8f32(<8 x float> %arg, <8 x float>* %src) {
define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
; SSE2-LABEL: test_arg_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: paddd 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddd %xmm3, %xmm0
@@ -851,7 +851,7 @@ define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -861,13 +861,13 @@ define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -878,13 +878,13 @@ define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
define <4 x double> @test_arg_v4f64(<4 x double> %arg, <4 x double>* %src) {
; SSE2-LABEL: test_arg_v4f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: addpd 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: addpd %xmm3, %xmm0
@@ -892,7 +892,7 @@ define <4 x double> @test_arg_v4f64(<4 x double> %arg, <4 x double>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -900,13 +900,13 @@ define <4 x double> @test_arg_v4f64(<4 x double> %arg, <4 x double>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -917,13 +917,13 @@ define <4 x double> @test_arg_v4f64(<4 x double> %arg, <4 x double>* %src) {
define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
; SSE2-LABEL: test_arg_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: paddq 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddq %xmm3, %xmm0
@@ -931,7 +931,7 @@ define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -941,13 +941,13 @@ define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -958,13 +958,13 @@ define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
; SSE2-LABEL: test_arg_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: paddw 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddw %xmm3, %xmm0
@@ -972,7 +972,7 @@ define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -982,13 +982,13 @@ define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -999,13 +999,13 @@ define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
; SSE2-LABEL: test_arg_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: paddb 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm0
@@ -1013,7 +1013,7 @@ define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -1023,13 +1023,13 @@ define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -1042,7 +1042,7 @@ define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
; SSE2-LABEL: test_arg_v16f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: addps (%rdi), %xmm0
; SSE2-NEXT: addps 16(%rdi), %xmm1
; SSE2-NEXT: addps 32(%rdi), %xmm2
@@ -1050,7 +1050,7 @@ define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
@@ -1062,7 +1062,7 @@ define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
@@ -1074,7 +1074,7 @@ define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vaddps %ymm3, %ymm0, %ymm0
@@ -1082,7 +1082,7 @@ define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v16f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
@@ -1093,7 +1093,7 @@ define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
; SSE2-LABEL: test_arg_v16i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: paddd 16(%rdi), %xmm1
; SSE2-NEXT: paddd 32(%rdi), %xmm2
@@ -1101,7 +1101,7 @@ define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
@@ -1113,7 +1113,7 @@ define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vmovntdqa (%rdi), %xmm4
@@ -1129,7 +1129,7 @@ define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddd %ymm3, %ymm0, %ymm0
@@ -1137,7 +1137,7 @@ define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
@@ -1148,7 +1148,7 @@ define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
; SSE2-LABEL: test_arg_v8f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: addpd 16(%rdi), %xmm1
; SSE2-NEXT: addpd 32(%rdi), %xmm2
@@ -1156,7 +1156,7 @@ define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
@@ -1168,7 +1168,7 @@ define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
@@ -1180,7 +1180,7 @@ define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vaddpd %ymm3, %ymm0, %ymm0
@@ -1188,7 +1188,7 @@ define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
@@ -1199,7 +1199,7 @@ define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
; SSE2-LABEL: test_arg_v8i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: paddq 16(%rdi), %xmm1
; SSE2-NEXT: paddq 32(%rdi), %xmm2
@@ -1207,7 +1207,7 @@ define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
@@ -1219,7 +1219,7 @@ define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vmovntdqa (%rdi), %xmm4
@@ -1235,7 +1235,7 @@ define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
@@ -1243,7 +1243,7 @@ define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
@@ -1254,7 +1254,7 @@ define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
; SSE2-LABEL: test_arg_v32i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: paddw 16(%rdi), %xmm1
; SSE2-NEXT: paddw 32(%rdi), %xmm2
@@ -1262,7 +1262,7 @@ define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v32i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
@@ -1274,7 +1274,7 @@ define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vmovntdqa (%rdi), %xmm4
@@ -1290,7 +1290,7 @@ define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddw %ymm3, %ymm0, %ymm0
@@ -1298,7 +1298,7 @@ define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_arg_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm3
; AVX512F-NEXT: vpaddw %ymm3, %ymm0, %ymm0
@@ -1306,13 +1306,13 @@ define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_arg_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_arg_v32i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm3
; AVX512VL-NEXT: vpaddw %ymm3, %ymm0, %ymm0
@@ -1325,7 +1325,7 @@ define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
; SSE2-LABEL: test_arg_v64i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: paddb 16(%rdi), %xmm1
; SSE2-NEXT: paddb 32(%rdi), %xmm2
@@ -1333,7 +1333,7 @@ define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v64i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
@@ -1345,7 +1345,7 @@ define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vmovntdqa (%rdi), %xmm4
@@ -1361,7 +1361,7 @@ define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
@@ -1369,7 +1369,7 @@ define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_arg_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm3
; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0
@@ -1377,13 +1377,13 @@ define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_arg_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_arg_v64i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm3
; AVX512VL-NEXT: vpaddb %ymm3, %ymm0, %ymm0
@@ -1399,17 +1399,17 @@ define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
define <4 x float> @test_unaligned_v4f32(<4 x float>* %src) {
; SSE-LABEL: test_unaligned_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float>* %src, align 1, !nontemporal !1
@@ -1418,17 +1418,17 @@ define <4 x float> @test_unaligned_v4f32(<4 x float>* %src) {
define <4 x i32> @test_unaligned_v4i32(<4 x i32>* %src) {
; SSE-LABEL: test_unaligned_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %src, align 1, !nontemporal !1
@@ -1437,17 +1437,17 @@ define <4 x i32> @test_unaligned_v4i32(<4 x i32>* %src) {
define <2 x double> @test_unaligned_v2f64(<2 x double>* %src) {
; SSE-LABEL: test_unaligned_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <2 x double>, <2 x double>* %src, align 1, !nontemporal !1
@@ -1456,17 +1456,17 @@ define <2 x double> @test_unaligned_v2f64(<2 x double>* %src) {
define <2 x i64> @test_unaligned_v2i64(<2 x i64>* %src) {
; SSE-LABEL: test_unaligned_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %src, align 1, !nontemporal !1
@@ -1475,17 +1475,17 @@ define <2 x i64> @test_unaligned_v2i64(<2 x i64>* %src) {
define <8 x i16> @test_unaligned_v8i16(<8 x i16>* %src) {
; SSE-LABEL: test_unaligned_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %src, align 1, !nontemporal !1
@@ -1494,17 +1494,17 @@ define <8 x i16> @test_unaligned_v8i16(<8 x i16>* %src) {
define <16 x i8> @test_unaligned_v16i8(<16 x i8>* %src) {
; SSE-LABEL: test_unaligned_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %src, align 1, !nontemporal !1
@@ -1515,18 +1515,18 @@ define <16 x i8> @test_unaligned_v16i8(<16 x i8>* %src) {
define <8 x float> @test_unaligned_v8f32(<8 x float>* %src) {
; SSE-LABEL: test_unaligned_v8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <8 x float>, <8 x float>* %src, align 1, !nontemporal !1
@@ -1535,18 +1535,18 @@ define <8 x float> @test_unaligned_v8f32(<8 x float>* %src) {
define <8 x i32> @test_unaligned_v8i32(<8 x i32>* %src) {
; SSE-LABEL: test_unaligned_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <8 x i32>, <8 x i32>* %src, align 1, !nontemporal !1
@@ -1555,18 +1555,18 @@ define <8 x i32> @test_unaligned_v8i32(<8 x i32>* %src) {
define <4 x double> @test_unaligned_v4f64(<4 x double>* %src) {
; SSE-LABEL: test_unaligned_v4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <4 x double>, <4 x double>* %src, align 1, !nontemporal !1
@@ -1575,18 +1575,18 @@ define <4 x double> @test_unaligned_v4f64(<4 x double>* %src) {
define <4 x i64> @test_unaligned_v4i64(<4 x i64>* %src) {
; SSE-LABEL: test_unaligned_v4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <4 x i64>, <4 x i64>* %src, align 1, !nontemporal !1
@@ -1595,18 +1595,18 @@ define <4 x i64> @test_unaligned_v4i64(<4 x i64>* %src) {
define <16 x i16> @test_unaligned_v16i16(<16 x i16>* %src) {
; SSE-LABEL: test_unaligned_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %src, align 1, !nontemporal !1
@@ -1615,18 +1615,18 @@ define <16 x i16> @test_unaligned_v16i16(<16 x i16>* %src) {
define <32 x i8> @test_unaligned_v32i8(<32 x i8>* %src) {
; SSE-LABEL: test_unaligned_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v32i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
%1 = load <32 x i8>, <32 x i8>* %src, align 1, !nontemporal !1
@@ -1637,7 +1637,7 @@ define <32 x i8> @test_unaligned_v32i8(<32 x i8>* %src) {
define <16 x float> @test_unaligned_v16f32(<16 x float>* %src) {
; SSE-LABEL: test_unaligned_v16f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -1645,13 +1645,13 @@ define <16 x float> @test_unaligned_v16f32(<16 x float>* %src) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
%1 = load <16 x float>, <16 x float>* %src, align 1, !nontemporal !1
@@ -1660,7 +1660,7 @@ define <16 x float> @test_unaligned_v16f32(<16 x float>* %src) {
define <16 x i32> @test_unaligned_v16i32(<16 x i32>* %src) {
; SSE-LABEL: test_unaligned_v16i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -1668,13 +1668,13 @@ define <16 x i32> @test_unaligned_v16i32(<16 x i32>* %src) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
%1 = load <16 x i32>, <16 x i32>* %src, align 1, !nontemporal !1
@@ -1683,7 +1683,7 @@ define <16 x i32> @test_unaligned_v16i32(<16 x i32>* %src) {
define <8 x double> @test_unaligned_v8f64(<8 x double>* %src) {
; SSE-LABEL: test_unaligned_v8f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -1691,13 +1691,13 @@ define <8 x double> @test_unaligned_v8f64(<8 x double>* %src) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
%1 = load <8 x double>, <8 x double>* %src, align 1, !nontemporal !1
@@ -1706,7 +1706,7 @@ define <8 x double> @test_unaligned_v8f64(<8 x double>* %src) {
define <8 x i64> @test_unaligned_v8i64(<8 x i64>* %src) {
; SSE-LABEL: test_unaligned_v8i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -1714,13 +1714,13 @@ define <8 x i64> @test_unaligned_v8i64(<8 x i64>* %src) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
%1 = load <8 x i64>, <8 x i64>* %src, align 1, !nontemporal !1
@@ -1729,7 +1729,7 @@ define <8 x i64> @test_unaligned_v8i64(<8 x i64>* %src) {
define <32 x i16> @test_unaligned_v32i16(<32 x i16>* %src) {
; SSE-LABEL: test_unaligned_v32i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -1737,24 +1737,24 @@ define <32 x i16> @test_unaligned_v32i16(<32 x i16>* %src) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v32i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512F-LABEL: test_unaligned_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovups (%rdi), %ymm0
; AVX512F-NEXT: vmovups 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_unaligned_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovups (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_unaligned_v32i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovups (%rdi), %ymm0
; AVX512VL-NEXT: vmovups 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
@@ -1764,7 +1764,7 @@ define <32 x i16> @test_unaligned_v32i16(<32 x i16>* %src) {
define <64 x i8> @test_unaligned_v64i8(<64 x i8>* %src) {
; SSE-LABEL: test_unaligned_v64i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
@@ -1772,24 +1772,24 @@ define <64 x i8> @test_unaligned_v64i8(<64 x i8>* %src) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v64i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512F-LABEL: test_unaligned_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovups (%rdi), %ymm0
; AVX512F-NEXT: vmovups 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_unaligned_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovups (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_unaligned_v64i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovups (%rdi), %ymm0
; AVX512VL-NEXT: vmovups 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
@@ -1799,7 +1799,7 @@ define <64 x i8> @test_unaligned_v64i8(<64 x i8>* %src) {
define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; SSE2-LABEL: test_masked_v16i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: pxor %xmm12, %xmm12
; SSE2-NEXT: pcmpeqd %xmm12, %xmm7
@@ -1832,7 +1832,7 @@ define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %m
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_masked_v16i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
@@ -1859,7 +1859,7 @@ define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %m
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_masked_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm4
@@ -1885,7 +1885,7 @@ define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %m
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_masked_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpcmpeqd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
@@ -1899,7 +1899,7 @@ define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %m
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_masked_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
diff --git a/test/CodeGen/X86/nontemporal.ll b/test/CodeGen/X86/nontemporal.ll
index d49c8872433..9ceb52fcf22 100644
--- a/test/CodeGen/X86/nontemporal.ll
+++ b/test/CodeGen/X86/nontemporal.ll
@@ -6,7 +6,7 @@
define void @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4 x i32> %F, <8 x i16> %G, <16 x i8> %H, i64 %I) nounwind {
; X32-SSE-LABEL: f:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pushl %ebp
; X32-SSE-NEXT: movl %esp, %ebp
; X32-SSE-NEXT: andl $-16, %esp
@@ -36,7 +36,7 @@ define void @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: f:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: pushl %ebp
; X32-AVX-NEXT: movl %esp, %ebp
; X32-AVX-NEXT: andl $-16, %esp
@@ -66,7 +66,7 @@ define void @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: f:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: addps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: movntps %xmm0, (%rdi)
; X64-SSE-NEXT: paddq {{.*}}(%rip), %xmm2
@@ -84,7 +84,7 @@ define void @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: f:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vmovntps %xmm0, (%rdi)
; X64-AVX-NEXT: vpaddq {{.*}}(%rip), %xmm2, %xmm0
diff --git a/test/CodeGen/X86/nosse-vector.ll b/test/CodeGen/X86/nosse-vector.ll
index 398234a6d03..ec97b1ed9c0 100644
--- a/test/CodeGen/X86/nosse-vector.ll
+++ b/test/CodeGen/X86/nosse-vector.ll
@@ -4,7 +4,7 @@
define void @fadd_2f64_mem(<2 x double>* %p0, <2 x double>* %p1, <2 x double>* %p2) nounwind {
; X32-LABEL: fadd_2f64_mem:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -18,7 +18,7 @@ define void @fadd_2f64_mem(<2 x double>* %p0, <2 x double>* %p1, <2 x double>* %
; X32-NEXT: retl
;
; X64-LABEL: fadd_2f64_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: fldl 8(%rdi)
; X64-NEXT: fldl (%rdi)
; X64-NEXT: faddl (%rsi)
@@ -36,7 +36,7 @@ define void @fadd_2f64_mem(<2 x double>* %p0, <2 x double>* %p1, <2 x double>* %
define void @fadd_4f32_mem(<4 x float>* %p0, <4 x float>* %p1, <4 x float>* %p2) nounwind {
; X32-LABEL: fadd_4f32_mem:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -59,7 +59,7 @@ define void @fadd_4f32_mem(<4 x float>* %p0, <4 x float>* %p1, <4 x float>* %p2)
; X32-NEXT: retl
;
; X64-LABEL: fadd_4f32_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: flds 12(%rdi)
; X64-NEXT: flds 8(%rdi)
; X64-NEXT: flds 4(%rdi)
@@ -86,7 +86,7 @@ define void @fadd_4f32_mem(<4 x float>* %p0, <4 x float>* %p1, <4 x float>* %p2)
define void @fdiv_4f32_mem(<4 x float>* %p0, <4 x float>* %p1, <4 x float>* %p2) nounwind {
; X32-LABEL: fdiv_4f32_mem:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -109,7 +109,7 @@ define void @fdiv_4f32_mem(<4 x float>* %p0, <4 x float>* %p1, <4 x float>* %p2)
; X32-NEXT: retl
;
; X64-LABEL: fdiv_4f32_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: flds 12(%rdi)
; X64-NEXT: flds 8(%rdi)
; X64-NEXT: flds 4(%rdi)
@@ -136,7 +136,7 @@ define void @fdiv_4f32_mem(<4 x float>* %p0, <4 x float>* %p1, <4 x float>* %p2)
define void @sitofp_4i64_4f32_mem(<4 x i64>* %p0, <4 x float>* %p1) nounwind {
; X32-LABEL: sitofp_4i64_4f32_mem:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: pushl %ebx
@@ -182,7 +182,7 @@ define void @sitofp_4i64_4f32_mem(<4 x i64>* %p0, <4 x float>* %p1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: sitofp_4i64_4f32_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq 24(%rdi), %rax
; X64-NEXT: movq 16(%rdi), %rcx
; X64-NEXT: movq (%rdi), %rdx
@@ -208,7 +208,7 @@ define void @sitofp_4i64_4f32_mem(<4 x i64>* %p0, <4 x float>* %p1) nounwind {
define void @sitofp_4i32_4f32_mem(<4 x i32>* %p0, <4 x float>* %p1) nounwind {
; X32-LABEL: sitofp_4i32_4f32_mem:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: subl $16, %esp
@@ -236,7 +236,7 @@ define void @sitofp_4i32_4f32_mem(<4 x i32>* %p0, <4 x float>* %p1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: sitofp_4i32_4f32_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl 12(%rdi), %eax
; X64-NEXT: movl 8(%rdi), %ecx
; X64-NEXT: movl (%rdi), %edx
@@ -262,7 +262,7 @@ define void @sitofp_4i32_4f32_mem(<4 x i32>* %p0, <4 x float>* %p1) nounwind {
define void @add_2i64_mem(<2 x i64>* %p0, <2 x i64>* %p1, <2 x i64>* %p2) nounwind {
; X32-LABEL: add_2i64_mem:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
@@ -287,7 +287,7 @@ define void @add_2i64_mem(<2 x i64>* %p0, <2 x i64>* %p1, <2 x i64>* %p2) nounwi
; X32-NEXT: retl
;
; X64-LABEL: add_2i64_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movq 8(%rdi), %rcx
; X64-NEXT: addq (%rsi), %rax
@@ -304,7 +304,7 @@ define void @add_2i64_mem(<2 x i64>* %p0, <2 x i64>* %p1, <2 x i64>* %p2) nounwi
define void @add_4i32_mem(<4 x i32>* %p0, <4 x i32>* %p1, <4 x i32>* %p2) nounwind {
; X32-LABEL: add_4i32_mem:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
@@ -329,7 +329,7 @@ define void @add_4i32_mem(<4 x i32>* %p0, <4 x i32>* %p1, <4 x i32>* %p2) nounwi
; X32-NEXT: retl
;
; X64-LABEL: add_4i32_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl 12(%rdi), %eax
; X64-NEXT: movl 8(%rdi), %ecx
; X64-NEXT: movl (%rdi), %r8d
diff --git a/test/CodeGen/X86/not-and-simplify.ll b/test/CodeGen/X86/not-and-simplify.ll
index 8ecc859bead..e753aeb16d5 100644
--- a/test/CodeGen/X86/not-and-simplify.ll
+++ b/test/CodeGen/X86/not-and-simplify.ll
@@ -6,7 +6,7 @@
define i32 @shrink_xor_constant1(i32 %x) {
; ALL-LABEL: shrink_xor_constant1:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: shrl $31, %edi
; ALL-NEXT: xorl $1, %edi
; ALL-NEXT: movl %edi, %eax
@@ -19,7 +19,7 @@ define i32 @shrink_xor_constant1(i32 %x) {
define <4 x i32> @shrink_xor_constant1_splat(<4 x i32> %x) {
; ALL-LABEL: shrink_xor_constant1_splat:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: psrld $31, %xmm0
; ALL-NEXT: pandn {{.*}}(%rip), %xmm0
; ALL-NEXT: retq
@@ -33,7 +33,7 @@ define <4 x i32> @shrink_xor_constant1_splat(<4 x i32> %x) {
define i8 @shrink_xor_constant2(i8 %x) {
; ALL-LABEL: shrink_xor_constant2:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: shlb $5, %dil
; ALL-NEXT: xorb $-32, %dil
; ALL-NEXT: movl %edi, %eax
@@ -46,7 +46,7 @@ define i8 @shrink_xor_constant2(i8 %x) {
define <16 x i8> @shrink_xor_constant2_splat(<16 x i8> %x) {
; ALL-LABEL: shrink_xor_constant2_splat:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movaps {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; ALL-NEXT: retq
%sh = shl <16 x i8> %x, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll
index 5f9979dbcb9..d06d824f3d4 100644
--- a/test/CodeGen/X86/oddshuffles.ll
+++ b/test/CodeGen/X86/oddshuffles.ll
@@ -7,7 +7,7 @@
define void @v3i64(<2 x i64> %a, <2 x i64> %b, <3 x i64>* %p) nounwind {
; SSE2-LABEL: v3i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: movq %xmm2, 16(%rdi)
@@ -15,21 +15,21 @@ define void @v3i64(<2 x i64> %a, <2 x i64> %b, <3 x i64>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v3i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pextrq $1, %xmm0, 16(%rdi)
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE42-NEXT: movdqa %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v3i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpextrq $1, %xmm0, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm1, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: v3i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
@@ -39,7 +39,7 @@ define void @v3i64(<2 x i64> %a, <2 x i64> %b, <3 x i64>* %p) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: v3i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; XOP-NEXT: vpextrq $1, %xmm0, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm1, (%rdi)
@@ -50,21 +50,21 @@ define void @v3i64(<2 x i64> %a, <2 x i64> %b, <3 x i64>* %p) nounwind {
}
define void @v3f64(<2 x double> %a, <2 x double> %b, <3 x double>* %p) nounwind {
; SSE-LABEL: v3f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhpd %xmm0, 16(%rdi)
; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movapd %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: v3f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; AVX1-NEXT: vmovhpd %xmm0, 16(%rdi)
; AVX1-NEXT: vmovapd %xmm1, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: v3f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
@@ -74,7 +74,7 @@ define void @v3f64(<2 x double> %a, <2 x double> %b, <3 x double>* %p) nounwind
; AVX2-NEXT: retq
;
; XOP-LABEL: v3f64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; XOP-NEXT: vmovhpd %xmm0, 16(%rdi)
; XOP-NEXT: vmovapd %xmm1, (%rdi)
@@ -86,7 +86,7 @@ define void @v3f64(<2 x double> %a, <2 x double> %b, <3 x double>* %p) nounwind
define void @v3i32(<2 x i32> %a, <2 x i32> %b, <3 x i32>* %p) nounwind {
; SSE2-LABEL: v3i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -95,7 +95,7 @@ define void @v3i32(<2 x i32> %a, <2 x i32> %b, <3 x i32>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v3i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE42-NEXT: pextrd $2, %xmm0, 8(%rdi)
@@ -103,7 +103,7 @@ define void @v3i32(<2 x i32> %a, <2 x i32> %b, <3 x i32>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: v3i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpextrd $2, %xmm0, 8(%rdi)
@@ -111,7 +111,7 @@ define void @v3i32(<2 x i32> %a, <2 x i32> %b, <3 x i32>* %p) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v3i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-NEXT: vextractps $2, %xmm0, 8(%rdi)
@@ -119,7 +119,7 @@ define void @v3i32(<2 x i32> %a, <2 x i32> %b, <3 x i32>* %p) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: v3i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
@@ -132,7 +132,7 @@ define void @v3i32(<2 x i32> %a, <2 x i32> %b, <3 x i32>* %p) nounwind {
define void @v5i16(<4 x i16> %a, <4 x i16> %b, <5 x i16>* %p) nounwind {
; SSE2-LABEL: v5i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
@@ -144,7 +144,7 @@ define void @v5i16(<4 x i16> %a, <4 x i16> %b, <5 x i16>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
@@ -155,7 +155,7 @@ define void @v5i16(<4 x i16> %a, <4 x i16> %b, <5 x i16>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX-LABEL: v5i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
; AVX-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
@@ -166,7 +166,7 @@ define void @v5i16(<4 x i16> %a, <4 x i16> %b, <5 x i16>* %p) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: v5i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm1 = xmm0[0,1],xmm1[4,5],xmm0[4,5],xmm1[8,9],xmm0[12,13],xmm1[4,5],xmm0[14,15],xmm1[6,7]
; XOP-NEXT: vpextrw $6, %xmm0, 8(%rdi)
; XOP-NEXT: vmovq %xmm1, (%rdi)
@@ -178,7 +178,7 @@ define void @v5i16(<4 x i16> %a, <4 x i16> %b, <5 x i16>* %p) nounwind {
define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
; SSE2-LABEL: v5i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -187,7 +187,7 @@ define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; SSE42-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
@@ -196,7 +196,7 @@ define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: v5i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX1-NEXT: vextractps $3, %xmm0, 16(%rdi)
@@ -204,7 +204,7 @@ define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v5i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u>
@@ -215,7 +215,7 @@ define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: v5i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; XOP-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; XOP-NEXT: vextractps $3, %xmm0, 16(%rdi)
@@ -228,7 +228,7 @@ define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
; SSE2-LABEL: v5f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[1,2]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
@@ -238,7 +238,7 @@ define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5f32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: extractps $3, %xmm0, 16(%rdi)
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,2]
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
@@ -246,7 +246,7 @@ define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: v5f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX1-NEXT: vextractps $3, %xmm0, 16(%rdi)
@@ -254,7 +254,7 @@ define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v5f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u>
@@ -265,7 +265,7 @@ define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: v5f32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; XOP-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; XOP-NEXT: vextractps $3, %xmm0, 16(%rdi)
@@ -278,7 +278,7 @@ define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
; SSE2-LABEL: v7i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,0,65535,0,65535,65535,65535]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
@@ -299,7 +299,7 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; SSE42-NEXT: pextrb $0, %xmm1, 6(%rdi)
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
@@ -310,7 +310,7 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX-LABEL: v7i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5,6,7]
@@ -321,7 +321,7 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: v7i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm1[8],xmm0[12],xmm1[8],xmm0[4],xmm1[12,0,14,u,u,u,u,u,u,u,u]
; XOP-NEXT: vpextrb $0, %xmm1, 6(%rdi)
; XOP-NEXT: vpextrw $2, %xmm0, 4(%rdi)
@@ -334,7 +334,7 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
define void @v7i16(<4 x i16> %a, <4 x i16> %b, <7 x i16>* %p) nounwind {
; SSE2-LABEL: v7i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,0,65535,0,65535,65535,65535]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
@@ -351,7 +351,7 @@ define void @v7i16(<4 x i16> %a, <4 x i16> %b, <7 x i16>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; SSE42-NEXT: pextrw $0, %xmm1, 12(%rdi)
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
@@ -361,7 +361,7 @@ define void @v7i16(<4 x i16> %a, <4 x i16> %b, <7 x i16>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX-LABEL: v7i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5,6,7]
@@ -371,7 +371,7 @@ define void @v7i16(<4 x i16> %a, <4 x i16> %b, <7 x i16>* %p) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: v7i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,1],xmm1[8,9],xmm0[12,13],xmm1[8,9],xmm0[4,5],xmm1[12,13,0,1,14,15]
; XOP-NEXT: vpextrw $0, %xmm1, 12(%rdi)
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
@@ -385,7 +385,7 @@ define void @v7i16(<4 x i16> %a, <4 x i16> %b, <7 x i16>* %p) nounwind {
define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
; SSE2-LABEL: v7i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,2,2]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
@@ -397,7 +397,7 @@ define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
@@ -409,7 +409,7 @@ define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: v7i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[0,2,3,2]
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
@@ -420,7 +420,7 @@ define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v7i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,6,3,6,1,7,4,u>
@@ -433,7 +433,7 @@ define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: v7i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3]
; XOP-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[0,2,3,2]
; XOP-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
@@ -449,7 +449,7 @@ define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
define void @v12i8(<8 x i8> %a, <8 x i8> %b, <12 x i8>* %p) nounwind {
; SSE2-LABEL: v12i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -471,7 +471,7 @@ define void @v12i8(<8 x i8> %a, <8 x i8> %b, <12 x i8>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,xmm1[2],zero,zero,xmm1[4],zero,zero,xmm1[6,u,u,u,u]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8],zero,xmm0[2,10],zero,xmm0[4,12],zero,xmm0[6,14],zero,xmm0[u,u,u,u]
; SSE42-NEXT: por %xmm1, %xmm0
@@ -480,7 +480,7 @@ define void @v12i8(<8 x i8> %a, <8 x i8> %b, <12 x i8>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX-LABEL: v12i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,xmm1[2],zero,zero,xmm1[4],zero,zero,xmm1[6,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8],zero,xmm0[2,10],zero,xmm0[4,12],zero,xmm0[6,14],zero,xmm0[u,u,u,u]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -489,7 +489,7 @@ define void @v12i8(<8 x i8> %a, <8 x i8> %b, <12 x i8>* %p) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: v12i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,xmm1[2],zero,zero,xmm1[4],zero,zero,xmm1[6,u,u,u,u]
; XOP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8],zero,xmm0[2,10],zero,xmm0[4,12],zero,xmm0[6,14],zero,xmm0[u,u,u,u]
; XOP-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -503,7 +503,7 @@ define void @v12i8(<8 x i8> %a, <8 x i8> %b, <12 x i8>* %p) nounwind {
define void @v12i16(<8 x i16> %a, <8 x i16> %b, <12 x i16>* %p) nounwind {
; SSE2-LABEL: v12i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,6,5,4,7]
@@ -525,7 +525,7 @@ define void @v12i16(<8 x i16> %a, <8 x i16> %b, <12 x i16>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
@@ -538,7 +538,7 @@ define void @v12i16(<8 x i16> %a, <8 x i16> %b, <12 x i16>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: v12i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
@@ -551,7 +551,7 @@ define void @v12i16(<8 x i16> %a, <8 x i16> %b, <12 x i16>* %p) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v12i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
@@ -564,7 +564,7 @@ define void @v12i16(<8 x i16> %a, <8 x i16> %b, <12 x i16>* %p) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: v12i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm2 = xmm0[0,1,8,9],xmm1[0,1],xmm0[2,3,10,11],xmm1[2,3],xmm0[4,5,12,13]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[4,5],xmm0[6,7,14,15],xmm1[6,7],xmm0[8,9,10,11,12,13,14,15]
; XOP-NEXT: vmovq %xmm0, 16(%rdi)
@@ -577,7 +577,7 @@ define void @v12i16(<8 x i16> %a, <8 x i16> %b, <12 x i16>* %p) nounwind {
define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; SSE2-LABEL: v12i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,2]
@@ -600,7 +600,7 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5,6,7]
@@ -620,7 +620,7 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: v12i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX1-NEXT: vmovsldup {{.*#+}} ymm2 = ymm2[0,0,2,2,4,4,6,6]
; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = ymm0[0,u,u,1,5,u,u,6]
@@ -638,7 +638,7 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: v12i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[2,3,2,3]
; AVX2-NEXT: vpermilps {{.*#+}} ymm3 = ymm0[3,3,2,3,7,7,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,2,3]
@@ -653,7 +653,7 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: v12i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; XOP-NEXT: vpermil2ps {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[u,1,5,u],ymm2[6],ymm0[6]
; XOP-NEXT: vmovddup {{.*#+}} xmm3 = xmm1[0,0]
@@ -674,7 +674,7 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <12 x i8> *%p) nounwind {
; SSE2-LABEL: pr29025:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255]
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm0
@@ -704,7 +704,7 @@ define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <12 x i8> *%p) nounw
; SSE2-NEXT: retq
;
; SSE42-LABEL: pr29025:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; SSE42-NEXT: pshufb %xmm3, %xmm1
; SSE42-NEXT: pshufb %xmm3, %xmm0
@@ -717,7 +717,7 @@ define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <12 x i8> *%p) nounw
; SSE42-NEXT: retq
;
; AVX-LABEL: pr29025:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0
@@ -730,7 +730,7 @@ define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <12 x i8> *%p) nounw
; AVX-NEXT: retq
;
; XOP-LABEL: pr29025:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,4,8,12],xmm1[0,4,8,12],xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,4],xmm2[0],xmm0[1,5],xmm2[4],xmm0[2,6],xmm2[8],xmm0[3,7],xmm2[12],xmm0[u,u,u,u]
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
@@ -745,7 +745,7 @@ define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <12 x i8> *%p) nounw
define void @interleave_24i8_out(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 x i8>* %q3) nounwind {
; SSE2-LABEL: interleave_24i8_out:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,0,255,255,0,255,255,255,255,255,255,255,255,255,255]
@@ -808,7 +808,7 @@ define void @interleave_24i8_out(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i8_out:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rdi), %xmm0
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT: movdqa %xmm1, %xmm2
@@ -830,7 +830,7 @@ define void @interleave_24i8_out(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE42-NEXT: retq
;
; AVX-LABEL: interleave_24i8_out:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqu (%rdi), %xmm0
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
@@ -848,7 +848,7 @@ define void @interleave_24i8_out(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; AVX-NEXT: retq
;
; XOP-LABEL: interleave_24i8_out:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rdi), %xmm0
; XOP-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
@@ -876,7 +876,7 @@ define void @interleave_24i8_out(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 x i8>* %q3) nounwind {
; SSE2-LABEL: interleave_24i8_in:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -920,7 +920,7 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i8_in:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -938,7 +938,7 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE42-NEXT: retq
;
; AVX-LABEL: interleave_24i8_in:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -954,7 +954,7 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; AVX-NEXT: retq
;
; XOP-LABEL: interleave_24i8_in:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; XOP-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -981,7 +981,7 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
; SSE2-LABEL: interleave_24i16_out:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm3
; SSE2-NEXT: movdqu 16(%rdi), %xmm2
; SSE2-NEXT: movdqu 32(%rdi), %xmm8
@@ -1037,7 +1037,7 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_out:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rdi), %xmm0
; SSE42-NEXT: movdqu 16(%rdi), %xmm1
; SSE42-NEXT: movdqu 32(%rdi), %xmm2
@@ -1063,7 +1063,7 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_out:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm0
; AVX1-NEXT: vmovdqu (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -1087,7 +1087,7 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i16_out:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vmovdqu 32(%rdi), %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
@@ -1109,7 +1109,7 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i16_out:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovdqu 32(%rdi), %xmm0
; XOP-NEXT: vmovdqu (%rdi), %ymm1
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -1136,7 +1136,7 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
; SSE2-LABEL: interleave_24i16_in:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rsi), %xmm3
; SSE2-NEXT: movdqu (%rdx), %xmm2
; SSE2-NEXT: movdqu (%rcx), %xmm1
@@ -1176,7 +1176,7 @@ define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2,
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_in:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rsi), %xmm0
; SSE42-NEXT: movdqu (%rdx), %xmm1
; SSE42-NEXT: movdqu (%rcx), %xmm2
@@ -1200,7 +1200,7 @@ define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2,
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_in:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rsi), %xmm0
; AVX1-NEXT: vmovdqu (%rdx), %xmm1
; AVX1-NEXT: vmovdqu (%rcx), %xmm2
@@ -1225,7 +1225,7 @@ define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2,
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i16_in:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rsi), %xmm0
; AVX2-NEXT: vmovdqu (%rdx), %xmm1
; AVX2-NEXT: vmovdqu (%rcx), %xmm2
@@ -1248,7 +1248,7 @@ define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2,
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i16_in:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rsi), %xmm0
; XOP-NEXT: vmovdqu (%rdx), %xmm1
; XOP-NEXT: vmovdqu (%rcx), %xmm2
@@ -1277,7 +1277,7 @@ define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2,
define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, <8 x i32>* %q3) nounwind {
; SSE2-LABEL: interleave_24i32_out:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movups 80(%rdi), %xmm5
; SSE2-NEXT: movups 64(%rdi), %xmm8
; SSE2-NEXT: movups (%rdi), %xmm0
@@ -1321,7 +1321,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_out:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqu 80(%rdi), %xmm9
; SSE42-NEXT: movdqu 64(%rdi), %xmm10
; SSE42-NEXT: movdqu (%rdi), %xmm4
@@ -1361,7 +1361,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_out:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovups (%rdi), %ymm0
; AVX1-NEXT: vmovups 32(%rdi), %ymm1
; AVX1-NEXT: vmovups 64(%rdi), %ymm2
@@ -1401,7 +1401,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i32_out:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups (%rdi), %ymm0
; AVX2-NEXT: vmovups 32(%rdi), %ymm1
; AVX2-NEXT: vmovups 64(%rdi), %ymm2
@@ -1430,7 +1430,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i32_out:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovups (%rdi), %ymm0
; XOP-NEXT: vmovups 32(%rdi), %ymm1
; XOP-NEXT: vmovups 64(%rdi), %ymm2
@@ -1480,7 +1480,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, <8 x i32>* %q3) nounwind {
; SSE2-LABEL: interleave_24i32_in:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rsi), %xmm5
; SSE2-NEXT: movdqu 16(%rsi), %xmm2
; SSE2-NEXT: movdqu (%rdx), %xmm6
@@ -1528,7 +1528,7 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_in:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rsi), %xmm5
; SSE42-NEXT: movdqu 16(%rsi), %xmm2
; SSE42-NEXT: movdqu (%rdx), %xmm6
@@ -1570,7 +1570,7 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_in:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovups (%rsi), %ymm0
; AVX1-NEXT: vmovups (%rdx), %ymm1
; AVX1-NEXT: vmovupd (%rcx), %ymm2
@@ -1604,7 +1604,7 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i32_in:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups (%rsi), %ymm0
; AVX2-NEXT: vmovups (%rdx), %ymm1
; AVX2-NEXT: vmovups (%rcx), %ymm2
@@ -1632,7 +1632,7 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i32_in:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovups (%rsi), %ymm0
; XOP-NEXT: vmovups (%rdx), %ymm1
; XOP-NEXT: vmovupd (%rcx), %ymm2
@@ -1674,7 +1674,7 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; SSE2-LABEL: wrongorder:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: movaps %xmm0, 48(%rdi)
; SSE2-NEXT: movaps %xmm0, 32(%rdi)
@@ -1683,7 +1683,7 @@ define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; SSE2-NEXT: retq
;
; SSE42-LABEL: wrongorder:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE42-NEXT: movapd %xmm0, 48(%rdi)
; SSE42-NEXT: movapd %xmm0, 32(%rdi)
@@ -1692,7 +1692,7 @@ define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; SSE42-NEXT: retq
;
; AVX1-LABEL: wrongorder:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT: vmovaps %ymm1, 32(%rdi)
@@ -1702,7 +1702,7 @@ define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; AVX1-NEXT: retq
;
; AVX2-LABEL: wrongorder:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm1
; AVX2-NEXT: vmovapd %ymm1, 32(%rdi)
; AVX2-NEXT: vmovapd %ymm1, (%rdi)
@@ -1711,7 +1711,7 @@ define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; AVX2-NEXT: retq
;
; XOP-LABEL: wrongorder:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; XOP-NEXT: vmovaps %ymm1, 32(%rdi)
diff --git a/test/CodeGen/X86/optimize-max-1.ll b/test/CodeGen/X86/optimize-max-1.ll
index 08cb86ab398..aa560c4ecad 100644
--- a/test/CodeGen/X86/optimize-max-1.ll
+++ b/test/CodeGen/X86/optimize-max-1.ll
@@ -8,7 +8,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define void @fs(double* nocapture %p, i64 %n) nounwind {
; CHECK-LABEL: fs:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_1: # %bb
@@ -17,7 +17,7 @@ define void @fs(double* nocapture %p, i64 %n) nounwind {
; CHECK-NEXT: incq %rax
; CHECK-NEXT: cmpq %rsi, %rax
; CHECK-NEXT: jl .LBB0_1
-; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: # %bb.2: # %return
; CHECK-NEXT: retq
entry:
%tmp = icmp slt i64 %n, 1 ; <i1> [#uses=1]
@@ -38,7 +38,7 @@ return: ; preds = %bb
define void @bs(double* nocapture %p, i64 %n) nounwind {
; CHECK-LABEL: bs:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB1_1: # %bb
@@ -47,7 +47,7 @@ define void @bs(double* nocapture %p, i64 %n) nounwind {
; CHECK-NEXT: incq %rax
; CHECK-NEXT: cmpq %rsi, %rax
; CHECK-NEXT: jl .LBB1_1
-; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: # %bb.2: # %return
; CHECK-NEXT: retq
entry:
%tmp = icmp sge i64 %n, 1 ; <i1> [#uses=1]
@@ -68,7 +68,7 @@ return: ; preds = %bb
define void @fu(double* nocapture %p, i64 %n) nounwind {
; CHECK-LABEL: fu:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB2_1: # %bb
@@ -77,7 +77,7 @@ define void @fu(double* nocapture %p, i64 %n) nounwind {
; CHECK-NEXT: incq %rax
; CHECK-NEXT: cmpq %rsi, %rax
; CHECK-NEXT: jb .LBB2_1
-; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: # %bb.2: # %return
; CHECK-NEXT: retq
entry:
%tmp = icmp eq i64 %n, 0 ; <i1> [#uses=1]
@@ -98,7 +98,7 @@ return: ; preds = %bb
define void @bu(double* nocapture %p, i64 %n) nounwind {
; CHECK-LABEL: bu:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB3_1: # %bb
@@ -107,7 +107,7 @@ define void @bu(double* nocapture %p, i64 %n) nounwind {
; CHECK-NEXT: incq %rax
; CHECK-NEXT: cmpq %rsi, %rax
; CHECK-NEXT: jb .LBB3_1
-; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: # %bb.2: # %return
; CHECK-NEXT: retq
entry:
%tmp = icmp ne i64 %n, 0 ; <i1> [#uses=1]
diff --git a/test/CodeGen/X86/optimize-max-2.ll b/test/CodeGen/X86/optimize-max-2.ll
index 37d2a20975a..04e17f066ba 100644
--- a/test/CodeGen/X86/optimize-max-2.ll
+++ b/test/CodeGen/X86/optimize-max-2.ll
@@ -8,7 +8,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define void @foo(double* nocapture %p, i64 %x, i64 %y) nounwind {
; CHECK-LABEL: foo:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testq %rdx, %rdx
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovneq %rdx, %rax
@@ -23,7 +23,7 @@ define void @foo(double* nocapture %p, i64 %x, i64 %y) nounwind {
; CHECK-NEXT: addq $8, %rdi
; CHECK-NEXT: decq %rax
; CHECK-NEXT: jne .LBB0_1
-; CHECK-NEXT: # BB#2: # %return
+; CHECK-NEXT: # %bb.2: # %return
; CHECK-NEXT: retq
entry:
%tmp = icmp eq i64 %y, 0 ; <i1> [#uses=1]
diff --git a/test/CodeGen/X86/or-branch.ll b/test/CodeGen/X86/or-branch.ll
index 71d7746642e..276258a3d40 100644
--- a/test/CodeGen/X86/or-branch.ll
+++ b/test/CodeGen/X86/or-branch.ll
@@ -4,20 +4,20 @@
define void @foo(i32 %X, i32 %Y, i32 %Z) nounwind {
; JUMP2-LABEL: foo:
-; JUMP2: # BB#0: # %entry
+; JUMP2: # %bb.0: # %entry
; JUMP2-NEXT: cmpl $5, {{[0-9]+}}(%esp)
; JUMP2-NEXT: jl .LBB0_3
-; JUMP2-NEXT: # BB#1: # %entry
+; JUMP2-NEXT: # %bb.1: # %entry
; JUMP2-NEXT: movl {{[0-9]+}}(%esp), %eax
; JUMP2-NEXT: testl %eax, %eax
; JUMP2-NEXT: je .LBB0_3
-; JUMP2-NEXT: # BB#2: # %UnifiedReturnBlock
+; JUMP2-NEXT: # %bb.2: # %UnifiedReturnBlock
; JUMP2-NEXT: retl
; JUMP2-NEXT: .LBB0_3: # %cond_true
; JUMP2-NEXT: jmp bar # TAILCALL
;
; JUMP1-LABEL: foo:
-; JUMP1: # BB#0: # %entry
+; JUMP1: # %bb.0: # %entry
; JUMP1-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; JUMP1-NEXT: sete %al
; JUMP1-NEXT: cmpl $5, {{[0-9]+}}(%esp)
@@ -25,7 +25,7 @@ define void @foo(i32 %X, i32 %Y, i32 %Z) nounwind {
; JUMP1-NEXT: orb %al, %cl
; JUMP1-NEXT: cmpb $1, %cl
; JUMP1-NEXT: jne .LBB0_1
-; JUMP1-NEXT: # BB#2: # %cond_true
+; JUMP1-NEXT: # %bb.2: # %cond_true
; JUMP1-NEXT: jmp bar # TAILCALL
; JUMP1-NEXT: .LBB0_1: # %UnifiedReturnBlock
; JUMP1-NEXT: retl
@@ -48,7 +48,7 @@ UnifiedReturnBlock:
define void @unpredictable(i32 %X, i32 %Y, i32 %Z) nounwind {
; JUMP2-LABEL: unpredictable:
-; JUMP2: # BB#0: # %entry
+; JUMP2: # %bb.0: # %entry
; JUMP2-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; JUMP2-NEXT: sete %al
; JUMP2-NEXT: cmpl $5, {{[0-9]+}}(%esp)
@@ -56,13 +56,13 @@ define void @unpredictable(i32 %X, i32 %Y, i32 %Z) nounwind {
; JUMP2-NEXT: orb %al, %cl
; JUMP2-NEXT: cmpb $1, %cl
; JUMP2-NEXT: jne .LBB1_1
-; JUMP2-NEXT: # BB#2: # %cond_true
+; JUMP2-NEXT: # %bb.2: # %cond_true
; JUMP2-NEXT: jmp bar # TAILCALL
; JUMP2-NEXT: .LBB1_1: # %UnifiedReturnBlock
; JUMP2-NEXT: retl
;
; JUMP1-LABEL: unpredictable:
-; JUMP1: # BB#0: # %entry
+; JUMP1: # %bb.0: # %entry
; JUMP1-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; JUMP1-NEXT: sete %al
; JUMP1-NEXT: cmpl $5, {{[0-9]+}}(%esp)
@@ -70,7 +70,7 @@ define void @unpredictable(i32 %X, i32 %Y, i32 %Z) nounwind {
; JUMP1-NEXT: orb %al, %cl
; JUMP1-NEXT: cmpb $1, %cl
; JUMP1-NEXT: jne .LBB1_1
-; JUMP1-NEXT: # BB#2: # %cond_true
+; JUMP1-NEXT: # %bb.2: # %cond_true
; JUMP1-NEXT: jmp bar # TAILCALL
; JUMP1-NEXT: .LBB1_1: # %UnifiedReturnBlock
; JUMP1-NEXT: retl
diff --git a/test/CodeGen/X86/or-lea.ll b/test/CodeGen/X86/or-lea.ll
index 709f29bf9e0..ee1f6585eb7 100644
--- a/test/CodeGen/X86/or-lea.ll
+++ b/test/CodeGen/X86/or-lea.ll
@@ -8,7 +8,7 @@
define i32 @or_shift1_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %esi
@@ -23,7 +23,7 @@ define i32 @or_shift1_and1(i32 %x, i32 %y) {
define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1_swapped:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %esi
@@ -38,7 +38,7 @@ define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
define i32 @or_shift2_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift2_and1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %esi
@@ -53,7 +53,7 @@ define i32 @or_shift2_and1(i32 %x, i32 %y) {
define i32 @or_shift3_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %esi
@@ -68,7 +68,7 @@ define i32 @or_shift3_and1(i32 %x, i32 %y) {
define i32 @or_shift3_and7(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $7, %esi
@@ -85,7 +85,7 @@ define i32 @or_shift3_and7(i32 %x, i32 %y) {
define i32 @or_shift4_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift4_and1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: shll $4, %edi
@@ -103,7 +103,7 @@ define i32 @or_shift4_and1(i32 %x, i32 %y) {
define i32 @or_shift3_and8(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal (,%rdi,8), %eax
; CHECK-NEXT: andl $8, %esi
@@ -120,7 +120,7 @@ define i32 @or_shift3_and8(i32 %x, i32 %y) {
define i64 @or_shift1_and1_64(i64 %x, i64 %y) {
; CHECK-LABEL: or_shift1_and1_64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leaq (%rsi,%rdi,2), %rax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll b/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll
index ca69b737465..1f26933e24b 100644
--- a/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll
+++ b/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll
@@ -3,7 +3,7 @@
define i1 @saddo_not_i32(i32 %v1, i32 %v2) {
; CHECK-LABEL: saddo_not_i32:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: addl %esi, %edi
; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
@@ -16,7 +16,7 @@ entry:
define i1 @saddo_not_i64(i64 %v1, i64 %v2) {
; CHECK-LABEL: saddo_not_i64:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: addq %rsi, %rdi
; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
@@ -29,7 +29,7 @@ entry:
define i1 @uaddo_not_i32(i32 %v1, i32 %v2) {
; CHECK-LABEL: uaddo_not_i32:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: addl %esi, %edi
; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
@@ -42,7 +42,7 @@ entry:
define i1 @uaddo_not_i64(i64 %v1, i64 %v2) {
; CHECK-LABEL: uaddo_not_i64:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: addq %rsi, %rdi
; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
@@ -55,7 +55,7 @@ entry:
define i1 @ssubo_not_i32(i32 %v1, i32 %v2) {
; CHECK-LABEL: ssubo_not_i32:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
@@ -68,7 +68,7 @@ entry:
define i1 @ssub_not_i64(i64 %v1, i64 %v2) {
; CHECK-LABEL: ssub_not_i64:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
@@ -81,7 +81,7 @@ entry:
define i1 @usubo_not_i32(i32 %v1, i32 %v2) {
; CHECK-LABEL: usubo_not_i32:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
@@ -94,7 +94,7 @@ entry:
define i1 @usubo_not_i64(i64 %v1, i64 %v2) {
; CHECK-LABEL: usubo_not_i64:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
@@ -107,7 +107,7 @@ entry:
define i1 @smulo_not_i32(i32 %v1, i32 %v2) {
; CHECK-LABEL: smulo_not_i32:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: imull %esi, %edi
; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
@@ -120,7 +120,7 @@ entry:
define i1 @smulo_not_i64(i64 %v1, i64 %v2) {
; CHECK-LABEL: smulo_not_i64:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: imulq %rsi, %rdi
; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
@@ -133,7 +133,7 @@ entry:
define i1 @umulo_not_i32(i32 %v1, i32 %v2) {
; CHECK-LABEL: umulo_not_i32:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: mull %esi
; CHECK-NEXT: setno %al
@@ -147,7 +147,7 @@ entry:
define i1 @umulo_not_i64(i64 %v1, i64 %v2) {
; CHECK-LABEL: umulo_not_i64:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: mulq %rsi
; CHECK-NEXT: setno %al
diff --git a/test/CodeGen/X86/overflow.ll b/test/CodeGen/X86/overflow.ll
index 1c68af2bd6e..a9fd19d4f5f 100644
--- a/test/CodeGen/X86/overflow.ll
+++ b/test/CodeGen/X86/overflow.ll
@@ -4,7 +4,7 @@
define i128 @mulhioverflow(i64 %a, i64 %b, i64 %c) nounwind {
; X32-LABEL: mulhioverflow:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
@@ -64,7 +64,7 @@ define i128 @mulhioverflow(i64 %a, i64 %b, i64 %c) nounwind {
; X32-NEXT: retl $4
;
; X64-LABEL: mulhioverflow:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rsi
diff --git a/test/CodeGen/X86/packss.ll b/test/CodeGen/X86/packss.ll
index 5a08b1f1ff2..0b4335f2b6f 100644
--- a/test/CodeGen/X86/packss.ll
+++ b/test/CodeGen/X86/packss.ll
@@ -8,7 +8,7 @@
define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
; SSE-LABEL: trunc_ashr_v4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: psrad $31, %xmm0
@@ -17,7 +17,7 @@ define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
; SSE-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
@@ -27,7 +27,7 @@ define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -41,14 +41,14 @@ define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
; SSE-LABEL: trunc_ashr_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
@@ -57,7 +57,7 @@ define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -70,28 +70,28 @@ define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
-; X86-SSE: # BB#0:
+; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrad $31, %xmm0
; X86-SSE-NEXT: pcmpgtd {{\.LCPI.*}}, %xmm1
; X86-SSE-NEXT: packssdw %xmm1, %xmm0
; X86-SSE-NEXT: ret{{[l|q]}}
;
; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
-; X86-AVX: # BB#0:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; X86-AVX-NEXT: vpcmpgtd {{\.LCPI.*}}, %xmm1, %xmm1
; X86-AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: ret{{[l|q]}}
;
; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: psrad $31, %xmm0
; X64-SSE-NEXT: pcmpgtd {{.*}}(%rip), %xmm1
; X64-SSE-NEXT: packssdw %xmm1, %xmm0
; X64-SSE-NEXT: ret{{[l|q]}}
;
; X64-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; X64-AVX-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/palignr.ll b/test/CodeGen/X86/palignr.ll
index 50875f7a275..64bbf214157 100644
--- a/test/CodeGen/X86/palignr.ll
+++ b/test/CodeGen/X86/palignr.ll
@@ -5,12 +5,12 @@
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK-SSE-LABEL: test1:
-; CHECK-SSE: # BB#0:
+; CHECK-SSE: # %bb.0:
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
; CHECK-SSE-NEXT: retl
;
; CHECK-AVX-LABEL: test1:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,2,3,0]
; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> undef, <4 x i32> < i32 1, i32 2, i32 3, i32 0 >
@@ -19,19 +19,19 @@ define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK-SSE2-LABEL: test2:
-; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
; CHECK-SSE2-NEXT: retl
;
; CHECK-SSSE3-LABEL: test2:
-; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3: # %bb.0:
; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSSE3-NEXT: retl
;
; CHECK-AVX-LABEL: test2:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 3, i32 4 >
@@ -40,18 +40,18 @@ define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK-SSE2-LABEL: test3:
-; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
; CHECK-SSE2-NEXT: retl
;
; CHECK-SSSE3-LABEL: test3:
-; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3: # %bb.0:
; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSSE3-NEXT: retl
;
; CHECK-AVX-LABEL: test3:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 undef, i32 4 >
@@ -60,18 +60,18 @@ define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind {
define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK-SSE2-LABEL: test4:
-; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
; CHECK-SSE2-NEXT: retl
;
; CHECK-SSSE3-LABEL: test4:
-; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3: # %bb.0:
; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; CHECK-SSSE3-NEXT: retl
;
; CHECK-AVX-LABEL: test4:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
@@ -80,13 +80,13 @@ define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
define <4 x float> @test5(<4 x float> %A, <4 x float> %B) nounwind {
; CHECK-SSE-LABEL: test5:
-; CHECK-SSE: # BB#0:
+; CHECK-SSE: # %bb.0:
; CHECK-SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
; CHECK-SSE-NEXT: movapd %xmm1, %xmm0
; CHECK-SSE-NEXT: retl
;
; CHECK-AVX-LABEL: test5:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm1[1],xmm0[0]
; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
@@ -95,20 +95,20 @@ define <4 x float> @test5(<4 x float> %A, <4 x float> %B) nounwind {
define <8 x i16> @test6(<8 x i16> %A, <8 x i16> %B) nounwind {
; CHECK-SSE2-LABEL: test6:
-; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
; CHECK-SSE2-NEXT: por %xmm1, %xmm0
; CHECK-SSE2-NEXT: retl
;
; CHECK-SSSE3-LABEL: test6:
-; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3: # %bb.0:
; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSSE3-NEXT: retl
;
; CHECK-AVX-LABEL: test6:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 3, i32 4, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10 >
@@ -117,20 +117,20 @@ define <8 x i16> @test6(<8 x i16> %A, <8 x i16> %B) nounwind {
define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) nounwind {
; CHECK-SSE2-LABEL: test7:
-; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9]
; CHECK-SSE2-NEXT: por %xmm1, %xmm0
; CHECK-SSE2-NEXT: retl
;
; CHECK-SSSE3-LABEL: test7:
-; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3: # %bb.0:
; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSSE3-NEXT: retl
;
; CHECK-AVX-LABEL: test7:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 6, i32 undef, i32 8, i32 9, i32 10, i32 11, i32 12 >
@@ -139,20 +139,20 @@ define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) nounwind {
define <16 x i8> @test8(<16 x i8> %A, <16 x i8> %B) nounwind {
; CHECK-SSE2-LABEL: test8:
-; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4]
; CHECK-SSE2-NEXT: por %xmm1, %xmm0
; CHECK-SSE2-NEXT: retl
;
; CHECK-SSSE3-LABEL: test8:
-; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3: # %bb.0:
; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSSE3-NEXT: retl
;
; CHECK-AVX-LABEL: test8:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; CHECK-AVX-NEXT: retl
%C = shufflevector <16 x i8> %A, <16 x i8> %B, <16 x i32> < i32 5, i32 6, i32 7, i32 undef, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20 >
@@ -165,7 +165,7 @@ define <16 x i8> @test8(<16 x i8> %A, <16 x i8> %B) nounwind {
; was an UNDEF.)
define <8 x i16> @test9(<8 x i16> %A, <8 x i16> %B) nounwind {
; CHECK-SSE2-LABEL: test9:
-; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
@@ -174,13 +174,13 @@ define <8 x i16> @test9(<8 x i16> %A, <8 x i16> %B) nounwind {
; CHECK-SSE2-NEXT: retl
;
; CHECK-SSSE3-LABEL: test9:
-; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3: # %bb.0:
; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSSE3-NEXT: retl
;
; CHECK-AVX-LABEL: test9:
-; CHECK-AVX: # BB#0:
+; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %B, <8 x i16> %A, <8 x i32> < i32 undef, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0 >
diff --git a/test/CodeGen/X86/pause.ll b/test/CodeGen/X86/pause.ll
index 70ac79f78f6..2bace05e012 100644
--- a/test/CodeGen/X86/pause.ll
+++ b/test/CodeGen/X86/pause.ll
@@ -6,7 +6,7 @@
define void @test_x86_sse2_pause() {
; CHECK-LABEL: test_x86_sse2_pause:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pause ## encoding: [0xf3,0x90]
; CHECK-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.sse2.pause()
diff --git a/test/CodeGen/X86/peep-setb.ll b/test/CodeGen/X86/peep-setb.ll
index 01e445a8622..3794b378b2c 100644
--- a/test/CodeGen/X86/peep-setb.ll
+++ b/test/CodeGen/X86/peep-setb.ll
@@ -6,7 +6,7 @@
define i8 @test1(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpb %sil, %dil
; CHECK-NEXT: adcb $0, %sil
; CHECK-NEXT: movl %esi, %eax
@@ -19,7 +19,7 @@ define i8 @test1(i8 %a, i8 %b) nounwind {
define i32 @test2(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: adcl $0, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -32,7 +32,7 @@ define i32 @test2(i32 %a, i32 %b) nounwind {
define i64 @test3(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: adcq $0, %rsi
; CHECK-NEXT: movq %rsi, %rax
@@ -45,7 +45,7 @@ define i64 @test3(i64 %a, i64 %b) nounwind {
define i8 @test4(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpb %sil, %dil
; CHECK-NEXT: sbbb $0, %sil
; CHECK-NEXT: movl %esi, %eax
@@ -58,7 +58,7 @@ define i8 @test4(i8 %a, i8 %b) nounwind {
define i32 @test5(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: sbbl $0, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -71,7 +71,7 @@ define i32 @test5(i32 %a, i32 %b) nounwind {
define i64 @test6(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: sbbq $0, %rsi
; CHECK-NEXT: movq %rsi, %rax
@@ -84,7 +84,7 @@ define i64 @test6(i64 %a, i64 %b) nounwind {
define i8 @test7(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpb %sil, %dil
; CHECK-NEXT: adcb $0, %sil
; CHECK-NEXT: movl %esi, %eax
@@ -97,7 +97,7 @@ define i8 @test7(i8 %a, i8 %b) nounwind {
define i32 @test8(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: adcl $0, %esi
; CHECK-NEXT: movl %esi, %eax
@@ -110,7 +110,7 @@ define i32 @test8(i32 %a, i32 %b) nounwind {
define i64 @test9(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: adcq $0, %rsi
; CHECK-NEXT: movq %rsi, %rax
diff --git a/test/CodeGen/X86/peep-test-4.ll b/test/CodeGen/X86/peep-test-4.ll
index 832262aba7e..788f8fdbc7b 100644
--- a/test/CodeGen/X86/peep-test-4.ll
+++ b/test/CodeGen/X86/peep-test-4.ll
@@ -6,10 +6,10 @@ declare void @foo64(i64)
define void @neg(i32 %x) nounwind {
; CHECK-LABEL: neg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: negl %edi
; CHECK-NEXT: je .LBB0_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB0_1: # %return
; CHECK-NEXT: retq
@@ -27,10 +27,10 @@ return:
define void @sar(i32 %x) nounwind {
; CHECK-LABEL: sar:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: sarl %edi
; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB1_1: # %return
; CHECK-NEXT: retq
@@ -48,10 +48,10 @@ return:
define void @shr(i32 %x) nounwind {
; CHECK-LABEL: shr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shrl %edi
; CHECK-NEXT: je .LBB2_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB2_1: # %return
; CHECK-NEXT: retq
@@ -69,10 +69,10 @@ return:
define void @shri(i32 %x) nounwind {
; CHECK-LABEL: shri:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shrl $3, %edi
; CHECK-NEXT: je .LBB3_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB3_1: # %return
; CHECK-NEXT: retq
@@ -90,10 +90,10 @@ return:
define void @shl(i32 %x) nounwind {
; CHECK-LABEL: shl:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addl %edi, %edi
; CHECK-NEXT: je .LBB4_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB4_1: # %return
; CHECK-NEXT: retq
@@ -111,10 +111,10 @@ return:
define void @shli(i32 %x) nounwind {
; CHECK-LABEL: shli:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shll $4, %edi
; CHECK-NEXT: je .LBB5_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB5_1: # %return
; CHECK-NEXT: retq
@@ -132,7 +132,7 @@ return:
define zeroext i1 @adc(i128 %x) nounwind {
; CHECK-LABEL: adc:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
; CHECK-NEXT: addq %rdi, %rax
; CHECK-NEXT: adcq $0, %rsi
@@ -145,7 +145,7 @@ define zeroext i1 @adc(i128 %x) nounwind {
define zeroext i1 @sbb(i128 %x, i128 %y) nounwind {
; CHECK-LABEL: sbb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpq %rdx, %rdi
; CHECK-NEXT: sbbq %rcx, %rsi
; CHECK-NEXT: setns %al
@@ -157,10 +157,10 @@ define zeroext i1 @sbb(i128 %x, i128 %y) nounwind {
define void @andn(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: andn:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andnl %esi, %edi, %edi
; CHECK-NEXT: je .LBB8_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB8_1: # %return
; CHECK-NEXT: retq
@@ -180,10 +180,10 @@ return:
declare i32 @llvm.x86.bmi.bextr.32(i32, i32) nounwind readnone
define void @bextr(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: bextr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextrl %esi, %edi, %edi
; CHECK-NEXT: je .LBB9_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB9_1: # %return
; CHECK-NEXT: retq
@@ -202,10 +202,10 @@ return:
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
define void @popcnt(i32 %x) nounwind {
; CHECK-LABEL: popcnt:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: popcntl %edi, %edi
; CHECK-NEXT: je .LBB10_1
-; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: # %bb.2: # %bb
; CHECK-NEXT: jmp foo # TAILCALL
; CHECK-NEXT: .LBB10_1: # %return
; CHECK-NEXT: retq
@@ -222,7 +222,7 @@ return:
declare i64 @llvm.cttz.i64(i64, i1)
define i64 @testCTZ(i64 %v) nounwind {
; CHECK-LABEL: testCTZ:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzcntq %rdi, %rcx
; CHECK-NEXT: movl $255, %eax
; CHECK-NEXT: cmovaeq %rcx, %rax
@@ -236,11 +236,11 @@ define i64 @testCTZ(i64 %v) nounwind {
declare i32 @llvm.cttz.i32(i32, i1)
define void @testCTZ2(i32 %v) nounwind {
; CHECK-LABEL: testCTZ2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: tzcntl %edi, %ebx
; CHECK-NEXT: jb .LBB12_2
-; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: # %bb.1: # %bb
; CHECK-NEXT: movl %ebx, %edi
; CHECK-NEXT: callq foo
; CHECK-NEXT: .LBB12_2: # %return
@@ -262,11 +262,11 @@ return:
define void @testCTZ3(i32 %v) nounwind {
; CHECK-LABEL: testCTZ3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: tzcntl %edi, %ebx
; CHECK-NEXT: jae .LBB13_2
-; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: # %bb.1: # %bb
; CHECK-NEXT: movl %ebx, %edi
; CHECK-NEXT: callq foo
; CHECK-NEXT: .LBB13_2: # %return
@@ -289,7 +289,7 @@ return:
declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @testCLZ(i64 %v) nounwind {
; CHECK-LABEL: testCLZ:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lzcntq %rdi, %rcx
; CHECK-NEXT: movl $255, %eax
; CHECK-NEXT: cmovaeq %rcx, %rax
@@ -303,7 +303,7 @@ define i64 @testCLZ(i64 %v) nounwind {
declare i64 @llvm.ctpop.i64(i64)
define i64 @testPOPCNT(i64 %v) nounwind {
; CHECK-LABEL: testPOPCNT:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: popcntq %rdi, %rcx
; CHECK-NEXT: movl $255, %eax
; CHECK-NEXT: cmovneq %rcx, %rax
diff --git a/test/CodeGen/X86/peephole-cvt-sse.ll b/test/CodeGen/X86/peephole-cvt-sse.ll
index ecf78a46c63..7e9290f2d61 100644
--- a/test/CodeGen/X86/peephole-cvt-sse.ll
+++ b/test/CodeGen/X86/peephole-cvt-sse.ll
@@ -6,12 +6,12 @@
define <2 x double> @peephole_cvtps2pd(<4 x float>* %a0) {
; X86-64-LABEL: peephole_cvtps2pd:
-; X86-64: # BB#0:
+; X86-64: # %bb.0:
; X86-64-NEXT: cvtps2pd (%rdi), %xmm0
; X86-64-NEXT: retq
;
; I386-LABEL: peephole_cvtps2pd:
-; I386: # BB#0:
+; I386: # %bb.0:
; I386-NEXT: movl {{[0-9]+}}(%esp), %eax
; I386-NEXT: cvtps2pd (%eax), %xmm0
; I386-NEXT: retl
@@ -23,12 +23,12 @@ define <2 x double> @peephole_cvtps2pd(<4 x float>* %a0) {
define <2 x double> @peephole_cvtdq2pd(<4 x i32>* %a0) {
; X86-64-LABEL: peephole_cvtdq2pd:
-; X86-64: # BB#0:
+; X86-64: # %bb.0:
; X86-64-NEXT: cvtdq2pd (%rdi), %xmm0
; X86-64-NEXT: retq
;
; I386-LABEL: peephole_cvtdq2pd:
-; I386: # BB#0:
+; I386: # %bb.0:
; I386-NEXT: movl {{[0-9]+}}(%esp), %eax
; I386-NEXT: cvtdq2pd (%eax), %xmm0
; I386-NEXT: retl
diff --git a/test/CodeGen/X86/peephole-na-phys-copy-folding.ll b/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
index e2f28e6ba4c..66047e3677f 100644
--- a/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
+++ b/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
@@ -16,14 +16,14 @@ declare i32 @bar(i64)
define i1 @plus_one() nounwind {
; CHECK32-LABEL: plus_one:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movb M, %al
; CHECK32-NEXT: incl L
; CHECK32-NEXT: jne .LBB0_2
-; CHECK32-NEXT: # BB#1: # %entry
+; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: andb $8, %al
; CHECK32-NEXT: je .LBB0_2
-; CHECK32-NEXT: # BB#3: # %exit2
+; CHECK32-NEXT: # %bb.3: # %exit2
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: retl
; CHECK32-NEXT: .LBB0_2: # %exit
@@ -31,14 +31,14 @@ define i1 @plus_one() nounwind {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: plus_one:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movb {{.*}}(%rip), %al
; CHECK64-NEXT: incl {{.*}}(%rip)
; CHECK64-NEXT: jne .LBB0_2
-; CHECK64-NEXT: # BB#1: # %entry
+; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: andb $8, %al
; CHECK64-NEXT: je .LBB0_2
-; CHECK64-NEXT: # BB#3: # %exit2
+; CHECK64-NEXT: # %bb.3: # %exit2
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
; CHECK64-NEXT: .LBB0_2: # %exit
@@ -64,14 +64,14 @@ exit2:
define i1 @plus_forty_two() nounwind {
; CHECK32-LABEL: plus_forty_two:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movb M, %al
; CHECK32-NEXT: addl $42, L
; CHECK32-NEXT: jne .LBB1_2
-; CHECK32-NEXT: # BB#1: # %entry
+; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: andb $8, %al
; CHECK32-NEXT: je .LBB1_2
-; CHECK32-NEXT: # BB#3: # %exit2
+; CHECK32-NEXT: # %bb.3: # %exit2
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: retl
; CHECK32-NEXT: .LBB1_2: # %exit
@@ -79,14 +79,14 @@ define i1 @plus_forty_two() nounwind {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: plus_forty_two:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movb {{.*}}(%rip), %al
; CHECK64-NEXT: addl $42, {{.*}}(%rip)
; CHECK64-NEXT: jne .LBB1_2
-; CHECK64-NEXT: # BB#1: # %entry
+; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: andb $8, %al
; CHECK64-NEXT: je .LBB1_2
-; CHECK64-NEXT: # BB#3: # %exit2
+; CHECK64-NEXT: # %bb.3: # %exit2
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
; CHECK64-NEXT: .LBB1_2: # %exit
@@ -112,14 +112,14 @@ exit2:
define i1 @minus_one() nounwind {
; CHECK32-LABEL: minus_one:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movb M, %al
; CHECK32-NEXT: decl L
; CHECK32-NEXT: jne .LBB2_2
-; CHECK32-NEXT: # BB#1: # %entry
+; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: andb $8, %al
; CHECK32-NEXT: je .LBB2_2
-; CHECK32-NEXT: # BB#3: # %exit2
+; CHECK32-NEXT: # %bb.3: # %exit2
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: retl
; CHECK32-NEXT: .LBB2_2: # %exit
@@ -127,14 +127,14 @@ define i1 @minus_one() nounwind {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: minus_one:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movb {{.*}}(%rip), %al
; CHECK64-NEXT: decl {{.*}}(%rip)
; CHECK64-NEXT: jne .LBB2_2
-; CHECK64-NEXT: # BB#1: # %entry
+; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: andb $8, %al
; CHECK64-NEXT: je .LBB2_2
-; CHECK64-NEXT: # BB#3: # %exit2
+; CHECK64-NEXT: # %bb.3: # %exit2
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
; CHECK64-NEXT: .LBB2_2: # %exit
@@ -160,14 +160,14 @@ exit2:
define i1 @minus_forty_two() nounwind {
; CHECK32-LABEL: minus_forty_two:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movb M, %al
; CHECK32-NEXT: addl $-42, L
; CHECK32-NEXT: jne .LBB3_2
-; CHECK32-NEXT: # BB#1: # %entry
+; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: andb $8, %al
; CHECK32-NEXT: je .LBB3_2
-; CHECK32-NEXT: # BB#3: # %exit2
+; CHECK32-NEXT: # %bb.3: # %exit2
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: retl
; CHECK32-NEXT: .LBB3_2: # %exit
@@ -175,14 +175,14 @@ define i1 @minus_forty_two() nounwind {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: minus_forty_two:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movb {{.*}}(%rip), %al
; CHECK64-NEXT: addl $-42, {{.*}}(%rip)
; CHECK64-NEXT: jne .LBB3_2
-; CHECK64-NEXT: # BB#1: # %entry
+; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: andb $8, %al
; CHECK64-NEXT: je .LBB3_2
-; CHECK64-NEXT: # BB#3: # %exit2
+; CHECK64-NEXT: # %bb.3: # %exit2
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: retq
; CHECK64-NEXT: .LBB3_2: # %exit
@@ -208,7 +208,7 @@ exit2:
define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) nounwind {
; CHECK32-LABEL: test_intervening_call:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: pushl %ebp
; CHECK32-NEXT: movl %esp, %ebp
; CHECK32-NEXT: pushl %ebx
@@ -233,7 +233,7 @@ define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) nounwind {
; CHECK32-NEXT: addb $127, %al
; CHECK32-NEXT: sahf
; CHECK32-NEXT: jne .LBB4_3
-; CHECK32-NEXT: # BB#1: # %t
+; CHECK32-NEXT: # %bb.1: # %t
; CHECK32-NEXT: movl $42, %eax
; CHECK32-NEXT: jmp .LBB4_2
; CHECK32-NEXT: .LBB4_3: # %f
@@ -246,7 +246,7 @@ define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) nounwind {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: test_intervening_call:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: pushq %rbp
; CHECK64-NEXT: movq %rsp, %rbp
; CHECK64-NEXT: pushq %rbx
@@ -264,7 +264,7 @@ define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) nounwind {
; CHECK64-NEXT: addb $127, %al
; CHECK64-NEXT: sahf
; CHECK64-NEXT: jne .LBB4_3
-; CHECK64-NEXT: # BB#1: # %t
+; CHECK64-NEXT: # %bb.1: # %t
; CHECK64-NEXT: movl $42, %eax
; CHECK64-NEXT: jmp .LBB4_2
; CHECK64-NEXT: .LBB4_3: # %f
@@ -291,7 +291,7 @@ f:
define i64 @test_two_live_flags(i64* %foo0, i64 %bar0, i64 %baz0, i64* %foo1, i64 %bar1, i64 %baz1) nounwind {
; CHECK32-LABEL: test_two_live_flags:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: pushl %ebp
; CHECK32-NEXT: movl %esp, %ebp
; CHECK32-NEXT: pushl %ebx
@@ -320,10 +320,10 @@ define i64 @test_two_live_flags(i64* %foo0, i64 %bar0, i64 %baz0, i64* %foo1, i6
; CHECK32-NEXT: sahf
; CHECK32-NEXT: popl %eax
; CHECK32-NEXT: jne .LBB5_4
-; CHECK32-NEXT: # BB#1: # %entry
+; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: testb %al, %al
; CHECK32-NEXT: je .LBB5_4
-; CHECK32-NEXT: # BB#2: # %t
+; CHECK32-NEXT: # %bb.2: # %t
; CHECK32-NEXT: movl $42, %eax
; CHECK32-NEXT: jmp .LBB5_3
; CHECK32-NEXT: .LBB5_4: # %f
@@ -337,7 +337,7 @@ define i64 @test_two_live_flags(i64* %foo0, i64 %bar0, i64 %baz0, i64* %foo1, i6
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: test_two_live_flags:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: pushq %rbp
; CHECK64-NEXT: movq %rsp, %rbp
; CHECK64-NEXT: movq %rsi, %rax
@@ -354,10 +354,10 @@ define i64 @test_two_live_flags(i64* %foo0, i64 %bar0, i64 %baz0, i64* %foo1, i6
; CHECK64-NEXT: sahf
; CHECK64-NEXT: popq %rax
; CHECK64-NEXT: jne .LBB5_3
-; CHECK64-NEXT: # BB#1: # %entry
+; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: testb %al, %al
; CHECK64-NEXT: je .LBB5_3
-; CHECK64-NEXT: # BB#2: # %t
+; CHECK64-NEXT: # %bb.2: # %t
; CHECK64-NEXT: movl $42, %eax
; CHECK64-NEXT: popq %rbp
; CHECK64-NEXT: retq
@@ -382,7 +382,7 @@ f:
define i1 @asm_clobbering_flags(i32* %mem) nounwind {
; CHECK32-LABEL: asm_clobbering_flags:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK32-NEXT: movl (%ecx), %edx
; CHECK32-NEXT: testl %edx, %edx
@@ -394,7 +394,7 @@ define i1 @asm_clobbering_flags(i32* %mem) nounwind {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: asm_clobbering_flags:
-; CHECK64: # BB#0: # %entry
+; CHECK64: # %bb.0: # %entry
; CHECK64-NEXT: movl (%rdi), %ecx
; CHECK64-NEXT: testl %ecx, %ecx
; CHECK64-NEXT: setg %al
diff --git a/test/CodeGen/X86/peephole-recurrence.mir b/test/CodeGen/X86/peephole-recurrence.mir
index 115f38c4ca2..3fc8b2a3137 100644
--- a/test/CodeGen/X86/peephole-recurrence.mir
+++ b/test/CodeGen/X86/peephole-recurrence.mir
@@ -89,44 +89,44 @@ liveins:
- { reg: '%edi', virtual-reg: '%4' }
body: |
bb.0.bb0:
- successors: %bb.1.bb1(0x80000000)
+ successors: %bb.1(0x80000000)
liveins: %edi
%4 = COPY %edi
%5 = MOV32r0 implicit-def dead %eflags
bb.1.bb1:
- successors: %bb.3.bb4(0x30000000), %bb.2.bb3(0x50000000)
+ successors: %bb.3(0x30000000), %bb.2(0x50000000)
- ; CHECK: %0:gr32 = PHI %5, %bb.0.bb0, %3, %bb.5.bb7
- %0 = PHI %5, %bb.0.bb0, %3, %bb.5.bb7
+ ; CHECK: %0:gr32 = PHI %5, %bb.0, %3, %bb.5
+ %0 = PHI %5, %bb.0, %3, %bb.5
%6 = MOV32ri 1
TEST32rr %4, %4, implicit-def %eflags
- JE_1 %bb.3.bb4, implicit %eflags
- JMP_1 %bb.2.bb3
+ JE_1 %bb.3, implicit %eflags
+ JMP_1 %bb.2
bb.2.bb3:
- successors: %bb.3.bb4(0x80000000)
+ successors: %bb.3(0x80000000)
%7 = MOV32ri 2
bb.3.bb4:
- successors: %bb.5.bb7(0x30000000), %bb.4.bb6(0x50000000)
+ successors: %bb.5(0x30000000), %bb.4(0x50000000)
- %1 = PHI %6, %bb.1.bb1, %7, %bb.2.bb3
+ %1 = PHI %6, %bb.1, %7, %bb.2
TEST32rr %1, %1, implicit-def %eflags
- JE_1 %bb.5.bb7, implicit %eflags
- JMP_1 %bb.4.bb6
+ JE_1 %bb.5, implicit %eflags
+ JMP_1 %bb.4
bb.4.bb6:
- successors: %bb.5.bb7(0x80000000)
+ successors: %bb.5(0x80000000)
%9 = MOV32ri 2
bb.5.bb7:
- successors: %bb.1.bb1(0x7c000000), %bb.6.bb8(0x04000000)
+ successors: %bb.1(0x7c000000), %bb.6(0x04000000)
- %2 = PHI %6, %bb.3.bb4, %9, %bb.4.bb6
+ %2 = PHI %6, %bb.3, %9, %bb.4
%10 = ADD32rr %1, %0, implicit-def dead %eflags
; CHECK: %10:gr32 = ADD32rr
; CHECK-SAME: %0,
@@ -136,8 +136,8 @@ body: |
; CHECK-SAME: %10,
; CHECK-SAME: %2,
%11 = SUB32ri8 %3, 10, implicit-def %eflags
- JL_1 %bb.1.bb1, implicit %eflags
- JMP_1 %bb.6.bb8
+ JL_1 %bb.1, implicit %eflags
+ JMP_1 %bb.6
bb.6.bb8:
%12 = MOV32r0 implicit-def dead %eflags
@@ -172,7 +172,7 @@ liveins:
- { reg: '%rsi', virtual-reg: '%5' }
body: |
bb.0.bb0:
- successors: %bb.1.bb1(0x80000000)
+ successors: %bb.1(0x80000000)
liveins: %edi, %rsi
%5 = COPY %rsi
@@ -180,37 +180,37 @@ body: |
%6 = MOV32r0 implicit-def dead %eflags
bb.1.bb1:
- successors: %bb.3.bb4(0x30000000), %bb.2.bb3(0x50000000)
+ successors: %bb.3(0x30000000), %bb.2(0x50000000)
- %0 = PHI %6, %bb.0.bb0, %3, %bb.5.bb7
- ; CHECK: %0:gr32 = PHI %6, %bb.0.bb0, %3, %bb.5.bb7
+ %0 = PHI %6, %bb.0, %3, %bb.5
+ ; CHECK: %0:gr32 = PHI %6, %bb.0, %3, %bb.5
%7 = MOV32ri 1
TEST32rr %4, %4, implicit-def %eflags
- JE_1 %bb.3.bb4, implicit %eflags
- JMP_1 %bb.2.bb3
+ JE_1 %bb.3, implicit %eflags
+ JMP_1 %bb.2
bb.2.bb3:
- successors: %bb.3.bb4(0x80000000)
+ successors: %bb.3(0x80000000)
%8 = MOV32ri 2
bb.3.bb4:
- successors: %bb.5.bb7(0x30000000), %bb.4.bb6(0x50000000)
+ successors: %bb.5(0x30000000), %bb.4(0x50000000)
- %1 = PHI %7, %bb.1.bb1, %8, %bb.2.bb3
+ %1 = PHI %7, %bb.1, %8, %bb.2
TEST32rr %1, %1, implicit-def %eflags
- JE_1 %bb.5.bb7, implicit %eflags
- JMP_1 %bb.4.bb6
+ JE_1 %bb.5, implicit %eflags
+ JMP_1 %bb.4
bb.4.bb6:
- successors: %bb.5.bb7(0x80000000)
+ successors: %bb.5(0x80000000)
%10 = MOV32ri 2
bb.5.bb7:
- successors: %bb.1.bb1(0x7c000000), %bb.6.bb8(0x04000000)
+ successors: %bb.1(0x7c000000), %bb.6(0x04000000)
- %2 = PHI %7, %bb.3.bb4, %10, %bb.4.bb6
+ %2 = PHI %7, %bb.3, %10, %bb.4
%11 = ADD32rr %1, %0, implicit-def dead %eflags
; CHECK: %11:gr32 = ADD32rr
; CHECK-SAME: %1,
@@ -221,8 +221,8 @@ body: |
; CHECK-SAME: %2,
; CHECK-SAME: %11,
%12 = SUB32ri8 %3, 10, implicit-def %eflags
- JL_1 %bb.1.bb1, implicit %eflags
- JMP_1 %bb.6.bb8
+ JL_1 %bb.1, implicit %eflags
+ JMP_1 %bb.6
bb.6.bb8:
%13 = MOV32r0 implicit-def dead %eflags
diff --git a/test/CodeGen/X86/phaddsub.ll b/test/CodeGen/X86/phaddsub.ll
index 08015258867..64f89354136 100644
--- a/test/CodeGen/X86/phaddsub.ll
+++ b/test/CodeGen/X86/phaddsub.ll
@@ -4,12 +4,12 @@
define <8 x i16> @phaddw1(<8 x i16> %x, <8 x i16> %y) {
; SSSE3-LABEL: phaddw1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddw %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddw1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -20,12 +20,12 @@ define <8 x i16> @phaddw1(<8 x i16> %x, <8 x i16> %y) {
define <8 x i16> @phaddw2(<8 x i16> %x, <8 x i16> %y) {
; SSSE3-LABEL: phaddw2:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddw %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddw2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 1, i32 2, i32 5, i32 6, i32 9, i32 10, i32 13, i32 14>
@@ -36,12 +36,12 @@ define <8 x i16> @phaddw2(<8 x i16> %x, <8 x i16> %y) {
define <4 x i32> @phaddd1(<4 x i32> %x, <4 x i32> %y) {
; SSSE3-LABEL: phaddd1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -52,12 +52,12 @@ define <4 x i32> @phaddd1(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @phaddd2(<4 x i32> %x, <4 x i32> %y) {
; SSSE3-LABEL: phaddd2:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 1, i32 2, i32 5, i32 6>
@@ -68,12 +68,12 @@ define <4 x i32> @phaddd2(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @phaddd3(<4 x i32> %x) {
; SSSE3-LABEL: phaddd3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
@@ -84,12 +84,12 @@ define <4 x i32> @phaddd3(<4 x i32> %x) {
define <4 x i32> @phaddd4(<4 x i32> %x) {
; SSSE3-LABEL: phaddd4:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -100,12 +100,12 @@ define <4 x i32> @phaddd4(<4 x i32> %x) {
define <4 x i32> @phaddd5(<4 x i32> %x) {
; SSSE3-LABEL: phaddd5:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 undef, i32 undef>
@@ -116,12 +116,12 @@ define <4 x i32> @phaddd5(<4 x i32> %x) {
define <4 x i32> @phaddd6(<4 x i32> %x) {
; SSSE3-LABEL: phaddd6:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -132,12 +132,12 @@ define <4 x i32> @phaddd6(<4 x i32> %x) {
define <4 x i32> @phaddd7(<4 x i32> %x) {
; SSSE3-LABEL: phaddd7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 undef>
@@ -148,12 +148,12 @@ define <4 x i32> @phaddd7(<4 x i32> %x) {
define <8 x i16> @phsubw1(<8 x i16> %x, <8 x i16> %y) {
; SSSE3-LABEL: phsubw1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubw %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsubw1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -164,12 +164,12 @@ define <8 x i16> @phsubw1(<8 x i16> %x, <8 x i16> %y) {
define <4 x i32> @phsubd1(<4 x i32> %x, <4 x i32> %y) {
; SSSE3-LABEL: phsubd1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsubd1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -180,12 +180,12 @@ define <4 x i32> @phsubd1(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @phsubd2(<4 x i32> %x) {
; SSSE3-LABEL: phsubd2:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsubd2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
@@ -196,12 +196,12 @@ define <4 x i32> @phsubd2(<4 x i32> %x) {
define <4 x i32> @phsubd3(<4 x i32> %x) {
; SSSE3-LABEL: phsubd3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsubd3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -212,12 +212,12 @@ define <4 x i32> @phsubd3(<4 x i32> %x) {
define <4 x i32> @phsubd4(<4 x i32> %x) {
; SSSE3-LABEL: phsubd4:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsubd4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -228,7 +228,7 @@ define <4 x i32> @phsubd4(<4 x i32> %x) {
define <8 x i16> @phsubw1_reverse(<8 x i16> %x, <8 x i16> %y) {
; SSSE3-LABEL: phsubw1_reverse:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
; SSSE3-NEXT: movdqa %xmm1, %xmm4
; SSSE3-NEXT: pshufb %xmm3, %xmm4
@@ -244,7 +244,7 @@ define <8 x i16> @phsubw1_reverse(<8 x i16> %x, <8 x i16> %y) {
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsubw1_reverse:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm3
; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm2
@@ -263,7 +263,7 @@ define <8 x i16> @phsubw1_reverse(<8 x i16> %x, <8 x i16> %y) {
define <4 x i32> @phsubd1_reverse(<4 x i32> %x, <4 x i32> %y) {
; SSSE3-LABEL: phsubd1_reverse:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movaps %xmm0, %xmm2
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -272,7 +272,7 @@ define <4 x i32> @phsubd1_reverse(<4 x i32> %x, <4 x i32> %y) {
; SSSE3-NEXT: retq
;
; AVX-LABEL: phsubd1_reverse:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,3],xmm1[1,3]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
diff --git a/test/CodeGen/X86/pku.ll b/test/CodeGen/X86/pku.ll
index 79b8c474ade..10875a589a6 100644
--- a/test/CodeGen/X86/pku.ll
+++ b/test/CodeGen/X86/pku.ll
@@ -4,7 +4,7 @@ declare void @llvm.x86.wrpkru(i32)
define void @test_x86_wrpkru(i32 %src) {
; CHECK-LABEL: test_x86_wrpkru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
@@ -16,7 +16,7 @@ define void @test_x86_wrpkru(i32 %src) {
define i32 @test_x86_rdpkru() {
; CHECK-LABEL: test_x86_rdpkru:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: rdpkru
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/pmovsx-inreg.ll b/test/CodeGen/X86/pmovsx-inreg.ll
index e7abbadd084..f20065bd506 100644
--- a/test/CodeGen/X86/pmovsx-inreg.ll
+++ b/test/CodeGen/X86/pmovsx-inreg.ll
@@ -9,7 +9,7 @@
define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind {
; SSE41-LABEL: test1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: movups %xmm1, (%rax)
@@ -17,7 +17,7 @@ define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbq (%rdi), %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovups %xmm1, (%rax)
@@ -25,7 +25,7 @@ define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind {
; AVX-NEXT: retq
;
; X32-AVX2-LABEL: test1:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxbq (%ecx), %xmm0
@@ -42,7 +42,7 @@ define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind {
define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
; SSE41-LABEL: test2:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: pmovsxbq 2(%rdi), %xmm1
; SSE41-NEXT: xorps %xmm2, %xmm2
@@ -52,7 +52,7 @@ define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -65,7 +65,7 @@ define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbq (%rdi), %ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovups %ymm1, (%rax)
@@ -74,7 +74,7 @@ define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
; AVX2-NEXT: retq
;
; X32-AVX2-LABEL: test2:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxbq (%ecx), %ymm0
@@ -92,7 +92,7 @@ define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind {
; SSE41-LABEL: test3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: movups %xmm1, (%rax)
@@ -100,7 +100,7 @@ define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovups %xmm1, (%rax)
@@ -108,7 +108,7 @@ define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind {
; AVX-NEXT: retq
;
; X32-AVX2-LABEL: test3:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxbd (%ecx), %xmm0
@@ -125,7 +125,7 @@ define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind {
define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
; SSE41-LABEL: test4:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
; SSE41-NEXT: pmovsxbd 4(%rdi), %xmm1
; SSE41-NEXT: xorps %xmm2, %xmm2
@@ -135,7 +135,7 @@ define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -148,7 +148,7 @@ define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd (%rdi), %ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovups %ymm1, (%rax)
@@ -157,7 +157,7 @@ define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
; AVX2-NEXT: retq
;
; X32-AVX2-LABEL: test4:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxbd (%ecx), %ymm0
@@ -175,7 +175,7 @@ define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind {
; SSE41-LABEL: test5:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: movups %xmm1, (%rax)
@@ -183,7 +183,7 @@ define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovups %xmm1, (%rax)
@@ -191,7 +191,7 @@ define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind {
; AVX-NEXT: retq
;
; X32-AVX2-LABEL: test5:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxbw (%ecx), %xmm0
@@ -208,7 +208,7 @@ define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind {
define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
; SSE41-LABEL: test6:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: pmovsxbw 8(%rdi), %xmm1
; SSE41-NEXT: xorps %xmm2, %xmm2
@@ -218,7 +218,7 @@ define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test6:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX1-NEXT: vpmovsxbw 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -229,7 +229,7 @@ define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test6:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbw (%rdi), %ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovups %ymm1, (%rax)
@@ -238,7 +238,7 @@ define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
; AVX2-NEXT: retq
;
; X32-AVX2-LABEL: test6:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxbw (%ecx), %ymm0
@@ -256,7 +256,7 @@ define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind {
; SSE41-LABEL: test7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: movups %xmm1, (%rax)
@@ -264,7 +264,7 @@ define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwq (%rdi), %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovups %xmm1, (%rax)
@@ -272,7 +272,7 @@ define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind {
; AVX-NEXT: retq
;
; X32-AVX2-LABEL: test7:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxwq (%ecx), %xmm0
@@ -289,7 +289,7 @@ define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind {
define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
; SSE41-LABEL: test8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: pmovsxwq 4(%rdi), %xmm1
; SSE41-NEXT: xorps %xmm2, %xmm2
@@ -299,7 +299,7 @@ define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -312,7 +312,7 @@ define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwq (%rdi), %ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovups %ymm1, (%rax)
@@ -321,7 +321,7 @@ define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
; AVX2-NEXT: retq
;
; X32-AVX2-LABEL: test8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxwq (%ecx), %ymm0
@@ -339,7 +339,7 @@ define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind {
; SSE41-LABEL: test9:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: movups %xmm1, (%rax)
@@ -347,7 +347,7 @@ define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test9:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovups %xmm1, (%rax)
@@ -355,7 +355,7 @@ define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind {
; AVX-NEXT: retq
;
; X32-AVX2-LABEL: test9:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxwd (%ecx), %xmm0
@@ -372,7 +372,7 @@ define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind {
define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
; SSE41-LABEL: test10:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: pmovsxwd 8(%rdi), %xmm1
; SSE41-NEXT: xorps %xmm2, %xmm2
@@ -382,7 +382,7 @@ define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test10:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -393,7 +393,7 @@ define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test10:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd (%rdi), %ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovups %ymm1, (%rax)
@@ -402,7 +402,7 @@ define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
; AVX2-NEXT: retq
;
; X32-AVX2-LABEL: test10:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxwd (%ecx), %ymm0
@@ -420,7 +420,7 @@ define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind {
; SSE41-LABEL: test11:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: movups %xmm1, (%rax)
@@ -428,7 +428,7 @@ define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test11:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxdq (%rdi), %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovups %xmm1, (%rax)
@@ -436,7 +436,7 @@ define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind {
; AVX-NEXT: retq
;
; X32-AVX2-LABEL: test11:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxdq (%ecx), %xmm0
@@ -453,7 +453,7 @@ define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind {
define void @test12(<4 x i32>* %in, <4 x i64>* %out) nounwind {
; SSE41-LABEL: test12:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: pmovsxdq 8(%rdi), %xmm1
; SSE41-NEXT: xorps %xmm2, %xmm2
@@ -463,7 +463,7 @@ define void @test12(<4 x i32>* %in, <4 x i64>* %out) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxdq (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -474,7 +474,7 @@ define void @test12(<4 x i32>* %in, <4 x i64>* %out) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxdq (%rdi), %ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovups %ymm1, (%rax)
@@ -483,7 +483,7 @@ define void @test12(<4 x i32>* %in, <4 x i64>* %out) nounwind {
; AVX2-NEXT: retq
;
; X32-AVX2-LABEL: test12:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX2-NEXT: vpmovsxdq (%ecx), %ymm0
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 249e20ce502..76b5b508711 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -7,7 +7,7 @@
define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-LABEL: mul_v16i8c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm1
@@ -23,7 +23,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v16i8c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -38,7 +38,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v16i8c:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -50,7 +50,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v16i8c:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
@@ -59,7 +59,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v16i8c:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
@@ -73,12 +73,12 @@ entry:
define <8 x i16> @mul_v8i16c(<8 x i16> %i) nounwind {
; SSE-LABEL: mul_v8i16c:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v8i16c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -88,7 +88,7 @@ entry:
define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
; SSE2-LABEL: mul_v4i32c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [117,117,117,117]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -99,12 +99,12 @@ define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [117,117,117,117]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -115,7 +115,7 @@ entry:
define <2 x i64> @mul_v2i64c(<2 x i64> %i) nounwind {
; SSE-LABEL: mul_v2i64c:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [117,117]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
@@ -126,7 +126,7 @@ define <2 x i64> @mul_v2i64c(<2 x i64> %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [117,117]
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -141,7 +141,7 @@ entry:
define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE2-LABEL: mul_v16i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
@@ -161,7 +161,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v16i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm1, %xmm3
; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
; SSE41-NEXT: pmullw %xmm3, %xmm2
@@ -178,7 +178,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v16i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -191,7 +191,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v16i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -201,7 +201,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v16i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -216,12 +216,12 @@ entry:
define <8 x i16> @mul_v8i16(<8 x i16> %i, <8 x i16> %j) nounwind {
; SSE-LABEL: mul_v8i16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -231,7 +231,7 @@ entry:
define <4 x i32> @mul_v4i32(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -242,12 +242,12 @@ define <4 x i32> @mul_v4i32(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -257,7 +257,7 @@ entry:
define <2 x i64> @mul_v2i64(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
@@ -271,7 +271,7 @@ define <2 x i64> @mul_v2i64(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $32, %xmm1, %xmm3
@@ -290,7 +290,7 @@ declare void @foo()
define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32spill:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: subq $40, %rsp
; SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
@@ -308,7 +308,7 @@ define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32spill:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: subq $40, %rsp
; SSE41-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE41-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
@@ -319,7 +319,7 @@ define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32spill:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -337,7 +337,7 @@ entry:
define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64spill:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $40, %rsp
; SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
@@ -358,7 +358,7 @@ define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64spill:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -384,7 +384,7 @@ entry:
define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-LABEL: mul_v32i8c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
@@ -410,7 +410,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v32i8c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm4, %xmm2
@@ -434,7 +434,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v32i8c:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
@@ -454,7 +454,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v32i8c:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
@@ -469,7 +469,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v32i8c:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
@@ -481,14 +481,14 @@ entry:
define <16 x i16> @mul_v16i16c(<16 x i16> %i) nounwind {
; SSE-LABEL: mul_v16i16c:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v16i16c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -498,7 +498,7 @@ entry:
define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind {
; SSE2-LABEL: mul_v8i32c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
@@ -515,14 +515,14 @@ define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i32c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117]
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v8i32c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastd {{.*#+}} ymm1 = [117,117,117,117,117,117,117,117]
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
@@ -533,7 +533,7 @@ entry:
define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind {
; SSE-LABEL: mul_v4i64c:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [117,117]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
@@ -550,7 +550,7 @@ define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v4i64c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [117,117,117,117]
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
@@ -565,7 +565,7 @@ entry:
define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-LABEL: mul_v32i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
@@ -600,7 +600,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v32i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm2, %xmm5
; SSE41-NEXT: pmovsxbw %xmm0, %xmm4
; SSE41-NEXT: pmullw %xmm5, %xmm4
@@ -629,7 +629,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v32i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -651,7 +651,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v32i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm2
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm3
; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm2
@@ -668,7 +668,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v32i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
@@ -681,13 +681,13 @@ entry:
define <16 x i16> @mul_v16i16(<16 x i16> %i, <16 x i16> %j) nounwind {
; SSE-LABEL: mul_v16i16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v16i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -697,7 +697,7 @@ entry:
define <8 x i32> @mul_v8i32(<8 x i32> %i, <8 x i32> %j) nounwind {
; SSE2-LABEL: mul_v8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -715,13 +715,13 @@ define <8 x i32> @mul_v8i32(<8 x i32> %i, <8 x i32> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm3, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v8i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -731,7 +731,7 @@ entry:
define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind {
; SSE-LABEL: mul_v4i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm2, %xmm4
@@ -755,7 +755,7 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v4i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -772,7 +772,7 @@ entry:
define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-LABEL: mul_v64i8c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
@@ -818,7 +818,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v64i8c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
@@ -860,7 +860,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v64i8c:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
@@ -894,7 +894,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v64i8c:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
@@ -919,7 +919,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v64i8c:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
@@ -937,7 +937,7 @@ entry:
define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-LABEL: mul_v64i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm4, %xmm8
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm8
@@ -1002,7 +1002,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v64i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
@@ -1055,7 +1055,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v64i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT: vpmovsxbw %xmm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
@@ -1094,7 +1094,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v64i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm5
; AVX512F-NEXT: vpmullw %ymm4, %ymm5, %ymm4
@@ -1124,7 +1124,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v64i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm2
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm3
; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm2
@@ -1145,7 +1145,7 @@ entry:
; PR30845
define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
@@ -1160,7 +1160,7 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
@@ -1173,7 +1173,7 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_upper:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
@@ -1192,7 +1192,7 @@ entry:
define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper_left:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
@@ -1213,7 +1213,7 @@ define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper_left:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
@@ -1233,7 +1233,7 @@ define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_upper_left:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
@@ -1254,7 +1254,7 @@ entry:
define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_lower:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
@@ -1270,7 +1270,7 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_lower:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1284,7 +1284,7 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_lower:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
@@ -1304,7 +1304,7 @@ entry:
define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_zero_upper:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm6, %xmm6
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
@@ -1329,7 +1329,7 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_zero_upper:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
@@ -1351,7 +1351,7 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v8i64_zero_upper:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1365,7 +1365,7 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: mul_v8i64_zero_upper:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
@@ -1384,7 +1384,7 @@ entry:
define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_sext:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
@@ -1465,7 +1465,7 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_sext:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; SSE41-NEXT: pmovsxwq %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -1487,7 +1487,7 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v8i64_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxwq %xmm2, %ymm2
; AVX2-NEXT: vpmovsxwq %xmm0, %ymm0
@@ -1500,7 +1500,7 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: mul_v8i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
; AVX512-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
diff --git a/test/CodeGen/X86/pointer-vector.ll b/test/CodeGen/X86/pointer-vector.ll
index d5297b9c70c..739e66c7bad 100644
--- a/test/CodeGen/X86/pointer-vector.ll
+++ b/test/CodeGen/X86/pointer-vector.ll
@@ -4,7 +4,7 @@
define <8 x i32*> @SHUFF0(<4 x i32*> %ptrv) nounwind {
; CHECK-LABEL: SHUFF0:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,1,2]
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,1]
; CHECK-NEXT: movdqa %xmm2, %xmm0
@@ -16,7 +16,7 @@ entry:
define <4 x i32*> @SHUFF1(<4 x i32*> %ptrv) nounwind {
; CHECK-LABEL: SHUFF1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,3,2]
; CHECK-NEXT: retl
entry:
@@ -26,7 +26,7 @@ entry:
define <4 x i8*> @SHUFF3(<4 x i8*> %ptrv) nounwind {
; CHECK-LABEL: SHUFF3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,1,2]
; CHECK-NEXT: retl
entry:
@@ -36,7 +36,7 @@ entry:
define <4 x i8*> @LOAD0(<4 x i8*>* %p) nounwind {
; CHECK-LABEL: LOAD0:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movaps (%eax), %xmm0
; CHECK-NEXT: retl
@@ -47,7 +47,7 @@ entry:
define <4 x i8*> @LOAD1(<4 x i8*>* %p) nounwind {
; CHECK-LABEL: LOAD1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movdqa (%eax), %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,0,3]
@@ -62,7 +62,7 @@ entry:
define <4 x i8*> @LOAD2(<4 x i8*>* %p) nounwind {
; CHECK-LABEL: LOAD2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movaps (%eax), %xmm0
@@ -79,7 +79,7 @@ entry:
define <4 x i32> @INT2PTR0(<4 x i8*>* %p) nounwind {
; CHECK-LABEL: INT2PTR0:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movaps (%eax), %xmm0
; CHECK-NEXT: retl
@@ -91,7 +91,7 @@ entry:
define <4 x i32*> @INT2PTR1(<4 x i8>* %p) nounwind {
; CHECK-LABEL: INT2PTR1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; CHECK-NEXT: retl
@@ -103,7 +103,7 @@ entry:
define <4 x i32*> @BITCAST0(<4 x i8*>* %p) nounwind {
; CHECK-LABEL: BITCAST0:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movaps (%eax), %xmm0
; CHECK-NEXT: retl
@@ -115,7 +115,7 @@ entry:
define <2 x i32*> @BITCAST1(<2 x i8*>* %p) nounwind {
; CHECK-LABEL: BITCAST1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; CHECK-NEXT: retl
@@ -127,7 +127,7 @@ entry:
define <4 x i32> @ICMP0(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
; CHECK-LABEL: ICMP0:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movdqa (%ecx), %xmm0
@@ -146,7 +146,7 @@ entry:
define <4 x i32> @ICMP1(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
; CHECK-LABEL: ICMP1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movdqa (%ecx), %xmm0
diff --git a/test/CodeGen/X86/popcnt-schedule.ll b/test/CodeGen/X86/popcnt-schedule.ll
index 36b8e15eab6..b8a75dc6b2b 100644
--- a/test/CodeGen/X86/popcnt-schedule.ll
+++ b/test/CodeGen/X86/popcnt-schedule.ll
@@ -13,7 +13,7 @@
define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-LABEL: test_ctpop_i16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: popcntw (%rsi), %cx # sched: [9:1.00]
; GENERIC-NEXT: popcntw %di, %ax # sched: [3:1.00]
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
@@ -21,7 +21,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_ctpop_i16:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: popcntw (%rsi), %cx # sched: [6:1.00]
; SLM-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
@@ -29,7 +29,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ctpop_i16:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: popcntw (%rsi), %cx # sched: [9:1.00]
; SANDY-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
@@ -37,7 +37,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctpop_i16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: popcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -45,7 +45,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctpop_i16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -53,7 +53,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctpop_i16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -61,7 +61,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctpop_i16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; BTVER2-NEXT: popcntw %di, %ax # sched: [3:1.00]
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
@@ -69,7 +69,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctpop_i16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: popcntw (%rsi), %cx # sched: [10:1.00]
; ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -85,56 +85,56 @@ declare i16 @llvm.ctpop.i16(i16)
define i32 @test_ctpop_i32(i32 %a0, i32 *%a1) {
; GENERIC-LABEL: test_ctpop_i32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: popcntl (%rsi), %ecx # sched: [9:1.00]
; GENERIC-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_ctpop_i32:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: popcntl (%rsi), %ecx # sched: [6:1.00]
; SLM-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ctpop_i32:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: popcntl (%rsi), %ecx # sched: [9:1.00]
; SANDY-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctpop_i32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: popcntl (%rsi), %ecx # sched: [3:1.00]
; HASWELL-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctpop_i32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: popcntl (%rsi), %ecx # sched: [8:1.00]
; BROADWELL-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctpop_i32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: popcntl (%rsi), %ecx # sched: [8:1.00]
; SKYLAKE-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctpop_i32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: popcntl (%rsi), %ecx # sched: [8:1.00]
; BTVER2-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctpop_i32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: popcntl (%rsi), %ecx # sched: [10:1.00]
; ZNVER1-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
@@ -149,56 +149,56 @@ declare i32 @llvm.ctpop.i32(i32)
define i64 @test_ctpop_i64(i64 %a0, i64 *%a1) {
; GENERIC-LABEL: test_ctpop_i64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: popcntq (%rsi), %rcx # sched: [9:1.00]
; GENERIC-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; GENERIC-NEXT: orq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_ctpop_i64:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: popcntq (%rsi), %rcx # sched: [6:1.00]
; SLM-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; SLM-NEXT: orq %rcx, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ctpop_i64:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: popcntq (%rsi), %rcx # sched: [9:1.00]
; SANDY-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; SANDY-NEXT: orq %rcx, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctpop_i64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: popcntq (%rsi), %rcx # sched: [3:1.00]
; HASWELL-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; HASWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctpop_i64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: popcntq (%rsi), %rcx # sched: [8:1.00]
; BROADWELL-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; BROADWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctpop_i64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: popcntq (%rsi), %rcx # sched: [8:1.00]
; SKYLAKE-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; SKYLAKE-NEXT: orq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctpop_i64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: popcntq (%rsi), %rcx # sched: [8:1.00]
; BTVER2-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; BTVER2-NEXT: orq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctpop_i64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: popcntq (%rsi), %rcx # sched: [10:1.00]
; ZNVER1-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
diff --git a/test/CodeGen/X86/popcnt.ll b/test/CodeGen/X86/popcnt.ll
index d11f714c3e7..8f078fdcf0d 100644
--- a/test/CodeGen/X86/popcnt.ll
+++ b/test/CodeGen/X86/popcnt.ll
@@ -6,7 +6,7 @@
define i8 @cnt8(i8 %x) nounwind readnone {
; X32-LABEL: cnt8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: shrb %al
@@ -24,7 +24,7 @@ define i8 @cnt8(i8 %x) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: cnt8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrb %al
; X64-NEXT: andb $85, %al
@@ -41,14 +41,14 @@ define i8 @cnt8(i8 %x) nounwind readnone {
; X64-NEXT: retq
;
; X32-POPCNT-LABEL: cnt8:
-; X32-POPCNT: # BB#0:
+; X32-POPCNT: # %bb.0:
; X32-POPCNT-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-POPCNT-NEXT: popcntl %eax, %eax
; X32-POPCNT-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-POPCNT-NEXT: retl
;
; X64-POPCNT-LABEL: cnt8:
-; X64-POPCNT: # BB#0:
+; X64-POPCNT: # %bb.0:
; X64-POPCNT-NEXT: movzbl %dil, %eax
; X64-POPCNT-NEXT: popcntl %eax, %eax
; X64-POPCNT-NEXT: # kill: %al<def> %al<kill> %eax<kill>
@@ -59,7 +59,7 @@ define i8 @cnt8(i8 %x) nounwind readnone {
define i16 @cnt16(i16 %x) nounwind readnone {
; X32-LABEL: cnt16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: shrl %ecx
@@ -83,7 +83,7 @@ define i16 @cnt16(i16 %x) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: cnt16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrl %eax
; X64-NEXT: andl $21845, %eax # imm = 0x5555
@@ -106,12 +106,12 @@ define i16 @cnt16(i16 %x) nounwind readnone {
; X64-NEXT: retq
;
; X32-POPCNT-LABEL: cnt16:
-; X32-POPCNT: # BB#0:
+; X32-POPCNT: # %bb.0:
; X32-POPCNT-NEXT: popcntw {{[0-9]+}}(%esp), %ax
; X32-POPCNT-NEXT: retl
;
; X64-POPCNT-LABEL: cnt16:
-; X64-POPCNT: # BB#0:
+; X64-POPCNT: # %bb.0:
; X64-POPCNT-NEXT: popcntw %di, %ax
; X64-POPCNT-NEXT: retq
%cnt = tail call i16 @llvm.ctpop.i16(i16 %x)
@@ -120,7 +120,7 @@ define i16 @cnt16(i16 %x) nounwind readnone {
define i32 @cnt32(i32 %x) nounwind readnone {
; X32-LABEL: cnt32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: shrl %ecx
@@ -140,7 +140,7 @@ define i32 @cnt32(i32 %x) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: cnt32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrl %eax
; X64-NEXT: andl $1431655765, %eax # imm = 0x55555555
@@ -159,12 +159,12 @@ define i32 @cnt32(i32 %x) nounwind readnone {
; X64-NEXT: retq
;
; X32-POPCNT-LABEL: cnt32:
-; X32-POPCNT: # BB#0:
+; X32-POPCNT: # %bb.0:
; X32-POPCNT-NEXT: popcntl {{[0-9]+}}(%esp), %eax
; X32-POPCNT-NEXT: retl
;
; X64-POPCNT-LABEL: cnt32:
-; X64-POPCNT: # BB#0:
+; X64-POPCNT: # %bb.0:
; X64-POPCNT-NEXT: popcntl %edi, %eax
; X64-POPCNT-NEXT: retq
%cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
@@ -173,7 +173,7 @@ define i32 @cnt32(i32 %x) nounwind readnone {
define i64 @cnt64(i64 %x) nounwind readnone {
; X32-LABEL: cnt64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %edx
@@ -211,7 +211,7 @@ define i64 @cnt64(i64 %x) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: cnt64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: shrq %rax
; X64-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
@@ -234,7 +234,7 @@ define i64 @cnt64(i64 %x) nounwind readnone {
; X64-NEXT: retq
;
; X32-POPCNT-LABEL: cnt64:
-; X32-POPCNT: # BB#0:
+; X32-POPCNT: # %bb.0:
; X32-POPCNT-NEXT: popcntl {{[0-9]+}}(%esp), %ecx
; X32-POPCNT-NEXT: popcntl {{[0-9]+}}(%esp), %eax
; X32-POPCNT-NEXT: addl %ecx, %eax
@@ -242,7 +242,7 @@ define i64 @cnt64(i64 %x) nounwind readnone {
; X32-POPCNT-NEXT: retl
;
; X64-POPCNT-LABEL: cnt64:
-; X64-POPCNT: # BB#0:
+; X64-POPCNT: # %bb.0:
; X64-POPCNT-NEXT: popcntq %rdi, %rax
; X64-POPCNT-NEXT: retq
%cnt = tail call i64 @llvm.ctpop.i64(i64 %x)
diff --git a/test/CodeGen/X86/post-ra-sched.ll b/test/CodeGen/X86/post-ra-sched.ll
index c31072a8a5e..f6de77a6988 100644
--- a/test/CodeGen/X86/post-ra-sched.ll
+++ b/test/CodeGen/X86/post-ra-sched.ll
@@ -16,7 +16,7 @@
define void @addindirect() {
; CHECK-LABEL: addindirect:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl idxb, %ecx
; CHECK-NEXT: movl idxa, %eax
; CHECK-NEXT: movl ptrs(,%ecx,4), %ecx
diff --git a/test/CodeGen/X86/powi.ll b/test/CodeGen/X86/powi.ll
index fb7f570d625..246e853eed6 100644
--- a/test/CodeGen/X86/powi.ll
+++ b/test/CodeGen/X86/powi.ll
@@ -3,7 +3,7 @@
define double @pow_wrapper(double %a) nounwind readonly ssp noredzone {
; CHECK-LABEL: pow_wrapper:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0
@@ -19,7 +19,7 @@ define double @pow_wrapper(double %a) nounwind readonly ssp noredzone {
define double @pow_wrapper_optsize(double %a) optsize {
; CHECK-LABEL: pow_wrapper_optsize:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $15, %edi
; CHECK-NEXT: jmp
%ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
@@ -28,7 +28,7 @@ define double @pow_wrapper_optsize(double %a) optsize {
define double @pow_wrapper_minsize(double %a) minsize {
; CHECK-LABEL: pow_wrapper_minsize:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq $15
; CHECK: popq %rdi
; CHECK: jmp
diff --git a/test/CodeGen/X86/pr11334.ll b/test/CodeGen/X86/pr11334.ll
index 8a154653414..d5c0f10324f 100644
--- a/test/CodeGen/X86/pr11334.ll
+++ b/test/CodeGen/X86/pr11334.ll
@@ -4,12 +4,12 @@
define <2 x double> @v2f2d_ext_vec(<2 x float> %v1) nounwind {
; SSE-LABEL: v2f2d_ext_vec:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtps2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: v2f2d_ext_vec:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -19,7 +19,7 @@ entry:
define <3 x double> @v3f2d_ext_vec(<3 x float> %v1) nounwind {
; SSE-LABEL: v3f2d_ext_vec:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtps2pd %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvtps2pd %xmm0, %xmm0
@@ -31,7 +31,7 @@ define <3 x double> @v3f2d_ext_vec(<3 x float> %v1) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: v3f2d_ext_vec:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtps2pd %xmm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -41,7 +41,7 @@ entry:
define <4 x double> @v4f2d_ext_vec(<4 x float> %v1) nounwind {
; SSE-LABEL: v4f2d_ext_vec:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtps2pd %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvtps2pd %xmm0, %xmm1
@@ -49,7 +49,7 @@ define <4 x double> @v4f2d_ext_vec(<4 x float> %v1) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: v4f2d_ext_vec:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtps2pd %xmm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -59,7 +59,7 @@ entry:
define <8 x double> @v8f2d_ext_vec(<8 x float> %v1) nounwind {
; SSE-LABEL: v8f2d_ext_vec:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtps2pd %xmm0, %xmm5
; SSE-NEXT: cvtps2pd %xmm1, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -71,7 +71,7 @@ define <8 x double> @v8f2d_ext_vec(<8 x float> %v1) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: v8f2d_ext_vec:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtps2pd %xmm0, %ymm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vcvtps2pd %xmm0, %ymm1
@@ -84,14 +84,14 @@ entry:
define void @test_vector_creation() nounwind {
; SSE-LABEL: test_vector_creation:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm0, %xmm0
; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movapd %xmm0, (%rax)
; SSE-NEXT: retq
;
; AVX-LABEL: test_vector_creation:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/pr11985.ll b/test/CodeGen/X86/pr11985.ll
index aae00de112d..94b37215f63 100644
--- a/test/CodeGen/X86/pr11985.ll
+++ b/test/CodeGen/X86/pr11985.ll
@@ -8,7 +8,7 @@
define float @foo(i8* nocapture %buf, float %a, float %b) nounwind uwtable {
; PRESCOTT-LABEL: foo:
-; PRESCOTT: # BB#0: # %entry
+; PRESCOTT: # %bb.0: # %entry
; PRESCOTT-NEXT: movq .Ltmp0+14(%rip), %rax
; PRESCOTT-NEXT: movq %rax, 14(%rdi)
; PRESCOTT-NEXT: movq .Ltmp0+8(%rip), %rax
@@ -17,7 +17,7 @@ define float @foo(i8* nocapture %buf, float %a, float %b) nounwind uwtable {
; PRESCOTT-NEXT: movq %rax, (%rdi)
;
; NEHALEM-LABEL: foo:
-; NEHALEM: # BB#0: # %entry
+; NEHALEM: # %bb.0: # %entry
; NEHALEM-NEXT: movq .Ltmp0+14(%rip), %rax
; NEHALEM-NEXT: movq %rax, 14(%rdi)
; NEHALEM-NEXT: movups .Ltmp0(%rip), %xmm2
diff --git a/test/CodeGen/X86/pr12312.ll b/test/CodeGen/X86/pr12312.ll
index 6575d2a73d9..56c17f1217c 100644
--- a/test/CodeGen/X86/pr12312.ll
+++ b/test/CodeGen/X86/pr12312.ll
@@ -4,10 +4,10 @@
define i32 @veccond128(<4 x i32> %input) {
; SSE41-LABEL: veccond128:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: ptest %xmm0, %xmm0
; SSE41-NEXT: je .LBB0_2
-; SSE41-NEXT: # BB#1: # %if-true-block
+; SSE41-NEXT: # %bb.1: # %if-true-block
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: retq
; SSE41-NEXT: .LBB0_2: # %endif-block
@@ -15,10 +15,10 @@ define i32 @veccond128(<4 x i32> %input) {
; SSE41-NEXT: retq
;
; AVX-LABEL: veccond128:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vptest %xmm0, %xmm0
; AVX-NEXT: je .LBB0_2
-; AVX-NEXT: # BB#1: # %if-true-block
+; AVX-NEXT: # %bb.1: # %if-true-block
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: retq
; AVX-NEXT: .LBB0_2: # %endif-block
@@ -36,11 +36,11 @@ endif-block:
define i32 @veccond256(<8 x i32> %input) {
; SSE41-LABEL: veccond256:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: ptest %xmm0, %xmm0
; SSE41-NEXT: je .LBB1_2
-; SSE41-NEXT: # BB#1: # %if-true-block
+; SSE41-NEXT: # %bb.1: # %if-true-block
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: retq
; SSE41-NEXT: .LBB1_2: # %endif-block
@@ -48,10 +48,10 @@ define i32 @veccond256(<8 x i32> %input) {
; SSE41-NEXT: retq
;
; AVX-LABEL: veccond256:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vptest %ymm0, %ymm0
; AVX-NEXT: je .LBB1_2
-; AVX-NEXT: # BB#1: # %if-true-block
+; AVX-NEXT: # %bb.1: # %if-true-block
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -71,13 +71,13 @@ endif-block:
define i32 @veccond512(<16 x i32> %input) {
; SSE41-LABEL: veccond512:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: por %xmm3, %xmm1
; SSE41-NEXT: por %xmm2, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: ptest %xmm1, %xmm1
; SSE41-NEXT: je .LBB2_2
-; SSE41-NEXT: # BB#1: # %if-true-block
+; SSE41-NEXT: # %bb.1: # %if-true-block
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: retq
; SSE41-NEXT: .LBB2_2: # %endif-block
@@ -85,11 +85,11 @@ define i32 @veccond512(<16 x i32> %input) {
; SSE41-NEXT: retq
;
; AVX-LABEL: veccond512:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vptest %ymm0, %ymm0
; AVX-NEXT: je .LBB2_2
-; AVX-NEXT: # BB#1: # %if-true-block
+; AVX-NEXT: # %bb.1: # %if-true-block
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -109,14 +109,14 @@ endif-block:
define i32 @vectest128(<4 x i32> %input) {
; SSE41-LABEL: vectest128:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: ptest %xmm0, %xmm0
; SSE41-NEXT: setne %al
; SSE41-NEXT: retq
;
; AVX-LABEL: vectest128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vptest %xmm0, %xmm0
; AVX-NEXT: setne %al
@@ -129,7 +129,7 @@ define i32 @vectest128(<4 x i32> %input) {
define i32 @vectest256(<8 x i32> %input) {
; SSE41-LABEL: vectest256:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: ptest %xmm0, %xmm0
@@ -137,7 +137,7 @@ define i32 @vectest256(<8 x i32> %input) {
; SSE41-NEXT: retq
;
; AVX-LABEL: vectest256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vptest %ymm0, %ymm0
; AVX-NEXT: setne %al
@@ -151,7 +151,7 @@ define i32 @vectest256(<8 x i32> %input) {
define i32 @vectest512(<16 x i32> %input) {
; SSE41-LABEL: vectest512:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: por %xmm3, %xmm1
; SSE41-NEXT: por %xmm2, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
@@ -161,7 +161,7 @@ define i32 @vectest512(<16 x i32> %input) {
; SSE41-NEXT: retq
;
; AVX-LABEL: vectest512:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vptest %ymm0, %ymm0
@@ -176,14 +176,14 @@ define i32 @vectest512(<16 x i32> %input) {
define i32 @vecsel128(<4 x i32> %input, i32 %a, i32 %b) {
; SSE41-LABEL: vecsel128:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: ptest %xmm0, %xmm0
; SSE41-NEXT: cmovel %esi, %edi
; SSE41-NEXT: movl %edi, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: vecsel128:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vptest %xmm0, %xmm0
; AVX-NEXT: cmovel %esi, %edi
; AVX-NEXT: movl %edi, %eax
@@ -196,7 +196,7 @@ define i32 @vecsel128(<4 x i32> %input, i32 %a, i32 %b) {
define i32 @vecsel256(<8 x i32> %input, i32 %a, i32 %b) {
; SSE41-LABEL: vecsel256:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: ptest %xmm0, %xmm0
; SSE41-NEXT: cmovel %esi, %edi
@@ -204,7 +204,7 @@ define i32 @vecsel256(<8 x i32> %input, i32 %a, i32 %b) {
; SSE41-NEXT: retq
;
; AVX-LABEL: vecsel256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vptest %ymm0, %ymm0
; AVX-NEXT: cmovel %esi, %edi
; AVX-NEXT: movl %edi, %eax
@@ -218,7 +218,7 @@ define i32 @vecsel256(<8 x i32> %input, i32 %a, i32 %b) {
define i32 @vecsel512(<16 x i32> %input, i32 %a, i32 %b) {
; SSE41-LABEL: vecsel512:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: por %xmm3, %xmm1
; SSE41-NEXT: por %xmm2, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
@@ -228,7 +228,7 @@ define i32 @vecsel512(<16 x i32> %input, i32 %a, i32 %b) {
; SSE41-NEXT: retq
;
; AVX-LABEL: vecsel512:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vptest %ymm0, %ymm0
; AVX-NEXT: cmovel %esi, %edi
diff --git a/test/CodeGen/X86/pr13577.ll b/test/CodeGen/X86/pr13577.ll
index 665df2c183b..66bbf4531e5 100644
--- a/test/CodeGen/X86/pr13577.ll
+++ b/test/CodeGen/X86/pr13577.ll
@@ -8,7 +8,7 @@
define x86_fp80 @foo(x86_fp80 %a) {
; CHECK-LABEL: foo:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt -{{[0-9]+}}(%rsp)
; CHECK-NEXT: testb $-128, -{{[0-9]+}}(%rsp)
@@ -28,7 +28,7 @@ declare x86_fp80 @copysignl(x86_fp80, x86_fp80) nounwind readnone
define float @pr26070() {
; CHECK-LABEL: pr26070:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; CHECK-NEXT: orps {{.*}}(%rip), %xmm0
diff --git a/test/CodeGen/X86/pr14161.ll b/test/CodeGen/X86/pr14161.ll
index 95c71405bc9..ef8cd918f13 100644
--- a/test/CodeGen/X86/pr14161.ll
+++ b/test/CodeGen/X86/pr14161.ll
@@ -4,7 +4,7 @@ declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>)
define <2 x i16> @good(<4 x i32>*, <4 x i8>*) {
; CHECK-LABEL: good:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movdqa (%rdi), %xmm0
; CHECK-NEXT: pminud {{.*}}(%rip), %xmm0
; CHECK-NEXT: pmovzxwq %xmm0, %xmm0
@@ -23,7 +23,7 @@ entry:
define <2 x i16> @bad(<4 x i32>*, <4 x i8>*) {
; CHECK-LABEL: bad:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movdqa (%rdi), %xmm0
; CHECK-NEXT: pminud {{.*}}(%rip), %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
diff --git a/test/CodeGen/X86/pr14204.ll b/test/CodeGen/X86/pr14204.ll
index ab467d6ad96..65d5a7f51b4 100644
--- a/test/CodeGen/X86/pr14204.ll
+++ b/test/CodeGen/X86/pr14204.ll
@@ -3,7 +3,7 @@
define <8 x i32> @foo(<8 x i1> %bar) nounwind readnone {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: vpslld $31, %ymm0, %ymm0
; CHECK-NEXT: vpsrad $31, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/pr14314.ll b/test/CodeGen/X86/pr14314.ll
index 10733a47699..5223de39a52 100644
--- a/test/CodeGen/X86/pr14314.ll
+++ b/test/CodeGen/X86/pr14314.ll
@@ -3,7 +3,7 @@
define i64 @atomicSub(i64* %a, i64 %b) nounwind {
; CHECK-LABEL: atomicSub:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
@@ -22,7 +22,7 @@ define i64 @atomicSub(i64* %a, i64 %b) nounwind {
; CHECK-NEXT: sbbl %esi, %ecx
; CHECK-NEXT: lock cmpxchg8b (%ebp)
; CHECK-NEXT: jne .LBB0_1
-; CHECK-NEXT: # BB#2: # %atomicrmw.end
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebx
diff --git a/test/CodeGen/X86/pr15267.ll b/test/CodeGen/X86/pr15267.ll
index d62aaf90587..b515fe8c486 100644
--- a/test/CodeGen/X86/pr15267.ll
+++ b/test/CodeGen/X86/pr15267.ll
@@ -3,7 +3,7 @@
define <4 x i3> @test1(<4 x i3>* %in) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzwl (%rdi), %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: shrl $3, %ecx
@@ -22,7 +22,7 @@ define <4 x i3> @test1(<4 x i3>* %in) nounwind {
define <4 x i1> @test2(<4 x i1>* %in) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl (%rdi), %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: shrl %ecx
@@ -41,7 +41,7 @@ define <4 x i1> @test2(<4 x i1>* %in) nounwind {
define <4 x i64> @test3(<4 x i1>* %in) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl (%rdi), %eax
; CHECK-NEXT: movq %rax, %rcx
; CHECK-NEXT: shlq $62, %rcx
@@ -70,7 +70,7 @@ define <4 x i64> @test3(<4 x i1>* %in) nounwind {
define <16 x i4> @test4(<16 x i4>* %in) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: shrl $4, %ecx
diff --git a/test/CodeGen/X86/pr15309.ll b/test/CodeGen/X86/pr15309.ll
index 0301b58def1..8717353377f 100644
--- a/test/CodeGen/X86/pr15309.ll
+++ b/test/CodeGen/X86/pr15309.ll
@@ -3,7 +3,7 @@
define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>* nocapture %dest) nounwind {
; CHECK-LABEL: test_convert_float2_ulong2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: subl $20, %esp
diff --git a/test/CodeGen/X86/pr15705.ll b/test/CodeGen/X86/pr15705.ll
index e728bc8d34c..d70895bac98 100644
--- a/test/CodeGen/X86/pr15705.ll
+++ b/test/CodeGen/X86/pr15705.ll
@@ -4,16 +4,16 @@
define i32 @PR15705(i32 %x, i32 %a, i32 %b, i32 %c) #0 {
; X86-LABEL: PR15705:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: cmpl %ecx, %edx
; X86-NEXT: je .LBB0_4
-; X86-NEXT: # BB#1: # %if.end
+; X86-NEXT: # %bb.1: # %if.end
; X86-NEXT: cmpl %eax, %edx
; X86-NEXT: jne .LBB0_3
-; X86-NEXT: # BB#2:
+; X86-NEXT: # %bb.2:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: .LBB0_3: # %if.end
; X86-NEXT: movl %ecx, %eax
@@ -21,10 +21,10 @@ define i32 @PR15705(i32 %x, i32 %a, i32 %b, i32 %c) #0 {
; X86-NEXT: retl
;
; X64-LABEL: PR15705:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: je .LBB0_2
-; X64-NEXT: # BB#1: # %if.end
+; X64-NEXT: # %bb.1: # %if.end
; X64-NEXT: cmpl %edx, %edi
; X64-NEXT: cmovel %ecx, %esi
; X64-NEXT: movl %esi, %edx
diff --git a/test/CodeGen/X86/pr15981.ll b/test/CodeGen/X86/pr15981.ll
index c171e6df18e..90e1cca36a0 100644
--- a/test/CodeGen/X86/pr15981.ll
+++ b/test/CodeGen/X86/pr15981.ll
@@ -8,17 +8,17 @@
define i32 @fn1(i32, i32) {
; X86-LABEL: fn1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: testl %eax, %eax
; X86-NEXT: je .LBB0_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: .LBB0_2:
; X86-NEXT: retl
;
; X64-LABEL: fn1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testl %esi, %esi
; X64-NEXT: cmovel %esi, %edi
; X64-NEXT: movl %edi, %eax
@@ -30,22 +30,22 @@ define i32 @fn1(i32, i32) {
define void @fn2() {
; X86-LABEL: fn2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl b, %eax
; X86-NEXT: decl a
; X86-NEXT: jne .LBB1_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: .LBB1_2:
; X86-NEXT: movl %eax, c
; X86-NEXT: retl
;
; X64-LABEL: fn2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: decl {{.*}}(%rip)
; X64-NEXT: je .LBB1_2
-; X64-NEXT: # BB#1:
+; X64-NEXT: # %bb.1:
; X64-NEXT: movl {{.*}}(%rip), %eax
; X64-NEXT: .LBB1_2:
; X64-NEXT: movl %eax, {{.*}}(%rip)
diff --git a/test/CodeGen/X86/pr16031.ll b/test/CodeGen/X86/pr16031.ll
index 01bc38a243a..033a10fdfb3 100644
--- a/test/CodeGen/X86/pr16031.ll
+++ b/test/CodeGen/X86/pr16031.ll
@@ -3,7 +3,7 @@
define i64 @main(i1 %tobool1) nounwind {
; CHECK-LABEL: main:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $-12, %eax
diff --git a/test/CodeGen/X86/pr16360.ll b/test/CodeGen/X86/pr16360.ll
index 0d2878dc6af..6511cf234de 100644
--- a/test/CodeGen/X86/pr16360.ll
+++ b/test/CodeGen/X86/pr16360.ll
@@ -3,7 +3,7 @@
define i64 @foo(i32 %sum) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: shrl $2, %eax
; CHECK-NEXT: orl $-67108864, %eax # imm = 0xFC000000
diff --git a/test/CodeGen/X86/pr17764.ll b/test/CodeGen/X86/pr17764.ll
index ccfdb5b5834..a262fc20b54 100644
--- a/test/CodeGen/X86/pr17764.ll
+++ b/test/CodeGen/X86/pr17764.ll
@@ -3,7 +3,7 @@
define <16 x i16> @foo(<16 x i1> %mask, <16 x i16> %x, <16 x i16> %y) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT: vpsllw $15, %ymm0, %ymm0
; CHECK-NEXT: vpsraw $15, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/pr18014.ll b/test/CodeGen/X86/pr18014.ll
index cba065002d5..fed68e86dfb 100644
--- a/test/CodeGen/X86/pr18014.ll
+++ b/test/CodeGen/X86/pr18014.ll
@@ -6,7 +6,7 @@
define <4 x i32> @foo(<4 x i32>* %p, <4 x i1> %cond, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pslld $31, %xmm0
; CHECK-NEXT: psrad $31, %xmm0
; CHECK-NEXT: blendvps %xmm0, %xmm1, %xmm2
diff --git a/test/CodeGen/X86/pr18344.ll b/test/CodeGen/X86/pr18344.ll
index fcf4174ec3d..7ff489d70af 100644
--- a/test/CodeGen/X86/pr18344.ll
+++ b/test/CodeGen/X86/pr18344.ll
@@ -6,7 +6,7 @@
define void @FFT(%v4_varying_complex* noalias nocapture %destination, float* noalias %re, <4 x i32>* noalias nocapture %ptr_cast_for_load) nounwind {
; X86-LABEL: FFT:
-; X86: # BB#0: # %begin
+; X86: # %bb.0: # %begin
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
@@ -33,7 +33,7 @@ define void @FFT(%v4_varying_complex* noalias nocapture %destination, float* noa
; X86-NEXT: retl
;
; X64-LABEL: FFT:
-; X64: # BB#0: # %begin
+; X64: # %bb.0: # %begin
; X64-NEXT: movdqu (%rdx), %xmm0
; X64-NEXT: pslld $4, %xmm0
; X64-NEXT: movq %xmm0, %rax
diff --git a/test/CodeGen/X86/pr20011.ll b/test/CodeGen/X86/pr20011.ll
index c1df8924cb5..a502df18e77 100644
--- a/test/CodeGen/X86/pr20011.ll
+++ b/test/CodeGen/X86/pr20011.ll
@@ -6,7 +6,7 @@
define void @crash(i64 %x0, i64 %y0, %destTy* nocapture %dest) nounwind {
; X86-LABEL: crash:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movb {{[0-9]+}}(%esp), %dl
@@ -17,7 +17,7 @@ define void @crash(i64 %x0, i64 %y0, %destTy* nocapture %dest) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: crash:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $3, %esi
; X64-NEXT: movb %sil, (%rdx)
; X64-NEXT: andl $3, %edi
diff --git a/test/CodeGen/X86/pr20012.ll b/test/CodeGen/X86/pr20012.ll
index b6e4b8eaa0a..5df781c32e0 100644
--- a/test/CodeGen/X86/pr20012.ll
+++ b/test/CodeGen/X86/pr20012.ll
@@ -4,12 +4,12 @@
define void @test () {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb $0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movb $0, (%rax)
; X64-NEXT: retq
store <2 x i4> zeroinitializer, <2 x i4>* undef, align 1
diff --git a/test/CodeGen/X86/pr21792.ll b/test/CodeGen/X86/pr21792.ll
index 74f6c5a361f..1bb6ea6c592 100644
--- a/test/CodeGen/X86/pr21792.ll
+++ b/test/CodeGen/X86/pr21792.ll
@@ -8,7 +8,7 @@
define void @func(<4 x float> %vx) {
; CHECK-LABEL: func:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
diff --git a/test/CodeGen/X86/pr22338.ll b/test/CodeGen/X86/pr22338.ll
index 41430f5af99..ccdbe46b343 100644
--- a/test/CodeGen/X86/pr22338.ll
+++ b/test/CodeGen/X86/pr22338.ll
@@ -4,7 +4,7 @@
define i32 @fn() {
; X86-LABEL: fn:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $1, %eax
; X86-NEXT: setne %al
@@ -17,11 +17,11 @@ define i32 @fn() {
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: testl %eax, %eax
; X86-NEXT: je .LBB0_1
-; X86-NEXT: # BB#2: # %bb2
+; X86-NEXT: # %bb.2: # %bb2
; X86-NEXT: retl
;
; X64-LABEL: fn:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $1, %eax
; X64-NEXT: setne %al
@@ -34,7 +34,7 @@ define i32 @fn() {
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: testl %eax, %eax
; X64-NEXT: je .LBB0_1
-; X64-NEXT: # BB#2: # %bb2
+; X64-NEXT: # %bb.2: # %bb2
; X64-NEXT: retq
entry:
%cmp1 = icmp ne i32 undef, 1
diff --git a/test/CodeGen/X86/pr22774.ll b/test/CodeGen/X86/pr22774.ll
index 0b2d8c04e7d..acd394a4b43 100644
--- a/test/CodeGen/X86/pr22774.ll
+++ b/test/CodeGen/X86/pr22774.ll
@@ -6,7 +6,7 @@
define i32 @_Z3foov() {
; CHECK-LABEL: _Z3foov:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovdqa {{.*}}(%rip), %ymm0
; CHECK-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: vmovdqa %xmm0, {{.*}}(%rip)
diff --git a/test/CodeGen/X86/pr22970.ll b/test/CodeGen/X86/pr22970.ll
index 8de9c9e22c7..cd0d1f80fbb 100644
--- a/test/CodeGen/X86/pr22970.ll
+++ b/test/CodeGen/X86/pr22970.ll
@@ -4,7 +4,7 @@
define i32 @PR22970_i32(i32* nocapture readonly, i32) {
; X86-LABEL: PR22970_i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $4095, %ecx # imm = 0xFFF
; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
@@ -12,7 +12,7 @@ define i32 @PR22970_i32(i32* nocapture readonly, i32) {
; X86-NEXT: retl
;
; X64-LABEL: PR22970_i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: andl $4095, %esi # imm = 0xFFF
; X64-NEXT: movl 32(%rdi,%rsi,4), %eax
@@ -27,7 +27,7 @@ define i32 @PR22970_i32(i32* nocapture readonly, i32) {
define i32 @PR22970_i64(i32* nocapture readonly, i64) {
; X86-LABEL: PR22970_i64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $4095, %ecx # imm = 0xFFF
; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
@@ -35,7 +35,7 @@ define i32 @PR22970_i64(i32* nocapture readonly, i64) {
; X86-NEXT: retl
;
; X64-LABEL: PR22970_i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $4095, %esi # imm = 0xFFF
; X64-NEXT: movl 32(%rdi,%rsi,4), %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/pr23603.ll b/test/CodeGen/X86/pr23603.ll
index 315e6076861..f92d3687821 100644
--- a/test/CodeGen/X86/pr23603.ll
+++ b/test/CodeGen/X86/pr23603.ll
@@ -5,7 +5,7 @@ declare void @free_v()
define void @f(i32* %x, i32 %c32, i32* %y) nounwind {
; CHECK-LABEL: f:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
@@ -15,7 +15,7 @@ define void @f(i32* %x, i32 %c32, i32* %y) nounwind {
; CHECK-NEXT: callq free_v
; CHECK-NEXT: testl %ebp, %ebp
; CHECK-NEXT: je .LBB0_2
-; CHECK-NEXT: # BB#1: # %left
+; CHECK-NEXT: # %bb.1: # %left
; CHECK-NEXT: movl %ebx, (%r14)
; CHECK-NEXT: .LBB0_2: # %merge
; CHECK-NEXT: popq %rbx
diff --git a/test/CodeGen/X86/pr24602.ll b/test/CodeGen/X86/pr24602.ll
index 9c029aeefec..ef676efc42f 100644
--- a/test/CodeGen/X86/pr24602.ll
+++ b/test/CodeGen/X86/pr24602.ll
@@ -3,7 +3,7 @@
; PR24602: Make sure we don't barf on non-foldable code (with opaque constants).
; CHECK-LABEL: pr24602:
-; CHECK-NEXT: # BB#0
+; CHECK-NEXT: # %bb.0
; CHECK-NEXT: movabsq $-10000000000, [[CST:%[a-z0-9]+]]
; CHECK-NEXT: imulq [[CST]], %rsi
; CHECK-NEXT: leaq (%rdi,%rsi,8), %rax
diff --git a/test/CodeGen/X86/pr2585.ll b/test/CodeGen/X86/pr2585.ll
index 7796ee9a262..415164c8744 100644
--- a/test/CodeGen/X86/pr2585.ll
+++ b/test/CodeGen/X86/pr2585.ll
@@ -7,7 +7,7 @@
define internal void @PR2585() {
; X32-LABEL: PR2585:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; X32-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -15,7 +15,7 @@ define internal void @PR2585() {
; X32-NEXT: retl
;
; X64-LABEL: PR2585:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/test/CodeGen/X86/pr26350.ll b/test/CodeGen/X86/pr26350.ll
index 5ba5862413b..0de1e7840ff 100644
--- a/test/CodeGen/X86/pr26350.ll
+++ b/test/CodeGen/X86/pr26350.ll
@@ -7,7 +7,7 @@ target triple = "i386-unknown-linux-gnu"
define i32 @main() {
; CHECK-LABEL: main:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl d, %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: shrl $31, %ecx
diff --git a/test/CodeGen/X86/pr2656.ll b/test/CodeGen/X86/pr2656.ll
index b3033781ccc..7ab295f4cb6 100644
--- a/test/CodeGen/X86/pr2656.ll
+++ b/test/CodeGen/X86/pr2656.ll
@@ -15,7 +15,7 @@ target triple = "i686-apple-darwin9.4.0"
define void @foo(%struct.anon* byval %p) nounwind {
; CHECK-LABEL: foo:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -51,7 +51,7 @@ declare i32 @printf(...)
define double @PR22371(double %x) {
; CHECK-LABEL: PR22371:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
diff --git a/test/CodeGen/X86/pr27591.ll b/test/CodeGen/X86/pr27591.ll
index ec116c22c2c..9291915c767 100644
--- a/test/CodeGen/X86/pr27591.ll
+++ b/test/CodeGen/X86/pr27591.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @test1(i32 %x) #0 {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: cmpl $0, %edi
; CHECK-NEXT: setne %al
@@ -22,7 +22,7 @@ entry:
define void @test2(i32 %x) #0 {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: cmpl $0, %edi
; CHECK-NEXT: setne %al
diff --git a/test/CodeGen/X86/pr28129.ll b/test/CodeGen/X86/pr28129.ll
index 8cdd76e2d9e..f86c439ef04 100644
--- a/test/CodeGen/X86/pr28129.ll
+++ b/test/CodeGen/X86/pr28129.ll
@@ -4,14 +4,14 @@
define <4 x double> @cmp4f64_domain(<4 x double> %a) {
; X86-LABEL: cmp4f64_domain:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: cmp4f64_domain:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -25,14 +25,14 @@ define <4 x double> @cmp4f64_domain(<4 x double> %a) {
define <4 x double> @cmp4f64_domain_optsize(<4 x double> %a) optsize {
; X86-LABEL: cmp4f64_domain_optsize:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: cmp4f64_domain_optsize:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -46,14 +46,14 @@ define <4 x double> @cmp4f64_domain_optsize(<4 x double> %a) optsize {
define <8 x float> @cmp8f32_domain(<8 x float> %a) {
; X86-LABEL: cmp8f32_domain:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: cmp8f32_domain:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -67,14 +67,14 @@ define <8 x float> @cmp8f32_domain(<8 x float> %a) {
define <8 x float> @cmp8f32_domain_optsize(<8 x float> %a) optsize {
; X86-LABEL: cmp8f32_domain_optsize:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X86-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: cmp8f32_domain_optsize:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/pr28173.ll b/test/CodeGen/X86/pr28173.ll
index 0d2edcde4b7..ca455a129d3 100644
--- a/test/CodeGen/X86/pr28173.ll
+++ b/test/CodeGen/X86/pr28173.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
define i64 @foo64(i1 zeroext %i) #0 {
; CHECK-LABEL: foo64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orq $-2, %rax
; CHECK-NEXT: retq
@@ -24,7 +24,7 @@ end:
define i16 @foo16(i1 zeroext %i) #0 {
; CHECK-LABEL: foo16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $65534, %eax # imm = 0xFFFE
; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -42,7 +42,7 @@ end:
define i16 @foo16_1(i1 zeroext %i, i32 %j) #0 {
; CHECK-LABEL: foo16_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $2, %eax
; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -60,7 +60,7 @@ end:
define i32 @foo32(i1 zeroext %i) #0 {
; CHECK-LABEL: foo32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $-2, %eax
; CHECK-NEXT: retq
@@ -77,7 +77,7 @@ end:
define i8 @foo8(i1 zeroext %i) #0 {
; CHECK-LABEL: foo8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: orb $-2, %dil
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/pr28472.ll b/test/CodeGen/X86/pr28472.ll
index 9d2609022b3..603549a7313 100644
--- a/test/CodeGen/X86/pr28472.ll
+++ b/test/CodeGen/X86/pr28472.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
; CHECK-LABEL: {{^}}same_dynamic_index_fp_vector_type:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
define float @same_dynamic_index_fp_vector_type(float %val, i32 %idx) {
bb:
diff --git a/test/CodeGen/X86/pr29061.ll b/test/CodeGen/X86/pr29061.ll
index 918dfd4af01..93da35d60bf 100644
--- a/test/CodeGen/X86/pr29061.ll
+++ b/test/CodeGen/X86/pr29061.ll
@@ -6,7 +6,7 @@
define void @t1(i8 signext %c) {
; CHECK-LABEL: t1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %edi, -8
@@ -23,7 +23,7 @@ entry:
define void @t2(i8 signext %c) {
; CHECK-LABEL: t2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %esi, -8
diff --git a/test/CodeGen/X86/pr29112.ll b/test/CodeGen/X86/pr29112.ll
index cc670eeb978..f6bf76c1f85 100644
--- a/test/CodeGen/X86/pr29112.ll
+++ b/test/CodeGen/X86/pr29112.ll
@@ -7,7 +7,7 @@ declare <4 x float> @foo(<4 x float>, <4 x float>, <4 x float>, <4 x float>, <4
define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <4 x float> %a4, <16 x float>%c1, <16 x float>%c2) {
; CHECK-LABEL: bar:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq $88, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 96
; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
diff --git a/test/CodeGen/X86/pr29170.ll b/test/CodeGen/X86/pr29170.ll
index ecb4c978536..dfbad021d28 100644
--- a/test/CodeGen/X86/pr29170.ll
+++ b/test/CodeGen/X86/pr29170.ll
@@ -8,11 +8,11 @@ target triple = "i386-unknown-linux-gnu"
define i32 @main() {
; CHECK-LABEL: main:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne .LBB0_3
-; CHECK-NEXT: # BB#1: # %go
+; CHECK-NEXT: # %bb.1: # %go
; CHECK-NEXT: movl $-1, %ecx
; CHECK-NEXT: movsbl b, %edx
; CHECK-NEXT: notl %ecx
@@ -20,7 +20,7 @@ define i32 @main() {
; CHECK-NEXT: cmpl $-1, %edx
; CHECK-NEXT: sbbl %ecx, %eax
; CHECK-NEXT: jge .LBB0_3
-; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: # %bb.2: # %if.then
; CHECK-NEXT: movl $42, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB0_3: # %if.else
diff --git a/test/CodeGen/X86/pr30284.ll b/test/CodeGen/X86/pr30284.ll
index 7ab1b729ea0..c6a688ebdc4 100644
--- a/test/CodeGen/X86/pr30284.ll
+++ b/test/CodeGen/X86/pr30284.ll
@@ -3,7 +3,7 @@
define void @f_f___un_3C_unf_3E_un_3C_unf_3E_() {
; CHECK-LABEL: f_f___un_3C_unf_3E_un_3C_unf_3E_:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd 0, %zmm0
; CHECK-NEXT: vmovapd 64, %zmm1
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [0,16,0,16,0,16,0,16,0,16,0,16,0,16,0,16]
diff --git a/test/CodeGen/X86/pr30430.ll b/test/CodeGen/X86/pr30430.ll
index eb14503ec1e..816fe2376c4 100644
--- a/test/CodeGen/X86/pr30430.ll
+++ b/test/CodeGen/X86/pr30430.ll
@@ -3,7 +3,7 @@
define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6, float %f7, float %f8, float %f9, float %f10, float %f11, float %f12, float %f13, float %f14, float %f15, float %f16) #0 {
; CHECK-LABEL: makefloat:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
diff --git a/test/CodeGen/X86/pr30511.ll b/test/CodeGen/X86/pr30511.ll
index 3c512ba2700..7372980b41e 100644
--- a/test/CodeGen/X86/pr30511.ll
+++ b/test/CodeGen/X86/pr30511.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-pc-linux-gnu"
define i64 @PR30511(<2 x double> %a) {
; CHECK-LABEL: PR30511:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: cvtdq2pd %xmm0, %xmm0
diff --git a/test/CodeGen/X86/pr31045.ll b/test/CodeGen/X86/pr31045.ll
index 2cd59485048..f62836310bb 100644
--- a/test/CodeGen/X86/pr31045.ll
+++ b/test/CodeGen/X86/pr31045.ll
@@ -17,7 +17,7 @@
; Function Attrs: norecurse nounwind uwtable
define void @_Z1av() local_unnamed_addr #0 {
; CHECK-LABEL: _Z1av:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl struct_obj_3+{{.*}}(%rip), %eax
; CHECK-NEXT: movsbl {{.*}}(%rip), %ecx
; CHECK-NEXT: movzbl {{.*}}(%rip), %edx
diff --git a/test/CodeGen/X86/pr31088.ll b/test/CodeGen/X86/pr31088.ll
index 0dd8eb0ece8..f443ff417cc 100644
--- a/test/CodeGen/X86/pr31088.ll
+++ b/test/CodeGen/X86/pr31088.ll
@@ -5,7 +5,7 @@
define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
; X86-LABEL: ir_fadd_v1f16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: subl $28, %esp
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: movss %xmm0, (%esp)
@@ -31,7 +31,7 @@ define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: ir_fadd_v1f16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
; X64-NEXT: movaps %xmm1, %xmm0
@@ -49,7 +49,7 @@ define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
; X64-NEXT: retq
;
; F16C-LABEL: ir_fadd_v1f16:
-; F16C: # BB#0:
+; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
@@ -62,7 +62,7 @@ define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
; X86-LABEL: ir_fadd_v2f16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: subl $64, %esp
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: movss %xmm0, (%esp)
@@ -110,7 +110,7 @@ define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: ir_fadd_v2f16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: movss %xmm2, {{[0-9]+}}(%rsp) # 4-byte Spill
; X64-NEXT: movss %xmm1, {{[0-9]+}}(%rsp) # 4-byte Spill
@@ -145,7 +145,7 @@ define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
; X64-NEXT: retq
;
; F16C-LABEL: ir_fadd_v2f16:
-; F16C: # BB#0:
+; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; F16C-NEXT: vcvtph2ps %xmm3, %xmm3
; F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/pr31323.ll b/test/CodeGen/X86/pr31323.ll
index 6db09318cc8..e0e1dbe726d 100644
--- a/test/CodeGen/X86/pr31323.ll
+++ b/test/CodeGen/X86/pr31323.ll
@@ -6,12 +6,12 @@
define i32 @pr31323(i32) {
; X32-LABEL: pr31323:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: retl
;
; X64-LABEL: pr31323:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/pr31773.ll b/test/CodeGen/X86/pr31773.ll
index d7ae04bf238..6b4261c2435 100644
--- a/test/CodeGen/X86/pr31773.ll
+++ b/test/CodeGen/X86/pr31773.ll
@@ -6,7 +6,7 @@
define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
; AVX-LABEL: usat_trunc_wb_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX-NEXT: vpminuw %xmm2, %xmm1, %xmm1
@@ -16,7 +16,7 @@ define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
; AVX-NEXT: retq
;
; AVX512-LABEL: usat_trunc_wb_256:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovuswb %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -28,7 +28,7 @@ define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
define <8 x i16> @usat_trunc_dw_256(<8 x i32> %i) {
; AVX-LABEL: usat_trunc_dw_256:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
; AVX-NEXT: vpminud %xmm2, %xmm1, %xmm1
@@ -38,7 +38,7 @@ define <8 x i16> @usat_trunc_dw_256(<8 x i32> %i) {
; AVX-NEXT: retq
;
; AVX512-LABEL: usat_trunc_dw_256:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovusdw %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
diff --git a/test/CodeGen/X86/pr31956.ll b/test/CodeGen/X86/pr31956.ll
index e9293048f4e..80e4ed081f7 100644
--- a/test/CodeGen/X86/pr31956.ll
+++ b/test/CodeGen/X86/pr31956.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-scei-ps4"
define <4 x float> @foo() {
; CHECK-LABEL: foo:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2,3]
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/pr32108.ll b/test/CodeGen/X86/pr32108.ll
index f14b04802a0..ff1b7d3401f 100644
--- a/test/CodeGen/X86/pr32108.ll
+++ b/test/CodeGen/X86/pr32108.ll
@@ -3,7 +3,7 @@
define void @pr32108() {
; CHECK-LABEL: pr32108:
-; CHECK: # BB#0: # %CF257
+; CHECK: # %bb.0: # %CF257
; CHECK-NEXT: movb $0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_1: # %CF244
diff --git a/test/CodeGen/X86/pr32241.ll b/test/CodeGen/X86/pr32241.ll
index f48fef5f7fb..69c32eaacbb 100644
--- a/test/CodeGen/X86/pr32241.ll
+++ b/test/CodeGen/X86/pr32241.ll
@@ -3,7 +3,7 @@
define i32 @_Z3foov() {
; CHECK-LABEL: _Z3foov:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: subl $16, %esp
@@ -18,7 +18,7 @@ define i32 @_Z3foov() {
; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jne .LBB0_2
-; CHECK-NEXT: # BB#1: # %lor.rhs
+; CHECK-NEXT: # %bb.1: # %lor.rhs
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: movb %al, %cl
; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
@@ -37,7 +37,7 @@ define i32 @_Z3foov() {
; CHECK-NEXT: cmpl $0, %edx
; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jne .LBB0_4
-; CHECK-NEXT: # BB#3: # %lor.rhs4
+; CHECK-NEXT: # %bb.3: # %lor.rhs4
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: movb %al, %cl
; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
diff --git a/test/CodeGen/X86/pr32256.ll b/test/CodeGen/X86/pr32256.ll
index f6e254aaad0..ab6af886970 100644
--- a/test/CodeGen/X86/pr32256.ll
+++ b/test/CodeGen/X86/pr32256.ll
@@ -6,7 +6,7 @@
; Function Attrs: noinline nounwind
define void @_Z1av() {
; CHECK-LABEL: _Z1av:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subl $2, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 6
; CHECK-NEXT: xorl %eax, %eax
diff --git a/test/CodeGen/X86/pr32282.ll b/test/CodeGen/X86/pr32282.ll
index ca4767ba73c..78dcb168e95 100644
--- a/test/CodeGen/X86/pr32282.ll
+++ b/test/CodeGen/X86/pr32282.ll
@@ -11,7 +11,7 @@
define void @foo() {
; X86-LABEL: foo:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: movl d, %eax
@@ -46,7 +46,7 @@ define void @foo() {
; X86-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movabsq $3013716102212485120, %rcx # imm = 0x29D2DED3DE400000
; X64-NEXT: andnq %rcx, %rax, %rcx
@@ -55,7 +55,7 @@ define void @foo() {
; X64-NEXT: movabsq $4393751543808, %rax # imm = 0x3FF00000000
; X64-NEXT: testq %rax, %rcx
; X64-NEXT: je .LBB0_1
-; X64-NEXT: # BB#2:
+; X64-NEXT: # %bb.2:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: idivq %rcx
diff --git a/test/CodeGen/X86/pr32284.ll b/test/CodeGen/X86/pr32284.ll
index 99536733b41..86bb74050ad 100644
--- a/test/CodeGen/X86/pr32284.ll
+++ b/test/CodeGen/X86/pr32284.ll
@@ -8,7 +8,7 @@
define void @foo() {
; X86-O0-LABEL: foo:
-; X86-O0: # BB#0: # %entry
+; X86-O0: # %bb.0: # %entry
; X86-O0-NEXT: xorl %eax, %eax
; X86-O0-NEXT: movl %eax, %ecx
; X86-O0-NEXT: xorl %eax, %eax
@@ -36,7 +36,7 @@ define void @foo() {
; X86-O0-NEXT: retq
;
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movzbl {{.*}}(%rip), %eax
; X64-NEXT: testb %al, %al
; X64-NEXT: setne -{{[0-9]+}}(%rsp)
@@ -50,7 +50,7 @@ define void @foo() {
; X64-NEXT: retq
;
; 686-O0-LABEL: foo:
-; 686-O0: # BB#0: # %entry
+; 686-O0: # %bb.0: # %entry
; 686-O0-NEXT: subl $8, %esp
; 686-O0-NEXT: .cfi_def_cfa_offset 12
; 686-O0-NEXT: movb c, %al
@@ -74,7 +74,7 @@ define void @foo() {
; 686-O0-NEXT: retl
;
; 686-LABEL: foo:
-; 686: # BB#0: # %entry
+; 686: # %bb.0: # %entry
; 686-NEXT: subl $8, %esp
; 686-NEXT: .cfi_def_cfa_offset 12
; 686-NEXT: movzbl c, %eax
@@ -120,7 +120,7 @@ entry:
define void @f1() {
; X86-O0-LABEL: f1:
-; X86-O0: # BB#0: # %entry
+; X86-O0: # %bb.0: # %entry
; X86-O0-NEXT: movabsq $8381627093, %rax # imm = 0x1F3957AD5
; X86-O0-NEXT: movslq var_5, %rcx
; X86-O0-NEXT: addq %rax, %rcx
@@ -156,7 +156,7 @@ define void @f1() {
; X86-O0-NEXT: retq
;
; X64-LABEL: f1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movslq {{.*}}(%rip), %rax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: cmpq $-1, %rax
@@ -176,7 +176,7 @@ define void @f1() {
; X64-NEXT: retq
;
; 686-O0-LABEL: f1:
-; 686-O0: # BB#0: # %entry
+; 686-O0: # %bb.0: # %entry
; 686-O0-NEXT: pushl %ebp
; 686-O0-NEXT: .cfi_def_cfa_offset 8
; 686-O0-NEXT: pushl %ebx
@@ -233,7 +233,7 @@ define void @f1() {
; 686-O0-NEXT: retl
;
; 686-LABEL: f1:
-; 686: # BB#0: # %entry
+; 686: # %bb.0: # %entry
; 686-NEXT: pushl %edi
; 686-NEXT: .cfi_def_cfa_offset 8
; 686-NEXT: pushl %esi
@@ -307,7 +307,7 @@ entry:
define void @f2() {
; X86-O0-LABEL: f2:
-; X86-O0: # BB#0: # %entry
+; X86-O0: # %bb.0: # %entry
; X86-O0-NEXT: # implicit-def: %rax
; X86-O0-NEXT: movzbl var_7, %ecx
; X86-O0-NEXT: cmpb $0, var_7
@@ -335,7 +335,7 @@ define void @f2() {
; X86-O0-NEXT: retq
;
; X64-LABEL: f2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movzbl {{.*}}(%rip), %eax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: testl %eax, %eax
@@ -352,7 +352,7 @@ define void @f2() {
; X64-NEXT: retq
;
; 686-O0-LABEL: f2:
-; 686-O0: # BB#0: # %entry
+; 686-O0: # %bb.0: # %entry
; 686-O0-NEXT: pushl %edi
; 686-O0-NEXT: .cfi_def_cfa_offset 8
; 686-O0-NEXT: pushl %esi
@@ -391,7 +391,7 @@ define void @f2() {
; 686-O0-NEXT: retl
;
; 686-LABEL: f2:
-; 686: # BB#0: # %entry
+; 686: # %bb.0: # %entry
; 686-NEXT: subl $2, %esp
; 686-NEXT: .cfi_def_cfa_offset 6
; 686-NEXT: movzbl var_7, %eax
@@ -441,7 +441,7 @@ entry:
define void @f3() #0 {
; X86-O0-LABEL: f3:
-; X86-O0: # BB#0: # %entry
+; X86-O0: # %bb.0: # %entry
; X86-O0-NEXT: movl var_13, %eax
; X86-O0-NEXT: xorl $-1, %eax
; X86-O0-NEXT: movl %eax, %eax
@@ -477,7 +477,7 @@ define void @f3() #0 {
; X86-O0-NEXT: retq
;
; X64-LABEL: f3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl {{.*}}(%rip), %eax
; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF
; X64-NEXT: xorq %rax, %rcx
@@ -493,7 +493,7 @@ define void @f3() #0 {
; X64-NEXT: retq
;
; 686-O0-LABEL: f3:
-; 686-O0: # BB#0: # %entry
+; 686-O0: # %bb.0: # %entry
; 686-O0-NEXT: pushl %ebp
; 686-O0-NEXT: .cfi_def_cfa_offset 8
; 686-O0-NEXT: .cfi_offset %ebp, -8
@@ -530,7 +530,7 @@ define void @f3() #0 {
; 686-O0-NEXT: retl
;
; 686-LABEL: f3:
-; 686: # BB#0: # %entry
+; 686: # %bb.0: # %entry
; 686-NEXT: pushl %ebp
; 686-NEXT: .cfi_def_cfa_offset 8
; 686-NEXT: .cfi_offset %ebp, -8
diff --git a/test/CodeGen/X86/pr32329.ll b/test/CodeGen/X86/pr32329.ll
index 4fea702e063..bc7fe8c0047 100644
--- a/test/CodeGen/X86/pr32329.ll
+++ b/test/CodeGen/X86/pr32329.ll
@@ -16,7 +16,7 @@
define void @foo() local_unnamed_addr {
; X86-LABEL: foo:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %ebx
@@ -63,7 +63,7 @@ define void @foo() local_unnamed_addr {
; X86-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl {{.*}}(%rip), %eax
; X64-NEXT: movsbl {{.*}}(%rip), %r9d
; X64-NEXT: movzwl {{.*}}(%rip), %r8d
diff --git a/test/CodeGen/X86/pr32340.ll b/test/CodeGen/X86/pr32340.ll
index dd160c6ee5b..f5a67c1a052 100644
--- a/test/CodeGen/X86/pr32340.ll
+++ b/test/CodeGen/X86/pr32340.ll
@@ -12,7 +12,7 @@
define void @foo() {
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: movabsq $-1142377792914660288, %rdx # imm = 0xF02575732E06E440
diff --git a/test/CodeGen/X86/pr32345.ll b/test/CodeGen/X86/pr32345.ll
index 038d5d639aa..78f78341839 100644
--- a/test/CodeGen/X86/pr32345.ll
+++ b/test/CodeGen/X86/pr32345.ll
@@ -9,7 +9,7 @@
define void @foo() {
; X640-LABEL: foo:
-; X640: # BB#0: # %bb
+; X640: # %bb.0: # %bb
; X640-NEXT: # implicit-def: %rax
; X640-NEXT: movzwl var_22, %ecx
; X640-NEXT: movzwl var_27, %edx
@@ -35,7 +35,7 @@ define void @foo() {
; X640-NEXT: retq
;
; 6860-LABEL: foo:
-; 6860: # BB#0: # %bb
+; 6860: # %bb.0: # %bb
; 6860-NEXT: pushl %ebp
; 6860-NEXT: .cfi_def_cfa_offset 8
; 6860-NEXT: .cfi_offset %ebp, -8
@@ -80,7 +80,7 @@ define void @foo() {
; 6860-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; 6860-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; 6860-NEXT: jne .LBB0_2
-; 6860-NEXT: # BB#1: # %bb
+; 6860-NEXT: # %bb.1: # %bb
; 6860-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; 6860-NEXT: .LBB0_2: # %bb
@@ -96,7 +96,7 @@ define void @foo() {
; 6860-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0: # %bb
+; X64: # %bb.0: # %bb
; X64-NEXT: movzwl {{.*}}(%rip), %ecx
; X64-NEXT: movzwl {{.*}}(%rip), %eax
; X64-NEXT: xorw %cx, %ax
@@ -110,7 +110,7 @@ define void @foo() {
; X64-NEXT: retq
;
; 686-LABEL: foo:
-; 686: # BB#0: # %bb
+; 686: # %bb.0: # %bb
; 686-NEXT: pushl %ebp
; 686-NEXT: .cfi_def_cfa_offset 8
; 686-NEXT: .cfi_offset %ebp, -8
@@ -130,7 +130,7 @@ define void @foo() {
; 686-NEXT: shrdl %cl, %edx, %eax
; 686-NEXT: testb $32, %cl
; 686-NEXT: jne .LBB0_2
-; 686-NEXT: # BB#1: # %bb
+; 686-NEXT: # %bb.1: # %bb
; 686-NEXT: movl %eax, %edx
; 686-NEXT: .LBB0_2: # %bb
; 686-NEXT: movb %dl, (%eax)
diff --git a/test/CodeGen/X86/pr32368.ll b/test/CodeGen/X86/pr32368.ll
index b0f0b123cca..5fa771c03c8 100644
--- a/test/CodeGen/X86/pr32368.ll
+++ b/test/CodeGen/X86/pr32368.ll
@@ -6,21 +6,21 @@
define <4 x float> @PR32368_128(<4 x float>) {
; SSE-LABEL: PR32368_128:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: addps %xmm0, %xmm0
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: PR32368_128:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vaddps %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR32368_128:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vaddps %xmm0, %xmm0, %xmm0
@@ -29,7 +29,7 @@ define <4 x float> @PR32368_128(<4 x float>) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR32368_128:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vaddps %xmm0, %xmm0, %xmm0
@@ -48,7 +48,7 @@ define <4 x float> @PR32368_128(<4 x float>) {
define <8 x float> @PR32368_256(<8 x float>) {
; SSE-LABEL: PR32368_256:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [4294967004,4294967004,4294967004,4294967004]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
@@ -60,14 +60,14 @@ define <8 x float> @PR32368_256(<8 x float>) {
; SSE-NEXT: retq
;
; AVX1-LABEL: PR32368_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vaddps %ymm0, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR32368_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; AVX2-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm0, %ymm0
@@ -76,7 +76,7 @@ define <8 x float> @PR32368_256(<8 x float>) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR32368_256:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; AVX512-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vaddps %ymm0, %ymm0, %ymm0
@@ -95,7 +95,7 @@ define <8 x float> @PR32368_256(<8 x float>) {
define <16 x float> @PR32368_512(<16 x float>) {
; SSE-LABEL: PR32368_512:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [4294967004,4294967004,4294967004,4294967004]
; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: andps %xmm4, %xmm1
@@ -113,7 +113,7 @@ define <16 x float> @PR32368_512(<16 x float>) {
; SSE-NEXT: retq
;
; AVX1-LABEL: PR32368_512:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [4294967004,4294967004,4294967004,4294967004,4294967004,4294967004,4294967004,4294967004]
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
@@ -125,7 +125,7 @@ define <16 x float> @PR32368_512(<16 x float>) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR32368_512:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %ymm2
; AVX2-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vandps %ymm2, %ymm1, %ymm1
@@ -137,7 +137,7 @@ define <16 x float> @PR32368_512(<16 x float>) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR32368_512:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512-NEXT: vaddps %zmm0, %zmm0, %zmm0
; AVX512-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
diff --git a/test/CodeGen/X86/pr32420.ll b/test/CodeGen/X86/pr32420.ll
index d4812945900..e635c683502 100644
--- a/test/CodeGen/X86/pr32420.ll
+++ b/test/CodeGen/X86/pr32420.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-macosx10.12.0"
define i32 @PR32420() {
; CHECK-LABEL: PR32420:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq _a@{{.*}}(%rip), %rax
; CHECK-NEXT: movzwl (%rax), %eax
; CHECK-NEXT: movl %eax, %ecx
diff --git a/test/CodeGen/X86/pr32451.ll b/test/CodeGen/X86/pr32451.ll
index 67c0cb39f8c..86a46facbb5 100644
--- a/test/CodeGen/X86/pr32451.ll
+++ b/test/CodeGen/X86/pr32451.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
define i8** @japi1_convert_690(i8**, i8***, i32) {
; CHECK-LABEL: japi1_convert_690:
-; CHECK: # BB#0: # %top
+; CHECK: # %bb.0: # %top
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: subl $16, %esp
diff --git a/test/CodeGen/X86/pr32484.ll b/test/CodeGen/X86/pr32484.ll
index 093c9b01c55..dc67ec2924b 100644
--- a/test/CodeGen/X86/pr32484.ll
+++ b/test/CodeGen/X86/pr32484.ll
@@ -3,7 +3,7 @@
define void @foo() {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # implicit-def: %rax
; CHECK-NEXT: jmpq *%rax
; CHECK-NEXT: .LBB0_1:
diff --git a/test/CodeGen/X86/pr32659.ll b/test/CodeGen/X86/pr32659.ll
index b74d99f1015..ad3a78052b6 100644
--- a/test/CodeGen/X86/pr32659.ll
+++ b/test/CodeGen/X86/pr32659.ll
@@ -23,7 +23,7 @@ declare i32 @putchar(i32) nounwind
define void @fn2() nounwind optsize {
; CHECK-LABEL: fn2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: subl $8, %esp
; CHECK-NEXT: movl $48, (%esp)
diff --git a/test/CodeGen/X86/pr32907.ll b/test/CodeGen/X86/pr32907.ll
index 8057b31c961..a4396e86cd2 100644
--- a/test/CodeGen/X86/pr32907.ll
+++ b/test/CodeGen/X86/pr32907.ll
@@ -6,7 +6,7 @@
define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) {
; SSE2-LABEL: PR32907:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psubq %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
@@ -20,7 +20,7 @@ define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: PR32907:
-; SSE42: # BB#0: # %entry
+; SSE42: # %bb.0: # %entry
; SSE42-NEXT: psubq %xmm1, %xmm0
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pcmpgtq %xmm0, %xmm1
@@ -29,7 +29,7 @@ define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) {
; SSE42-NEXT: retq
;
; AVX2-LABEL: PR32907:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
@@ -38,7 +38,7 @@ define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR32907:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsraq $63, %zmm0, %zmm1
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/pr33290.ll b/test/CodeGen/X86/pr33290.ll
index 4c07a273d71..b5d9754eba7 100644
--- a/test/CodeGen/X86/pr33290.ll
+++ b/test/CodeGen/X86/pr33290.ll
@@ -8,7 +8,7 @@
define void @e() {
; X86-LABEL: e:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl b, %eax
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB0_1: # %for.cond
@@ -20,7 +20,7 @@ define void @e() {
; X86-NEXT: jmp .LBB0_1
;
; X64-LABEL: e:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movl $a, %esi
; X64-NEXT: .p2align 4, 0x90
diff --git a/test/CodeGen/X86/pr33349.ll b/test/CodeGen/X86/pr33349.ll
index e73d1f590c1..fb5eb7519e2 100644
--- a/test/CodeGen/X86/pr33349.ll
+++ b/test/CodeGen/X86/pr33349.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @test(<4 x i1> %m, <4 x x86_fp80> %v, <4 x x86_fp80>*%p) local_unnamed_addr {
; KNL-LABEL: test:
-; KNL: # BB#0: # %bb
+; KNL: # %bb.0: # %bb
; KNL-NEXT: vpextrb $0, %xmm0, %eax
; KNL-NEXT: testb $1, %al
; KNL-NEXT: fld1
@@ -37,7 +37,7 @@ target triple = "x86_64-unknown-linux-gnu"
; KNL-NEXT: retq
;
; SKX-LABEL: test:
-; SKX: # BB#0: # %bb
+; SKX: # %bb.0: # %bb
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k0
; SKX-NEXT: kshiftrw $2, %k0, %k1
diff --git a/test/CodeGen/X86/pr33828.ll b/test/CodeGen/X86/pr33828.ll
index 1b7f44323b6..6314ed6bd5b 100644
--- a/test/CodeGen/X86/pr33828.ll
+++ b/test/CodeGen/X86/pr33828.ll
@@ -6,20 +6,20 @@
define void @foo() {
; X86-LABEL: foo:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movsbl var_580, %eax
; X86-NEXT: testl $-536870913, %eax # imm = 0xDFFFFFFF
; X86-NEXT: jne .LBB0_1
-; X86-NEXT: # BB#2: # %if.end13
+; X86-NEXT: # %bb.2: # %if.end13
; X86-NEXT: retl
; X86-NEXT: .LBB0_1: # %if.then11
;
; X64-LABEL: foo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movsbl {{.*}}(%rip), %eax
; X64-NEXT: testl $-536870913, %eax # imm = 0xDFFFFFFF
; X64-NEXT: jne .LBB0_1
-; X64-NEXT: # BB#2: # %if.end13
+; X64-NEXT: # %bb.2: # %if.end13
; X64-NEXT: retq
; X64-NEXT: .LBB0_1: # %if.then11
entry:
diff --git a/test/CodeGen/X86/pr33844.ll b/test/CodeGen/X86/pr33844.ll
index 2585945aa10..f832aca6d49 100644
--- a/test/CodeGen/X86/pr33844.ll
+++ b/test/CodeGen/X86/pr33844.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @patatino() {
; CHECK-LABEL: patatino:
-; CHECK: # BB#0: # %bb
+; CHECK: # %bb.0: # %bb
; CHECK-NEXT: movl {{.*}}(%rip), %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: shrl $31, %ecx
diff --git a/test/CodeGen/X86/pr33960.ll b/test/CodeGen/X86/pr33960.ll
index fb9236d3ffa..34af4df9455 100644
--- a/test/CodeGen/X86/pr33960.ll
+++ b/test/CodeGen/X86/pr33960.ll
@@ -6,12 +6,12 @@
define void @PR33960() {
; X86-LABEL: PR33960:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl $0, b
; X86-NEXT: retl
;
; X64-LABEL: PR33960:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl $0, {{.*}}(%rip)
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/pr34080.ll b/test/CodeGen/X86/pr34080.ll
index 1eba8e70f23..72dbf3c4851 100644
--- a/test/CodeGen/X86/pr34080.ll
+++ b/test/CodeGen/X86/pr34080.ll
@@ -8,7 +8,7 @@
define void @_Z1fe(x86_fp80 %z) local_unnamed_addr #0 {
; SSE2-LABEL: _Z1fe:
-; SSE2: ## BB#0: ## %entry
+; SSE2: ## %bb.0: ## %entry
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: .cfi_def_cfa_offset 16
; SSE2-NEXT: .cfi_offset %rbp, -16
@@ -47,7 +47,7 @@ define void @_Z1fe(x86_fp80 %z) local_unnamed_addr #0 {
; SSE2-NEXT: retq
;
; SSE2-BROKEN-LABEL: _Z1fe:
-; SSE2-BROKEN: ## BB#0: ## %entry
+; SSE2-BROKEN: ## %bb.0: ## %entry
; SSE2-BROKEN-NEXT: pushq %rbp
; SSE2-BROKEN-NEXT: .cfi_def_cfa_offset 16
; SSE2-BROKEN-NEXT: .cfi_offset %rbp, -16
@@ -86,7 +86,7 @@ define void @_Z1fe(x86_fp80 %z) local_unnamed_addr #0 {
; SSE2-BROKEN-NEXT: retq
;
; SSE3-LABEL: _Z1fe:
-; SSE3: ## BB#0: ## %entry
+; SSE3: ## %bb.0: ## %entry
; SSE3-NEXT: pushq %rbp
; SSE3-NEXT: .cfi_def_cfa_offset 16
; SSE3-NEXT: .cfi_offset %rbp, -16
@@ -115,7 +115,7 @@ define void @_Z1fe(x86_fp80 %z) local_unnamed_addr #0 {
; SSE3-NEXT: retq
;
; AVX-LABEL: _Z1fe:
-; AVX: ## BB#0: ## %entry
+; AVX: ## %bb.0: ## %entry
; AVX-NEXT: pushq %rbp
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: .cfi_offset %rbp, -16
diff --git a/test/CodeGen/X86/pr34088.ll b/test/CodeGen/X86/pr34088.ll
index 2049c5507c6..4fa24a50648 100644
--- a/test/CodeGen/X86/pr34088.ll
+++ b/test/CodeGen/X86/pr34088.ll
@@ -9,7 +9,7 @@
;
define i32 @pr34088() local_unnamed_addr {
; CHECK-LABEL: pr34088:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %ebp, -8
diff --git a/test/CodeGen/X86/pr34137.ll b/test/CodeGen/X86/pr34137.ll
index 6ca42185043..6210103db17 100644
--- a/test/CodeGen/X86/pr34137.ll
+++ b/test/CodeGen/X86/pr34137.ll
@@ -7,7 +7,7 @@
define void @pr34127() {
; CHECK-LABEL: pr34127:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movzwl {{.*}}(%rip), %eax
; CHECK-NEXT: movzwl {{.*}}(%rip), %ecx
; CHECK-NEXT: andw %ax, %cx
diff --git a/test/CodeGen/X86/pr34139.ll b/test/CodeGen/X86/pr34139.ll
index 0aea3fcfdba..e5c7c5be654 100644
--- a/test/CodeGen/X86/pr34139.ll
+++ b/test/CodeGen/X86/pr34139.ll
@@ -3,7 +3,7 @@
define void @f_f(<16 x double>* %ptr) {
; CHECK-LABEL: f_f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, (%rax)
store <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8>* undef
diff --git a/test/CodeGen/X86/pr34149.ll b/test/CodeGen/X86/pr34149.ll
index 3875c2fa47d..017d68553fd 100644
--- a/test/CodeGen/X86/pr34149.ll
+++ b/test/CodeGen/X86/pr34149.ll
@@ -7,7 +7,7 @@ declare <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y)
define <4 x double> @via_minnum(<4 x double> %x, <4 x double> %y) {
; CHECK-LABEL: via_minnum:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vminpd %ymm0, %ymm1, %ymm2
; CHECK-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; CHECK-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
@@ -18,7 +18,7 @@ define <4 x double> @via_minnum(<4 x double> %x, <4 x double> %y) {
define <4 x double> @via_maxnum(<4 x double> %x, <4 x double> %y) {
; CHECK-LABEL: via_maxnum:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmaxpd %ymm0, %ymm1, %ymm2
; CHECK-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; CHECK-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
@@ -29,7 +29,7 @@ define <4 x double> @via_maxnum(<4 x double> %x, <4 x double> %y) {
define <4 x double> @via_fcmp(<4 x double> %x, <4 x double> %y) {
; CHECK-LABEL: via_fcmp:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vminpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
%c = fcmp ule <4 x double> %x, %y
diff --git a/test/CodeGen/X86/pr34177.ll b/test/CodeGen/X86/pr34177.ll
index 7c210058ae6..5904e5df4a1 100644
--- a/test/CodeGen/X86/pr34177.ll
+++ b/test/CodeGen/X86/pr34177.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @test() local_unnamed_addr {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm0 = [2,3]
; CHECK-NEXT: vpextrq $1, %xmm0, %rax
; CHECK-NEXT: vmovq %xmm0, %rcx
diff --git a/test/CodeGen/X86/pr34271-1.ll b/test/CodeGen/X86/pr34271-1.ll
index 2e2f0fd0aa9..d341ceb1c11 100644
--- a/test/CodeGen/X86/pr34271-1.ll
+++ b/test/CodeGen/X86/pr34271-1.ll
@@ -3,7 +3,7 @@
define <16 x i16> @foo(<16 x i32> %i) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: vpmovdw %zmm0, %ymm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/pr34271.ll b/test/CodeGen/X86/pr34271.ll
index 40d01617c30..d626f8f6bf9 100644
--- a/test/CodeGen/X86/pr34271.ll
+++ b/test/CodeGen/X86/pr34271.ll
@@ -6,7 +6,7 @@
define <4 x i32> @f(<4 x i32> %a) {
; CHECK-LABEL: f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: paddd .LCPI0_0(%rip), %xmm0
; CHECK-NEXT: retq
%v = add nuw nsw <4 x i32> %a, <i32 16843009, i32 16843009, i32 16843009, i32 16843009>
diff --git a/test/CodeGen/X86/pr34381.ll b/test/CodeGen/X86/pr34381.ll
index ce18f482d27..3053ddda5f8 100644
--- a/test/CodeGen/X86/pr34381.ll
+++ b/test/CodeGen/X86/pr34381.ll
@@ -10,7 +10,7 @@
; Function Attrs: noinline nounwind optnone uwtable
define void @_Z3foov() {
; CHECK-LABEL: _Z3foov:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsbl {{.*}}(%rip), %eax
; CHECK-NEXT: negl %eax
; CHECK-NEXT: cmpl %eax, {{.*}}(%rip)
diff --git a/test/CodeGen/X86/pr34421.ll b/test/CodeGen/X86/pr34421.ll
index 5db8b4c601e..8241410be36 100644
--- a/test/CodeGen/X86/pr34421.ll
+++ b/test/CodeGen/X86/pr34421.ll
@@ -4,7 +4,7 @@
define void @thread_selfcounts() noimplicitfloat noredzone nounwind {
; X86-LABEL: thread_selfcounts:
-; X86: ## BB#0: ## %entry
+; X86: ## %bb.0: ## %entry
; X86-NEXT: subl $44, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -17,7 +17,7 @@ define void @thread_selfcounts() noimplicitfloat noredzone nounwind {
; X86-NEXT: ## -- End function
;
; X64-LABEL: thread_selfcounts:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: subq $40, %rsp
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
diff --git a/test/CodeGen/X86/pr34605.ll b/test/CodeGen/X86/pr34605.ll
index 5fb5c84f416..8c25b068ecf 100644
--- a/test/CodeGen/X86/pr34605.ll
+++ b/test/CodeGen/X86/pr34605.ll
@@ -3,7 +3,7 @@
define void @pr34605(i8* nocapture %s, i32 %p) {
; CHECK-LABEL: pr34605:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %zmm0
; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k0
diff --git a/test/CodeGen/X86/pr34629.ll b/test/CodeGen/X86/pr34629.ll
index 031b99d5974..55084b425c7 100644
--- a/test/CodeGen/X86/pr34629.ll
+++ b/test/CodeGen/X86/pr34629.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: norecurse nounwind uwtable
define void @c() local_unnamed_addr #0 {
; CHECK-LABEL: c:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq {{.*}}(%rip), %rax
; CHECK-NEXT: leaq (%rax,%rax,4), %rcx
; CHECK-NEXT: negq %rcx
@@ -18,7 +18,7 @@ define void @c() local_unnamed_addr #0 {
; CHECK-NEXT: leaq (%rax,%rax,4), %rax
; CHECK-NEXT: testq %rax, %rcx
; CHECK-NEXT: je .LBB0_2
-; CHECK-NEXT: # BB#1: # %if.then
+; CHECK-NEXT: # %bb.1: # %if.then
; CHECK-NEXT: movb $0, {{.*}}(%rip)
; CHECK-NEXT: .LBB0_2: # %if.end
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/pr34634.ll b/test/CodeGen/X86/pr34634.ll
index 6ebd6d87185..9ed78a28d4d 100644
--- a/test/CodeGen/X86/pr34634.ll
+++ b/test/CodeGen/X86/pr34634.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: norecurse nounwind uwtable
define void @fn1() local_unnamed_addr #0 {
; CHECK-LABEL: fn1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movslq {{.*}}(%rip), %rax
; CHECK-NEXT: leaq (%rax,%rax,4), %rcx
; CHECK-NEXT: leaq (,%rax,4), %rdx
@@ -33,7 +33,7 @@ entry:
; Function Attrs: norecurse nounwind uwtable
define i32 @main() local_unnamed_addr #0 {
; CHECK-LABEL: main:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movslq {{.*}}(%rip), %rax
; CHECK-NEXT: leaq (%rax,%rax,4), %rcx
; CHECK-NEXT: leaq (,%rax,4), %rdx
diff --git a/test/CodeGen/X86/pr34653.ll b/test/CodeGen/X86/pr34653.ll
index ef59282a40c..d888c566c7f 100644
--- a/test/CodeGen/X86/pr34653.ll
+++ b/test/CodeGen/X86/pr34653.ll
@@ -5,7 +5,7 @@ declare fastcc <38 x double> @test()
define void @pr34653() {
; CHECK-LABEL: pr34653:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
diff --git a/test/CodeGen/X86/pr34657.ll b/test/CodeGen/X86/pr34657.ll
index a63bc2a08dd..58c97f65635 100644
--- a/test/CodeGen/X86/pr34657.ll
+++ b/test/CodeGen/X86/pr34657.ll
@@ -3,7 +3,7 @@
define <112 x i8> @pr34657() local_unnamed_addr {
; CHECK-LABEL: pr34657:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovups (%rax), %xmm0
; CHECK-NEXT: vmovups (%rax), %ymm1
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
diff --git a/test/CodeGen/X86/pr34855.ll b/test/CodeGen/X86/pr34855.ll
index 989c943ac03..ee4428908a2 100644
--- a/test/CodeGen/X86/pr34855.ll
+++ b/test/CodeGen/X86/pr34855.ll
@@ -4,7 +4,7 @@
define void @PR34855(<2 x i32> *%p0, <2 x i32> *%p1, <2 x i32> *%p2) {
; X86-LABEL: PR34855:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -12,7 +12,7 @@ define void @PR34855(<2 x i32> *%p0, <2 x i32> *%p1, <2 x i32> *%p2) {
; X86-NEXT: retl
;
; X64-LABEL: PR34855:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movslq 4(%rdi), %rax
; X64-NEXT: movq %rax, %xmm0
; X64-NEXT: movslq (%rdi), %rax
diff --git a/test/CodeGen/X86/pr35272.ll b/test/CodeGen/X86/pr35272.ll
index e121ec8a3c9..0df1d7cb83c 100644
--- a/test/CodeGen/X86/pr35272.ll
+++ b/test/CodeGen/X86/pr35272.ll
@@ -3,7 +3,7 @@
define <2 x i48> @PR35272(<2 x i64> %a0, <2 x i48> %a1, <2 x i48> %a2) {
; CHECK-LABEL: PR35272:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpeqq %xmm3, %xmm0, %k1
; CHECK-NEXT: vpblendmq %xmm1, %xmm2, %xmm0 {%k1}
diff --git a/test/CodeGen/X86/pr35399.ll b/test/CodeGen/X86/pr35399.ll
index 394c257adfa..9b4b029b517 100644
--- a/test/CodeGen/X86/pr35399.ll
+++ b/test/CodeGen/X86/pr35399.ll
@@ -4,7 +4,7 @@
; Make sure we emit opposite setcc instructions.
define i64 @pr35399(i64, i8*, i8*) {
; CHECK-LABEL: pr35399:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: lzcntq %rdi, %rax
; CHECK-NEXT: setae (%rsi)
; CHECK-NEXT: setb (%rdx)
diff --git a/test/CodeGen/X86/pr35443.ll b/test/CodeGen/X86/pr35443.ll
index e184d489282..1b4f7d4ea0c 100644
--- a/test/CodeGen/X86/pr35443.ll
+++ b/test/CodeGen/X86/pr35443.ll
@@ -7,7 +7,7 @@
; Function Attrs: norecurse nounwind uwtable
define void @main() {
; CHECK-LABEL: main:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movzbl ac+{{.*}}(%rip), %eax
; CHECK-NEXT: vmovd %eax, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/pre-coalesce.mir b/test/CodeGen/X86/pre-coalesce.mir
index 945a87d750a..fb9429bc148 100644
--- a/test/CodeGen/X86/pre-coalesce.mir
+++ b/test/CodeGen/X86/pre-coalesce.mir
@@ -40,7 +40,7 @@
---
# Check that A = B and B = A copies will not exist in the loop at the same time.
# CHECK: name: foo
-# CHECK: [[L1:bb.3.while.body]]:
+# CHECK: [[L1:bb.3]].{{[a-zA-Z0-9.]+}}:
# CHECK: %[[REGA:.*]] = COPY %[[REGB:.*]]
# CHECK-NOT: %[[REGB]] = COPY %[[REGA]]
# CHECK: JNE_1 %[[L1]]
@@ -87,11 +87,11 @@ body: |
%12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0)
TEST8rr %12, %12, implicit-def %eflags
%11 = MOV32rm %rip, 1, %noreg, @a, %noreg :: (dereferenceable load 4 from @a)
- JNE_1 %bb.1.while.body.preheader, implicit killed %eflags
+ JNE_1 %bb.1, implicit killed %eflags
bb.4:
%10 = COPY %11
- JMP_1 %bb.3.while.end
+ JMP_1 %bb.3
bb.1.while.body.preheader:
@@ -105,8 +105,8 @@ body: |
%12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0)
TEST8rr %12, %12, implicit-def %eflags
%11 = COPY %10
- JNE_1 %bb.2.while.body, implicit killed %eflags
- JMP_1 %bb.3.while.end
+ JNE_1 %bb.2, implicit killed %eflags
+ JMP_1 %bb.3
bb.3.while.end:
%eax = COPY %10
diff --git a/test/CodeGen/X86/promote-vec3.ll b/test/CodeGen/X86/promote-vec3.ll
index 85b610cce3f..5c6eb70b3ef 100644
--- a/test/CodeGen/X86/promote-vec3.ll
+++ b/test/CodeGen/X86/promote-vec3.ll
@@ -7,7 +7,7 @@
define <3 x i16> @zext_i8(<3 x i8>) {
; SSE3-LABEL: zext_i8:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; SSE3-NEXT: movd %eax, %xmm0
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
@@ -25,7 +25,7 @@ define <3 x i16> @zext_i8(<3 x i8>) {
; SSE3-NEXT: retl
;
; SSE41-LABEL: zext_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm0
; SSE41-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
@@ -39,7 +39,7 @@ define <3 x i16> @zext_i8(<3 x i8>) {
; SSE41-NEXT: retl
;
; AVX-32-LABEL: zext_i8:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-32-NEXT: vpinsrb $0, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -53,7 +53,7 @@ define <3 x i16> @zext_i8(<3 x i8>) {
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: zext_i8:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovd %edi, %xmm0
; AVX-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
@@ -71,7 +71,7 @@ define <3 x i16> @zext_i8(<3 x i8>) {
define <3 x i16> @sext_i8(<3 x i8>) {
; SSE3-LABEL: sext_i8:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; SSE3-NEXT: movd %eax, %xmm0
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
@@ -91,7 +91,7 @@ define <3 x i16> @sext_i8(<3 x i8>) {
; SSE3-NEXT: retl
;
; SSE41-LABEL: sext_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
; SSE41-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
@@ -106,7 +106,7 @@ define <3 x i16> @sext_i8(<3 x i8>) {
; SSE41-NEXT: retl
;
; AVX-32-LABEL: sext_i8:
-; AVX-32: # BB#0:
+; AVX-32: # %bb.0:
; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -121,7 +121,7 @@ define <3 x i16> @sext_i8(<3 x i8>) {
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: sext_i8:
-; AVX-64: # BB#0:
+; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovd %edi, %xmm0
; AVX-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/pseudo_cmov_lower2.ll b/test/CodeGen/X86/pseudo_cmov_lower2.ll
index 38712a96b2b..1a61b0b9700 100644
--- a/test/CodeGen/X86/pseudo_cmov_lower2.ll
+++ b/test/CodeGen/X86/pseudo_cmov_lower2.ll
@@ -51,7 +51,7 @@ entry:
; CHECK-LABEL: foo3:
; CHECK: js
; CHECK-NOT: js
-; CHECK-LABEL: # BB#1:
+; CHECK-LABEL: # %bb.1:
; CHECK-DAG: movapd %xmm2, %xmm1
; CHECK-DAG: movapd %xmm2, %xmm0
; CHECK-LABEL:.LBB2_2:
@@ -81,7 +81,7 @@ entry:
; CHECK-LABEL: foo4:
; CHECK: js
; CHECK-NOT: js
-; CHECK-LABEL: # BB#1:
+; CHECK-LABEL: # %bb.1:
; CHECK-DAG: movapd %xmm2, %xmm1
; CHECK-DAG: movapd %xmm2, %xmm0
; CHECK-LABEL:.LBB3_2:
diff --git a/test/CodeGen/X86/pshufb-mask-comments.ll b/test/CodeGen/X86/pshufb-mask-comments.ll
index 178fe3357d4..0900fdccb49 100644
--- a/test/CodeGen/X86/pshufb-mask-comments.ll
+++ b/test/CodeGen/X86/pshufb-mask-comments.ll
@@ -5,7 +5,7 @@
define <16 x i8> @test1(<16 x i8> %V) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,0,0,0,2,0,0,0,0,3,0,0,0,0,4]
; CHECK-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 0, i8 2, i8 0, i8 0, i8 0, i8 0, i8 3, i8 0, i8 0, i8 0, i8 0, i8 4>)
@@ -16,7 +16,7 @@ define <16 x i8> @test1(<16 x i8> %V) {
define <16 x i8> @test2(<16 x i8> %V) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[15,0,0,0,0,0,0,0,0,0,1,0,0,0,0,2]
; CHECK-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 15, i8 0, i8 0, i8 0, i8 0, i8 16, i8 0, i8 0, i8 0, i8 0, i8 17, i8 0, i8 0, i8 0, i8 0, i8 50>)
@@ -27,7 +27,7 @@ define <16 x i8> @test2(<16 x i8> %V) {
define <16 x i8> @test3(<16 x i8> %V) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,0,15,0,2,0,0],zero,xmm0[0,3,0,0],zero,xmm0[0,4]
; CHECK-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 1, i8 0, i8 0, i8 127, i8 0, i8 2, i8 0, i8 0, i8 128, i8 0, i8 3, i8 0, i8 0, i8 255, i8 0, i8 4>)
@@ -38,7 +38,7 @@ define <16 x i8> @test3(<16 x i8> %V) {
define <16 x i8> @test4(<16 x i8> %V, <2 x i64>* %P) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [1084818905618843912,506097522914230528]
; CHECK-NEXT: movaps %xmm1, (%rdi)
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -53,7 +53,7 @@ define <16 x i8> @test4(<16 x i8> %V, <2 x i64>* %P) {
define <16 x i8> @test5(<16 x i8> %V) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: movdqa %xmm1, (%rax)
@@ -74,7 +74,7 @@ define <16 x i8> @test5(<16 x i8> %V) {
define <16 x i8> @test6(<16 x i8> %V, <2 x i64>* %P) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [217019414673948672,506380106026255364]
; CHECK-NEXT: movaps %xmm1, (%rdi)
; CHECK-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
diff --git a/test/CodeGen/X86/psubus.ll b/test/CodeGen/X86/psubus.ll
index 899f17052cf..6a38e564b72 100644
--- a/test/CodeGen/X86/psubus.ll
+++ b/test/CodeGen/X86/psubus.ll
@@ -8,17 +8,17 @@
define <8 x i16> @test1(<8 x i16> %x) nounwind {
; SSE-LABEL: test1:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: psubusw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test1:
-; AVX: # BB#0: # %vector.ph
+; AVX: # %bb.0: # %vector.ph
; AVX-NEXT: vpsubusw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test1:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
vector.ph:
@@ -30,17 +30,17 @@ vector.ph:
define <8 x i16> @test2(<8 x i16> %x) nounwind {
; SSE-LABEL: test2:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: psubusw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2:
-; AVX: # BB#0: # %vector.ph
+; AVX: # %bb.0: # %vector.ph
; AVX-NEXT: vpsubusw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test2:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
vector.ph:
@@ -52,7 +52,7 @@ vector.ph:
define <8 x i16> @test3(<8 x i16> %x, i16 zeroext %w) nounwind {
; SSE-LABEL: test3:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: movd %edi, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
@@ -60,7 +60,7 @@ define <8 x i16> @test3(<8 x i16> %x, i16 zeroext %w) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test3:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vmovd %edi, %xmm1
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
@@ -68,14 +68,14 @@ define <8 x i16> @test3(<8 x i16> %x, i16 zeroext %w) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test3:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vmovd %edi, %xmm1
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test3:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpbroadcastw %edi, %xmm1
; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -90,17 +90,17 @@ vector.ph:
define <16 x i8> @test4(<16 x i8> %x) nounwind {
; SSE-LABEL: test4:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: psubusb {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test4:
-; AVX: # BB#0: # %vector.ph
+; AVX: # %bb.0: # %vector.ph
; AVX-NEXT: vpsubusb {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test4:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusb {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
vector.ph:
@@ -112,17 +112,17 @@ vector.ph:
define <16 x i8> @test5(<16 x i8> %x) nounwind {
; SSE-LABEL: test5:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: psubusb {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test5:
-; AVX: # BB#0: # %vector.ph
+; AVX: # %bb.0: # %vector.ph
; AVX-NEXT: vpsubusb {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test5:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusb {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
vector.ph:
@@ -134,7 +134,7 @@ vector.ph:
define <16 x i8> @test6(<16 x i8> %x, i8 zeroext %w) nounwind {
; SSE2-LABEL: test6:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movd %edi, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
@@ -143,7 +143,7 @@ define <16 x i8> @test6(<16 x i8> %x, i8 zeroext %w) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test6:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movd %edi, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pshufb %xmm2, %xmm1
@@ -151,7 +151,7 @@ define <16 x i8> @test6(<16 x i8> %x, i8 zeroext %w) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test6:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movd %edi, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pshufb %xmm2, %xmm1
@@ -159,7 +159,7 @@ define <16 x i8> @test6(<16 x i8> %x, i8 zeroext %w) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test6:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vmovd %edi, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -167,14 +167,14 @@ define <16 x i8> @test6(<16 x i8> %x, i8 zeroext %w) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test6:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vmovd %edi, %xmm1
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test6:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpbroadcastb %edi, %xmm1
; AVX512-NEXT: vpsubusb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -189,14 +189,14 @@ vector.ph:
define <16 x i16> @test7(<16 x i16> %x) nounwind {
; SSE-LABEL: test7:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT: psubusw %xmm2, %xmm0
; SSE-NEXT: psubusw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test7:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm2, %xmm1
@@ -207,12 +207,12 @@ define <16 x i16> @test7(<16 x i16> %x) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test7:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpsubusw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test7:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusw {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
vector.ph:
@@ -224,14 +224,14 @@ vector.ph:
define <16 x i16> @test8(<16 x i16> %x) nounwind {
; SSE-LABEL: test8:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32767,32767,32767,32767,32767,32767,32767,32767]
; SSE-NEXT: psubusw %xmm2, %xmm0
; SSE-NEXT: psubusw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test8:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
@@ -248,12 +248,12 @@ define <16 x i16> @test8(<16 x i16> %x) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test8:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpsubusw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test8:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusw {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
vector.ph:
@@ -265,7 +265,7 @@ vector.ph:
define <16 x i16> @test9(<16 x i16> %x, i16 zeroext %w) nounwind {
; SSE-LABEL: test9:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: movd %edi, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
@@ -274,7 +274,7 @@ define <16 x i16> @test9(<16 x i16> %x, i16 zeroext %w) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test9:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovd %edi, %xmm2
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
@@ -291,14 +291,14 @@ define <16 x i16> @test9(<16 x i16> %x, i16 zeroext %w) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test9:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vmovd %edi, %xmm1
; AVX2-NEXT: vpbroadcastw %xmm1, %ymm1
; AVX2-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test9:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpbroadcastw %edi, %ymm1
; AVX512-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -313,14 +313,14 @@ vector.ph:
define <32 x i8> @test10(<32 x i8> %x) nounwind {
; SSE-LABEL: test10:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: psubusb %xmm2, %xmm0
; SSE-NEXT: psubusb %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test10:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
@@ -331,12 +331,12 @@ define <32 x i8> @test10(<32 x i8> %x) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test10:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpsubusb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test10:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusb {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
vector.ph:
@@ -348,14 +348,14 @@ vector.ph:
define <32 x i8> @test11(<32 x i8> %x) nounwind {
; SSE-LABEL: test11:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT: psubusb %xmm2, %xmm0
; SSE-NEXT: psubusb %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test11:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
@@ -372,12 +372,12 @@ define <32 x i8> @test11(<32 x i8> %x) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test11:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpsubusb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test11:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusb {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
vector.ph:
@@ -389,7 +389,7 @@ vector.ph:
define <32 x i8> @test12(<32 x i8> %x, i8 zeroext %w) nounwind {
; SSE2-LABEL: test12:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movd %edi, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
@@ -399,7 +399,7 @@ define <32 x i8> @test12(<32 x i8> %x, i8 zeroext %w) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test12:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movd %edi, %xmm2
; SSSE3-NEXT: pxor %xmm3, %xmm3
; SSSE3-NEXT: pshufb %xmm3, %xmm2
@@ -408,7 +408,7 @@ define <32 x i8> @test12(<32 x i8> %x, i8 zeroext %w) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test12:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movd %edi, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pshufb %xmm3, %xmm2
@@ -417,7 +417,7 @@ define <32 x i8> @test12(<32 x i8> %x, i8 zeroext %w) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test12:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vmovd %edi, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -434,14 +434,14 @@ define <32 x i8> @test12(<32 x i8> %x, i8 zeroext %w) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test12:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vmovd %edi, %xmm1
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX2-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test12:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpbroadcastb %edi, %ymm1
; AVX512-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -456,7 +456,7 @@ vector.ph:
define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: test13:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
@@ -484,7 +484,7 @@ define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test13:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: pxor %xmm3, %xmm3
; SSSE3-NEXT: movdqa %xmm0, %xmm4
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
@@ -511,7 +511,7 @@ define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test13:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -536,7 +536,7 @@ define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test13:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -560,7 +560,7 @@ define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test13:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
@@ -576,7 +576,7 @@ define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test13:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpcmpnltud %ymm1, %ymm0, %k1
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
@@ -594,7 +594,7 @@ vector.ph:
define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; SSE2-LABEL: test14:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm5, %xmm6
@@ -646,7 +646,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test14:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: movdqa %xmm5, %xmm7
@@ -700,7 +700,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test14:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -751,7 +751,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test14:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -794,7 +794,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test14:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -826,7 +826,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test14:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vpcmpnltud %zmm0, %zmm1, %k1
; AVX512-NEXT: vpsubd %zmm0, %zmm1, %zmm0
@@ -844,7 +844,7 @@ vector.ph:
define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: test15:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@@ -870,7 +870,7 @@ define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test15:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
@@ -896,7 +896,7 @@ define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test15:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -920,7 +920,7 @@ define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test15:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -944,7 +944,7 @@ define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test15:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
@@ -960,7 +960,7 @@ define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test15:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpcmpnleud %ymm1, %ymm0, %k1
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
@@ -978,7 +978,7 @@ vector.ph:
define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: test16:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@@ -1004,7 +1004,7 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test16:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
@@ -1030,7 +1030,7 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test16:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1054,7 +1054,7 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test16:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1078,7 +1078,7 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test16:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
@@ -1094,7 +1094,7 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test16:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpcmpltud %ymm0, %ymm1, %k1
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
@@ -1112,7 +1112,7 @@ vector.ph:
define <8 x i16> @psubus_8i16_max(<8 x i16> %x, <8 x i16> %y) nounwind {
; SSE2-LABEL: psubus_8i16_max:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1127,7 +1127,7 @@ define <8 x i16> @psubus_8i16_max(<8 x i16> %x, <8 x i16> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_8i16_max:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm2, %xmm3
@@ -1142,17 +1142,17 @@ define <8 x i16> @psubus_8i16_max(<8 x i16> %x, <8 x i16> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_8i16_max:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: psubusw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: psubus_8i16_max:
-; AVX: # BB#0: # %vector.ph
+; AVX: # %bb.0: # %vector.ph
; AVX-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: psubus_8i16_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
vector.ph:
@@ -1164,17 +1164,17 @@ vector.ph:
define <16 x i8> @psubus_16i8_max(<16 x i8> %x, <16 x i8> %y) nounwind {
; SSE-LABEL: psubus_16i8_max:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: psubusb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: psubus_16i8_max:
-; AVX: # BB#0: # %vector.ph
+; AVX: # %bb.0: # %vector.ph
; AVX-NEXT: vpsubusb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: psubus_16i8_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
vector.ph:
@@ -1186,7 +1186,7 @@ vector.ph:
define <16 x i16> @psubus_16i16_max(<16 x i16> %x, <16 x i16> %y) nounwind {
; SSE2-LABEL: psubus_16i16_max:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pxor %xmm4, %xmm6
@@ -1212,7 +1212,7 @@ define <16 x i16> @psubus_16i16_max(<16 x i16> %x, <16 x i16> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_16i16_max:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSSE3-NEXT: movdqa %xmm0, %xmm6
; SSSE3-NEXT: pxor %xmm4, %xmm6
@@ -1238,13 +1238,13 @@ define <16 x i16> @psubus_16i16_max(<16 x i16> %x, <16 x i16> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_16i16_max:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: psubusw %xmm2, %xmm0
; SSE41-NEXT: psubusw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: psubus_16i16_max:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsubusw %xmm2, %xmm3, %xmm2
@@ -1253,12 +1253,12 @@ define <16 x i16> @psubus_16i16_max(<16 x i16> %x, <16 x i16> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_16i16_max:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_16i16_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
vector.ph:
@@ -1270,7 +1270,7 @@ vector.ph:
define <32 x i16> @psubus_32i16_max(<32 x i16> %x, <32 x i16> %y) nounwind {
; SSE2-LABEL: psubus_32i16_max:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa %xmm3, %xmm11
; SSE2-NEXT: movdqa %xmm2, %xmm10
; SSE2-NEXT: movdqa %xmm1, %xmm9
@@ -1318,7 +1318,7 @@ define <32 x i16> @psubus_32i16_max(<32 x i16> %x, <32 x i16> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_32i16_max:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movdqa %xmm3, %xmm11
; SSSE3-NEXT: movdqa %xmm2, %xmm10
; SSSE3-NEXT: movdqa %xmm1, %xmm9
@@ -1366,7 +1366,7 @@ define <32 x i16> @psubus_32i16_max(<32 x i16> %x, <32 x i16> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_32i16_max:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: psubusw %xmm4, %xmm0
; SSE41-NEXT: psubusw %xmm5, %xmm1
; SSE41-NEXT: psubusw %xmm6, %xmm2
@@ -1374,7 +1374,7 @@ define <32 x i16> @psubus_32i16_max(<32 x i16> %x, <32 x i16> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: psubus_32i16_max:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsubusw %xmm4, %xmm5, %xmm4
@@ -1388,13 +1388,13 @@ define <32 x i16> @psubus_32i16_max(<32 x i16> %x, <32 x i16> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_32i16_max:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpsubusw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubusw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_32i16_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusw %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
vector.ph:
@@ -1406,7 +1406,7 @@ vector.ph:
define <64 x i8> @psubus_64i8_max(<64 x i8> %x, <64 x i8> %y) nounwind {
; SSE-LABEL: psubus_64i8_max:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: psubusb %xmm4, %xmm0
; SSE-NEXT: psubusb %xmm5, %xmm1
; SSE-NEXT: psubusb %xmm6, %xmm2
@@ -1414,7 +1414,7 @@ define <64 x i8> @psubus_64i8_max(<64 x i8> %x, <64 x i8> %y) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: psubus_64i8_max:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsubusb %xmm4, %xmm5, %xmm4
@@ -1428,13 +1428,13 @@ define <64 x i8> @psubus_64i8_max(<64 x i8> %x, <64 x i8> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_64i8_max:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpsubusb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubusb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_64i8_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusb %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
vector.ph:
@@ -1446,13 +1446,13 @@ vector.ph:
define <32 x i8> @psubus_32i8_max(<32 x i8> %x, <32 x i8> %y) nounwind {
; SSE-LABEL: psubus_32i8_max:
-; SSE: # BB#0: # %vector.ph
+; SSE: # %bb.0: # %vector.ph
; SSE-NEXT: psubusb %xmm2, %xmm0
; SSE-NEXT: psubusb %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: psubus_32i8_max:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsubusb %xmm2, %xmm3, %xmm2
@@ -1461,12 +1461,12 @@ define <32 x i8> @psubus_32i8_max(<32 x i8> %x, <32 x i8> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_32i8_max:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_32i8_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
vector.ph:
@@ -1478,7 +1478,7 @@ vector.ph:
define <8 x i16> @psubus_8i32_max(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: psubus_8i32_max:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm4
@@ -1512,7 +1512,7 @@ define <8 x i16> @psubus_8i32_max(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_8i32_max:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: movdqa %xmm3, %xmm4
@@ -1545,7 +1545,7 @@ define <8 x i16> @psubus_8i32_max(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_8i32_max:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
; SSE41-NEXT: pminud %xmm3, %xmm2
; SSE41-NEXT: pminud %xmm3, %xmm1
@@ -1554,7 +1554,7 @@ define <8 x i16> @psubus_8i32_max(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: psubus_8i32_max:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
@@ -1565,7 +1565,7 @@ define <8 x i16> @psubus_8i32_max(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_8i32_max:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX2-NEXT: vpminud %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1575,7 +1575,7 @@ define <8 x i16> @psubus_8i32_max(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_8i32_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovusdw %ymm1, %xmm1
; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -1591,7 +1591,7 @@ vector.ph:
define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; SSE2-LABEL: psubus_8i64_max:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
@@ -1684,7 +1684,7 @@ define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_8i64_max:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: pxor %xmm5, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm10
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
@@ -1777,7 +1777,7 @@ define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_8i64_max:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,1,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm11 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,0,1]
@@ -1856,7 +1856,7 @@ define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: psubus_8i64_max:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[3,1,2,3]
@@ -1903,7 +1903,7 @@ define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_8i64_max:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -1930,7 +1930,7 @@ define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_8i64_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovusqw %zmm1, %xmm1
; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -1946,7 +1946,7 @@ vector.ph:
define <16 x i16> @psubus_16i32_max(<16 x i16> %x, <16 x i32> %y) nounwind {
; SSE2-LABEL: psubus_16i32_max:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa %xmm1, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm0
@@ -2009,7 +2009,7 @@ define <16 x i16> @psubus_16i32_max(<16 x i16> %x, <16 x i32> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_16i32_max:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movdqa %xmm1, %xmm8
; SSSE3-NEXT: movdqa %xmm0, %xmm9
; SSSE3-NEXT: pxor %xmm0, %xmm0
@@ -2072,7 +2072,7 @@ define <16 x i16> @psubus_16i32_max(<16 x i16> %x, <16 x i32> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_16i32_max:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
@@ -2097,7 +2097,7 @@ define <16 x i16> @psubus_16i32_max(<16 x i16> %x, <16 x i32> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: psubus_16i32_max:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
; AVX1-NEXT: vpminud %xmm4, %xmm3, %xmm3
@@ -2121,7 +2121,7 @@ define <16 x i16> @psubus_16i32_max(<16 x i16> %x, <16 x i32> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_16i32_max:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX2-NEXT: vpminud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
@@ -2142,7 +2142,7 @@ define <16 x i16> @psubus_16i32_max(<16 x i16> %x, <16 x i32> %y) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_16i32_max:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovusdw %zmm1, %ymm1
; AVX512-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -2157,7 +2157,7 @@ vector.ph:
define <8 x i16> @psubus_i16_i32_max_swapped(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: psubus_i16_i32_max_swapped:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm5
@@ -2189,7 +2189,7 @@ define <8 x i16> @psubus_i16_i32_max_swapped(<8 x i16> %x, <8 x i32> %y) nounwin
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_i16_i32_max_swapped:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: movdqa %xmm3, %xmm5
@@ -2220,7 +2220,7 @@ define <8 x i16> @psubus_i16_i32_max_swapped(<8 x i16> %x, <8 x i32> %y) nounwin
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_i16_i32_max_swapped:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
; SSE41-NEXT: pminud %xmm3, %xmm2
; SSE41-NEXT: pminud %xmm3, %xmm1
@@ -2229,7 +2229,7 @@ define <8 x i16> @psubus_i16_i32_max_swapped(<8 x i16> %x, <8 x i32> %y) nounwin
; SSE41-NEXT: retq
;
; AVX1-LABEL: psubus_i16_i32_max_swapped:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
@@ -2240,7 +2240,7 @@ define <8 x i16> @psubus_i16_i32_max_swapped(<8 x i16> %x, <8 x i32> %y) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_i16_i32_max_swapped:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX2-NEXT: vpminud %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -2250,7 +2250,7 @@ define <8 x i16> @psubus_i16_i32_max_swapped(<8 x i16> %x, <8 x i32> %y) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_i16_i32_max_swapped:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovusdw %ymm1, %xmm1
; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2266,7 +2266,7 @@ vector.ph:
define <8 x i16> @psubus_i16_i32_min(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: psubus_i16_i32_min:
-; SSE2: # BB#0: # %vector.ph
+; SSE2: # %bb.0: # %vector.ph
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
@@ -2299,7 +2299,7 @@ define <8 x i16> @psubus_i16_i32_min(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_i16_i32_min:
-; SSSE3: # BB#0: # %vector.ph
+; SSSE3: # %bb.0: # %vector.ph
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
@@ -2331,7 +2331,7 @@ define <8 x i16> @psubus_i16_i32_min(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_i16_i32_min:
-; SSE41: # BB#0: # %vector.ph
+; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
; SSE41-NEXT: pminud %xmm3, %xmm2
; SSE41-NEXT: pminud %xmm3, %xmm1
@@ -2340,7 +2340,7 @@ define <8 x i16> @psubus_i16_i32_min(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: psubus_i16_i32_min:
-; AVX1: # BB#0: # %vector.ph
+; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
@@ -2351,7 +2351,7 @@ define <8 x i16> @psubus_i16_i32_min(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: psubus_i16_i32_min:
-; AVX2: # BB#0: # %vector.ph
+; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX2-NEXT: vpminud %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -2361,7 +2361,7 @@ define <8 x i16> @psubus_i16_i32_min(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: psubus_i16_i32_min:
-; AVX512: # BB#0: # %vector.ph
+; AVX512: # %bb.0: # %vector.ph
; AVX512-NEXT: vpmovusdw %ymm1, %xmm1
; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
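(Illustrative aside, not part of the patch: the psubus_* functions above all test that the scalar saturating pattern "x > y ? x - y : 0" is recognized and lowered to a single unsigned-saturating subtract. A minimal C++ sketch of that equivalence, using the standard SSE2 intrinsic _mm_subs_epu16; the helper names are illustrative, not from the test file.)

#include <emmintrin.h>
#include <stdint.h>

// Scalar form of the pattern the psubus tests exercise:
// unsigned saturating subtract, clamping at zero instead of wrapping.
static inline uint16_t subus_scalar(uint16_t x, uint16_t y) {
    return x > y ? (uint16_t)(x - y) : 0;
}

// Vector form the backend is expected to emit as one psubusw:
// the same saturating subtract across all eight 16-bit lanes.
static inline __m128i subus_8i16(__m128i x, __m128i y) {
    return _mm_subs_epu16(x, y);  // PSUBUSW
}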
diff --git a/test/CodeGen/X86/rdrand-x86_64.ll b/test/CodeGen/X86/rdrand-x86_64.ll
index 06f1136087b..88c49c03d7d 100644
--- a/test/CodeGen/X86/rdrand-x86_64.ll
+++ b/test/CodeGen/X86/rdrand-x86_64.ll
@@ -5,7 +5,7 @@ declare {i64, i32} @llvm.x86.rdrand.64()
define i32 @_rdrand64_step(i64* %random_val) {
; CHECK-LABEL: _rdrand64_step:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rdrandq %rcx
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovael %ecx, %eax
diff --git a/test/CodeGen/X86/rdrand.ll b/test/CodeGen/X86/rdrand.ll
index 1e0c4f114dd..e3982cc0bc4 100644
--- a/test/CodeGen/X86/rdrand.ll
+++ b/test/CodeGen/X86/rdrand.ll
@@ -7,7 +7,7 @@ declare {i32, i32} @llvm.x86.rdrand.32()
define i32 @_rdrand16_step(i16* %random_val) {
; X86-LABEL: _rdrand16_step:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: rdrandw %ax
; X86-NEXT: movzwl %ax, %edx
@@ -17,7 +17,7 @@ define i32 @_rdrand16_step(i16* %random_val) {
; X86-NEXT: retl
;
; X64-LABEL: _rdrand16_step:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rdrandw %ax
; X64-NEXT: movzwl %ax, %ecx
; X64-NEXT: movl $1, %eax
@@ -33,7 +33,7 @@ define i32 @_rdrand16_step(i16* %random_val) {
define i32 @_rdrand32_step(i32* %random_val) {
; X86-LABEL: _rdrand32_step:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: rdrandl %edx
; X86-NEXT: movl $1, %eax
@@ -42,7 +42,7 @@ define i32 @_rdrand32_step(i32* %random_val) {
; X86-NEXT: retl
;
; X64-LABEL: _rdrand32_step:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rdrandl %ecx
; X64-NEXT: movl $1, %eax
; X64-NEXT: cmovael %ecx, %eax
@@ -58,14 +58,14 @@ define i32 @_rdrand32_step(i32* %random_val) {
; Check that MachineCSE doesn't eliminate duplicate rdrand instructions.
define i32 @CSE() nounwind {
; X86-LABEL: CSE:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: rdrandl %ecx
; X86-NEXT: rdrandl %eax
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: CSE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rdrandl %ecx
; X64-NEXT: rdrandl %eax
; X64-NEXT: addl %ecx, %eax
@@ -81,11 +81,11 @@ define i32 @CSE() nounwind {
; Check that MachineLICM doesn't hoist rdrand instructions.
define void @loop(i32* %p, i32 %n) nounwind {
; X86-LABEL: loop:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: testl %eax, %eax
; X86-NEXT: je .LBB3_3
-; X86-NEXT: # BB#1: # %while.body.preheader
+; X86-NEXT: # %bb.1: # %while.body.preheader
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB3_2: # %while.body
@@ -99,7 +99,7 @@ define void @loop(i32* %p, i32 %n) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: loop:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: testl %esi, %esi
; X64-NEXT: je .LBB3_2
; X64-NEXT: .p2align 4, 0x90
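(Illustrative aside, not part of the patch: the CSE and LICM tests above exist because every RDRAND execution yields fresh entropy, so duplicate calls must not be merged or hoisted out of loops. A hedged C++ sketch of the source-level idiom, using the standard _rdrand32_step intrinsic from <immintrin.h>, which returns 1 on success and writes the value through its pointer; the function name two_draws is illustrative.)

#include <immintrin.h>

// Mirrors the CSE() test: two rdrand executions that must remain
// distinct instructions, since each draws a new hardware random value.
// Build with -mrdrnd (or an equivalent target attribute).
unsigned two_draws(void) {
    unsigned a = 0, b = 0;
    _rdrand32_step(&a);  // first RDRAND
    _rdrand32_step(&b);  // second RDRAND; not CSE-able with the first
    return a + b;
}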
diff --git a/test/CodeGen/X86/rdseed-x86_64.ll b/test/CodeGen/X86/rdseed-x86_64.ll
index b0d9748dd6a..0708138ab79 100644
--- a/test/CodeGen/X86/rdseed-x86_64.ll
+++ b/test/CodeGen/X86/rdseed-x86_64.ll
@@ -5,7 +5,7 @@ declare {i64, i32} @llvm.x86.rdseed.64()
define i32 @_rdseed64_step(i64* %random_val) {
; CHECK-LABEL: _rdseed64_step:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: rdseedq %rcx
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: cmovael %ecx, %eax
diff --git a/test/CodeGen/X86/rdseed.ll b/test/CodeGen/X86/rdseed.ll
index b22e3e7ceac..1e0d113977c 100644
--- a/test/CodeGen/X86/rdseed.ll
+++ b/test/CodeGen/X86/rdseed.ll
@@ -7,7 +7,7 @@ declare {i32, i32} @llvm.x86.rdseed.32()
define i32 @_rdseed16_step(i16* %random_val) {
; X86-LABEL: _rdseed16_step:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: rdseedw %ax
; X86-NEXT: movzwl %ax, %edx
@@ -17,7 +17,7 @@ define i32 @_rdseed16_step(i16* %random_val) {
; X86-NEXT: retl
;
; X64-LABEL: _rdseed16_step:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rdseedw %ax
; X64-NEXT: movzwl %ax, %ecx
; X64-NEXT: movl $1, %eax
@@ -33,7 +33,7 @@ define i32 @_rdseed16_step(i16* %random_val) {
define i32 @_rdseed32_step(i32* %random_val) {
; X86-LABEL: _rdseed32_step:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: rdseedl %edx
; X86-NEXT: movl $1, %eax
@@ -42,7 +42,7 @@ define i32 @_rdseed32_step(i32* %random_val) {
; X86-NEXT: retl
;
; X64-LABEL: _rdseed32_step:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rdseedl %ecx
; X64-NEXT: movl $1, %eax
; X64-NEXT: cmovael %ecx, %eax
diff --git a/test/CodeGen/X86/recip-fastmath.ll b/test/CodeGen/X86/recip-fastmath.ll
index 296d165b3eb..00092e2a5c0 100644
--- a/test/CodeGen/X86/recip-fastmath.ll
+++ b/test/CodeGen/X86/recip-fastmath.ll
@@ -19,56 +19,56 @@
define float @f32_no_estimate(float %x) #0 {
; SSE-LABEL: f32_no_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: divss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: f32_no_estimate:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-RECIP-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: f32_no_estimate:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA-RECIP-NEXT: vdivss %xmm0, %xmm1, %xmm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: f32_no_estimate:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: f32_no_estimate:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [14:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_no_estimate:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:0.50]
; HASWELL-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [13:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: f32_no_estimate:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; HASWELL-NO-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: f32_no_estimate:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:0.50]
; KNL-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [13:1.00]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: f32_no_estimate:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [11:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -78,7 +78,7 @@ define float @f32_no_estimate(float %x) #0 {
define float @f32_one_step(float %x) #1 {
; SSE-LABEL: f32_one_step:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm0
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -89,7 +89,7 @@ define float @f32_one_step(float %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: f32_one_step:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -99,14 +99,14 @@ define float @f32_one_step(float %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: f32_one_step:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; FMA-RECIP-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
; FMA-RECIP-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: f32_one_step:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
@@ -116,7 +116,7 @@ define float @f32_one_step(float %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: f32_one_step:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
@@ -126,14 +126,14 @@ define float @f32_one_step(float %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_one_step:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: f32_one_step:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -143,14 +143,14 @@ define float @f32_one_step(float %x) #1 {
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: f32_one_step:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: f32_one_step:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
@@ -161,7 +161,7 @@ define float @f32_one_step(float %x) #1 {
define float @f32_two_step(float %x) #2 {
; SSE-LABEL: f32_two_step:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: mulss %xmm2, %xmm3
@@ -178,7 +178,7 @@ define float @f32_two_step(float %x) #2 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: f32_two_step:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm2
; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
@@ -192,7 +192,7 @@ define float @f32_two_step(float %x) #2 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: f32_two_step:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
@@ -203,7 +203,7 @@ define float @f32_two_step(float %x) #2 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: f32_two_step:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
@@ -217,7 +217,7 @@ define float @f32_two_step(float %x) #2 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: f32_two_step:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [6:0.50]
@@ -231,7 +231,7 @@ define float @f32_two_step(float %x) #2 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_two_step:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [1:0.50]
; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -242,7 +242,7 @@ define float @f32_two_step(float %x) #2 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: f32_two_step:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm2
; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
@@ -256,7 +256,7 @@ define float @f32_two_step(float %x) #2 {
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: f32_two_step:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [1:0.50]
; KNL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -267,7 +267,7 @@ define float @f32_two_step(float %x) #2 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: f32_two_step:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -282,56 +282,56 @@ define float @f32_two_step(float %x) #2 {
define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
; SSE-LABEL: v4f32_no_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; SSE-NEXT: divps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v4f32_no_estimate:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; AVX-RECIP-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v4f32_no_estimate:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-RECIP-NEXT: vdivps %xmm0, %xmm1, %xmm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v4f32_no_estimate:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v4f32_no_estimate:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
; SANDY-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [14:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_no_estimate:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [13:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v4f32_no_estimate:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; HASWELL-NO-FMA-NEXT: vdivps %xmm0, %xmm1, %xmm0
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: v4f32_no_estimate:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [13:1.00]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v4f32_no_estimate:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1] sched: [6:0.50]
; SKX-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [11:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -341,7 +341,7 @@ define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; SSE-LABEL: v4f32_one_step:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -352,7 +352,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v4f32_one_step:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -362,14 +362,14 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v4f32_one_step:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
; FMA-RECIP-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v4f32_one_step:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
@@ -379,7 +379,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v4f32_one_step:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
@@ -389,7 +389,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_one_step:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
@@ -397,7 +397,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v4f32_one_step:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1]
@@ -407,7 +407,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: v4f32_one_step:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
@@ -415,7 +415,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v4f32_one_step:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
@@ -426,7 +426,7 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; SSE-LABEL: v4f32_two_step:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: mulps %xmm2, %xmm3
@@ -443,7 +443,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v4f32_two_step:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm2
; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -457,7 +457,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v4f32_two_step:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
; FMA-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
@@ -468,7 +468,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v4f32_two_step:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
@@ -482,7 +482,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v4f32_two_step:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
@@ -496,7 +496,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_two_step:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -507,7 +507,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v4f32_two_step:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm2
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
@@ -521,7 +521,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: v4f32_two_step:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -532,7 +532,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v4f32_two_step:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
; SKX-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -547,7 +547,7 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
; SSE-LABEL: v8f32_no_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: divps %xmm0, %xmm3
@@ -557,49 +557,49 @@ define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v8f32_no_estimate:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; AVX-RECIP-NEXT: vdivps %ymm0, %ymm1, %ymm0
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v8f32_no_estimate:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-RECIP-NEXT: vdivps %ymm0, %ymm1, %ymm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v8f32_no_estimate:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [38:38.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_no_estimate:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
; SANDY-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [29:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_no_estimate:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [21:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_no_estimate:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; HASWELL-NO-FMA-NEXT: vdivps %ymm0, %ymm1, %ymm0
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: v8f32_no_estimate:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [21:2.00]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v8f32_no_estimate:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
; SKX-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [11:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -609,7 +609,7 @@ define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; SSE-LABEL: v8f32_one_step:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm4
; SSE-NEXT: mulps %xmm4, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -627,7 +627,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v8f32_one_step:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -637,14 +637,14 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v8f32_one_step:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
; FMA-RECIP-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v8f32_one_step:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
@@ -654,7 +654,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
@@ -664,7 +664,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_one_step:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
@@ -672,7 +672,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_one_step:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
@@ -682,7 +682,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: v8f32_one_step:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
@@ -690,7 +690,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v8f32_one_step:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %ymm0, %ymm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [4:0.33]
@@ -701,7 +701,7 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; SSE-LABEL: v8f32_two_step:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: rcpps %xmm0, %xmm3
; SSE-NEXT: movaps %xmm0, %xmm4
@@ -731,7 +731,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v8f32_two_step:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm2
; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -745,7 +745,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v8f32_two_step:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-RECIP-NEXT: vmovaps %ymm1, %ymm3
@@ -756,7 +756,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v8f32_two_step:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:2.00]
@@ -770,7 +770,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_two_step:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
@@ -784,7 +784,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_two_step:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
@@ -795,7 +795,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_two_step:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm2
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1]
@@ -809,7 +809,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; HASWELL-NO-FMA-NEXT: retq
;
; KNL-LABEL: v8f32_two_step:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
@@ -820,7 +820,7 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v8f32_two_step:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %ymm0, %ymm1 # sched: [4:1.00]
; SKX-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
; SKX-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
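(Illustrative aside, not part of the patch: the *_one_step tests above refine the hardware reciprocal estimate with one Newton-Raphson step, x1 = x0 + x0*(1 - a*x0), which is exactly the vfnmadd213/vfmadd132 pair in the CHECK lines; the *_two_step variants simply apply the refinement twice. A minimal C++ sketch using the standard _mm_rcp_ss intrinsic; the function name recip_one_step is illustrative.)

#include <xmmintrin.h>

// One Newton-Raphson refinement of the ~12-bit RCPSS estimate of 1/a.
float recip_one_step(float a) {
    float x0 = _mm_cvtss_f32(_mm_rcp_ss(_mm_set_ss(a)));  // rcpss estimate
    float e  = 1.0f - a * x0;  // residual: the vfnmadd213 step
    return x0 + x0 * e;        // refined result: the vfmadd132 step
}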
diff --git a/test/CodeGen/X86/recip-fastmath2.ll b/test/CodeGen/X86/recip-fastmath2.ll
index b5001666b9e..511e9b12a1f 100644
--- a/test/CodeGen/X86/recip-fastmath2.ll
+++ b/test/CodeGen/X86/recip-fastmath2.ll
@@ -13,55 +13,55 @@
define float @f32_no_step_2(float %x) #3 {
; SSE-LABEL: f32_no_step_2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm0
; SSE-NEXT: mulss {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: f32_no_step_2:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm0
; AVX-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: f32_no_step_2:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm0
; FMA-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: f32_no_step_2:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: f32_no_step_2:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_no_step_2:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: f32_no_step_2:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: f32_no_step_2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; KNL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: f32_no_step_2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; SKX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -71,7 +71,7 @@ define float @f32_no_step_2(float %x) #3 {
define float @f32_one_step_2(float %x) #1 {
; SSE-LABEL: f32_one_step_2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm0
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -83,7 +83,7 @@ define float @f32_one_step_2(float %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: f32_one_step_2:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -94,7 +94,7 @@ define float @f32_one_step_2(float %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: f32_one_step_2:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; FMA-RECIP-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
; FMA-RECIP-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
@@ -102,7 +102,7 @@ define float @f32_one_step_2(float %x) #1 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: f32_one_step_2:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
@@ -113,7 +113,7 @@ define float @f32_one_step_2(float %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: f32_one_step_2:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
@@ -124,7 +124,7 @@ define float @f32_one_step_2(float %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_one_step_2:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
@@ -132,7 +132,7 @@ define float @f32_one_step_2(float %x) #1 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: f32_one_step_2:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [1:0.50]
@@ -143,7 +143,7 @@ define float @f32_one_step_2(float %x) #1 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: f32_one_step_2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
@@ -151,7 +151,7 @@ define float @f32_one_step_2(float %x) #1 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: f32_one_step_2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
@@ -163,7 +163,7 @@ define float @f32_one_step_2(float %x) #1 {
define float @f32_one_step_2_divs(float %x) #1 {
; SSE-LABEL: f32_one_step_2_divs:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm1
; SSE-NEXT: mulss %xmm1, %xmm0
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -176,7 +176,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: f32_one_step_2_divs:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -188,7 +188,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: f32_one_step_2_divs:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; FMA-RECIP-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
; FMA-RECIP-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
@@ -197,7 +197,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: f32_one_step_2_divs:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
@@ -209,7 +209,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: f32_one_step_2_divs:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
@@ -221,7 +221,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_one_step_2_divs:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
@@ -230,7 +230,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: f32_one_step_2_divs:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [1:0.50]
@@ -242,7 +242,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: f32_one_step_2_divs:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [5:0.50]
; KNL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
@@ -251,7 +251,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: f32_one_step_2_divs:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [9:0.50]
; SKX-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
@@ -265,7 +265,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
define float @f32_two_step_2(float %x) #2 {
; SSE-LABEL: f32_two_step_2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: mulss %xmm2, %xmm3
@@ -283,7 +283,7 @@ define float @f32_two_step_2(float %x) #2 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: f32_two_step_2:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm2
; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
@@ -298,7 +298,7 @@ define float @f32_two_step_2(float %x) #2 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: f32_two_step_2:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
@@ -310,7 +310,7 @@ define float @f32_two_step_2(float %x) #2 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: f32_two_step_2:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
@@ -325,7 +325,7 @@ define float @f32_two_step_2(float %x) #2 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: f32_two_step_2:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [6:0.50]
@@ -340,7 +340,7 @@ define float @f32_two_step_2(float %x) #2 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: f32_two_step_2:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [1:0.50]
; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -352,7 +352,7 @@ define float @f32_two_step_2(float %x) #2 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: f32_two_step_2:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [1:0.50]
@@ -367,7 +367,7 @@ define float @f32_two_step_2(float %x) #2 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: f32_two_step_2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [1:0.50]
; KNL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -379,7 +379,7 @@ define float @f32_two_step_2(float %x) #2 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: f32_two_step_2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vrcpss %xmm0, %xmm0, %xmm2 # sched: [4:1.00]
; SKX-NEXT: vmovaps %xmm2, %xmm3 # sched: [1:1.00]
@@ -395,7 +395,7 @@ define float @f32_two_step_2(float %x) #2 {
define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; SSE-LABEL: v4f32_one_step2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -407,7 +407,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v4f32_one_step2:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -418,7 +418,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v4f32_one_step2:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
; FMA-RECIP-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
@@ -426,7 +426,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v4f32_one_step2:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
@@ -437,7 +437,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v4f32_one_step2:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
@@ -448,7 +448,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_one_step2:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
@@ -457,7 +457,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v4f32_one_step2:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
@@ -468,7 +468,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: v4f32_one_step2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
@@ -477,7 +477,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v4f32_one_step2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
@@ -489,7 +489,7 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; SSE-LABEL: v4f32_one_step_2_divs:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm1
; SSE-NEXT: mulps %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -502,7 +502,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v4f32_one_step_2_divs:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -514,7 +514,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v4f32_one_step_2_divs:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
; FMA-RECIP-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
@@ -523,7 +523,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v4f32_one_step_2_divs:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
@@ -535,7 +535,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v4f32_one_step_2_divs:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
@@ -547,7 +547,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_one_step_2_divs:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
@@ -557,7 +557,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v4f32_one_step_2_divs:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
@@ -569,7 +569,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: v4f32_one_step_2_divs:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
@@ -579,7 +579,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v4f32_one_step_2_divs:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0 # sched: [10:0.50]
; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
@@ -593,7 +593,7 @@ define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; SSE-LABEL: v4f32_two_step2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: mulps %xmm2, %xmm3
@@ -611,7 +611,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v4f32_two_step2:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm2
; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -626,7 +626,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v4f32_two_step2:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
; FMA-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
@@ -638,7 +638,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v4f32_two_step2:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
@@ -653,7 +653,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v4f32_two_step2:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:0.50]
@@ -668,7 +668,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v4f32_two_step2:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -680,7 +680,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v4f32_two_step2:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1] sched: [1:0.50]
@@ -695,7 +695,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: v4f32_two_step2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -707,7 +707,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v4f32_two_step2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %xmm0, %xmm1 # sched: [4:1.00]
; SKX-NEXT: vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
; SKX-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
@@ -723,7 +723,7 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; SSE-LABEL: v8f32_one_step2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm1, %xmm4
; SSE-NEXT: mulps %xmm4, %xmm1
; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -743,7 +743,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v8f32_one_step2:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -754,7 +754,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v8f32_one_step2:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
; FMA-RECIP-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
@@ -762,7 +762,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v8f32_one_step2:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
@@ -773,7 +773,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step2:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
@@ -784,7 +784,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_one_step2:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
@@ -793,7 +793,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_one_step2:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
@@ -804,7 +804,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: v8f32_one_step2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
@@ -813,7 +813,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v8f32_one_step2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %ymm0, %ymm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [4:0.33]
@@ -825,7 +825,7 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; SSE-LABEL: v8f32_one_step_2_divs:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -847,7 +847,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v8f32_one_step_2_divs:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -859,7 +859,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v8f32_one_step_2_divs:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
; FMA-RECIP-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
@@ -868,7 +868,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v8f32_one_step_2_divs:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
@@ -880,7 +880,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step_2_divs:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
@@ -892,7 +892,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_one_step_2_divs:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
@@ -902,7 +902,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_one_step_2_divs:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
@@ -914,7 +914,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: v8f32_one_step_2_divs:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
@@ -924,7 +924,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v8f32_one_step_2_divs:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %ymm0, %ymm1 # sched: [4:1.00]
; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0 # sched: [11:0.50]
; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [4:0.33]
@@ -938,7 +938,7 @@ define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; SSE-LABEL: v8f32_two_step2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: rcpps %xmm1, %xmm3
; SSE-NEXT: movaps %xmm1, %xmm4
@@ -970,7 +970,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v8f32_two_step2:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm2
; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -985,7 +985,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v8f32_two_step2:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; FMA-RECIP-NEXT: vmovaps %ymm1, %ymm3
@@ -997,7 +997,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v8f32_two_step2:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:2.00]
; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:2.00]
@@ -1012,7 +1012,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_two_step2:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:1.00]
; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50]
@@ -1027,7 +1027,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_two_step2:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; HASWELL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
@@ -1039,7 +1039,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_two_step2:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
@@ -1054,7 +1054,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: v8f32_two_step2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [11:2.00]
; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [1:0.50]
; KNL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
@@ -1066,7 +1066,7 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v8f32_two_step2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %ymm0, %ymm1 # sched: [4:1.00]
; SKX-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
; SKX-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
@@ -1082,48 +1082,48 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
define <8 x float> @v8f32_no_step(<8 x float> %x) #3 {
; SSE-LABEL: v8f32_no_step:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm0
; SSE-NEXT: rcpps %xmm1, %xmm1
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v8f32_no_step:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm0
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v8f32_no_step:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v8f32_no_step:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_no_step:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_no_step:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_no_step:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00]
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: v8f32_no_step:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v8f32_no_step:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -1132,7 +1132,7 @@ define <8 x float> @v8f32_no_step(<8 x float> %x) #3 {
define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 {
; SSE-LABEL: v8f32_no_step2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpps %xmm1, %xmm1
; SSE-NEXT: rcpps %xmm0, %xmm0
; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
@@ -1140,49 +1140,49 @@ define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 {
; SSE-NEXT: retq
;
; AVX-RECIP-LABEL: v8f32_no_step2:
-; AVX-RECIP: # BB#0:
+; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm0
; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
; AVX-RECIP-NEXT: retq
;
; FMA-RECIP-LABEL: v8f32_no_step2:
-; FMA-RECIP: # BB#0:
+; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm0
; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
; FMA-RECIP-NEXT: retq
;
; BTVER2-LABEL: v8f32_no_step2:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_no_step2:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: v8f32_no_step2:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00]
; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; HASWELL-NO-FMA-LABEL: v8f32_no_step2:
-; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00]
; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [5:0.50]
; HASWELL-NO-FMA-NEXT: retq # sched: [2:1.00]
;
; KNL-LABEL: v8f32_no_step2:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00]
; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [5:0.50]
; KNL-NEXT: retq # sched: [2:1.00]
;
; SKX-LABEL: v8f32_no_step2:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
diff --git a/test/CodeGen/X86/recip-pic.ll b/test/CodeGen/X86/recip-pic.ll
index a4c1625728c..b3e363ea5d1 100644
--- a/test/CodeGen/X86/recip-pic.ll
+++ b/test/CodeGen/X86/recip-pic.ll
@@ -3,7 +3,7 @@
define fastcc float @foo(float %x) unnamed_addr #0 {
; CHECK-LABEL: foo:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: calll .L0$pb
; CHECK-NEXT: .cfi_adjust_cfa_offset 4
; CHECK-NEXT: .L0$pb:
diff --git a/test/CodeGen/X86/reduce-trunc-shl.ll b/test/CodeGen/X86/reduce-trunc-shl.ll
index 9f01c9e38c9..58835c9a495 100644
--- a/test/CodeGen/X86/reduce-trunc-shl.ll
+++ b/test/CodeGen/X86/reduce-trunc-shl.ll
@@ -4,7 +4,7 @@
define void @trunc_shl_7_v4i32_v4i64(<4 x i32> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
; SSE2-LABEL: trunc_shl_7_v4i32_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rsi), %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
; SSE2-NEXT: pslld $7, %xmm0
@@ -12,7 +12,7 @@ define void @trunc_shl_7_v4i32_v4i64(<4 x i32> addrspace(1)* %out, <4 x i64> add
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_7_v4i32_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpslld $7, %xmm0, %xmm0
@@ -28,7 +28,7 @@ define void @trunc_shl_7_v4i32_v4i64(<4 x i32> addrspace(1)* %out, <4 x i64> add
define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
; SSE2-LABEL: trunc_shl_v8i16_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pslld $17, %xmm0
; SSE2-NEXT: pslld $17, %xmm1
; SSE2-NEXT: pslld $16, %xmm1
@@ -39,7 +39,7 @@ define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_v8i16_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $17, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -53,14 +53,14 @@ define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
define void @trunc_shl_31_i32_i64(i32* %out, i64* %in) {
; SSE2-LABEL: trunc_shl_31_i32_i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movl (%rsi), %eax
; SSE2-NEXT: shll $31, %eax
; SSE2-NEXT: movl %eax, (%rdi)
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_31_i32_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movl (%rsi), %eax
; AVX2-NEXT: shll $31, %eax
; AVX2-NEXT: movl %eax, (%rdi)
@@ -74,12 +74,12 @@ define void @trunc_shl_31_i32_i64(i32* %out, i64* %in) {
define void @trunc_shl_32_i32_i64(i32* %out, i64* %in) {
; SSE2-LABEL: trunc_shl_32_i32_i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movl $0, (%rdi)
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_32_i32_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movl $0, (%rdi)
; AVX2-NEXT: retq
%val = load i64, i64* %in
@@ -91,14 +91,14 @@ define void @trunc_shl_32_i32_i64(i32* %out, i64* %in) {
define void @trunc_shl_15_i16_i64(i16* %out, i64* %in) {
; SSE2-LABEL: trunc_shl_15_i16_i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzwl (%rsi), %eax
; SSE2-NEXT: shlw $15, %ax
; SSE2-NEXT: movw %ax, (%rdi)
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_15_i16_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movzwl (%rsi), %eax
; AVX2-NEXT: shlw $15, %ax
; AVX2-NEXT: movw %ax, (%rdi)
@@ -112,12 +112,12 @@ define void @trunc_shl_15_i16_i64(i16* %out, i64* %in) {
define void @trunc_shl_16_i16_i64(i16* %out, i64* %in) {
; SSE2-LABEL: trunc_shl_16_i16_i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movw $0, (%rdi)
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_16_i16_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movw $0, (%rdi)
; AVX2-NEXT: retq
%val = load i64, i64* %in
@@ -129,14 +129,14 @@ define void @trunc_shl_16_i16_i64(i16* %out, i64* %in) {
define void @trunc_shl_7_i8_i64(i8* %out, i64* %in) {
; SSE2-LABEL: trunc_shl_7_i8_i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movb (%rsi), %al
; SSE2-NEXT: shlb $7, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_7_i8_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movb (%rsi), %al
; AVX2-NEXT: shlb $7, %al
; AVX2-NEXT: movb %al, (%rdi)
@@ -150,12 +150,12 @@ define void @trunc_shl_7_i8_i64(i8* %out, i64* %in) {
define void @trunc_shl_8_i8_i64(i8* %out, i64* %in) {
; SSE2-LABEL: trunc_shl_8_i8_i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movb $0, (%rdi)
; SSE2-NEXT: retq
;
; AVX2-LABEL: trunc_shl_8_i8_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movb $0, (%rdi)
; AVX2-NEXT: retq
%val = load i64, i64* %in
diff --git a/test/CodeGen/X86/rem.ll b/test/CodeGen/X86/rem.ll
index 7b138f02eb4..672baa5c1bd 100644
--- a/test/CodeGen/X86/rem.ll
+++ b/test/CodeGen/X86/rem.ll
@@ -3,7 +3,7 @@
define i32 @test1(i32 %X) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl $-2139062143, %edx # imm = 0x80808081
; CHECK-NEXT: movl %ecx, %eax
@@ -25,7 +25,7 @@ define i32 @test1(i32 %X) {
define i32 @test2(i32 %X) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: sarl $31, %ecx
@@ -40,7 +40,7 @@ define i32 @test2(i32 %X) {
define i32 @test3(i32 %X) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl $-2139062143, %edx # imm = 0x80808081
; CHECK-NEXT: movl %ecx, %eax
@@ -58,7 +58,7 @@ define i32 @test3(i32 %X) {
define i32 @test4(i32 %X) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: retl
%tmp1 = urem i32 %X, 256
@@ -67,7 +67,7 @@ define i32 @test4(i32 %X) {
define i32 @test5(i32 %X) nounwind readnone {
; CHECK-LABEL: test5:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $41, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: idivl {{[0-9]+}}(%esp)
diff --git a/test/CodeGen/X86/replace-load-and-with-bzhi.ll b/test/CodeGen/X86/replace-load-and-with-bzhi.ll
index be9ecada106..51aed408ad2 100644
--- a/test/CodeGen/X86/replace-load-and-with-bzhi.ll
+++ b/test/CodeGen/X86/replace-load-and-with-bzhi.ll
@@ -9,14 +9,14 @@
define i32 @f32_bzhi(i32 %x, i32 %y) local_unnamed_addr {
; CHECK-LABEL: f32_bzhi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movslq %esi, %rax
; CHECK-NEXT: andl fill_table32(,%rax,4), %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: ret{{[l|q]}}
;
; CHECK32-LABEL: f32_bzhi:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movl fill_table32(,%eax,4), %eax
; CHECK32-NEXT: andl {{[0-9]+}}(%esp), %eax
@@ -31,14 +31,14 @@ entry:
define i32 @f32_bzhi_partial(i32 %x, i32 %y) local_unnamed_addr {
; CHECK-LABEL: f32_bzhi_partial:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movslq %esi, %rax
; CHECK-NEXT: andl fill_table32_partial(,%rax,4), %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: ret{{[l|q]}}
;
; CHECK32-LABEL: f32_bzhi_partial:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movl fill_table32_partial(,%eax,4), %eax
; CHECK32-NEXT: andl {{[0-9]+}}(%esp), %eax
@@ -53,13 +53,13 @@ entry:
define i64 @f64_bzhi(i64 %x, i64 %y) local_unnamed_addr {
; CHECK-LABEL: f64_bzhi:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andq fill_table64(,%rsi,8), %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: ret{{[l|q]}}
;
; CHECK32-LABEL: f64_bzhi:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movl fill_table64+4(,%eax,8), %edx
; CHECK32-NEXT: movl fill_table64(,%eax,8), %eax
@@ -75,13 +75,13 @@ entry:
define i64 @f64_bzhi_partial(i64 %x, i64 %y) local_unnamed_addr {
; CHECK-LABEL: f64_bzhi_partial:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andq fill_table64_partial(,%rsi,8), %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: ret{{[l|q]}}
;
; CHECK32-LABEL: f64_bzhi_partial:
-; CHECK32: # BB#0: # %entry
+; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movl fill_table64_partial+4(,%eax,8), %edx
; CHECK32-NEXT: movl fill_table64_partial(,%eax,8), %eax
diff --git a/test/CodeGen/X86/ret-mmx.ll b/test/CodeGen/X86/ret-mmx.ll
index 65c3ac0cc44..6a9e59193aa 100644
--- a/test/CodeGen/X86/ret-mmx.ll
+++ b/test/CodeGen/X86/ret-mmx.ll
@@ -6,7 +6,7 @@
define void @t1() nounwind {
; CHECK-LABEL: t1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq _return_v1di
; CHECK-NEXT: movq _g_v1di@{{.*}}(%rip), %rcx
@@ -23,7 +23,7 @@ declare <1 x i64> @return_v1di()
define <1 x i64> @t2() nounwind {
; CHECK-LABEL: t2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retq
ret <1 x i64> <i64 1>
@@ -31,7 +31,7 @@ define <1 x i64> @t2() nounwind {
define <2 x i32> @t3() nounwind {
; CHECK-LABEL: t3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: retq
@@ -40,7 +40,7 @@ define <2 x i32> @t3() nounwind {
define double @t4() nounwind {
; CHECK-LABEL: t4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/rot16.ll b/test/CodeGen/X86/rot16.ll
index 809e3f714e5..481163e3126 100644
--- a/test/CodeGen/X86/rot16.ll
+++ b/test/CodeGen/X86/rot16.ll
@@ -4,14 +4,14 @@
define i16 @foo(i16 %x, i16 %y, i16 %z) nounwind {
; X32-LABEL: foo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: rolw %cl, %ax
; X32-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shldw %cl, %di, %di
; X64-NEXT: movl %edi, %eax
@@ -25,7 +25,7 @@ define i16 @foo(i16 %x, i16 %y, i16 %z) nounwind {
define i16 @bar(i16 %x, i16 %y, i16 %z) nounwind {
; X32-LABEL: bar:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
@@ -33,7 +33,7 @@ define i16 @bar(i16 %x, i16 %y, i16 %z) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: bar:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shldw %cl, %di, %si
; X64-NEXT: movl %esi, %eax
@@ -47,14 +47,14 @@ define i16 @bar(i16 %x, i16 %y, i16 %z) nounwind {
define i16 @un(i16 %x, i16 %y, i16 %z) nounwind {
; X32-LABEL: un:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: rorw %cl, %ax
; X32-NEXT: retl
;
; X64-LABEL: un:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shrdw %cl, %di, %di
; X64-NEXT: movl %edi, %eax
@@ -68,7 +68,7 @@ define i16 @un(i16 %x, i16 %y, i16 %z) nounwind {
define i16 @bu(i16 %x, i16 %y, i16 %z) nounwind {
; X32-LABEL: bu:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
@@ -76,7 +76,7 @@ define i16 @bu(i16 %x, i16 %y, i16 %z) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: bu:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shrdw %cl, %di, %si
; X64-NEXT: movl %esi, %eax
@@ -90,13 +90,13 @@ define i16 @bu(i16 %x, i16 %y, i16 %z) nounwind {
define i16 @xfoo(i16 %x, i16 %y, i16 %z) nounwind {
; X32-LABEL: xfoo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: rolw $5, %ax
; X32-NEXT: retl
;
; X64-LABEL: xfoo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rolw $5, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -108,14 +108,14 @@ define i16 @xfoo(i16 %x, i16 %y, i16 %z) nounwind {
define i16 @xbar(i16 %x, i16 %y, i16 %z) nounwind {
; X32-LABEL: xbar:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shldw $5, %cx, %ax
; X32-NEXT: retl
;
; X64-LABEL: xbar:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shldw $5, %di, %si
; X64-NEXT: movl %esi, %eax
; X64-NEXT: retq
@@ -127,13 +127,13 @@ define i16 @xbar(i16 %x, i16 %y, i16 %z) nounwind {
define i16 @xun(i16 %x, i16 %y, i16 %z) nounwind {
; X32-LABEL: xun:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: rolw $11, %ax
; X32-NEXT: retl
;
; X64-LABEL: xun:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rolw $11, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -145,14 +145,14 @@ define i16 @xun(i16 %x, i16 %y, i16 %z) nounwind {
define i16 @xbu(i16 %x, i16 %y, i16 %z) nounwind {
; X32-LABEL: xbu:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shldw $11, %cx, %ax
; X32-NEXT: retl
;
; X64-LABEL: xbu:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shldw $11, %si, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/rot32.ll b/test/CodeGen/X86/rot32.ll
index 305defeeea7..bd5329168c5 100644
--- a/test/CodeGen/X86/rot32.ll
+++ b/test/CodeGen/X86/rot32.ll
@@ -5,7 +5,7 @@
define i32 @foo(i32 %x, i32 %y, i32 %z) nounwind readnone {
; ALL-LABEL: foo:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movb {{[0-9]+}}(%esp), %cl
; ALL-NEXT: movl {{[0-9]+}}(%esp), %eax
; ALL-NEXT: roll %cl, %eax
@@ -20,7 +20,7 @@ entry:
define i32 @bar(i32 %x, i32 %y, i32 %z) nounwind readnone {
; ALL-LABEL: bar:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movb {{[0-9]+}}(%esp), %cl
; ALL-NEXT: movl {{[0-9]+}}(%esp), %edx
; ALL-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -36,7 +36,7 @@ entry:
define i32 @un(i32 %x, i32 %y, i32 %z) nounwind readnone {
; ALL-LABEL: un:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movb {{[0-9]+}}(%esp), %cl
; ALL-NEXT: movl {{[0-9]+}}(%esp), %eax
; ALL-NEXT: rorl %cl, %eax
@@ -51,7 +51,7 @@ entry:
define i32 @bu(i32 %x, i32 %y, i32 %z) nounwind readnone {
; ALL-LABEL: bu:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movb {{[0-9]+}}(%esp), %cl
; ALL-NEXT: movl {{[0-9]+}}(%esp), %edx
; ALL-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -67,19 +67,19 @@ entry:
define i32 @xfoo(i32 %x, i32 %y, i32 %z) nounwind readnone {
; X86-LABEL: xfoo:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: roll $7, %eax
; X86-NEXT: retl
;
; SHLD-LABEL: xfoo:
-; SHLD: # BB#0: # %entry
+; SHLD: # %bb.0: # %entry
; SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; SHLD-NEXT: shldl $7, %eax, %eax
; SHLD-NEXT: retl
;
; BMI2-LABEL: xfoo:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: rorxl $25, {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: retl
entry:
@@ -91,21 +91,21 @@ entry:
define i32 @xfoop(i32* %p) nounwind readnone {
; X86-LABEL: xfoop:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl (%eax), %eax
; X86-NEXT: roll $7, %eax
; X86-NEXT: retl
;
; SHLD-LABEL: xfoop:
-; SHLD: # BB#0: # %entry
+; SHLD: # %bb.0: # %entry
; SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; SHLD-NEXT: movl (%eax), %eax
; SHLD-NEXT: shldl $7, %eax, %eax
; SHLD-NEXT: retl
;
; BMI2-LABEL: xfoop:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: rorxl $25, (%eax), %eax
; BMI2-NEXT: retl
@@ -119,7 +119,7 @@ entry:
define i32 @xbar(i32 %x, i32 %y, i32 %z) nounwind readnone {
; ALL-LABEL: xbar:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ALL-NEXT: movl {{[0-9]+}}(%esp), %eax
; ALL-NEXT: shldl $7, %ecx, %eax
@@ -133,19 +133,19 @@ entry:
define i32 @xun(i32 %x, i32 %y, i32 %z) nounwind readnone {
; X86-LABEL: xun:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: roll $25, %eax
; X86-NEXT: retl
;
; SHLD-LABEL: xun:
-; SHLD: # BB#0: # %entry
+; SHLD: # %bb.0: # %entry
; SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; SHLD-NEXT: shldl $25, %eax, %eax
; SHLD-NEXT: retl
;
; BMI2-LABEL: xun:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: rorxl $7, {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: retl
entry:
@@ -157,21 +157,21 @@ entry:
define i32 @xunp(i32* %p) nounwind readnone {
; X86-LABEL: xunp:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl (%eax), %eax
; X86-NEXT: roll $25, %eax
; X86-NEXT: retl
;
; SHLD-LABEL: xunp:
-; SHLD: # BB#0: # %entry
+; SHLD: # %bb.0: # %entry
; SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; SHLD-NEXT: movl (%eax), %eax
; SHLD-NEXT: shldl $25, %eax, %eax
; SHLD-NEXT: retl
;
; BMI2-LABEL: xunp:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: rorxl $7, (%eax), %eax
; BMI2-NEXT: retl
@@ -187,7 +187,7 @@ entry:
define i32 @xbu(i32 %x, i32 %y, i32 %z) nounwind readnone {
; ALL-LABEL: xbu:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ALL-NEXT: movl {{[0-9]+}}(%esp), %eax
; ALL-NEXT: shldl $25, %ecx, %eax
diff --git a/test/CodeGen/X86/rot64.ll b/test/CodeGen/X86/rot64.ll
index b2e7d481d8c..e8f090cff99 100644
--- a/test/CodeGen/X86/rot64.ll
+++ b/test/CodeGen/X86/rot64.ll
@@ -5,7 +5,7 @@
define i64 @foo(i64 %x, i64 %y, i64 %z) nounwind readnone {
; ALL-LABEL: foo:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movl %edx, %ecx
; ALL-NEXT: rolq %cl, %rdi
; ALL-NEXT: movq %rdi, %rax
@@ -20,7 +20,7 @@ entry:
define i64 @bar(i64 %x, i64 %y, i64 %z) nounwind readnone {
; ALL-LABEL: bar:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movl %edx, %ecx
; ALL-NEXT: shldq %cl, %rdi, %rsi
; ALL-NEXT: movq %rsi, %rax
@@ -35,7 +35,7 @@ entry:
define i64 @un(i64 %x, i64 %y, i64 %z) nounwind readnone {
; ALL-LABEL: un:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movl %edx, %ecx
; ALL-NEXT: rorq %cl, %rdi
; ALL-NEXT: movq %rdi, %rax
@@ -50,7 +50,7 @@ entry:
define i64 @bu(i64 %x, i64 %y, i64 %z) nounwind readnone {
; ALL-LABEL: bu:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: movl %edx, %ecx
; ALL-NEXT: shrdq %cl, %rdi, %rsi
; ALL-NEXT: movq %rsi, %rax
@@ -65,19 +65,19 @@ entry:
define i64 @xfoo(i64 %x, i64 %y, i64 %z) nounwind readnone {
; X64-LABEL: xfoo:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: rolq $7, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
;
; SHLD-LABEL: xfoo:
-; SHLD: # BB#0: # %entry
+; SHLD: # %bb.0: # %entry
; SHLD-NEXT: shldq $7, %rdi, %rdi
; SHLD-NEXT: movq %rdi, %rax
; SHLD-NEXT: retq
;
; BMI2-LABEL: xfoo:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: rorxq $57, %rdi, %rax
; BMI2-NEXT: retq
entry:
@@ -89,19 +89,19 @@ entry:
define i64 @xfoop(i64* %p) nounwind readnone {
; X64-LABEL: xfoop:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: rolq $7, %rax
; X64-NEXT: retq
;
; SHLD-LABEL: xfoop:
-; SHLD: # BB#0: # %entry
+; SHLD: # %bb.0: # %entry
; SHLD-NEXT: movq (%rdi), %rax
; SHLD-NEXT: shldq $7, %rax, %rax
; SHLD-NEXT: retq
;
; BMI2-LABEL: xfoop:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: rorxq $57, (%rdi), %rax
; BMI2-NEXT: retq
entry:
@@ -114,7 +114,7 @@ entry:
define i64 @xbar(i64 %x, i64 %y, i64 %z) nounwind readnone {
; ALL-LABEL: xbar:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: shrdq $57, %rsi, %rdi
; ALL-NEXT: movq %rdi, %rax
; ALL-NEXT: retq
@@ -127,19 +127,19 @@ entry:
define i64 @xun(i64 %x, i64 %y, i64 %z) nounwind readnone {
; X64-LABEL: xun:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: rolq $57, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
;
; SHLD-LABEL: xun:
-; SHLD: # BB#0: # %entry
+; SHLD: # %bb.0: # %entry
; SHLD-NEXT: shldq $57, %rdi, %rdi
; SHLD-NEXT: movq %rdi, %rax
; SHLD-NEXT: retq
;
; BMI2-LABEL: xun:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: rorxq $7, %rdi, %rax
; BMI2-NEXT: retq
entry:
@@ -151,19 +151,19 @@ entry:
define i64 @xunp(i64* %p) nounwind readnone {
; X64-LABEL: xunp:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: rolq $57, %rax
; X64-NEXT: retq
;
; SHLD-LABEL: xunp:
-; SHLD: # BB#0: # %entry
+; SHLD: # %bb.0: # %entry
; SHLD-NEXT: movq (%rdi), %rax
; SHLD-NEXT: shldq $57, %rax, %rax
; SHLD-NEXT: retq
;
; BMI2-LABEL: xunp:
-; BMI2: # BB#0: # %entry
+; BMI2: # %bb.0: # %entry
; BMI2-NEXT: rorxq $7, (%rdi), %rax
; BMI2-NEXT: retq
entry:
@@ -176,7 +176,7 @@ entry:
define i64 @xbu(i64 %x, i64 %y, i64 %z) nounwind readnone {
; ALL-LABEL: xbu:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: shldq $57, %rsi, %rdi
; ALL-NEXT: movq %rdi, %rax
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/rotate.ll b/test/CodeGen/X86/rotate.ll
index 4be3a4c2391..6b6c9f0dec3 100644
--- a/test/CodeGen/X86/rotate.ll
+++ b/test/CodeGen/X86/rotate.ll
@@ -4,7 +4,7 @@
define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
; 32-LABEL: rotl64:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: pushl %ebx
; 32-NEXT: pushl %edi
; 32-NEXT: pushl %esi
@@ -17,7 +17,7 @@ define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: shldl %cl, %esi, %edx
; 32-NEXT: testb $32, %cl
; 32-NEXT: je .LBB0_2
-; 32-NEXT: # BB#1:
+; 32-NEXT: # %bb.1:
; 32-NEXT: movl %eax, %edx
; 32-NEXT: xorl %eax, %eax
; 32-NEXT: .LBB0_2:
@@ -29,7 +29,7 @@ define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: shrdl %cl, %edi, %esi
; 32-NEXT: testb $32, %ch
; 32-NEXT: je .LBB0_4
-; 32-NEXT: # BB#3:
+; 32-NEXT: # %bb.3:
; 32-NEXT: movl %ebx, %esi
; 32-NEXT: xorl %ebx, %ebx
; 32-NEXT: .LBB0_4:
@@ -41,7 +41,7 @@ define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: retl
;
; 64-LABEL: rotl64:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: movl %esi, %ecx
; 64-NEXT: rolq %cl, %rdi
; 64-NEXT: movq %rdi, %rax
@@ -57,7 +57,7 @@ define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
; 32-LABEL: rotr64:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: pushl %ebx
; 32-NEXT: pushl %edi
; 32-NEXT: pushl %esi
@@ -70,7 +70,7 @@ define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: shrdl %cl, %esi, %eax
; 32-NEXT: testb $32, %cl
; 32-NEXT: je .LBB1_2
-; 32-NEXT: # BB#1:
+; 32-NEXT: # %bb.1:
; 32-NEXT: movl %edx, %eax
; 32-NEXT: xorl %edx, %edx
; 32-NEXT: .LBB1_2:
@@ -82,7 +82,7 @@ define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: shldl %cl, %edi, %esi
; 32-NEXT: testb $32, %ch
; 32-NEXT: je .LBB1_4
-; 32-NEXT: # BB#3:
+; 32-NEXT: # %bb.3:
; 32-NEXT: movl %ebx, %esi
; 32-NEXT: xorl %ebx, %ebx
; 32-NEXT: .LBB1_4:
@@ -94,7 +94,7 @@ define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: retl
;
; 64-LABEL: rotr64:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: movl %esi, %ecx
; 64-NEXT: rorq %cl, %rdi
; 64-NEXT: movq %rdi, %rax
@@ -110,7 +110,7 @@ define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
define i64 @rotli64(i64 %A) nounwind {
; 32-LABEL: rotli64:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; 32-NEXT: movl %ecx, %edx
@@ -119,7 +119,7 @@ define i64 @rotli64(i64 %A) nounwind {
; 32-NEXT: retl
;
; 64-LABEL: rotli64:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolq $5, %rdi
; 64-NEXT: movq %rdi, %rax
; 64-NEXT: retq
@@ -131,7 +131,7 @@ define i64 @rotli64(i64 %A) nounwind {
define i64 @rotri64(i64 %A) nounwind {
; 32-LABEL: rotri64:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %edx
; 32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; 32-NEXT: movl %ecx, %eax
@@ -140,7 +140,7 @@ define i64 @rotri64(i64 %A) nounwind {
; 32-NEXT: retl
;
; 64-LABEL: rotri64:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolq $59, %rdi
; 64-NEXT: movq %rdi, %rax
; 64-NEXT: retq
@@ -152,7 +152,7 @@ define i64 @rotri64(i64 %A) nounwind {
define i64 @rotl1_64(i64 %A) nounwind {
; 32-LABEL: rotl1_64:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; 32-NEXT: movl %ecx, %edx
@@ -161,7 +161,7 @@ define i64 @rotl1_64(i64 %A) nounwind {
; 32-NEXT: retl
;
; 64-LABEL: rotl1_64:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolq %rdi
; 64-NEXT: movq %rdi, %rax
; 64-NEXT: retq
@@ -173,7 +173,7 @@ define i64 @rotl1_64(i64 %A) nounwind {
define i64 @rotr1_64(i64 %A) nounwind {
; 32-LABEL: rotr1_64:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %edx
; 32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; 32-NEXT: movl %ecx, %eax
@@ -182,7 +182,7 @@ define i64 @rotr1_64(i64 %A) nounwind {
; 32-NEXT: retl
;
; 64-LABEL: rotr1_64:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rorq %rdi
; 64-NEXT: movq %rdi, %rax
; 64-NEXT: retq
@@ -194,14 +194,14 @@ define i64 @rotr1_64(i64 %A) nounwind {
define i32 @rotl32(i32 %A, i8 %Amt) nounwind {
; 32-LABEL: rotl32:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %cl
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: roll %cl, %eax
; 32-NEXT: retl
;
; 64-LABEL: rotl32:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: movl %esi, %ecx
; 64-NEXT: roll %cl, %edi
; 64-NEXT: movl %edi, %eax
@@ -217,14 +217,14 @@ define i32 @rotl32(i32 %A, i8 %Amt) nounwind {
define i32 @rotr32(i32 %A, i8 %Amt) nounwind {
; 32-LABEL: rotr32:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %cl
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorl %cl, %eax
; 32-NEXT: retl
;
; 64-LABEL: rotr32:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: movl %esi, %ecx
; 64-NEXT: rorl %cl, %edi
; 64-NEXT: movl %edi, %eax
@@ -240,13 +240,13 @@ define i32 @rotr32(i32 %A, i8 %Amt) nounwind {
define i32 @rotli32(i32 %A) nounwind {
; 32-LABEL: rotli32:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: roll $5, %eax
; 32-NEXT: retl
;
; 64-LABEL: rotli32:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: roll $5, %edi
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -258,13 +258,13 @@ define i32 @rotli32(i32 %A) nounwind {
define i32 @rotri32(i32 %A) nounwind {
; 32-LABEL: rotri32:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: roll $27, %eax
; 32-NEXT: retl
;
; 64-LABEL: rotri32:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: roll $27, %edi
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -276,13 +276,13 @@ define i32 @rotri32(i32 %A) nounwind {
define i32 @rotl1_32(i32 %A) nounwind {
; 32-LABEL: rotl1_32:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: roll %eax
; 32-NEXT: retl
;
; 64-LABEL: rotl1_32:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: roll %edi
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -294,13 +294,13 @@ define i32 @rotl1_32(i32 %A) nounwind {
define i32 @rotr1_32(i32 %A) nounwind {
; 32-LABEL: rotr1_32:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorl %eax
; 32-NEXT: retl
;
; 64-LABEL: rotr1_32:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rorl %edi
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -312,14 +312,14 @@ define i32 @rotr1_32(i32 %A) nounwind {
define i16 @rotl16(i16 %A, i8 %Amt) nounwind {
; 32-LABEL: rotl16:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %cl
; 32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rolw %cl, %ax
; 32-NEXT: retl
;
; 64-LABEL: rotl16:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: movl %esi, %ecx
; 64-NEXT: rolw %cl, %di
; 64-NEXT: movl %edi, %eax
@@ -335,14 +335,14 @@ define i16 @rotl16(i16 %A, i8 %Amt) nounwind {
define i16 @rotr16(i16 %A, i8 %Amt) nounwind {
; 32-LABEL: rotr16:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %cl
; 32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorw %cl, %ax
; 32-NEXT: retl
;
; 64-LABEL: rotr16:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: movl %esi, %ecx
; 64-NEXT: rorw %cl, %di
; 64-NEXT: movl %edi, %eax
@@ -358,13 +358,13 @@ define i16 @rotr16(i16 %A, i8 %Amt) nounwind {
define i16 @rotli16(i16 %A) nounwind {
; 32-LABEL: rotli16:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rolw $5, %ax
; 32-NEXT: retl
;
; 64-LABEL: rotli16:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolw $5, %di
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -376,13 +376,13 @@ define i16 @rotli16(i16 %A) nounwind {
define i16 @rotri16(i16 %A) nounwind {
; 32-LABEL: rotri16:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rolw $11, %ax
; 32-NEXT: retl
;
; 64-LABEL: rotri16:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolw $11, %di
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -394,13 +394,13 @@ define i16 @rotri16(i16 %A) nounwind {
define i16 @rotl1_16(i16 %A) nounwind {
; 32-LABEL: rotl1_16:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rolw %ax
; 32-NEXT: retl
;
; 64-LABEL: rotl1_16:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolw %di
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -412,13 +412,13 @@ define i16 @rotl1_16(i16 %A) nounwind {
define i16 @rotr1_16(i16 %A) nounwind {
; 32-LABEL: rotr1_16:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorw %ax
; 32-NEXT: retl
;
; 64-LABEL: rotr1_16:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rorw %di
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -430,14 +430,14 @@ define i16 @rotr1_16(i16 %A) nounwind {
define i8 @rotl8(i8 %A, i8 %Amt) nounwind {
; 32-LABEL: rotl8:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %cl
; 32-NEXT: movb {{[0-9]+}}(%esp), %al
; 32-NEXT: rolb %cl, %al
; 32-NEXT: retl
;
; 64-LABEL: rotl8:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: movl %esi, %ecx
; 64-NEXT: rolb %cl, %dil
; 64-NEXT: movl %edi, %eax
@@ -451,14 +451,14 @@ define i8 @rotl8(i8 %A, i8 %Amt) nounwind {
define i8 @rotr8(i8 %A, i8 %Amt) nounwind {
; 32-LABEL: rotr8:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %cl
; 32-NEXT: movb {{[0-9]+}}(%esp), %al
; 32-NEXT: rorb %cl, %al
; 32-NEXT: retl
;
; 64-LABEL: rotr8:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: movl %esi, %ecx
; 64-NEXT: rorb %cl, %dil
; 64-NEXT: movl %edi, %eax
@@ -472,13 +472,13 @@ define i8 @rotr8(i8 %A, i8 %Amt) nounwind {
define i8 @rotli8(i8 %A) nounwind {
; 32-LABEL: rotli8:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %al
; 32-NEXT: rolb $5, %al
; 32-NEXT: retl
;
; 64-LABEL: rotli8:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolb $5, %dil
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -490,13 +490,13 @@ define i8 @rotli8(i8 %A) nounwind {
define i8 @rotri8(i8 %A) nounwind {
; 32-LABEL: rotri8:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %al
; 32-NEXT: rolb $3, %al
; 32-NEXT: retl
;
; 64-LABEL: rotri8:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolb $3, %dil
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -508,13 +508,13 @@ define i8 @rotri8(i8 %A) nounwind {
define i8 @rotl1_8(i8 %A) nounwind {
; 32-LABEL: rotl1_8:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %al
; 32-NEXT: rolb %al
; 32-NEXT: retl
;
; 64-LABEL: rotl1_8:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rolb %dil
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -526,13 +526,13 @@ define i8 @rotl1_8(i8 %A) nounwind {
define i8 @rotr1_8(i8 %A) nounwind {
; 32-LABEL: rotr1_8:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movb {{[0-9]+}}(%esp), %al
; 32-NEXT: rorb %al
; 32-NEXT: retl
;
; 64-LABEL: rotr1_8:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rorb %dil
; 64-NEXT: movl %edi, %eax
; 64-NEXT: retq
@@ -544,7 +544,7 @@ define i8 @rotr1_8(i8 %A) nounwind {
define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 32-LABEL: rotr1_64_mem:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: pushl %esi
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: movl (%eax), %ecx
@@ -558,7 +558,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 32-NEXT: retl
;
; 64-LABEL: rotr1_64_mem:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rorq (%rdi)
; 64-NEXT: retq
@@ -572,13 +572,13 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
define void @rotr1_32_mem(i32* %Aptr) nounwind {
; 32-LABEL: rotr1_32_mem:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorl (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_32_mem:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rorl (%rdi)
; 64-NEXT: retq
%A = load i32, i32 *%Aptr
@@ -591,13 +591,13 @@ define void @rotr1_32_mem(i32* %Aptr) nounwind {
define void @rotr1_16_mem(i16* %Aptr) nounwind {
; 32-LABEL: rotr1_16_mem:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorw (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_16_mem:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rorw (%rdi)
; 64-NEXT: retq
%A = load i16, i16 *%Aptr
@@ -610,13 +610,13 @@ define void @rotr1_16_mem(i16* %Aptr) nounwind {
define void @rotr1_8_mem(i8* %Aptr) nounwind {
; 32-LABEL: rotr1_8_mem:
-; 32: # BB#0:
+; 32: # %bb.0:
; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorb (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_8_mem:
-; 64: # BB#0:
+; 64: # %bb.0:
; 64-NEXT: rorb (%rdi)
; 64-NEXT: retq
%A = load i8, i8 *%Aptr
diff --git a/test/CodeGen/X86/rotate4.ll b/test/CodeGen/X86/rotate4.ll
index 242aaff441c..79822999dca 100644
--- a/test/CodeGen/X86/rotate4.ll
+++ b/test/CodeGen/X86/rotate4.ll
@@ -6,7 +6,7 @@
define i32 @rotate_left_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_left_32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: roll %cl, %edi
; CHECK-NEXT: movl %edi, %eax
@@ -22,7 +22,7 @@ define i32 @rotate_left_32(i32 %a, i32 %b) {
define i32 @rotate_right_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_right_32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rorl %cl, %edi
; CHECK-NEXT: movl %edi, %eax
@@ -38,7 +38,7 @@ define i32 @rotate_right_32(i32 %a, i32 %b) {
define i64 @rotate_left_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_left_64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rolq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
@@ -54,7 +54,7 @@ define i64 @rotate_left_64(i64 %a, i64 %b) {
define i64 @rotate_right_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_right_64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rorq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
@@ -72,7 +72,7 @@ define i64 @rotate_right_64(i64 %a, i64 %b) {
define void @rotate_left_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_left_m32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: roll %cl, (%rdi)
; CHECK-NEXT: retq
@@ -89,7 +89,7 @@ define void @rotate_left_m32(i32 *%pa, i32 %b) {
define void @rotate_right_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_right_m32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rorl %cl, (%rdi)
; CHECK-NEXT: retq
@@ -106,7 +106,7 @@ define void @rotate_right_m32(i32 *%pa, i32 %b) {
define void @rotate_left_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_left_m64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rolq %cl, (%rdi)
; CHECK-NEXT: retq
@@ -123,7 +123,7 @@ define void @rotate_left_m64(i64 *%pa, i64 %b) {
define void @rotate_right_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_right_m64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rorq %cl, (%rdi)
; CHECK-NEXT: retq
@@ -143,7 +143,7 @@ define void @rotate_right_m64(i64 *%pa, i64 %b) {
define i8 @rotate_left_8(i8 %x, i32 %amount) {
; CHECK-LABEL: rotate_left_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rolb %cl, %dil
; CHECK-NEXT: movl %edi, %eax
@@ -160,7 +160,7 @@ define i8 @rotate_left_8(i8 %x, i32 %amount) {
define i8 @rotate_right_8(i8 %x, i32 %amount) {
; CHECK-LABEL: rotate_right_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rorb %cl, %dil
; CHECK-NEXT: movl %edi, %eax
@@ -177,7 +177,7 @@ define i8 @rotate_right_8(i8 %x, i32 %amount) {
define i16 @rotate_left_16(i16 %x, i32 %amount) {
; CHECK-LABEL: rotate_left_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rolw %cl, %di
; CHECK-NEXT: movl %edi, %eax
@@ -194,7 +194,7 @@ define i16 @rotate_left_16(i16 %x, i32 %amount) {
define i16 @rotate_right_16(i16 %x, i32 %amount) {
; CHECK-LABEL: rotate_right_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rorw %cl, %di
; CHECK-NEXT: movl %edi, %eax
@@ -211,7 +211,7 @@ define i16 @rotate_right_16(i16 %x, i32 %amount) {
define void @rotate_left_m8(i8* %p, i32 %amount) {
; CHECK-LABEL: rotate_left_m8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rolb %cl, (%rdi)
; CHECK-NEXT: retq
@@ -229,7 +229,7 @@ define void @rotate_left_m8(i8* %p, i32 %amount) {
define void @rotate_right_m8(i8* %p, i32 %amount) {
; CHECK-LABEL: rotate_right_m8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rorb %cl, (%rdi)
; CHECK-NEXT: retq
@@ -247,7 +247,7 @@ define void @rotate_right_m8(i8* %p, i32 %amount) {
define void @rotate_left_m16(i16* %p, i32 %amount) {
; CHECK-LABEL: rotate_left_m16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rolw %cl, (%rdi)
; CHECK-NEXT: retq
@@ -265,7 +265,7 @@ define void @rotate_left_m16(i16* %p, i32 %amount) {
define void @rotate_right_m16(i16* %p, i32 %amount) {
; CHECK-LABEL: rotate_right_m16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: rorw %cl, (%rdi)
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/rotate_vec.ll b/test/CodeGen/X86/rotate_vec.ll
index 8fb000bae82..ed0c4717ea8 100644
--- a/test/CodeGen/X86/rotate_vec.ll
+++ b/test/CodeGen/X86/rotate_vec.ll
@@ -3,7 +3,7 @@
define <4 x i32> @rot_v4i32_splat(<4 x i32> %x) {
; CHECK-LABEL: rot_v4i32_splat:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotd $31, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
@@ -14,7 +14,7 @@ define <4 x i32> @rot_v4i32_splat(<4 x i32> %x) {
define <4 x i32> @rot_v4i32_non_splat(<4 x i32> %x) {
; CHECK-LABEL: rot_v4i32_non_splat:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
@@ -25,7 +25,7 @@ define <4 x i32> @rot_v4i32_non_splat(<4 x i32> %x) {
define <4 x i32> @rot_v4i32_splat_2masks(<4 x i32> %x) {
; CHECK-LABEL: rot_v4i32_splat_2masks:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotd $31, %xmm0, %xmm0
; CHECK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -40,7 +40,7 @@ define <4 x i32> @rot_v4i32_splat_2masks(<4 x i32> %x) {
define <4 x i32> @rot_v4i32_non_splat_2masks(<4 x i32> %x) {
; CHECK-LABEL: rot_v4i32_non_splat_2masks:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/rounding-ops.ll b/test/CodeGen/X86/rounding-ops.ll
index eec19410078..6e84635da29 100644
--- a/test/CodeGen/X86/rounding-ops.ll
+++ b/test/CodeGen/X86/rounding-ops.ll
@@ -5,17 +5,17 @@
define float @test1(float %x) nounwind {
; CHECK-SSE-LABEL: test1:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundss $9, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test1:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test1:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscaless $9, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call float @floorf(float %x) nounwind readnone
@@ -26,17 +26,17 @@ declare float @floorf(float) nounwind readnone
define double @test2(double %x) nounwind {
; CHECK-SSE-LABEL: test2:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundsd $9, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test2:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundsd $9, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test2:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscalesd $9, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call double @floor(double %x) nounwind readnone
@@ -47,17 +47,17 @@ declare double @floor(double) nounwind readnone
define float @test3(float %x) nounwind {
; CHECK-SSE-LABEL: test3:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundss $12, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test3:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test3:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscaless $12, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call float @nearbyintf(float %x) nounwind readnone
@@ -68,17 +68,17 @@ declare float @nearbyintf(float) nounwind readnone
define double @test4(double %x) nounwind {
; CHECK-SSE-LABEL: test4:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundsd $12, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test4:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test4:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscalesd $12, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call double @nearbyint(double %x) nounwind readnone
@@ -89,17 +89,17 @@ declare double @nearbyint(double) nounwind readnone
define float @test5(float %x) nounwind {
; CHECK-SSE-LABEL: test5:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundss $10, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test5:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test5:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscaless $10, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call float @ceilf(float %x) nounwind readnone
@@ -110,17 +110,17 @@ declare float @ceilf(float) nounwind readnone
define double @test6(double %x) nounwind {
; CHECK-SSE-LABEL: test6:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundsd $10, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test6:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundsd $10, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test6:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscalesd $10, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call double @ceil(double %x) nounwind readnone
@@ -131,17 +131,17 @@ declare double @ceil(double) nounwind readnone
define float @test7(float %x) nounwind {
; CHECK-SSE-LABEL: test7:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundss $4, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test7:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test7:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscaless $4, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call float @rintf(float %x) nounwind readnone
@@ -152,17 +152,17 @@ declare float @rintf(float) nounwind readnone
define double @test8(double %x) nounwind {
; CHECK-SSE-LABEL: test8:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundsd $4, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test8:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test8:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscalesd $4, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call double @rint(double %x) nounwind readnone
@@ -173,17 +173,17 @@ declare double @rint(double) nounwind readnone
define float @test9(float %x) nounwind {
; CHECK-SSE-LABEL: test9:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundss $11, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test9:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test9:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscaless $11, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call float @truncf(float %x) nounwind readnone
@@ -194,17 +194,17 @@ declare float @truncf(float) nounwind readnone
define double @test10(double %x) nounwind {
; CHECK-SSE-LABEL: test10:
-; CHECK-SSE: ## BB#0:
+; CHECK-SSE: ## %bb.0:
; CHECK-SSE-NEXT: roundsd $11, %xmm0, %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: test10:
-; CHECK-AVX: ## BB#0:
+; CHECK-AVX: ## %bb.0:
; CHECK-AVX-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
;
; CHECK-AVX512-LABEL: test10:
-; CHECK-AVX512: ## BB#0:
+; CHECK-AVX512: ## %bb.0:
; CHECK-AVX512-NEXT: vrndscalesd $11, %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT: retq
%call = tail call double @trunc(double %x) nounwind readnone
diff --git a/test/CodeGen/X86/rtm.ll b/test/CodeGen/X86/rtm.ll
index bd2d3e544bd..771e2344c00 100644
--- a/test/CodeGen/X86/rtm.ll
+++ b/test/CodeGen/X86/rtm.ll
@@ -9,18 +9,18 @@ declare void @f1()
define i32 @test_xbegin() nounwind uwtable {
; X86-LABEL: test_xbegin:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: xbegin .LBB0_2
-; X86-NEXT: # BB#1: # %entry
+; X86-NEXT: # %bb.1: # %entry
; X86-NEXT: movl $-1, %eax
; X86: .LBB0_2: # %entry
; X86-NEXT: # XABORT DEF
; X86-NEXT: retl
;
; X64-LABEL: test_xbegin:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xbegin .LBB0_2
-; X64-NEXT: # BB#1: # %entry
+; X64-NEXT: # %bb.1: # %entry
; X64-NEXT: movl $-1, %eax
; X64: .LBB0_2: # %entry
; X64-NEXT: # XABORT DEF
@@ -32,12 +32,12 @@ entry:
define void @test_xend() nounwind uwtable {
; X86-LABEL: test_xend:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: xend
; X86-NEXT: retl
;
; X64-LABEL: test_xend:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xend
; X64-NEXT: retq
entry:
@@ -47,12 +47,12 @@ entry:
define void @test_xabort() nounwind uwtable {
; X86-LABEL: test_xabort:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: xabort $2
; X86-NEXT: retl
;
; X64-LABEL: test_xabort:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xabort $2
; X64-NEXT: retq
entry:
@@ -62,13 +62,13 @@ entry:
define void @f2(i32 %x) nounwind uwtable {
; X86-LABEL: f2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: xabort $1
; X86-NEXT: calll f1
; X86-NEXT: retl
;
; X64-LABEL: f2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: movl %edi, {{[0-9]+}}(%rsp)
diff --git a/test/CodeGen/X86/sad.ll b/test/CodeGen/X86/sad.ll
index 27a220e7cd6..3524c4aab1d 100644
--- a/test/CodeGen/X86/sad.ll
+++ b/test/CodeGen/X86/sad.ll
@@ -9,7 +9,7 @@
define i32 @sad_16i8() nounwind {
; SSE2-LABEL: sad_16i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -22,7 +22,7 @@ define i32 @sad_16i8() nounwind {
; SSE2-NEXT: paddd %xmm3, %xmm1
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB0_1
-; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: # %bb.2: # %middle.block
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm0
; SSE2-NEXT: paddd %xmm1, %xmm0
@@ -34,7 +34,7 @@ define i32 @sad_16i8() nounwind {
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_16i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -46,7 +46,7 @@ define i32 @sad_16i8() nounwind {
; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: addq $4, %rax
; AVX2-NEXT: jne .LBB0_1
-; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: # %bb.2: # %middle.block
; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
@@ -58,7 +58,7 @@ define i32 @sad_16i8() nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad_16i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512F-NEXT: .p2align 4, 0x90
@@ -69,7 +69,7 @@ define i32 @sad_16i8() nounwind {
; AVX512F-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: addq $4, %rax
; AVX512F-NEXT: jne .LBB0_1
-; AVX512F-NEXT: # BB#2: # %middle.block
+; AVX512F-NEXT: # %bb.2: # %middle.block
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -83,7 +83,7 @@ define i32 @sad_16i8() nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_16i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512BW-NEXT: .p2align 4, 0x90
@@ -94,7 +94,7 @@ define i32 @sad_16i8() nounwind {
; AVX512BW-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: addq $4, %rax
; AVX512BW-NEXT: jne .LBB0_1
-; AVX512BW-NEXT: # BB#2: # %middle.block
+; AVX512BW-NEXT: # %bb.2: # %middle.block
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -145,7 +145,7 @@ middle.block:
define i32 @sad_32i8() nounwind {
; SSE2-LABEL: sad_32i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm12, %xmm12
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: pxor %xmm13, %xmm13
@@ -261,7 +261,7 @@ define i32 @sad_32i8() nounwind {
; SSE2-NEXT: paddd %xmm8, %xmm0
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB1_1
-; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: # %bb.2: # %middle.block
; SSE2-NEXT: paddd %xmm15, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm3
; SSE2-NEXT: paddd %xmm6, %xmm3
@@ -277,7 +277,7 @@ define i32 @sad_32i8() nounwind {
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_32i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -289,7 +289,7 @@ define i32 @sad_32i8() nounwind {
; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: addq $4, %rax
; AVX2-NEXT: jne .LBB1_1
-; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: # %bb.2: # %middle.block
; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
@@ -303,7 +303,7 @@ define i32 @sad_32i8() nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad_32i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -315,7 +315,7 @@ define i32 @sad_32i8() nounwind {
; AVX512F-NEXT: vpaddd %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: addq $4, %rax
; AVX512F-NEXT: jne .LBB1_1
-; AVX512F-NEXT: # BB#2: # %middle.block
+; AVX512F-NEXT: # %bb.2: # %middle.block
; AVX512F-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0
@@ -330,7 +330,7 @@ define i32 @sad_32i8() nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_32i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -342,7 +342,7 @@ define i32 @sad_32i8() nounwind {
; AVX512BW-NEXT: vpaddd %zmm1, %zmm2, %zmm1
; AVX512BW-NEXT: addq $4, %rax
; AVX512BW-NEXT: jne .LBB1_1
-; AVX512BW-NEXT: # BB#2: # %middle.block
+; AVX512BW-NEXT: # %bb.2: # %middle.block
; AVX512BW-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0
@@ -396,7 +396,7 @@ middle.block:
define i32 @sad_avx64i8() nounwind {
; SSE2-LABEL: sad_avx64i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: subq $200, %rsp
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
@@ -653,7 +653,7 @@ define i32 @sad_avx64i8() nounwind {
; SSE2-NEXT: paddd %xmm7, %xmm0
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB2_1
-; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: # %bb.2: # %middle.block
; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload
; SSE2-NEXT: paddd %xmm3, %xmm8
@@ -678,7 +678,7 @@ define i32 @sad_avx64i8() nounwind {
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_avx64i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -736,7 +736,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-NEXT: vpaddd %ymm4, %ymm8, %ymm4
; AVX2-NEXT: addq $4, %rax
; AVX2-NEXT: jne .LBB2_1
-; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: # %bb.2: # %middle.block
; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpaddd %ymm7, %ymm4, %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
@@ -754,7 +754,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad_avx64i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -785,7 +785,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX512F-NEXT: vpaddd %zmm3, %zmm4, %zmm3
; AVX512F-NEXT: addq $4, %rax
; AVX512F-NEXT: jne .LBB2_1
-; AVX512F-NEXT: # BB#2: # %middle.block
+; AVX512F-NEXT: # %bb.2: # %middle.block
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0
@@ -802,7 +802,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_avx64i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -814,7 +814,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX512BW-NEXT: vpaddd %zmm1, %zmm2, %zmm1
; AVX512BW-NEXT: addq $4, %rax
; AVX512BW-NEXT: jne .LBB2_1
-; AVX512BW-NEXT: # BB#2: # %middle.block
+; AVX512BW-NEXT: # %bb.2: # %middle.block
; AVX512BW-NEXT: vpaddd %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddd %zmm0, %zmm1, %zmm0
@@ -872,7 +872,7 @@ middle.block:
define i32 @sad_2i8() nounwind {
; SSE2-LABEL: sad_2i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: movl $65535, %ecx # imm = 0xFFFF
@@ -888,14 +888,14 @@ define i32 @sad_2i8() nounwind {
; SSE2-NEXT: paddq %xmm2, %xmm0
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB3_1
-; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: # %bb.2: # %middle.block
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: paddq %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_2i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -910,14 +910,14 @@ define i32 @sad_2i8() nounwind {
; AVX2-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX2-NEXT: addq $4, %rax
; AVX2-NEXT: jne .LBB3_1
-; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: # %bb.2: # %middle.block
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; AVX2-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad_2i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -932,14 +932,14 @@ define i32 @sad_2i8() nounwind {
; AVX512F-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512F-NEXT: addq $4, %rax
; AVX512F-NEXT: jne .LBB3_1
-; AVX512F-NEXT: # BB#2: # %middle.block
+; AVX512F-NEXT: # %bb.2: # %middle.block
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vmovd %xmm0, %eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_2i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -954,7 +954,7 @@ define i32 @sad_2i8() nounwind {
; AVX512BW-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512BW-NEXT: addq $4, %rax
; AVX512BW-NEXT: jne .LBB3_1
-; AVX512BW-NEXT: # BB#2: # %middle.block
+; AVX512BW-NEXT: # %bb.2: # %middle.block
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; AVX512BW-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
@@ -992,7 +992,7 @@ middle.block:
define i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
; SSE2-LABEL: sad_nonloop_4i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -1000,7 +1000,7 @@ define i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x i8>* noca
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_nonloop_4i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -1008,7 +1008,7 @@ define i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x i8>* noca
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad_nonloop_4i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -1016,7 +1016,7 @@ define i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x i8>* noca
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_nonloop_4i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX512BW-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -1040,7 +1040,7 @@ define i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x i8>* noca
define i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
; SSE2-LABEL: sad_nonloop_8i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -1048,7 +1048,7 @@ define i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x i8>* noca
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_nonloop_8i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -1056,7 +1056,7 @@ define i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x i8>* noca
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad_nonloop_8i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -1064,7 +1064,7 @@ define i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x i8>* noca
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_nonloop_8i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512BW-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -1090,7 +1090,7 @@ define i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x i8>* noca
define i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
; SSE2-LABEL: sad_nonloop_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movdqu (%rdx), %xmm1
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -1100,7 +1100,7 @@ define i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* n
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_nonloop_16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %xmm0
; AVX2-NEXT: vpsadbw (%rdx), %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1109,7 +1109,7 @@ define i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* n
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad_nonloop_16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqu (%rdi), %xmm0
; AVX512F-NEXT: vpsadbw (%rdx), %xmm0, %xmm0
; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1118,7 +1118,7 @@ define i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* n
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_nonloop_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqu (%rdi), %xmm0
; AVX512BW-NEXT: vpsadbw (%rdx), %xmm0, %xmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1147,7 +1147,7 @@ define i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* n
define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
; SSE2-LABEL: sad_nonloop_32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movdqu 16(%rdi), %xmm12
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -1244,7 +1244,7 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_nonloop_32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vpsadbw (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1256,7 +1256,7 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad_nonloop_32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqu (%rdi), %ymm0
; AVX512F-NEXT: vpsadbw (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1268,7 +1268,7 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_nonloop_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqu (%rdi), %ymm0
; AVX512BW-NEXT: vpsadbw (%rdx), %ymm0, %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/sad_variations.ll b/test/CodeGen/X86/sad_variations.ll
index 04fda5ed877..cea86091a2b 100644
--- a/test/CodeGen/X86/sad_variations.ll
+++ b/test/CodeGen/X86/sad_variations.ll
@@ -5,7 +5,7 @@
define i32 @sad8_32bit_icmp_sge(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #0 {
; SSE2-LABEL: sad8_32bit_icmp_sge:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -13,7 +13,7 @@ define i32 @sad8_32bit_icmp_sge(i8* nocapture readonly %cur, i8* nocapture reado
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad8_32bit_icmp_sge:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -21,7 +21,7 @@ define i32 @sad8_32bit_icmp_sge(i8* nocapture readonly %cur, i8* nocapture reado
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad8_32bit_icmp_sge:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -55,7 +55,7 @@ for.body: ; preds = %entry
define i32 @sad8_32bit_icmp_sgt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #1 {
; SSE2-LABEL: sad8_32bit_icmp_sgt:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -63,7 +63,7 @@ define i32 @sad8_32bit_icmp_sgt(i8* nocapture readonly %cur, i8* nocapture reado
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad8_32bit_icmp_sgt:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -71,7 +71,7 @@ define i32 @sad8_32bit_icmp_sgt(i8* nocapture readonly %cur, i8* nocapture reado
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad8_32bit_icmp_sgt:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -104,7 +104,7 @@ for.body: ; preds = %entry
define i32 @sad8_32bit_icmp_sle(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #2 {
; SSE2-LABEL: sad8_32bit_icmp_sle:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -112,7 +112,7 @@ define i32 @sad8_32bit_icmp_sle(i8* nocapture readonly %cur, i8* nocapture reado
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad8_32bit_icmp_sle:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -120,7 +120,7 @@ define i32 @sad8_32bit_icmp_sle(i8* nocapture readonly %cur, i8* nocapture reado
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad8_32bit_icmp_sle:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -153,7 +153,7 @@ for.body: ; preds = %entry
define i32 @sad8_32bit_icmp_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #3 {
; SSE2-LABEL: sad8_32bit_icmp_slt:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -161,7 +161,7 @@ define i32 @sad8_32bit_icmp_slt(i8* nocapture readonly %cur, i8* nocapture reado
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad8_32bit_icmp_slt:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -169,7 +169,7 @@ define i32 @sad8_32bit_icmp_slt(i8* nocapture readonly %cur, i8* nocapture reado
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad8_32bit_icmp_slt:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -202,7 +202,7 @@ for.body: ; preds = %entry
define i64 @sad8_64bit_icmp_sext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
; SSE2-LABEL: sad8_64bit_icmp_sext_slt:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -210,7 +210,7 @@ define i64 @sad8_64bit_icmp_sext_slt(i8* nocapture readonly %cur, i8* nocapture
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad8_64bit_icmp_sext_slt:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -218,7 +218,7 @@ define i64 @sad8_64bit_icmp_sext_slt(i8* nocapture readonly %cur, i8* nocapture
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad8_64bit_icmp_sext_slt:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -251,7 +251,7 @@ for.body: ; preds = %entry
define i64 @sad8_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
; SSE2-LABEL: sad8_64bit_icmp_zext_slt:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -259,7 +259,7 @@ define i64 @sad8_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* nocapture
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad8_64bit_icmp_zext_slt:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -267,7 +267,7 @@ define i64 @sad8_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* nocapture
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad8_64bit_icmp_zext_slt:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -300,7 +300,7 @@ for.body: ; preds = %entry
define i64 @sad8_early_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
; SSE2-LABEL: sad8_early_64bit_icmp_zext_slt:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: psadbw %xmm0, %xmm1
@@ -308,7 +308,7 @@ define i64 @sad8_early_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* noca
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad8_early_64bit_icmp_zext_slt:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
@@ -316,7 +316,7 @@ define i64 @sad8_early_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* noca
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sad8_early_64bit_icmp_zext_slt:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
diff --git a/test/CodeGen/X86/sandybridge-loads.ll b/test/CodeGen/X86/sandybridge-loads.ll
index 8570fe7fe7b..7e6272998f3 100644
--- a/test/CodeGen/X86/sandybridge-loads.ll
+++ b/test/CodeGen/X86/sandybridge-loads.ll
@@ -3,7 +3,7 @@
define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; CHECK-LABEL: wideloads:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %xmm0
; CHECK-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; CHECK-NEXT: vmovaps (%rsi), %ymm1
@@ -28,7 +28,7 @@ define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
define void @widestores(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; CHECK-LABEL: widestores:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vmovaps (%rsi), %ymm1
; CHECK-NEXT: vmovaps %ymm0, (%rsi)
diff --git a/test/CodeGen/X86/sar_fold.ll b/test/CodeGen/X86/sar_fold.ll
index bd0d0c7057d..195d0745b3f 100644
--- a/test/CodeGen/X86/sar_fold.ll
+++ b/test/CodeGen/X86/sar_fold.ll
@@ -2,7 +2,7 @@
define i32 @shl16sar15(i32 %a) #0 {
; CHECK-LABEL: shl16sar15:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movswl {{[0-9]+}}(%esp), %eax
%1 = shl i32 %a, 16
%2 = ashr exact i32 %1, 15
@@ -11,7 +11,7 @@ define i32 @shl16sar15(i32 %a) #0 {
define i32 @shl16sar17(i32 %a) #0 {
; CHECK-LABEL: shl16sar17:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movswl {{[0-9]+}}(%esp), %eax
%1 = shl i32 %a, 16
%2 = ashr exact i32 %1, 17
@@ -20,7 +20,7 @@ define i32 @shl16sar17(i32 %a) #0 {
define i32 @shl24sar23(i32 %a) #0 {
; CHECK-LABEL: shl24sar23:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsbl {{[0-9]+}}(%esp), %eax
%1 = shl i32 %a, 24
%2 = ashr exact i32 %1, 23
@@ -29,7 +29,7 @@ define i32 @shl24sar23(i32 %a) #0 {
define i32 @shl24sar25(i32 %a) #0 {
; CHECK-LABEL: shl24sar25:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsbl {{[0-9]+}}(%esp), %eax
%1 = shl i32 %a, 24
%2 = ashr exact i32 %1, 25
diff --git a/test/CodeGen/X86/sar_fold64.ll b/test/CodeGen/X86/sar_fold64.ll
index 7bea518162a..447f021442d 100644
--- a/test/CodeGen/X86/sar_fold64.ll
+++ b/test/CodeGen/X86/sar_fold64.ll
@@ -3,7 +3,7 @@
define i32 @shl48sar47(i64 %a) #0 {
; CHECK-LABEL: shl48sar47:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movswq %di, %rax
; CHECK-NEXT: addl %eax, %eax
; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
@@ -16,7 +16,7 @@ define i32 @shl48sar47(i64 %a) #0 {
define i32 @shl48sar49(i64 %a) #0 {
; CHECK-LABEL: shl48sar49:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movswq %di, %rax
; CHECK-NEXT: shrq %rax
; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
@@ -29,7 +29,7 @@ define i32 @shl48sar49(i64 %a) #0 {
define i32 @shl56sar55(i64 %a) #0 {
; CHECK-LABEL: shl56sar55:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsbq %dil, %rax
; CHECK-NEXT: addl %eax, %eax
; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
@@ -42,7 +42,7 @@ define i32 @shl56sar55(i64 %a) #0 {
define i32 @shl56sar57(i64 %a) #0 {
; CHECK-LABEL: shl56sar57:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsbq %dil, %rax
; CHECK-NEXT: shrq %rax
; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
@@ -55,7 +55,7 @@ define i32 @shl56sar57(i64 %a) #0 {
define i8 @all_sign_bit_ashr(i8 %x) {
; CHECK-LABEL: all_sign_bit_ashr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andb $1, %dil
; CHECK-NEXT: negb %dil
; CHECK-NEXT: movl %edi, %eax
@@ -68,7 +68,7 @@ define i8 @all_sign_bit_ashr(i8 %x) {
define <4 x i32> @all_sign_bit_ashr_vec(<4 x i32> %x) {
; CHECK-LABEL: all_sign_bit_ashr_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: psubd %xmm0, %xmm1
diff --git a/test/CodeGen/X86/sbb.ll b/test/CodeGen/X86/sbb.ll
index 7429c0777a4..bd4a62f2169 100644
--- a/test/CodeGen/X86/sbb.ll
+++ b/test/CodeGen/X86/sbb.ll
@@ -7,7 +7,7 @@
define i8 @i8_select_0_or_neg1(i8 %x) {
; CHECK-LABEL: i8_select_0_or_neg1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: negb %dil
; CHECK-NEXT: sbbb %al, %al
; CHECK-NEXT: retq
@@ -20,7 +20,7 @@ define i8 @i8_select_0_or_neg1(i8 %x) {
define i16 @i16_select_0_or_neg1_as_math(i16 %x) {
; CHECK-LABEL: i16_select_0_or_neg1_as_math:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: negw %di
; CHECK-NEXT: sbbw %ax, %ax
; CHECK-NEXT: retq
@@ -34,7 +34,7 @@ define i16 @i16_select_0_or_neg1_as_math(i16 %x) {
define i32 @i32_select_0_or_neg1_commuted(i32 %x) {
; CHECK-LABEL: i32_select_0_or_neg1_commuted:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: negl %edi
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
@@ -47,7 +47,7 @@ define i32 @i32_select_0_or_neg1_commuted(i32 %x) {
define i64 @i64_select_0_or_neg1_commuted_as_math(i64 %x) {
; CHECK-LABEL: i64_select_0_or_neg1_commuted_as_math:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: negq %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: retq
@@ -61,7 +61,7 @@ define i64 @i64_select_0_or_neg1_commuted_as_math(i64 %x) {
define i64 @i64_select_neg1_or_0(i64 %x) {
; CHECK-LABEL: i64_select_neg1_or_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: retq
@@ -74,7 +74,7 @@ define i64 @i64_select_neg1_or_0(i64 %x) {
define i32 @i32_select_neg1_or_0_as_math(i32 %x) {
; CHECK-LABEL: i32_select_neg1_or_0_as_math:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
@@ -88,7 +88,7 @@ define i32 @i32_select_neg1_or_0_as_math(i32 %x) {
define i16 @i16_select_neg1_or_0_commuted(i16 %x) {
; CHECK-LABEL: i16_select_neg1_or_0_commuted:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpw $1, %di
; CHECK-NEXT: sbbw %ax, %ax
; CHECK-NEXT: retq
@@ -101,7 +101,7 @@ define i16 @i16_select_neg1_or_0_commuted(i16 %x) {
define i8 @i8_select_neg1_or_0_commuted_as_math(i8 %x) {
; CHECK-LABEL: i8_select_neg1_or_0_commuted_as_math:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpb $1, %dil
; CHECK-NEXT: sbbb %al, %al
; CHECK-NEXT: retq
@@ -115,7 +115,7 @@ define i8 @i8_select_neg1_or_0_commuted_as_math(i8 %x) {
define i32 @ult_select_neg1_or_0(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ult_select_neg1_or_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
@@ -129,7 +129,7 @@ define i32 @ult_select_neg1_or_0(i32 %x, i32 %y) nounwind {
define i32 @ugt_select_neg1_or_0(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ugt_select_neg1_or_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
@@ -143,7 +143,7 @@ define i32 @ugt_select_neg1_or_0(i32 %x, i32 %y) nounwind {
define i32 @uge_select_0_or_neg1(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: uge_select_0_or_neg1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
@@ -158,7 +158,7 @@ define i32 @uge_select_0_or_neg1(i32 %x, i32 %y) nounwind {
define i32 @ule_select_0_or_neg1(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ule_select_0_or_neg1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
@@ -173,7 +173,7 @@ define i32 @ule_select_0_or_neg1(i32 %x, i32 %y) nounwind {
define i32 @uge_select_0_or_neg1_sub(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: uge_select_0_or_neg1_sub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: sbbl %eax, %eax
; CHECK-NEXT: retq
@@ -188,7 +188,7 @@ define i32 @uge_select_0_or_neg1_sub(i32 %x, i32 %y) nounwind {
define i64 @ugt_select_neg1_or_0_sub(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: ugt_select_neg1_or_0_sub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpq %rdi, %rsi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: retq
@@ -203,7 +203,7 @@ define i64 @ugt_select_neg1_or_0_sub(i64 %x, i64 %y) nounwind {
define i16 @ult_select_neg1_or_0_sub(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: ult_select_neg1_or_0_sub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpw %di, %si
; CHECK-NEXT: sbbw %ax, %ax
; CHECK-NEXT: retq
@@ -220,7 +220,7 @@ define i16 @ult_select_neg1_or_0_sub(i16 %x, i16 %y) nounwind {
define void @PR33560(i8 %x, i64 %y) {
; CHECK-LABEL: PR33560:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: negb %dil
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: cmpq %rsi, %rax
diff --git a/test/CodeGen/X86/scalar-int-to-fp.ll b/test/CodeGen/X86/scalar-int-to-fp.ll
index ad1c2d49d23..66cc628ad5e 100644
--- a/test/CodeGen/X86/scalar-int-to-fp.ll
+++ b/test/CodeGen/X86/scalar-int-to-fp.ll
@@ -11,7 +11,7 @@
define float @u32_to_f(i32 %a) nounwind {
; AVX512_32-LABEL: u32_to_f:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %eax
; AVX512_32-NEXT: vcvtusi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512_32-NEXT: vmovss %xmm0, (%esp)
@@ -20,12 +20,12 @@ define float @u32_to_f(i32 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: u32_to_f:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvtusi2ssl %edi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: u32_to_f:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %eax
; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -39,13 +39,13 @@ define float @u32_to_f(i32 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: u32_to_f:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: movl %edi, %eax
; SSE2_64-NEXT: cvtsi2ssq %rax, %xmm0
; SSE2_64-NEXT: retq
;
; X87-LABEL: u32_to_f:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %ebp
; X87-NEXT: movl %esp, %ebp
; X87-NEXT: andl $-8, %esp
@@ -63,7 +63,7 @@ define float @u32_to_f(i32 %a) nounwind {
define float @s32_to_f(i32 %a) nounwind {
; AVX512_32-LABEL: s32_to_f:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %eax
; AVX512_32-NEXT: vcvtsi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512_32-NEXT: vmovss %xmm0, (%esp)
@@ -72,12 +72,12 @@ define float @s32_to_f(i32 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: s32_to_f:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: s32_to_f:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %eax
; SSE2_32-NEXT: cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
; SSE2_32-NEXT: movss %xmm0, (%esp)
@@ -86,12 +86,12 @@ define float @s32_to_f(i32 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: s32_to_f:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: cvtsi2ssl %edi, %xmm0
; SSE2_64-NEXT: retq
;
; X87-LABEL: s32_to_f:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %eax
; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87-NEXT: movl %eax, (%esp)
@@ -104,7 +104,7 @@ define float @s32_to_f(i32 %a) nounwind {
define double @u32_to_d(i32 %a) nounwind {
; AVX512_32-LABEL: u32_to_d:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %ebp
; AVX512_32-NEXT: movl %esp, %ebp
; AVX512_32-NEXT: andl $-8, %esp
@@ -117,12 +117,12 @@ define double @u32_to_d(i32 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: u32_to_d:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvtusi2sdl %edi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: u32_to_d:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %ebp
; SSE2_32-NEXT: movl %esp, %ebp
; SSE2_32-NEXT: andl $-8, %esp
@@ -138,13 +138,13 @@ define double @u32_to_d(i32 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: u32_to_d:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: movl %edi, %eax
; SSE2_64-NEXT: cvtsi2sdq %rax, %xmm0
; SSE2_64-NEXT: retq
;
; X87-LABEL: u32_to_d:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %ebp
; X87-NEXT: movl %esp, %ebp
; X87-NEXT: andl $-8, %esp
@@ -162,7 +162,7 @@ define double @u32_to_d(i32 %a) nounwind {
define double @s32_to_d(i32 %a) nounwind {
; AVX512_32-LABEL: s32_to_d:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %ebp
; AVX512_32-NEXT: movl %esp, %ebp
; AVX512_32-NEXT: andl $-8, %esp
@@ -175,12 +175,12 @@ define double @s32_to_d(i32 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: s32_to_d:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: s32_to_d:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %ebp
; SSE2_32-NEXT: movl %esp, %ebp
; SSE2_32-NEXT: andl $-8, %esp
@@ -193,12 +193,12 @@ define double @s32_to_d(i32 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: s32_to_d:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: cvtsi2sdl %edi, %xmm0
; SSE2_64-NEXT: retq
;
; X87-LABEL: s32_to_d:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %eax
; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
; X87-NEXT: movl %eax, (%esp)
@@ -211,7 +211,7 @@ define double @s32_to_d(i32 %a) nounwind {
define x86_fp80 @u32_to_x(i32 %a) nounwind {
; AVX512_32-LABEL: u32_to_x:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %ebp
; AVX512_32-NEXT: movl %esp, %ebp
; AVX512_32-NEXT: andl $-8, %esp
@@ -227,7 +227,7 @@ define x86_fp80 @u32_to_x(i32 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: u32_to_x:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512_64-NEXT: vmovd %edi, %xmm1
; AVX512_64-NEXT: vpor %xmm0, %xmm1, %xmm1
@@ -237,7 +237,7 @@ define x86_fp80 @u32_to_x(i32 %a) nounwind {
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: u32_to_x:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %ebp
; SSE2_32-NEXT: movl %esp, %ebp
; SSE2_32-NEXT: andl $-8, %esp
@@ -253,14 +253,14 @@ define x86_fp80 @u32_to_x(i32 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: u32_to_x:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: movl %edi, %eax
; SSE2_64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; SSE2_64-NEXT: fildll -{{[0-9]+}}(%rsp)
; SSE2_64-NEXT: retq
;
; X87-LABEL: u32_to_x:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %ebp
; X87-NEXT: movl %esp, %ebp
; X87-NEXT: andl $-8, %esp
@@ -278,7 +278,7 @@ define x86_fp80 @u32_to_x(i32 %a) nounwind {
define x86_fp80 @s32_to_x(i32 %a) nounwind {
; CHECK32-LABEL: s32_to_x:
-; CHECK32: # BB#0:
+; CHECK32: # %bb.0:
; CHECK32-NEXT: pushl %eax
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movl %eax, (%esp)
@@ -287,7 +287,7 @@ define x86_fp80 @s32_to_x(i32 %a) nounwind {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: s32_to_x:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movl %edi, -{{[0-9]+}}(%rsp)
; CHECK64-NEXT: fildl -{{[0-9]+}}(%rsp)
; CHECK64-NEXT: retq
@@ -297,7 +297,7 @@ define x86_fp80 @s32_to_x(i32 %a) nounwind {
define float @u64_to_f(i64 %a) nounwind {
; AVX512_32-LABEL: u64_to_f:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %ebp
; AVX512_32-NEXT: movl %esp, %ebp
; AVX512_32-NEXT: andl $-8, %esp
@@ -318,12 +318,12 @@ define float @u64_to_f(i64 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: u64_to_f:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvtusi2ssq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: u64_to_f:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %ebp
; SSE2_32-NEXT: movl %esp, %ebp
; SSE2_32-NEXT: andl $-8, %esp
@@ -344,10 +344,10 @@ define float @u64_to_f(i64 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: u64_to_f:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: testq %rdi, %rdi
; SSE2_64-NEXT: js .LBB6_1
-; SSE2_64-NEXT: # BB#2:
+; SSE2_64-NEXT: # %bb.2:
; SSE2_64-NEXT: cvtsi2ssq %rdi, %xmm0
; SSE2_64-NEXT: retq
; SSE2_64-NEXT: .LBB6_1:
@@ -360,7 +360,7 @@ define float @u64_to_f(i64 %a) nounwind {
; SSE2_64-NEXT: retq
;
; X87-LABEL: u64_to_f:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %ebp
; X87-NEXT: movl %esp, %ebp
; X87-NEXT: andl $-8, %esp
@@ -385,7 +385,7 @@ define float @u64_to_f(i64 %a) nounwind {
define float @s64_to_f(i64 %a) nounwind {
; AVX512_32-LABEL: s64_to_f:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %eax
; AVX512_32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX512_32-NEXT: fstps (%esp)
@@ -394,12 +394,12 @@ define float @s64_to_f(i64 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: s64_to_f:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: s64_to_f:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %eax
; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE2_32-NEXT: fstps (%esp)
@@ -408,12 +408,12 @@ define float @s64_to_f(i64 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: s64_to_f:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: cvtsi2ssq %rdi, %xmm0
; SSE2_64-NEXT: retq
;
; X87-LABEL: s64_to_f:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: fildll {{[0-9]+}}(%esp)
; X87-NEXT: retl
%r = sitofp i64 %a to float
@@ -422,7 +422,7 @@ define float @s64_to_f(i64 %a) nounwind {
define float @s64_to_f_2(i64 %a) nounwind {
; AVX512_32-LABEL: s64_to_f_2:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %ebp
; AVX512_32-NEXT: movl %esp, %ebp
; AVX512_32-NEXT: andl $-8, %esp
@@ -442,13 +442,13 @@ define float @s64_to_f_2(i64 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: s64_to_f_2:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: addq $5, %rdi
; AVX512_64-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: s64_to_f_2:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %ebp
; SSE2_32-NEXT: movl %esp, %ebp
; SSE2_32-NEXT: andl $-8, %esp
@@ -469,13 +469,13 @@ define float @s64_to_f_2(i64 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: s64_to_f_2:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: addq $5, %rdi
; SSE2_64-NEXT: cvtsi2ssq %rdi, %xmm0
; SSE2_64-NEXT: retq
;
; X87-LABEL: s64_to_f_2:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %ebp
; X87-NEXT: movl %esp, %ebp
; X87-NEXT: andl $-8, %esp
@@ -497,7 +497,7 @@ define float @s64_to_f_2(i64 %a) nounwind {
define double @u64_to_d(i64 %a) nounwind {
; AVX512_32-LABEL: u64_to_d:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %ebp
; AVX512_32-NEXT: movl %esp, %ebp
; AVX512_32-NEXT: andl $-8, %esp
@@ -513,12 +513,12 @@ define double @u64_to_d(i64 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: u64_to_d:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvtusi2sdq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: u64_to_d:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %ebp
; SSE2_32-NEXT: movl %esp, %ebp
; SSE2_32-NEXT: andl $-8, %esp
@@ -535,7 +535,7 @@ define double @u64_to_d(i64 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: u64_to_d:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: movq %rdi, %xmm1
; SSE2_64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; SSE2_64-NEXT: subpd {{.*}}(%rip), %xmm1
@@ -544,7 +544,7 @@ define double @u64_to_d(i64 %a) nounwind {
; SSE2_64-NEXT: retq
;
; X87-LABEL: u64_to_d:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %ebp
; X87-NEXT: movl %esp, %ebp
; X87-NEXT: andl $-8, %esp
@@ -569,7 +569,7 @@ define double @u64_to_d(i64 %a) nounwind {
define double @s64_to_d(i64 %a) nounwind {
; AVX512_32-LABEL: s64_to_d:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %ebp
; AVX512_32-NEXT: movl %esp, %ebp
; AVX512_32-NEXT: andl $-8, %esp
@@ -582,12 +582,12 @@ define double @s64_to_d(i64 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: s64_to_d:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: s64_to_d:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %ebp
; SSE2_32-NEXT: movl %esp, %ebp
; SSE2_32-NEXT: andl $-8, %esp
@@ -600,12 +600,12 @@ define double @s64_to_d(i64 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: s64_to_d:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: cvtsi2sdq %rdi, %xmm0
; SSE2_64-NEXT: retq
;
; X87-LABEL: s64_to_d:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: fildll {{[0-9]+}}(%esp)
; X87-NEXT: retl
%r = sitofp i64 %a to double
@@ -614,7 +614,7 @@ define double @s64_to_d(i64 %a) nounwind {
define double @s64_to_d_2(i64 %a) nounwind {
; AVX512_32-LABEL: s64_to_d_2:
-; AVX512_32: # BB#0:
+; AVX512_32: # %bb.0:
; AVX512_32-NEXT: pushl %ebp
; AVX512_32-NEXT: movl %esp, %ebp
; AVX512_32-NEXT: andl $-8, %esp
@@ -634,13 +634,13 @@ define double @s64_to_d_2(i64 %a) nounwind {
; AVX512_32-NEXT: retl
;
; AVX512_64-LABEL: s64_to_d_2:
-; AVX512_64: # BB#0:
+; AVX512_64: # %bb.0:
; AVX512_64-NEXT: addq $5, %rdi
; AVX512_64-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT: retq
;
; SSE2_32-LABEL: s64_to_d_2:
-; SSE2_32: # BB#0:
+; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %ebp
; SSE2_32-NEXT: movl %esp, %ebp
; SSE2_32-NEXT: andl $-8, %esp
@@ -661,13 +661,13 @@ define double @s64_to_d_2(i64 %a) nounwind {
; SSE2_32-NEXT: retl
;
; SSE2_64-LABEL: s64_to_d_2:
-; SSE2_64: # BB#0:
+; SSE2_64: # %bb.0:
; SSE2_64-NEXT: addq $5, %rdi
; SSE2_64-NEXT: cvtsi2sdq %rdi, %xmm0
; SSE2_64-NEXT: retq
;
; X87-LABEL: s64_to_d_2:
-; X87: # BB#0:
+; X87: # %bb.0:
; X87-NEXT: pushl %ebp
; X87-NEXT: movl %esp, %ebp
; X87-NEXT: andl $-8, %esp
@@ -689,7 +689,7 @@ define double @s64_to_d_2(i64 %a) nounwind {
define x86_fp80 @u64_to_x(i64 %a) nounwind {
; CHECK32-LABEL: u64_to_x:
-; CHECK32: # BB#0:
+; CHECK32: # %bb.0:
; CHECK32-NEXT: pushl %ebp
; CHECK32-NEXT: movl %esp, %ebp
; CHECK32-NEXT: andl $-8, %esp
@@ -708,7 +708,7 @@ define x86_fp80 @u64_to_x(i64 %a) nounwind {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: u64_to_x:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; CHECK64-NEXT: xorl %eax, %eax
; CHECK64-NEXT: testq %rdi, %rdi
@@ -722,12 +722,12 @@ define x86_fp80 @u64_to_x(i64 %a) nounwind {
define x86_fp80 @s64_to_x(i64 %a) nounwind {
; CHECK32-LABEL: s64_to_x:
-; CHECK32: # BB#0:
+; CHECK32: # %bb.0:
; CHECK32-NEXT: fildll {{[0-9]+}}(%esp)
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: s64_to_x:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; CHECK64-NEXT: fildll -{{[0-9]+}}(%rsp)
; CHECK64-NEXT: retq
diff --git a/test/CodeGen/X86/scatter-schedule.ll b/test/CodeGen/X86/scatter-schedule.ll
index 3b26a7c23a8..c7e6628ab2d 100644
--- a/test/CodeGen/X86/scatter-schedule.ll
+++ b/test/CodeGen/X86/scatter-schedule.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @test(i64 %x272, <16 x i32*> %x335, <16 x i32> %x270) {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterqd %ymm2, (,%zmm0) {%k2}
diff --git a/test/CodeGen/X86/schedule-x86_32.ll b/test/CodeGen/X86/schedule-x86_32.ll
index 770dddf09bb..bfe342493cf 100644
--- a/test/CodeGen/X86/schedule-x86_32.ll
+++ b/test/CodeGen/X86/schedule-x86_32.ll
@@ -13,7 +13,7 @@
define i8 @test_aaa(i8 %a0) optsize {
; GENERIC-LABEL: test_aaa:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movb {{[0-9]+}}(%esp), %al
; GENERIC-NEXT: #APP
; GENERIC-NEXT: aaa
@@ -21,7 +21,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_aaa:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: aaa # sched: [13:6.50]
@@ -29,7 +29,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_aaa:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: aaa # sched: [100:1.00]
@@ -37,7 +37,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_aaa:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: aaa # sched: [100:0.33]
@@ -45,7 +45,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_aaa:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: aaa # sched: [100:0.25]
@@ -53,7 +53,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_aaa:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: aaa # sched: [100:0.25]
@@ -61,7 +61,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_aaa:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: aaa # sched: [100:0.25]
@@ -69,7 +69,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_aaa:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: aaa # sched: [100:0.25]
@@ -77,7 +77,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_aaa:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: aaa # sched: [100:0.17]
@@ -85,7 +85,7 @@ define i8 @test_aaa(i8 %a0) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aaa:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: aaa # sched: [100:?]
@@ -97,7 +97,7 @@ define i8 @test_aaa(i8 %a0) optsize {
define i8 @test_aad(i16 %a0) optsize {
; GENERIC-LABEL: test_aad:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: aad
@@ -105,7 +105,7 @@ define i8 @test_aad(i16 %a0) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_aad:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: aad # sched: [7:3.50]
@@ -113,7 +113,7 @@ define i8 @test_aad(i16 %a0) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_aad:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [4:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: aad # sched: [100:1.00]
@@ -121,7 +121,7 @@ define i8 @test_aad(i16 %a0) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_aad:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: aad # sched: [100:0.33]
@@ -129,7 +129,7 @@ define i8 @test_aad(i16 %a0) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_aad:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [4:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: aad # sched: [100:0.25]
@@ -137,7 +137,7 @@ define i8 @test_aad(i16 %a0) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_aad:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: aad # sched: [100:0.25]
@@ -145,7 +145,7 @@ define i8 @test_aad(i16 %a0) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_aad:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: aad # sched: [100:0.25]
@@ -153,7 +153,7 @@ define i8 @test_aad(i16 %a0) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_aad:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: aad # sched: [100:0.25]
@@ -161,7 +161,7 @@ define i8 @test_aad(i16 %a0) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_aad:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [4:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: aad # sched: [100:0.17]
@@ -169,7 +169,7 @@ define i8 @test_aad(i16 %a0) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aad:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: aad # sched: [100:?]
@@ -181,7 +181,7 @@ define i8 @test_aad(i16 %a0) optsize {
define i16 @test_aam(i8 %a0) optsize {
; GENERIC-LABEL: test_aam:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movb {{[0-9]+}}(%esp), %al
; GENERIC-NEXT: #APP
; GENERIC-NEXT: aam
@@ -189,7 +189,7 @@ define i16 @test_aam(i8 %a0) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_aam:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: aam # sched: [21:10.50]
@@ -197,7 +197,7 @@ define i16 @test_aam(i8 %a0) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_aam:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: aam # sched: [100:1.00]
@@ -205,7 +205,7 @@ define i16 @test_aam(i8 %a0) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_aam:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: aam # sched: [100:0.33]
@@ -213,7 +213,7 @@ define i16 @test_aam(i8 %a0) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_aam:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: aam # sched: [100:0.25]
@@ -221,7 +221,7 @@ define i16 @test_aam(i8 %a0) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_aam:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: aam # sched: [100:0.25]
@@ -229,7 +229,7 @@ define i16 @test_aam(i8 %a0) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_aam:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: aam # sched: [100:0.25]
@@ -237,7 +237,7 @@ define i16 @test_aam(i8 %a0) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_aam:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: aam # sched: [100:0.25]
@@ -245,7 +245,7 @@ define i16 @test_aam(i8 %a0) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_aam:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: aam # sched: [100:0.17]
@@ -253,7 +253,7 @@ define i16 @test_aam(i8 %a0) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aam:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: aam # sched: [100:?]
@@ -265,7 +265,7 @@ define i16 @test_aam(i8 %a0) optsize {
define i8 @test_aas(i8 %a0) optsize {
; GENERIC-LABEL: test_aas:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movb {{[0-9]+}}(%esp), %al
; GENERIC-NEXT: #APP
; GENERIC-NEXT: aas
@@ -273,7 +273,7 @@ define i8 @test_aas(i8 %a0) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_aas:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: aas # sched: [13:6.50]
@@ -281,7 +281,7 @@ define i8 @test_aas(i8 %a0) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_aas:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: aas # sched: [100:1.00]
@@ -289,7 +289,7 @@ define i8 @test_aas(i8 %a0) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_aas:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: aas # sched: [100:0.33]
@@ -297,7 +297,7 @@ define i8 @test_aas(i8 %a0) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_aas:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: aas # sched: [100:0.25]
@@ -305,7 +305,7 @@ define i8 @test_aas(i8 %a0) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_aas:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: aas # sched: [100:0.25]
@@ -313,7 +313,7 @@ define i8 @test_aas(i8 %a0) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_aas:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: aas # sched: [100:0.25]
@@ -321,7 +321,7 @@ define i8 @test_aas(i8 %a0) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_aas:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: aas # sched: [100:0.25]
@@ -329,7 +329,7 @@ define i8 @test_aas(i8 %a0) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_aas:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: aas # sched: [100:0.17]
@@ -337,7 +337,7 @@ define i8 @test_aas(i8 %a0) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_aas:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: aas # sched: [100:?]
@@ -351,7 +351,7 @@ define i8 @test_aas(i8 %a0) optsize {
define i8 @test_daa(i8 %a0) optsize {
; GENERIC-LABEL: test_daa:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movb {{[0-9]+}}(%esp), %al
; GENERIC-NEXT: #APP
; GENERIC-NEXT: daa
@@ -359,7 +359,7 @@ define i8 @test_daa(i8 %a0) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_daa:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: daa # sched: [18:9.00]
@@ -367,7 +367,7 @@ define i8 @test_daa(i8 %a0) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_daa:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: daa # sched: [100:1.00]
@@ -375,7 +375,7 @@ define i8 @test_daa(i8 %a0) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_daa:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: daa # sched: [100:0.33]
@@ -383,7 +383,7 @@ define i8 @test_daa(i8 %a0) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_daa:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: daa # sched: [100:0.25]
@@ -391,7 +391,7 @@ define i8 @test_daa(i8 %a0) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_daa:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: daa # sched: [100:0.25]
@@ -399,7 +399,7 @@ define i8 @test_daa(i8 %a0) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_daa:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: daa # sched: [100:0.25]
@@ -407,7 +407,7 @@ define i8 @test_daa(i8 %a0) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_daa:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: daa # sched: [100:0.25]
@@ -415,7 +415,7 @@ define i8 @test_daa(i8 %a0) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_daa:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: daa # sched: [100:0.17]
@@ -423,7 +423,7 @@ define i8 @test_daa(i8 %a0) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_daa:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: daa # sched: [100:?]
@@ -435,7 +435,7 @@ define i8 @test_daa(i8 %a0) optsize {
define i8 @test_das(i8 %a0) optsize {
; GENERIC-LABEL: test_das:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movb {{[0-9]+}}(%esp), %al
; GENERIC-NEXT: #APP
; GENERIC-NEXT: das
@@ -443,7 +443,7 @@ define i8 @test_das(i8 %a0) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_das:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: das # sched: [20:10.00]
@@ -451,7 +451,7 @@ define i8 @test_das(i8 %a0) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_das:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: das # sched: [100:1.00]
@@ -459,7 +459,7 @@ define i8 @test_das(i8 %a0) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_das:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: das # sched: [100:0.33]
@@ -467,7 +467,7 @@ define i8 @test_das(i8 %a0) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_das:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: das # sched: [100:0.25]
@@ -475,7 +475,7 @@ define i8 @test_das(i8 %a0) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_das:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: das # sched: [100:0.25]
@@ -483,7 +483,7 @@ define i8 @test_das(i8 %a0) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_das:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: das # sched: [100:0.25]
@@ -491,7 +491,7 @@ define i8 @test_das(i8 %a0) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_das:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: das # sched: [100:0.25]
@@ -499,7 +499,7 @@ define i8 @test_das(i8 %a0) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_das:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: das # sched: [100:0.17]
@@ -507,7 +507,7 @@ define i8 @test_das(i8 %a0) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_das:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: das # sched: [100:?]
diff --git a/test/CodeGen/X86/schedule-x86_64.ll b/test/CodeGen/X86/schedule-x86_64.ll
index acc54c74927..a157e25e251 100644
--- a/test/CodeGen/X86/schedule-x86_64.ll
+++ b/test/CodeGen/X86/schedule-x86_64.ll
@@ -17,7 +17,7 @@
define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; GENERIC-LABEL: test_bsf16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: bsfw %di, %ax # sched: [3:1.00]
; GENERIC-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
@@ -27,7 +27,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsf16:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: bsfw %di, %ax # sched: [16:8.00]
; ATOM-NEXT: bsfw (%rsi), %cx # sched: [16:8.00]
@@ -37,7 +37,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsf16:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: bsfw %di, %ax # sched: [1:1.00]
; SLM-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
@@ -47,7 +47,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsf16:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: bsfw %di, %ax # sched: [3:1.00]
; SANDY-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
@@ -57,7 +57,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsf16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: bsfw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: bsfw (%rsi), %cx # sched: [3:1.00]
@@ -67,7 +67,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsf16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: bsfw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
@@ -77,7 +77,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsf16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: bsfw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
@@ -87,7 +87,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsf16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: bsfw %di, %ax # sched: [3:1.00]
; SKX-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
@@ -97,7 +97,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsf16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: bsfw %di, %ax # sched: [1:0.50]
; BTVER2-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
@@ -107,7 +107,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsf16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: bsfw %di, %ax # sched: [3:0.25]
; ZNVER1-NEXT: bsfw (%rsi), %cx # sched: [7:0.50]
@@ -123,7 +123,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
}
define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; GENERIC-LABEL: test_bsf32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: bsfl %edi, %eax # sched: [3:1.00]
; GENERIC-NEXT: bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -132,7 +132,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsf32:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: bsfl %edi, %eax # sched: [16:8.00]
; ATOM-NEXT: bsfl (%rsi), %ecx # sched: [16:8.00]
@@ -141,7 +141,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsf32:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: bsfl %edi, %eax # sched: [1:1.00]
; SLM-NEXT: bsfl (%rsi), %ecx # sched: [4:1.00]
@@ -150,7 +150,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsf32:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: bsfl %edi, %eax # sched: [3:1.00]
; SANDY-NEXT: bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -159,7 +159,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsf32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: bsfl %edi, %eax # sched: [3:1.00]
; HASWELL-NEXT: bsfl (%rsi), %ecx # sched: [3:1.00]
@@ -168,7 +168,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsf32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: bsfl %edi, %eax # sched: [3:1.00]
; BROADWELL-NEXT: bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -177,7 +177,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsf32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: bsfl %edi, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -186,7 +186,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsf32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: bsfl %edi, %eax # sched: [3:1.00]
; SKX-NEXT: bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -195,7 +195,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsf32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: bsfl %edi, %eax # sched: [1:0.50]
; BTVER2-NEXT: bsfl (%rsi), %ecx # sched: [4:1.00]
@@ -204,7 +204,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsf32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: bsfl %edi, %eax # sched: [3:0.25]
; ZNVER1-NEXT: bsfl (%rsi), %ecx # sched: [7:0.50]
@@ -219,7 +219,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
}
define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; GENERIC-LABEL: test_bsf64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: bsfq %rdi, %rax # sched: [3:1.00]
; GENERIC-NEXT: bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -228,7 +228,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsf64:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: bsfq %rdi, %rax # sched: [16:8.00]
; ATOM-NEXT: bsfq (%rsi), %rcx # sched: [16:8.00]
@@ -237,7 +237,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsf64:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: bsfq %rdi, %rax # sched: [1:1.00]
; SLM-NEXT: bsfq (%rsi), %rcx # sched: [4:1.00]
@@ -246,7 +246,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsf64:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: bsfq %rdi, %rax # sched: [3:1.00]
; SANDY-NEXT: bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -255,7 +255,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsf64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: bsfq %rdi, %rax # sched: [3:1.00]
; HASWELL-NEXT: bsfq (%rsi), %rcx # sched: [3:1.00]
@@ -264,7 +264,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsf64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: bsfq %rdi, %rax # sched: [3:1.00]
; BROADWELL-NEXT: bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -273,7 +273,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsf64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: bsfq %rdi, %rax # sched: [3:1.00]
; SKYLAKE-NEXT: bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -282,7 +282,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsf64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: bsfq %rdi, %rax # sched: [3:1.00]
; SKX-NEXT: bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -291,7 +291,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsf64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: bsfq %rdi, %rax # sched: [1:0.50]
; BTVER2-NEXT: bsfq (%rsi), %rcx # sched: [4:1.00]
@@ -300,7 +300,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsf64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: bsfq %rdi, %rax # sched: [3:0.25]
; ZNVER1-NEXT: bsfq (%rsi), %rcx # sched: [7:0.50]
@@ -316,7 +316,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; GENERIC-LABEL: test_bsr16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: bsrw %di, %ax # sched: [3:1.00]
; GENERIC-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
@@ -326,7 +326,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsr16:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: bsrw %di, %ax # sched: [16:8.00]
; ATOM-NEXT: bsrw (%rsi), %cx # sched: [16:8.00]
@@ -336,7 +336,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsr16:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: bsrw %di, %ax # sched: [1:1.00]
; SLM-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
@@ -346,7 +346,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsr16:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: bsrw %di, %ax # sched: [3:1.00]
; SANDY-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
@@ -356,7 +356,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsr16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: bsrw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: bsrw (%rsi), %cx # sched: [3:1.00]
@@ -366,7 +366,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsr16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: bsrw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
@@ -376,7 +376,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsr16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: bsrw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
@@ -386,7 +386,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsr16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: bsrw %di, %ax # sched: [3:1.00]
; SKX-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
@@ -396,7 +396,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsr16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: bsrw %di, %ax # sched: [1:0.50]
; BTVER2-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
@@ -406,7 +406,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsr16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: bsrw %di, %ax # sched: [3:0.25]
; ZNVER1-NEXT: bsrw (%rsi), %cx # sched: [7:0.50]
@@ -422,7 +422,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
}
define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; GENERIC-LABEL: test_bsr32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: bsrl %edi, %eax # sched: [3:1.00]
; GENERIC-NEXT: bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -431,7 +431,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsr32:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: bsrl %edi, %eax # sched: [16:8.00]
; ATOM-NEXT: bsrl (%rsi), %ecx # sched: [16:8.00]
@@ -440,7 +440,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsr32:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: bsrl %edi, %eax # sched: [1:1.00]
; SLM-NEXT: bsrl (%rsi), %ecx # sched: [4:1.00]
@@ -449,7 +449,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsr32:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: bsrl %edi, %eax # sched: [3:1.00]
; SANDY-NEXT: bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -458,7 +458,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsr32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: bsrl %edi, %eax # sched: [3:1.00]
; HASWELL-NEXT: bsrl (%rsi), %ecx # sched: [3:1.00]
@@ -467,7 +467,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsr32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: bsrl %edi, %eax # sched: [3:1.00]
; BROADWELL-NEXT: bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -476,7 +476,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsr32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: bsrl %edi, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -485,7 +485,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsr32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: bsrl %edi, %eax # sched: [3:1.00]
; SKX-NEXT: bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -494,7 +494,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsr32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: bsrl %edi, %eax # sched: [1:0.50]
; BTVER2-NEXT: bsrl (%rsi), %ecx # sched: [4:1.00]
@@ -503,7 +503,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsr32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: bsrl %edi, %eax # sched: [3:0.25]
; ZNVER1-NEXT: bsrl (%rsi), %ecx # sched: [7:0.50]
@@ -518,7 +518,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
}
define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; GENERIC-LABEL: test_bsr64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: bsrq %rdi, %rax # sched: [3:1.00]
; GENERIC-NEXT: bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -527,7 +527,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsr64:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: bsrq %rdi, %rax # sched: [16:8.00]
; ATOM-NEXT: bsrq (%rsi), %rcx # sched: [16:8.00]
@@ -536,7 +536,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsr64:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: bsrq %rdi, %rax # sched: [1:1.00]
; SLM-NEXT: bsrq (%rsi), %rcx # sched: [4:1.00]
@@ -545,7 +545,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsr64:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: bsrq %rdi, %rax # sched: [3:1.00]
; SANDY-NEXT: bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -554,7 +554,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsr64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: bsrq %rdi, %rax # sched: [3:1.00]
; HASWELL-NEXT: bsrq (%rsi), %rcx # sched: [3:1.00]
@@ -563,7 +563,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsr64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: bsrq %rdi, %rax # sched: [3:1.00]
; BROADWELL-NEXT: bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -572,7 +572,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsr64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: bsrq %rdi, %rax # sched: [3:1.00]
; SKYLAKE-NEXT: bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -581,7 +581,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsr64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: bsrq %rdi, %rax # sched: [3:1.00]
; SKX-NEXT: bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -590,7 +590,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsr64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: bsrq %rdi, %rax # sched: [1:0.50]
; BTVER2-NEXT: bsrq (%rsi), %rcx # sched: [4:1.00]
@@ -599,7 +599,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsr64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: bsrq %rdi, %rax # sched: [3:0.25]
; ZNVER1-NEXT: bsrq (%rsi), %rcx # sched: [7:0.50]
@@ -615,61 +615,61 @@ define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
define i32 @test_bswap32(i32 %a0) optsize {
; GENERIC-LABEL: test_bswap32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: bswapl %edi # sched: [2:1.00]
; GENERIC-NEXT: movl %edi, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bswap32:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: bswapl %edi # sched: [1:1.00]
; ATOM-NEXT: movl %edi, %eax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bswap32:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: bswapl %edi # sched: [1:0.50]
; SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bswap32:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: bswapl %edi # sched: [2:1.00]
; SANDY-NEXT: movl %edi, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bswap32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: bswapl %edi # sched: [2:0.50]
; HASWELL-NEXT: movl %edi, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bswap32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: bswapl %edi # sched: [2:0.50]
; BROADWELL-NEXT: movl %edi, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bswap32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: bswapl %edi # sched: [2:0.50]
; SKYLAKE-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bswap32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: bswapl %edi # sched: [2:0.50]
; SKX-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bswap32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: bswapl %edi # sched: [1:0.50]
; BTVER2-NEXT: movl %edi, %eax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bswap32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: bswapl %edi # sched: [1:1.00]
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -678,61 +678,61 @@ define i32 @test_bswap32(i32 %a0) optsize {
}
define i64 @test_bswap64(i64 %a0) optsize {
; GENERIC-LABEL: test_bswap64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: bswapq %rdi # sched: [2:1.00]
; GENERIC-NEXT: movq %rdi, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bswap64:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: bswapq %rdi # sched: [1:1.00]
; ATOM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bswap64:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: bswapq %rdi # sched: [1:0.50]
; SLM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bswap64:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: bswapq %rdi # sched: [2:1.00]
; SANDY-NEXT: movq %rdi, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bswap64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: bswapq %rdi # sched: [2:0.50]
; HASWELL-NEXT: movq %rdi, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bswap64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: bswapq %rdi # sched: [2:0.50]
; BROADWELL-NEXT: movq %rdi, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bswap64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: bswapq %rdi # sched: [2:0.50]
; SKYLAKE-NEXT: movq %rdi, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bswap64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: bswapq %rdi # sched: [2:0.50]
; SKX-NEXT: movq %rdi, %rax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bswap64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: bswapq %rdi # sched: [1:0.50]
; BTVER2-NEXT: movq %rdi, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bswap64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: bswapq %rdi # sched: [1:1.00]
; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -749,7 +749,7 @@ define i64 @test_bswap64(i64 %a0) optsize {
define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; GENERIC-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: cbtw # sched: [1:0.33]
; GENERIC-NEXT: cltd # sched: [1:0.50]
@@ -761,7 +761,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: cbtw # sched: [4:2.00]
; ATOM-NEXT: cltd # sched: [4:2.00]
@@ -773,7 +773,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: cbtw # sched: [1:0.50]
; SLM-NEXT: cltd # sched: [1:0.50]
@@ -785,7 +785,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: cbtw # sched: [1:0.33]
; SANDY-NEXT: cltd # sched: [1:0.50]
@@ -797,7 +797,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: cbtw # sched: [1:0.25]
; HASWELL-NEXT: cltd # sched: [1:0.50]
@@ -809,7 +809,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: cbtw # sched: [1:0.25]
; BROADWELL-NEXT: cltd # sched: [1:0.50]
@@ -821,7 +821,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: cbtw # sched: [1:0.25]
; SKYLAKE-NEXT: cltd # sched: [1:0.50]
@@ -833,7 +833,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: cbtw # sched: [1:0.25]
; SKX-NEXT: cltd # sched: [1:0.50]
@@ -845,7 +845,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: cbtw # sched: [1:0.50]
; BTVER2-NEXT: cltd # sched: [1:0.50]
@@ -857,7 +857,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: cbtw # sched: [1:0.25]
; ZNVER1-NEXT: cltd # sched: [1:0.25]
@@ -873,7 +873,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
define void @test_clc_cld_cmc() optsize {
; GENERIC-LABEL: test_clc_cld_cmc:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: clc # sched: [1:0.33]
; GENERIC-NEXT: cld # sched: [1:0.33]
@@ -882,7 +882,7 @@ define void @test_clc_cld_cmc() optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_clc_cld_cmc:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: clc # sched: [1:0.50]
; ATOM-NEXT: cld # sched: [3:1.50]
@@ -891,7 +891,7 @@ define void @test_clc_cld_cmc() optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_clc_cld_cmc:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: clc # sched: [1:0.50]
; SLM-NEXT: cld # sched: [1:0.50]
@@ -900,7 +900,7 @@ define void @test_clc_cld_cmc() optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_clc_cld_cmc:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: clc # sched: [1:0.33]
; SANDY-NEXT: cld # sched: [1:0.33]
@@ -909,7 +909,7 @@ define void @test_clc_cld_cmc() optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_clc_cld_cmc:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: clc # sched: [1:0.25]
; HASWELL-NEXT: cld # sched: [3:1.00]
@@ -918,7 +918,7 @@ define void @test_clc_cld_cmc() optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_clc_cld_cmc:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: clc # sched: [1:0.25]
; BROADWELL-NEXT: cld # sched: [3:1.00]
@@ -927,7 +927,7 @@ define void @test_clc_cld_cmc() optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_clc_cld_cmc:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: clc # sched: [1:0.25]
; SKYLAKE-NEXT: cld # sched: [3:1.00]
@@ -936,7 +936,7 @@ define void @test_clc_cld_cmc() optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_clc_cld_cmc:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: clc # sched: [1:0.25]
; SKX-NEXT: cld # sched: [3:1.00]
@@ -945,7 +945,7 @@ define void @test_clc_cld_cmc() optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_clc_cld_cmc:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: clc # sched: [1:0.50]
; BTVER2-NEXT: cld # sched: [1:0.50]
@@ -954,7 +954,7 @@ define void @test_clc_cld_cmc() optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_clc_cld_cmc:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: clc # sched: [1:0.25]
; ZNVER1-NEXT: cld # sched: [1:0.25]
@@ -980,70 +980,70 @@ define void @test_clc_cld_cmc() optsize {
define void @test_cpuid() optsize {
; GENERIC-LABEL: test_cpuid:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: cpuid # sched: [100:0.33]
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cpuid:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: cpuid # sched: [121:60.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cpuid:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: cpuid # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cpuid:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: cpuid # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cpuid:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: cpuid # sched: [18:2.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cpuid:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: cpuid # sched: [18:2.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cpuid:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: cpuid # sched: [18:2.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cpuid:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: cpuid # sched: [18:2.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cpuid:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: cpuid # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cpuid:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: cpuid # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -1074,7 +1074,7 @@ define void @test_cpuid() optsize {
define void @test_invlpg_invlpga(i8 *%a0) optsize {
; GENERIC-LABEL: test_invlpg_invlpga:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: invlpg (%rdi) # sched: [100:0.33]
; GENERIC-NEXT: invlpga %ecx, %rax # sched: [100:0.33]
@@ -1082,7 +1082,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_invlpg_invlpga:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: invlpg (%rdi) # sched: [71:35.50]
; ATOM-NEXT: invlpga %ecx, %rax # sched: [71:35.50]
@@ -1090,7 +1090,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_invlpg_invlpga:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: invlpg (%rdi) # sched: [100:1.00]
; SLM-NEXT: invlpga %ecx, %rax # sched: [100:1.00]
@@ -1098,7 +1098,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_invlpg_invlpga:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: invlpg (%rdi) # sched: [100:0.33]
; SANDY-NEXT: invlpga %ecx, %rax # sched: [100:0.33]
@@ -1106,7 +1106,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_invlpg_invlpga:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: invlpg (%rdi) # sched: [100:0.25]
; HASWELL-NEXT: invlpga %ecx, %rax # sched: [100:0.25]
@@ -1114,7 +1114,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_invlpg_invlpga:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: invlpg (%rdi) # sched: [100:0.25]
; BROADWELL-NEXT: invlpga %ecx, %rax # sched: [100:0.25]
@@ -1122,7 +1122,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_invlpg_invlpga:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: invlpg (%rdi) # sched: [100:0.25]
; SKYLAKE-NEXT: invlpga %ecx, %rax # sched: [100:0.25]
@@ -1130,7 +1130,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_invlpg_invlpga:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: invlpg (%rdi) # sched: [100:0.25]
; SKX-NEXT: invlpga %ecx, %rax # sched: [100:0.25]
@@ -1138,7 +1138,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_invlpg_invlpga:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: invlpg (%rdi) # sched: [100:0.17]
; BTVER2-NEXT: invlpga %ecx, %rax # sched: [100:0.17]
@@ -1146,7 +1146,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_invlpg_invlpga:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: invlpg (%rdi) # sched: [100:?]
; ZNVER1-NEXT: invlpga %ecx, %rax # sched: [100:?]
@@ -1261,7 +1261,7 @@ define void @test_invlpg_invlpga(i8 *%a0) optsize {
define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; GENERIC-LABEL: test_shld_shrd_16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: shldw %cl, %si, %di # sched: [4:1.50]
; GENERIC-NEXT: shrdw %cl, %si, %di # sched: [4:1.50]
@@ -1275,7 +1275,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_shld_shrd_16:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: shldw %cl, %si, %di # sched: [6:3.00]
; ATOM-NEXT: shrdw %cl, %si, %di # sched: [6:3.00]
@@ -1289,7 +1289,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_shld_shrd_16:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: shldw %cl, %si, %di # sched: [1:1.00]
; SLM-NEXT: shrdw %cl, %si, %di # sched: [1:1.00]
@@ -1303,7 +1303,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_shld_shrd_16:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: shldw %cl, %si, %di # sched: [4:1.50]
; SANDY-NEXT: shrdw %cl, %si, %di # sched: [4:1.50]
@@ -1317,7 +1317,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shld_shrd_16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: shldw %cl, %si, %di # sched: [6:1.00]
; HASWELL-NEXT: shrdw %cl, %si, %di # sched: [6:1.00]
@@ -1331,7 +1331,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shld_shrd_16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: shldw %cl, %si, %di # sched: [6:1.00]
; BROADWELL-NEXT: shrdw %cl, %si, %di # sched: [6:1.00]
@@ -1345,7 +1345,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shld_shrd_16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: shldw %cl, %si, %di # sched: [6:1.00]
; SKYLAKE-NEXT: shrdw %cl, %si, %di # sched: [6:1.00]
@@ -1359,7 +1359,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_shld_shrd_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: shldw %cl, %si, %di # sched: [6:1.00]
; SKX-NEXT: shrdw %cl, %si, %di # sched: [6:1.00]
@@ -1373,7 +1373,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_shld_shrd_16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: shldw %cl, %si, %di # sched: [4:4.00]
; BTVER2-NEXT: shrdw %cl, %si, %di # sched: [4:4.00]
@@ -1387,7 +1387,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shld_shrd_16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: shldw %cl, %si, %di # sched: [100:?]
; ZNVER1-NEXT: shrdw %cl, %si, %di # sched: [100:?]
@@ -1404,7 +1404,7 @@ define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
}
define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; GENERIC-LABEL: test_shld_shrd_32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: shldl %cl, %esi, %edi # sched: [4:1.50]
; GENERIC-NEXT: shrdl %cl, %esi, %edi # sched: [4:1.50]
@@ -1418,7 +1418,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_shld_shrd_32:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: shldl %cl, %esi, %edi # sched: [2:1.00]
; ATOM-NEXT: shrdl %cl, %esi, %edi # sched: [2:1.00]
@@ -1432,7 +1432,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_shld_shrd_32:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: shldl %cl, %esi, %edi # sched: [1:1.00]
; SLM-NEXT: shrdl %cl, %esi, %edi # sched: [1:1.00]
@@ -1446,7 +1446,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_shld_shrd_32:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: shldl %cl, %esi, %edi # sched: [4:1.50]
; SANDY-NEXT: shrdl %cl, %esi, %edi # sched: [4:1.50]
@@ -1460,7 +1460,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shld_shrd_32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: shldl %cl, %esi, %edi # sched: [6:1.00]
; HASWELL-NEXT: shrdl %cl, %esi, %edi # sched: [6:1.00]
@@ -1474,7 +1474,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shld_shrd_32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: shldl %cl, %esi, %edi # sched: [6:1.00]
; BROADWELL-NEXT: shrdl %cl, %esi, %edi # sched: [6:1.00]
@@ -1488,7 +1488,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shld_shrd_32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: shldl %cl, %esi, %edi # sched: [6:1.00]
; SKYLAKE-NEXT: shrdl %cl, %esi, %edi # sched: [6:1.00]
@@ -1502,7 +1502,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_shld_shrd_32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: shldl %cl, %esi, %edi # sched: [6:1.00]
; SKX-NEXT: shrdl %cl, %esi, %edi # sched: [6:1.00]
@@ -1516,7 +1516,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_shld_shrd_32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: shldl %cl, %esi, %edi # sched: [4:4.00]
; BTVER2-NEXT: shrdl %cl, %esi, %edi # sched: [4:4.00]
@@ -1530,7 +1530,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shld_shrd_32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: shldl %cl, %esi, %edi # sched: [100:?]
; ZNVER1-NEXT: shrdl %cl, %esi, %edi # sched: [100:?]
@@ -1547,7 +1547,7 @@ define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
}
define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; GENERIC-LABEL: test_shld_shrd_64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: shldq %cl, %rsi, %rdi # sched: [4:1.50]
; GENERIC-NEXT: shrdq %cl, %rsi, %rdi # sched: [4:1.50]
@@ -1561,7 +1561,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_shld_shrd_64:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: shldq %cl, %rsi, %rdi # sched: [8:4.00]
; ATOM-NEXT: shrdq %cl, %rsi, %rdi # sched: [8:4.00]
@@ -1575,7 +1575,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_shld_shrd_64:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: shldq %cl, %rsi, %rdi # sched: [1:1.00]
; SLM-NEXT: shrdq %cl, %rsi, %rdi # sched: [1:1.00]
@@ -1589,7 +1589,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_shld_shrd_64:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: shldq %cl, %rsi, %rdi # sched: [4:1.50]
; SANDY-NEXT: shrdq %cl, %rsi, %rdi # sched: [4:1.50]
@@ -1603,7 +1603,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shld_shrd_64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: shldq %cl, %rsi, %rdi # sched: [6:1.00]
; HASWELL-NEXT: shrdq %cl, %rsi, %rdi # sched: [6:1.00]
@@ -1617,7 +1617,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shld_shrd_64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: shldq %cl, %rsi, %rdi # sched: [6:1.00]
; BROADWELL-NEXT: shrdq %cl, %rsi, %rdi # sched: [6:1.00]
@@ -1631,7 +1631,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shld_shrd_64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: shldq %cl, %rsi, %rdi # sched: [6:1.00]
; SKYLAKE-NEXT: shrdq %cl, %rsi, %rdi # sched: [6:1.00]
@@ -1645,7 +1645,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_shld_shrd_64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: shldq %cl, %rsi, %rdi # sched: [6:1.00]
; SKX-NEXT: shrdq %cl, %rsi, %rdi # sched: [6:1.00]
@@ -1659,7 +1659,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_shld_shrd_64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: shldq %cl, %rsi, %rdi # sched: [4:4.00]
; BTVER2-NEXT: shrdq %cl, %rsi, %rdi # sched: [4:4.00]
@@ -1673,7 +1673,7 @@ define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shld_shrd_64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: shldq %cl, %rsi, %rdi # sched: [100:?]
; ZNVER1-NEXT: shrdq %cl, %rsi, %rdi # sched: [100:?]
diff --git a/test/CodeGen/X86/select-mmx.ll b/test/CodeGen/X86/select-mmx.ll
index 795990e3c32..d452237e6e9 100644
--- a/test/CodeGen/X86/select-mmx.ll
+++ b/test/CodeGen/X86/select-mmx.ll
@@ -13,7 +13,7 @@
define i64 @test47(i64 %arg) {
;
; X64-LABEL: test47:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: movl $7, %ecx
@@ -24,7 +24,7 @@ define i64 @test47(i64 %arg) {
; X64-NEXT: retq
;
; I32-LABEL: test47:
-; I32: # BB#0:
+; I32: # %bb.0:
; I32-NEXT: pushl %ebp
; I32-NEXT: .cfi_def_cfa_offset 8
; I32-NEXT: .cfi_offset %ebp, -8
@@ -36,7 +36,7 @@ define i64 @test47(i64 %arg) {
; I32-NEXT: orl 12(%ebp), %eax
; I32-NEXT: movl $7, %eax
; I32-NEXT: je .LBB0_2
-; I32-NEXT: # BB#1:
+; I32-NEXT: # %bb.1:
; I32-NEXT: xorl %eax, %eax
; I32-NEXT: .LBB0_2:
; I32-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -67,7 +67,7 @@ define i64 @test47(i64 %arg) {
define i64 @test49(i64 %arg, i64 %x, i64 %y) {
;
; X64-LABEL: test49:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: cmovneq %rdx, %rsi
; X64-NEXT: movd %rsi, %mm0
@@ -76,7 +76,7 @@ define i64 @test49(i64 %arg, i64 %x, i64 %y) {
; X64-NEXT: retq
;
; I32-LABEL: test49:
-; I32: # BB#0:
+; I32: # %bb.0:
; I32-NEXT: pushl %ebp
; I32-NEXT: .cfi_def_cfa_offset 8
; I32-NEXT: .cfi_offset %ebp, -8
@@ -87,7 +87,7 @@ define i64 @test49(i64 %arg, i64 %x, i64 %y) {
; I32-NEXT: movl 8(%ebp), %eax
; I32-NEXT: orl 12(%ebp), %eax
; I32-NEXT: je .LBB1_1
-; I32-NEXT: # BB#2:
+; I32-NEXT: # %bb.2:
; I32-NEXT: leal 24(%ebp), %eax
; I32-NEXT: jmp .LBB1_3
; I32-NEXT: .LBB1_1:
diff --git a/test/CodeGen/X86/select-with-and-or.ll b/test/CodeGen/X86/select-with-and-or.ll
index 45e4384d0fa..f710a5ce409 100644
--- a/test/CodeGen/X86/select-with-and-or.ll
+++ b/test/CodeGen/X86/select-with-and-or.ll
@@ -3,7 +3,7 @@
define <4 x i32> @test1(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpnleps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vandps %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -14,7 +14,7 @@ define <4 x i32> @test1(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpnleps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vorps %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -25,7 +25,7 @@ define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
define <4 x i32> @test3(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vandps %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -36,7 +36,7 @@ define <4 x i32> @test3(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
define <4 x i32> @test4(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vorps %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -47,7 +47,7 @@ define <4 x i32> @test4(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
define <4 x i32> @test5(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpnleps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%f = fcmp ult <4 x float> %a, %b
@@ -57,7 +57,7 @@ define <4 x i32> @test5(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
define <4 x i32> @test6(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%not.f = fcmp oge <4 x float> %a, %b
@@ -67,7 +67,7 @@ define <4 x i32> @test6(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
define <4 x i32> @test7(<4 x float> %a, <4 x float> %b, <4 x i32>* %p) {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpnleps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -81,7 +81,7 @@ define <4 x i32> @test7(<4 x float> %a, <4 x float> %b, <4 x i32>* %p) {
define <2 x double> @test1f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test1f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vandpd %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -92,7 +92,7 @@ define <2 x double> @test1f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test2f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test2f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vorpd %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -103,7 +103,7 @@ define <2 x double> @test2f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test3f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test3f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpnltpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vandpd %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -114,7 +114,7 @@ define <2 x double> @test3f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test4f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test4f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpnlepd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vorpd %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -125,7 +125,7 @@ define <2 x double> @test4f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test5f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test5f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpnlepd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%f = fcmp ugt <2 x double> %a, %b
@@ -135,7 +135,7 @@ define <2 x double> @test5f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test6f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test6f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%f = fcmp ule <2 x double> %a, %b
@@ -145,7 +145,7 @@ define <2 x double> @test6f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
define <2 x double> @test7f(<2 x double> %a, <2 x double> %b, <2 x double>* %p) {
; CHECK-LABEL: test7f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vandpd (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll
index 535d914a008..9c76975fc88 100644
--- a/test/CodeGen/X86/select.ll
+++ b/test/CodeGen/X86/select.ll
@@ -8,7 +8,7 @@
define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: addq $8, %rdi
; CHECK-NEXT: addq $8, %rsi
; CHECK-NEXT: testb $1, %dl
@@ -17,10 +17,10 @@ define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; CHECK-NEXT: retq
;
; MCU-LABEL: test1:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: jne .LBB0_1
-; MCU-NEXT: # BB#2:
+; MCU-NEXT: # %bb.2:
; MCU-NEXT: addl $8, %edx
; MCU-NEXT: movl %edx, %eax
; MCU-NEXT: movl (%eax), %eax
@@ -39,7 +39,7 @@ define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; PR2139
define i32 @test2() nounwind {
; GENERIC-LABEL: test2:
-; GENERIC: ## BB#0: ## %entry
+; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: pushq %rax
; GENERIC-NEXT: callq _return_false
; GENERIC-NEXT: xorl %ecx, %ecx
@@ -49,14 +49,14 @@ define i32 @test2() nounwind {
; GENERIC-NEXT: shll $3, %eax
; GENERIC-NEXT: cmpl $32768, %eax ## imm = 0x8000
; GENERIC-NEXT: jge LBB1_1
-; GENERIC-NEXT: ## BB#2: ## %bb91
+; GENERIC-NEXT: ## %bb.2: ## %bb91
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: popq %rcx
; GENERIC-NEXT: retq
; GENERIC-NEXT: LBB1_1: ## %bb90
;
; ATOM-LABEL: test2:
-; ATOM: ## BB#0: ## %entry
+; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: pushq %rax
; ATOM-NEXT: callq _return_false
; ATOM-NEXT: xorl %ecx, %ecx
@@ -66,25 +66,25 @@ define i32 @test2() nounwind {
; ATOM-NEXT: shll $3, %edx
; ATOM-NEXT: cmpl $32768, %edx ## imm = 0x8000
; ATOM-NEXT: jge LBB1_1
-; ATOM-NEXT: ## BB#2: ## %bb91
+; ATOM-NEXT: ## %bb.2: ## %bb91
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: popq %rcx
; ATOM-NEXT: retq
; ATOM-NEXT: LBB1_1: ## %bb90
;
; MCU-LABEL: test2:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: calll return_false
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB1_2
-; MCU-NEXT: # BB#1: # %entry
+; MCU-NEXT: # %bb.1: # %entry
; MCU-NEXT: movl $-480, %ecx # imm = 0xFE20
; MCU-NEXT: .LBB1_2: # %entry
; MCU-NEXT: shll $3, %ecx
; MCU-NEXT: cmpl $32768, %ecx # imm = 0x8000
; MCU-NEXT: jge .LBB1_3
-; MCU-NEXT: # BB#4: # %bb91
+; MCU-NEXT: # %bb.4: # %bb91
; MCU-NEXT: xorl %eax, %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB1_3: # %bb90
@@ -106,7 +106,7 @@ declare i1 @return_false()
;; Select between two floating point constants.
define float @test3(i32 %x) nounwind readnone {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: sete %al
@@ -115,7 +115,7 @@ define float @test3(i32 %x) nounwind readnone {
; CHECK-NEXT: retq
;
; MCU-LABEL: test3:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: sete %cl
@@ -129,7 +129,7 @@ entry:
define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; CHECK-LABEL: test4:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ucomisd %xmm0, %xmm1
@@ -138,7 +138,7 @@ define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; CHECK-NEXT: retq
;
; MCU-LABEL: test4:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: fldl {{[0-9]+}}(%esp)
; MCU-NEXT: flds {{\.LCPI.*}}
@@ -160,10 +160,10 @@ entry:
define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne LBB4_2
-; CHECK-NEXT: ## BB#1:
+; CHECK-NEXT: ## %bb.1:
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: LBB4_2:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -172,12 +172,12 @@ define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
; CHECK-NEXT: retq
;
; MCU-LABEL: test5:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: pushl %esi
; MCU-NEXT: movl {{[0-9]+}}(%esp), %esi
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB4_2
-; MCU-NEXT: # BB#1:
+; MCU-NEXT: # %bb.1:
; MCU-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; MCU-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB4_2:
@@ -193,10 +193,10 @@ define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
; Verify that the fmul gets sunk into the one part of the diamond where it is needed.
define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: test6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: je LBB5_1
-; CHECK-NEXT: ## BB#2:
+; CHECK-NEXT: ## %bb.2:
; CHECK-NEXT: movaps (%rsi), %xmm0
; CHECK-NEXT: movaps %xmm0, (%rsi)
; CHECK-NEXT: retq
@@ -207,7 +207,7 @@ define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-NEXT: retq
;
; MCU-LABEL: test6:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: pushl %eax
; MCU-NEXT: flds 12(%edx)
; MCU-NEXT: fstps (%esp) # 4-byte Folded Spill
@@ -227,7 +227,7 @@ define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: flds (%edx)
; MCU-NEXT: je .LBB5_2
-; MCU-NEXT: # BB#1:
+; MCU-NEXT: # %bb.1:
; MCU-NEXT: fstp %st(1)
; MCU-NEXT: fstp %st(3)
; MCU-NEXT: fstp %st(1)
@@ -268,7 +268,7 @@ define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
; Select with fp80's
define x86_fp80 @test7(i32 %tmp8) nounwind {
; CHECK-LABEL: test7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setns %al
@@ -278,7 +278,7 @@ define x86_fp80 @test7(i32 %tmp8) nounwind {
; CHECK-NEXT: retq
;
; MCU-LABEL: test7:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: setns %cl
@@ -293,10 +293,10 @@ define x86_fp80 @test7(i32 %tmp8) nounwind {
; widening select v6i32 and then a sub
define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2) nounwind {
; GENERIC-LABEL: test8:
-; GENERIC: ## BB#0:
+; GENERIC: ## %bb.0:
; GENERIC-NEXT: testb $1, %dil
; GENERIC-NEXT: jne LBB7_1
-; GENERIC-NEXT: ## BB#2:
+; GENERIC-NEXT: ## %bb.2:
; GENERIC-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -327,10 +327,10 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test8:
-; ATOM: ## BB#0:
+; ATOM: ## %bb.0:
; ATOM-NEXT: testb $1, %dil
; ATOM-NEXT: jne LBB7_1
-; ATOM-NEXT: ## BB#2:
+; ATOM-NEXT: ## %bb.2:
; ATOM-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
@@ -359,14 +359,14 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2)
; ATOM-NEXT: retq
;
; MCU-LABEL: test8:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: pushl %ebp
; MCU-NEXT: pushl %ebx
; MCU-NEXT: pushl %edi
; MCU-NEXT: pushl %esi
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB7_1
-; MCU-NEXT: # BB#2:
+; MCU-NEXT: # %bb.2:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: je .LBB7_5
@@ -441,14 +441,14 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2)
define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; GENERIC-LABEL: test9:
-; GENERIC: ## BB#0:
+; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpq $1, %rdi
; GENERIC-NEXT: sbbq %rax, %rax
; GENERIC-NEXT: orq %rsi, %rax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test9:
-; ATOM: ## BB#0:
+; ATOM: ## %bb.0:
; ATOM-NEXT: cmpq $1, %rdi
; ATOM-NEXT: sbbq %rax, %rax
; ATOM-NEXT: orq %rsi, %rax
@@ -457,10 +457,10 @@ define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; ATOM-NEXT: retq
;
; MCU-LABEL: test9:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: jne .LBB8_1
-; MCU-NEXT: # BB#2:
+; MCU-NEXT: # %bb.2:
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: retl
@@ -476,14 +476,14 @@ define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
;; Same as test9
define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; GENERIC-LABEL: test9a:
-; GENERIC: ## BB#0:
+; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpq $1, %rdi
; GENERIC-NEXT: sbbq %rax, %rax
; GENERIC-NEXT: orq %rsi, %rax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test9a:
-; ATOM: ## BB#0:
+; ATOM: ## %bb.0:
; ATOM-NEXT: cmpq $1, %rdi
; ATOM-NEXT: sbbq %rax, %rax
; ATOM-NEXT: orq %rsi, %rax
@@ -492,12 +492,12 @@ define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; ATOM-NEXT: retq
;
; MCU-LABEL: test9a:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: je .LBB9_2
-; MCU-NEXT: # BB#1:
+; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB9_2:
@@ -509,14 +509,14 @@ define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; GENERIC-LABEL: test9b:
-; GENERIC: ## BB#0:
+; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpq $1, %rdi
; GENERIC-NEXT: sbbq %rax, %rax
; GENERIC-NEXT: orq %rsi, %rax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test9b:
-; ATOM: ## BB#0:
+; ATOM: ## %bb.0:
; ATOM-NEXT: cmpq $1, %rdi
; ATOM-NEXT: sbbq %rax, %rax
; ATOM-NEXT: orq %rsi, %rax
@@ -525,7 +525,7 @@ define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; ATOM-NEXT: retq
;
; MCU-LABEL: test9b:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: movl %edx, %ecx
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: orl %ecx, %eax
@@ -544,7 +544,7 @@ define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
;; Select between -1 and 1.
define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testq %rdi, %rdi
; CHECK-NEXT: setne %al
@@ -552,12 +552,12 @@ define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-NEXT: retq
;
; MCU-LABEL: test10:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: je .LBB11_2
-; MCU-NEXT: # BB#1:
+; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: movl $1, %eax
; MCU-NEXT: .LBB11_2:
@@ -569,7 +569,7 @@ define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: notq %rax
@@ -577,10 +577,10 @@ define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-NEXT: retq
;
; MCU-LABEL: test11:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: je .LBB12_1
-; MCU-NEXT: # BB#2:
+; MCU-NEXT: # %bb.2:
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: retl
@@ -595,7 +595,7 @@ define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
define i64 @test11a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test11a:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: notq %rax
@@ -603,12 +603,12 @@ define i64 @test11a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-NEXT: retq
;
; MCU-LABEL: test11a:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: jne .LBB13_2
-; MCU-NEXT: # BB#1:
+; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB13_2:
@@ -623,7 +623,7 @@ declare noalias i8* @_Znam(i64) noredzone
define noalias i8* @test12(i64 %count) nounwind ssp noredzone {
; GENERIC-LABEL: test12:
-; GENERIC: ## BB#0: ## %entry
+; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: movl $4, %ecx
; GENERIC-NEXT: movq %rdi, %rax
; GENERIC-NEXT: mulq %rcx
@@ -632,7 +632,7 @@ define noalias i8* @test12(i64 %count) nounwind ssp noredzone {
; GENERIC-NEXT: jmp __Znam ## TAILCALL
;
; ATOM-LABEL: test12:
-; ATOM: ## BB#0: ## %entry
+; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: movq %rdi, %rax
; ATOM-NEXT: movl $4, %ecx
; ATOM-NEXT: mulq %rcx
@@ -641,7 +641,7 @@ define noalias i8* @test12(i64 %count) nounwind ssp noredzone {
; ATOM-NEXT: jmp __Znam ## TAILCALL
;
; MCU-LABEL: test12:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: pushl %ebp
; MCU-NEXT: pushl %ebx
; MCU-NEXT: pushl %edi
@@ -663,7 +663,7 @@ define noalias i8* @test12(i64 %count) nounwind ssp noredzone {
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: jne .LBB14_2
-; MCU-NEXT: # BB#1: # %entry
+; MCU-NEXT: # %bb.1: # %entry
; MCU-NEXT: movl %esi, %eax
; MCU-NEXT: movl %edi, %edx
; MCU-NEXT: .LBB14_2: # %entry
@@ -685,13 +685,13 @@ declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
define i32 @test13(i32 %a, i32 %b) nounwind {
; GENERIC-LABEL: test13:
-; GENERIC: ## BB#0:
+; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl %esi, %edi
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test13:
-; ATOM: ## BB#0:
+; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl %esi, %edi
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: nop
@@ -701,7 +701,7 @@ define i32 @test13(i32 %a, i32 %b) nounwind {
; ATOM-NEXT: retq
;
; MCU-LABEL: test13:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: cmpl %edx, %eax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: retl
@@ -712,7 +712,7 @@ define i32 @test13(i32 %a, i32 %b) nounwind {
define i32 @test14(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test14:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: setae %al
@@ -720,7 +720,7 @@ define i32 @test14(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: retq
;
; MCU-LABEL: test14:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: cmpl %edx, %eax
; MCU-NEXT: setae %cl
@@ -735,13 +735,13 @@ define i32 @test14(i32 %a, i32 %b) nounwind {
; rdar://10961709
define i32 @test15(i32 %x) nounwind {
; GENERIC-LABEL: test15:
-; GENERIC: ## BB#0: ## %entry
+; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negl %edi
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test15:
-; ATOM: ## BB#0: ## %entry
+; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negl %edi
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: nop
@@ -751,7 +751,7 @@ define i32 @test15(i32 %x) nounwind {
; ATOM-NEXT: retq
;
; MCU-LABEL: test15:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: negl %eax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: retl
@@ -763,13 +763,13 @@ entry:
define i64 @test16(i64 %x) nounwind uwtable readnone ssp {
; GENERIC-LABEL: test16:
-; GENERIC: ## BB#0: ## %entry
+; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negq %rdi
; GENERIC-NEXT: sbbq %rax, %rax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test16:
-; ATOM: ## BB#0: ## %entry
+; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negq %rdi
; ATOM-NEXT: sbbq %rax, %rax
; ATOM-NEXT: nop
@@ -779,7 +779,7 @@ define i64 @test16(i64 %x) nounwind uwtable readnone ssp {
; ATOM-NEXT: retq
;
; MCU-LABEL: test16:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: xorl %eax, %eax
; MCU-NEXT: orl %edx, %ecx
@@ -795,14 +795,14 @@ entry:
define i16 @test17(i16 %x) nounwind {
; GENERIC-LABEL: test17:
-; GENERIC: ## BB#0: ## %entry
+; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negw %di
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test17:
-; ATOM: ## BB#0: ## %entry
+; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negw %di
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -813,7 +813,7 @@ define i16 @test17(i16 %x) nounwind {
; ATOM-NEXT: retq
;
; MCU-LABEL: test17:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: negw %ax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -826,14 +826,14 @@ entry:
define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
; GENERIC-LABEL: test18:
-; GENERIC: ## BB#0:
+; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $15, %edi
; GENERIC-NEXT: cmovgel %edx, %esi
; GENERIC-NEXT: movl %esi, %eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test18:
-; ATOM: ## BB#0:
+; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $15, %edi
; ATOM-NEXT: cmovgel %edx, %esi
; ATOM-NEXT: movl %esi, %eax
@@ -842,10 +842,10 @@ define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
; ATOM-NEXT: retq
;
; MCU-LABEL: test18:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: cmpl $15, %eax
; MCU-NEXT: jl .LBB20_2
-; MCU-NEXT: # BB#1:
+; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %ecx, %edx
; MCU-NEXT: .LBB20_2:
; MCU-NEXT: movl %edx, %eax
@@ -857,7 +857,7 @@ define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
; CHECK-LABEL: trunc_select_miscompile:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: orb $2, %sil
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: shll %cl, %edi
@@ -865,7 +865,7 @@ define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
; CHECK-NEXT: retq
;
; MCU-LABEL: trunc_select_miscompile:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: orb $2, %dl
; MCU-NEXT: movl %edx, %ecx
; MCU-NEXT: shll %cl, %eax
@@ -878,45 +878,45 @@ define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
; reproducer for pr29002
define void @clamp_i8(i32 %src, i8* %dst) {
; GENERIC-LABEL: clamp_i8:
-; GENERIC: ## BB#0:
+; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $127, %edi
; GENERIC-NEXT: movl $127, %eax
; GENERIC-NEXT: cmovlel %edi, %eax
; GENERIC-NEXT: cmpl $-128, %eax
; GENERIC-NEXT: movb $-128, %cl
; GENERIC-NEXT: jl LBB22_2
-; GENERIC-NEXT: ## BB#1:
+; GENERIC-NEXT: ## %bb.1:
; GENERIC-NEXT: movl %eax, %ecx
; GENERIC-NEXT: LBB22_2:
; GENERIC-NEXT: movb %cl, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: clamp_i8:
-; ATOM: ## BB#0:
+; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $127, %edi
; ATOM-NEXT: movl $127, %eax
; ATOM-NEXT: cmovlel %edi, %eax
; ATOM-NEXT: movb $-128, %cl
; ATOM-NEXT: cmpl $-128, %eax
; ATOM-NEXT: jl LBB22_2
-; ATOM-NEXT: ## BB#1:
+; ATOM-NEXT: ## %bb.1:
; ATOM-NEXT: movl %eax, %ecx
; ATOM-NEXT: LBB22_2:
; ATOM-NEXT: movb %cl, (%rsi)
; ATOM-NEXT: retq
;
; MCU-LABEL: clamp_i8:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: cmpl $127, %eax
; MCU-NEXT: movl $127, %ecx
; MCU-NEXT: jg .LBB22_2
-; MCU-NEXT: # BB#1:
+; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: .LBB22_2:
; MCU-NEXT: cmpl $-128, %ecx
; MCU-NEXT: movb $-128, %al
; MCU-NEXT: jl .LBB22_4
-; MCU-NEXT: # BB#3:
+; MCU-NEXT: # %bb.3:
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: .LBB22_4:
; MCU-NEXT: movb %al, (%edx)
@@ -933,7 +933,7 @@ define void @clamp_i8(i32 %src, i8* %dst) {
; reproducer for pr29002
define void @clamp(i32 %src, i16* %dst) {
; GENERIC-LABEL: clamp:
-; GENERIC: ## BB#0:
+; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $32767, %edi ## imm = 0x7FFF
; GENERIC-NEXT: movl $32767, %eax ## imm = 0x7FFF
; GENERIC-NEXT: cmovlel %edi, %eax
@@ -944,7 +944,7 @@ define void @clamp(i32 %src, i16* %dst) {
; GENERIC-NEXT: retq
;
; ATOM-LABEL: clamp:
-; ATOM: ## BB#0:
+; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $32767, %edi ## imm = 0x7FFF
; ATOM-NEXT: movl $32767, %eax ## imm = 0x7FFF
; ATOM-NEXT: cmovlel %edi, %eax
@@ -955,17 +955,17 @@ define void @clamp(i32 %src, i16* %dst) {
; ATOM-NEXT: retq
;
; MCU-LABEL: clamp:
-; MCU: # BB#0:
+; MCU: # %bb.0:
; MCU-NEXT: cmpl $32767, %eax # imm = 0x7FFF
; MCU-NEXT: movl $32767, %ecx # imm = 0x7FFF
; MCU-NEXT: jg .LBB23_2
-; MCU-NEXT: # BB#1:
+; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: .LBB23_2:
; MCU-NEXT: cmpl $-32768, %ecx # imm = 0x8000
; MCU-NEXT: movw $-32768, %ax # imm = 0x8000
; MCU-NEXT: jl .LBB23_4
-; MCU-NEXT: # BB#3:
+; MCU-NEXT: # %bb.3:
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: .LBB23_4:
; MCU-NEXT: movw %ax, (%edx)
@@ -987,7 +987,7 @@ define void @test19() {
; that code path, it can be deleted.
;
; CHECK-LABEL: test19:
-; CHECK: ## BB#0: ## %BB
+; CHECK: ## %bb.0: ## %BB
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: movb $1, %cl
; CHECK-NEXT: .p2align 4, 0x90
@@ -995,7 +995,7 @@ define void @test19() {
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: jne LBB24_1
-; CHECK-NEXT: ## BB#2: ## %CF250
+; CHECK-NEXT: ## %bb.2: ## %CF250
; CHECK-NEXT: ## in Loop: Header=BB24_1 Depth=1
; CHECK-NEXT: jne LBB24_1
; CHECK-NEXT: .p2align 4, 0x90
@@ -1004,11 +1004,11 @@ define void @test19() {
; CHECK-NEXT: cmpl %eax, %eax
; CHECK-NEXT: ucomiss %xmm0, %xmm0
; CHECK-NEXT: jp LBB24_3
-; CHECK-NEXT: ## BB#4: ## %CF244
+; CHECK-NEXT: ## %bb.4: ## %CF244
; CHECK-NEXT: retq
;
; MCU-LABEL: test19:
-; MCU: # BB#0: # %BB
+; MCU: # %bb.0: # %BB
; MCU-NEXT: movl $-1, %ecx
; MCU-NEXT: movb $1, %al
; MCU-NEXT: .p2align 4, 0x90
@@ -1016,10 +1016,10 @@ define void @test19() {
; MCU-NEXT: # =>This Inner Loop Header: Depth=1
; MCU-NEXT: testb %al, %al
; MCU-NEXT: jne .LBB24_1
-; MCU-NEXT: # BB#2: # %CF250
+; MCU-NEXT: # %bb.2: # %CF250
; MCU-NEXT: # in Loop: Header=BB24_1 Depth=1
; MCU-NEXT: jne .LBB24_1
-; MCU-NEXT: # BB#3: # %CF242.preheader
+; MCU-NEXT: # %bb.3: # %CF242.preheader
; MCU-NEXT: fldz
; MCU-NEXT: .p2align 4, 0x90
; MCU-NEXT: .LBB24_4: # %CF242
@@ -1030,7 +1030,7 @@ define void @test19() {
; MCU-NEXT: # kill: %ah<def> %ah<kill> %ax<kill>
; MCU-NEXT: sahf
; MCU-NEXT: jp .LBB24_4
-; MCU-NEXT: # BB#5: # %CF244
+; MCU-NEXT: # %bb.5: # %CF244
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: retl
BB:
@@ -1059,7 +1059,7 @@ CF244:
define i16 @select_xor_1(i16 %A, i8 %cond) {
; CHECK-LABEL: select_xor_1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl $43, %eax
; CHECK-NEXT: testb $1, %sil
@@ -1068,7 +1068,7 @@ define i16 @select_xor_1(i16 %A, i8 %cond) {
; CHECK-NEXT: retq
;
; MCU-LABEL: select_xor_1:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %edx
; MCU-NEXT: negl %edx
; MCU-NEXT: andl $43, %edx
@@ -1085,7 +1085,7 @@ entry:
define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_xor_2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: xorl %edi, %esi
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %esi
@@ -1093,7 +1093,7 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
; CHECK-NEXT: retq
;
; MCU-LABEL: select_xor_2:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
@@ -1109,7 +1109,7 @@ entry:
define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_or:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: orl %edi, %esi
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %esi
@@ -1117,7 +1117,7 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
; CHECK-NEXT: retq
;
; MCU-LABEL: select_or:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
@@ -1133,7 +1133,7 @@ entry:
define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
; CHECK-LABEL: select_or_1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: orl %edi, %esi
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %esi
@@ -1141,7 +1141,7 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
; CHECK-NEXT: retq
;
; MCU-LABEL: select_or_1:
-; MCU: # BB#0: # %entry
+; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
diff --git a/test/CodeGen/X86/select_const.ll b/test/CodeGen/X86/select_const.ll
index 264cc8175b7..ee74986ab5d 100644
--- a/test/CodeGen/X86/select_const.ll
+++ b/test/CodeGen/X86/select_const.ll
@@ -8,7 +8,7 @@
define i32 @select_0_or_1(i1 %cond) {
; CHECK-LABEL: select_0_or_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: notb %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: andl $1, %eax
@@ -19,7 +19,7 @@ define i32 @select_0_or_1(i1 %cond) {
define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_1_zeroext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: retq
@@ -29,7 +29,7 @@ define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
define i32 @select_0_or_1_signext(i1 signext %cond) {
; CHECK-LABEL: select_0_or_1_signext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: notb %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: andl $1, %eax
@@ -42,7 +42,7 @@ define i32 @select_0_or_1_signext(i1 signext %cond) {
define i32 @select_1_or_0(i1 %cond) {
; CHECK-LABEL: select_1_or_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -52,7 +52,7 @@ define i32 @select_1_or_0(i1 %cond) {
define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_1_or_0_zeroext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 1, i32 0
@@ -61,7 +61,7 @@ define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
define i32 @select_1_or_0_signext(i1 signext %cond) {
; CHECK-LABEL: select_1_or_0_signext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -73,7 +73,7 @@ define i32 @select_1_or_0_signext(i1 signext %cond) {
define i32 @select_0_or_neg1(i1 %cond) {
; CHECK-LABEL: select_0_or_neg1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal -1(%rdi), %eax
@@ -84,7 +84,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_neg1_zeroext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: retq
@@ -94,7 +94,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
define i32 @select_0_or_neg1_signext(i1 signext %cond) {
; CHECK-LABEL: select_0_or_neg1_signext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: notl %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -106,7 +106,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) {
define i32 @select_neg1_or_0(i1 %cond) {
; CHECK-LABEL: select_neg1_or_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: negl %edi
; CHECK-NEXT: movl %edi, %eax
@@ -117,7 +117,7 @@ define i32 @select_neg1_or_0(i1 %cond) {
define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_neg1_or_0_zeroext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: negl %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -127,7 +127,7 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
define i32 @select_neg1_or_0_signext(i1 signext %cond) {
; CHECK-LABEL: select_neg1_or_0_signext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 -1, i32 0
@@ -138,7 +138,7 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) {
define i32 @select_Cplus1_C(i1 %cond) {
; CHECK-LABEL: select_Cplus1_C:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal 41(%rdi), %eax
@@ -149,7 +149,7 @@ define i32 @select_Cplus1_C(i1 %cond) {
define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_Cplus1_C_zeroext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 41(%rdi), %eax
; CHECK-NEXT: retq
@@ -159,7 +159,7 @@ define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
define i32 @select_Cplus1_C_signext(i1 signext %cond) {
; CHECK-LABEL: select_Cplus1_C_signext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $41, %eax
; CHECK-NEXT: subl %edi, %eax
; CHECK-NEXT: retq
@@ -171,7 +171,7 @@ define i32 @select_Cplus1_C_signext(i1 signext %cond) {
define i32 @select_C_Cplus1(i1 %cond) {
; CHECK-LABEL: select_C_Cplus1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movl $42, %eax
; CHECK-NEXT: subl %edi, %eax
@@ -182,7 +182,7 @@ define i32 @select_C_Cplus1(i1 %cond) {
define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_C_Cplus1_zeroext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $42, %eax
; CHECK-NEXT: subl %edi, %eax
; CHECK-NEXT: retq
@@ -192,7 +192,7 @@ define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
define i32 @select_C_Cplus1_signext(i1 signext %cond) {
; CHECK-LABEL: select_C_Cplus1_signext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movl $42, %eax
; CHECK-NEXT: subl %edi, %eax
@@ -206,7 +206,7 @@ define i32 @select_C_Cplus1_signext(i1 signext %cond) {
define i32 @select_lea_2(i1 zeroext %cond) {
; CHECK-LABEL: select_lea_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: leal -1(%rax,%rax), %eax
@@ -217,7 +217,7 @@ define i32 @select_lea_2(i1 zeroext %cond) {
define i64 @select_lea_3(i1 zeroext %cond) {
; CHECK-LABEL: select_lea_3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: leaq -2(%rax,%rax,2), %rax
@@ -228,7 +228,7 @@ define i64 @select_lea_3(i1 zeroext %cond) {
define i32 @select_lea_5(i1 zeroext %cond) {
; CHECK-LABEL: select_lea_5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: leal -2(%rax,%rax,4), %eax
@@ -239,7 +239,7 @@ define i32 @select_lea_5(i1 zeroext %cond) {
define i64 @select_lea_9(i1 zeroext %cond) {
; CHECK-LABEL: select_lea_9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: leaq -7(%rax,%rax,8), %rax
@@ -252,7 +252,7 @@ define i64 @select_lea_9(i1 zeroext %cond) {
define i64 @sel_1_2(i64 %x, i64 %y) {
; CHECK-LABEL: sel_1_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpq $42, %rdi
; CHECK-NEXT: sbbq $0, %rsi
; CHECK-NEXT: leaq 2(%rsi), %rax
@@ -267,7 +267,7 @@ define i64 @sel_1_2(i64 %x, i64 %y) {
define i8 @sel_1_neg1(i32 %x) {
; CHECK-LABEL: sel_1_neg1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $42, %edi
; CHECK-NEXT: setg %al
; CHECK-NEXT: shlb $2, %al
@@ -282,7 +282,7 @@ define i8 @sel_1_neg1(i32 %x) {
define i16 @sel_neg1_1(i32 %x) {
; CHECK-LABEL: sel_neg1_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $43, %edi
; CHECK-NEXT: setl %al
@@ -298,7 +298,7 @@ define i16 @sel_neg1_1(i32 %x) {
define i32 @sel_1_neg1_32(i32 %x) {
; CHECK-LABEL: sel_1_neg1_32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $42, %edi
; CHECK-NEXT: setg %al
@@ -311,7 +311,7 @@ define i32 @sel_1_neg1_32(i32 %x) {
define i32 @sel_neg1_1_32(i32 %x) {
; CHECK-LABEL: sel_neg1_1_32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $43, %edi
; CHECK-NEXT: setl %al
@@ -328,7 +328,7 @@ define i32 @sel_neg1_1_32(i32 %x) {
define i8 @select_pow2_diff(i1 zeroext %cond) {
; CHECK-LABEL: select_pow2_diff:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shlb $4, %dil
; CHECK-NEXT: orb $3, %dil
; CHECK-NEXT: movl %edi, %eax
@@ -339,7 +339,7 @@ define i8 @select_pow2_diff(i1 zeroext %cond) {
define i16 @select_pow2_diff_invert(i1 zeroext %cond) {
; CHECK-LABEL: select_pow2_diff_invert:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: shll $6, %eax
@@ -352,7 +352,7 @@ define i16 @select_pow2_diff_invert(i1 zeroext %cond) {
define i32 @select_pow2_diff_neg(i1 zeroext %cond) {
; CHECK-LABEL: select_pow2_diff_neg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shlb $4, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $-25, %eax
@@ -363,7 +363,7 @@ define i32 @select_pow2_diff_neg(i1 zeroext %cond) {
define i64 @select_pow2_diff_neg_invert(i1 zeroext %cond) {
; CHECK-LABEL: select_pow2_diff_neg_invert:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: shlq $7, %rax
@@ -377,11 +377,11 @@ define i64 @select_pow2_diff_neg_invert(i1 zeroext %cond) {
define i8 @sel_67_neg125(i32 %x) {
; CHECK-LABEL: sel_67_neg125:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: cmpl $42, %edi
; CHECK-NEXT: movb $67, %al
; CHECK-NEXT: jg .LBB31_2
-; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: movb $-125, %al
; CHECK-NEXT: .LBB31_2:
; CHECK-NEXT: retq
@@ -396,7 +396,7 @@ define i8 @sel_67_neg125(i32 %x) {
define i32 @select_C1_C2(i1 %cond) {
; CHECK-LABEL: select_C1_C2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: movl $421, %ecx # imm = 0x1A5
; CHECK-NEXT: movl $42, %eax
@@ -408,7 +408,7 @@ define i32 @select_C1_C2(i1 %cond) {
define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_C1_C2_zeroext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: movl $421, %ecx # imm = 0x1A5
; CHECK-NEXT: movl $42, %eax
@@ -420,7 +420,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
define i32 @select_C1_C2_signext(i1 signext %cond) {
; CHECK-LABEL: select_C1_C2_signext:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: movl $421, %ecx # imm = 0x1A5
; CHECK-NEXT: movl $42, %eax
@@ -434,7 +434,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) {
define i64 @select_2_or_inc(i64 %x) {
; CHECK-LABEL: select_2_or_inc:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: cmpq $2, %rdi
; CHECK-NEXT: cmoveq %rdi, %rax
@@ -447,10 +447,10 @@ define i64 @select_2_or_inc(i64 %x) {
define <4 x i32> @sel_constants_add_constant_vec(i1 %cond) {
; CHECK-LABEL: sel_constants_add_constant_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne .LBB36_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [12,13,14,15]
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB36_1:
@@ -463,10 +463,10 @@ define <4 x i32> @sel_constants_add_constant_vec(i1 %cond) {
define <2 x double> @sel_constants_fmul_constant_vec(i1 %cond) {
; CHECK-LABEL: sel_constants_fmul_constant_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne .LBB37_1
-; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [1.188300e+02,3.454000e+01]
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB37_1:
@@ -482,7 +482,7 @@ define <2 x double> @sel_constants_fmul_constant_vec(i1 %cond) {
define i64 @opaque_constant(i1 %cond, i64 %x) {
; CHECK-LABEL: opaque_constant:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: movl $23, %ecx
; CHECK-NEXT: movq $-4, %rax
diff --git a/test/CodeGen/X86/setcc-combine.ll b/test/CodeGen/X86/setcc-combine.ll
index 38205c66073..a4a8e67d742 100644
--- a/test/CodeGen/X86/setcc-combine.ll
+++ b/test/CodeGen/X86/setcc-combine.ll
@@ -3,7 +3,7 @@
define i32 @test_eq_1(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_eq_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm0
@@ -20,7 +20,7 @@ define i32 @test_eq_1(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_ne_1(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_ne_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %eax
@@ -35,7 +35,7 @@ define i32 @test_ne_1(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_le_1(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_le_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: retq
%cmp = icmp slt <4 x i32> %A, %B
@@ -48,7 +48,7 @@ define i32 @test_le_1(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_ge_1(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_ge_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm0
@@ -65,7 +65,7 @@ define i32 @test_ge_1(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_lt_1(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_lt_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %eax
@@ -80,7 +80,7 @@ define i32 @test_lt_1(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_gt_1(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_gt_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: retq
%cmp = icmp slt <4 x i32> %A, %B
@@ -93,7 +93,7 @@ define i32 @test_gt_1(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_eq_2(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_eq_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm0, %xmm1
@@ -110,7 +110,7 @@ define i32 @test_eq_2(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_ne_2(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_ne_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %eax
@@ -125,7 +125,7 @@ define i32 @test_ne_2(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_le_2(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_le_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm0, %xmm1
@@ -142,7 +142,7 @@ define i32 @test_le_2(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_ge_2(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_ge_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: retq
%cmp = icmp slt <4 x i32> %B, %A
@@ -155,7 +155,7 @@ define i32 @test_ge_2(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_lt_2(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_lt_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %eax
@@ -170,7 +170,7 @@ define i32 @test_lt_2(<4 x i32> %A, <4 x i32> %B) {
define i32 @test_gt_2(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test_gt_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %eax
diff --git a/test/CodeGen/X86/setcc-logic.ll b/test/CodeGen/X86/setcc-logic.ll
index 4d1e5ba1654..9933b9cffc5 100644
--- a/test/CodeGen/X86/setcc-logic.ll
+++ b/test/CodeGen/X86/setcc-logic.ll
@@ -3,7 +3,7 @@
define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_bits_clear:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: orl %esi, %edi
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
@@ -15,7 +15,7 @@ define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) nounwind {
define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_sign_bits_clear:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: orl %esi, %edi
; CHECK-NEXT: setns %al
; CHECK-NEXT: retq
@@ -27,7 +27,7 @@ define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) nounwind {
define zeroext i1 @all_bits_set(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_bits_set:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl %esi, %edi
; CHECK-NEXT: cmpl $-1, %edi
; CHECK-NEXT: sete %al
@@ -40,7 +40,7 @@ define zeroext i1 @all_bits_set(i32 %P, i32 %Q) nounwind {
define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_sign_bits_set:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl %esi, %edi
; CHECK-NEXT: shrl $31, %edi
; CHECK-NEXT: movl %edi, %eax
@@ -53,7 +53,7 @@ define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) nounwind {
define zeroext i1 @any_bits_set(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_bits_set:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: orl %esi, %edi
; CHECK-NEXT: setne %al
; CHECK-NEXT: retq
@@ -65,7 +65,7 @@ define zeroext i1 @any_bits_set(i32 %P, i32 %Q) nounwind {
define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_sign_bits_set:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: orl %esi, %edi
; CHECK-NEXT: shrl $31, %edi
; CHECK-NEXT: movl %edi, %eax
@@ -78,7 +78,7 @@ define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) nounwind {
define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_bits_clear:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl %esi, %edi
; CHECK-NEXT: cmpl $-1, %edi
; CHECK-NEXT: setne %al
@@ -91,7 +91,7 @@ define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) nounwind {
define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_sign_bits_clear:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testl %esi, %edi
; CHECK-NEXT: setns %al
; CHECK-NEXT: retq
@@ -104,10 +104,10 @@ define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) nounwind {
; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
define i32 @all_bits_clear_branch(i32* %P, i32* %Q) nounwind {
; CHECK-LABEL: all_bits_clear_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orq %rsi, %rdi
; CHECK-NEXT: jne .LBB8_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB8_2: # %return
@@ -128,13 +128,13 @@ return:
define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_sign_bits_clear_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: js .LBB9_3
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: js .LBB9_3
-; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: # %bb.2: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB9_3: # %return
@@ -155,13 +155,13 @@ return:
define i32 @all_bits_set_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_bits_set_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpl $-1, %edi
; CHECK-NEXT: jne .LBB10_3
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: cmpl $-1, %esi
; CHECK-NEXT: jne .LBB10_3
-; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: # %bb.2: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB10_3: # %return
@@ -182,13 +182,13 @@ return:
define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: all_sign_bits_set_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jns .LBB11_3
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: jns .LBB11_3
-; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: # %bb.2: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB11_3: # %return
@@ -210,10 +210,10 @@ return:
; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
define i32 @any_bits_set_branch(i32* %P, i32* %Q) nounwind {
; CHECK-LABEL: any_bits_set_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: orq %rsi, %rdi
; CHECK-NEXT: je .LBB12_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB12_2: # %return
@@ -234,13 +234,13 @@ return:
define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_sign_bits_set_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: js .LBB13_2
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: js .LBB13_2
-; CHECK-NEXT: # BB#3: # %return
+; CHECK-NEXT: # %bb.3: # %return
; CHECK-NEXT: movl $192, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB13_2: # %bb1
@@ -261,13 +261,13 @@ return:
define i32 @any_bits_clear_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_bits_clear_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpl $-1, %edi
; CHECK-NEXT: jne .LBB14_2
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: cmpl $-1, %esi
; CHECK-NEXT: jne .LBB14_2
-; CHECK-NEXT: # BB#3: # %return
+; CHECK-NEXT: # %bb.3: # %return
; CHECK-NEXT: movl $192, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB14_2: # %bb1
@@ -288,13 +288,13 @@ return:
define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) nounwind {
; CHECK-LABEL: any_sign_bits_clear_branch:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jns .LBB15_2
-; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: jns .LBB15_2
-; CHECK-NEXT: # BB#3: # %return
+; CHECK-NEXT: # %bb.3: # %return
; CHECK-NEXT: movl $192, %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB15_2: # %bb1
@@ -315,7 +315,7 @@ return:
define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
; CHECK-LABEL: all_bits_clear_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
@@ -328,7 +328,7 @@ define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
; CHECK-LABEL: all_sign_bits_clear_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
@@ -341,7 +341,7 @@ define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
; CHECK-LABEL: all_bits_set_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
@@ -354,7 +354,7 @@ define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
; CHECK-LABEL: all_sign_bits_set_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
@@ -368,7 +368,7 @@ define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
; CHECK-LABEL: any_bits_set_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
@@ -383,7 +383,7 @@ define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
; CHECK-LABEL: any_sign_bits_set_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
@@ -397,7 +397,7 @@ define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
; CHECK-LABEL: any_bits_clear_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
@@ -411,7 +411,7 @@ define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
; CHECK-LABEL: any_sign_bits_clear_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
@@ -424,7 +424,7 @@ define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) nounwind {
; CHECK-LABEL: ne_neg1_and_ne_zero:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: incq %rdi
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: seta %al
@@ -439,7 +439,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) nounwind {
define zeroext i1 @and_eq(i8 %a, i8 %b, i8 %c, i8 %d) nounwind {
; CHECK-LABEL: and_eq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %esi, %edi
; CHECK-NEXT: xorl %ecx, %edx
; CHECK-NEXT: orb %dl, %dil
@@ -453,7 +453,7 @@ define zeroext i1 @and_eq(i8 %a, i8 %b, i8 %c, i8 %d) nounwind {
define zeroext i1 @or_ne(i8 %a, i8 %b, i8 %c, i8 %d) nounwind {
; CHECK-LABEL: or_ne:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %esi, %edi
; CHECK-NEXT: xorl %ecx, %edx
; CHECK-NEXT: orb %dl, %dil
@@ -469,7 +469,7 @@ define zeroext i1 @or_ne(i8 %a, i8 %b, i8 %c, i8 %d) nounwind {
define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind {
; CHECK-LABEL: and_eq_vec:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-NEXT: pcmpeqd %xmm3, %xmm2
; CHECK-NEXT: pand %xmm2, %xmm0
diff --git a/test/CodeGen/X86/setcc-lowering.ll b/test/CodeGen/X86/setcc-lowering.ll
index f9222c4dec5..67e497aee0b 100644
--- a/test/CodeGen/X86/setcc-lowering.ll
+++ b/test/CodeGen/X86/setcc-lowering.ll
@@ -8,7 +8,7 @@
define <8 x i16> @pr25080(<8 x i32> %a) {
; AVX-LABEL: pr25080:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -22,7 +22,7 @@ define <8 x i16> @pr25080(<8 x i32> %a) {
; AVX-NEXT: retq
;
; KNL-32-LABEL: pr25080:
-; KNL-32: # BB#0: # %entry
+; KNL-32: # %bb.0: # %entry
; KNL-32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [8388607,8388607,8388607,8388607,8388607,8388607,8388607,8388607]
; KNL-32-NEXT: vptestnmd %zmm1, %zmm0, %k0
@@ -42,7 +42,7 @@ entry:
define void @pr26232(i64 %a, <16 x i1> %b) {
; AVX-LABEL: pr26232:
-; AVX: # BB#0: # %for_loop599.preheader
+; AVX: # %bb.0: # %for_loop599.preheader
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX-NEXT: .p2align 4, 0x90
@@ -60,11 +60,11 @@ define void @pr26232(i64 %a, <16 x i1> %b) {
; AVX-NEXT: vpmovmskb %xmm3, %eax
; AVX-NEXT: testw %ax, %ax
; AVX-NEXT: jne .LBB1_1
-; AVX-NEXT: # BB#2: # %for_exit600
+; AVX-NEXT: # %bb.2: # %for_exit600
; AVX-NEXT: retq
;
; KNL-32-LABEL: pr26232:
-; KNL-32: # BB#0: # %for_loop599.preheader
+; KNL-32: # %bb.0: # %for_loop599.preheader
; KNL-32-NEXT: pushl %esi
; KNL-32-NEXT: .cfi_def_cfa_offset 8
; KNL-32-NEXT: .cfi_offset %esi, -8
@@ -87,7 +87,7 @@ define void @pr26232(i64 %a, <16 x i1> %b) {
; KNL-32-NEXT: kmovw %k1, %esi
; KNL-32-NEXT: testw %si, %si
; KNL-32-NEXT: jne .LBB1_1
-; KNL-32-NEXT: # BB#2: # %for_exit600
+; KNL-32-NEXT: # %bb.2: # %for_exit600
; KNL-32-NEXT: popl %esi
; KNL-32-NEXT: retl
allocas:
diff --git a/test/CodeGen/X86/setcc-narrowing.ll b/test/CodeGen/X86/setcc-narrowing.ll
index c914ef37631..52f143f8b32 100644
--- a/test/CodeGen/X86/setcc-narrowing.ll
+++ b/test/CodeGen/X86/setcc-narrowing.ll
@@ -6,7 +6,7 @@
define i32 @t1() nounwind ssp {
; CHECK-LABEL: t1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $0, _t1.global
; CHECK-NEXT: setne %al
diff --git a/test/CodeGen/X86/setcc-wide-types.ll b/test/CodeGen/X86/setcc-wide-types.ll
index 332bf2887fb..f935db72dcb 100644
--- a/test/CodeGen/X86/setcc-wide-types.ll
+++ b/test/CodeGen/X86/setcc-wide-types.ll
@@ -6,7 +6,7 @@
define i32 @ne_i128(<2 x i64> %x, <2 x i64> %y) {
; SSE2-LABEL: ne_i128:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
; SSE2-NEXT: pmovmskb %xmm0, %ecx
; SSE2-NEXT: xorl %eax, %eax
@@ -15,7 +15,7 @@ define i32 @ne_i128(<2 x i64> %x, <2 x i64> %y) {
; SSE2-NEXT: retq
;
; AVX2-LABEL: ne_i128:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %ecx
; AVX2-NEXT: xorl %eax, %eax
@@ -31,7 +31,7 @@ define i32 @ne_i128(<2 x i64> %x, <2 x i64> %y) {
define i32 @eq_i128(<2 x i64> %x, <2 x i64> %y) {
; SSE2-LABEL: eq_i128:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
; SSE2-NEXT: pmovmskb %xmm0, %ecx
; SSE2-NEXT: xorl %eax, %eax
@@ -40,7 +40,7 @@ define i32 @eq_i128(<2 x i64> %x, <2 x i64> %y) {
; SSE2-NEXT: retq
;
; AVX2-LABEL: eq_i128:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %ecx
; AVX2-NEXT: xorl %eax, %eax
@@ -56,7 +56,7 @@ define i32 @eq_i128(<2 x i64> %x, <2 x i64> %y) {
define i32 @ne_i256(<4 x i64> %x, <4 x i64> %y) {
; SSE2-LABEL: ne_i256:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm4, %rax
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
@@ -81,7 +81,7 @@ define i32 @ne_i256(<4 x i64> %x, <4 x i64> %y) {
; SSE2-NEXT: retq
;
; AVX2-LABEL: ne_i256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: xorl %eax, %eax
@@ -98,7 +98,7 @@ define i32 @ne_i256(<4 x i64> %x, <4 x i64> %y) {
define i32 @eq_i256(<4 x i64> %x, <4 x i64> %y) {
; SSE2-LABEL: eq_i256:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
; SSE2-NEXT: movq %xmm4, %rax
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
@@ -123,7 +123,7 @@ define i32 @eq_i256(<4 x i64> %x, <4 x i64> %y) {
; SSE2-NEXT: retq
;
; AVX2-LABEL: eq_i256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: xorl %eax, %eax
diff --git a/test/CodeGen/X86/setcc.ll b/test/CodeGen/X86/setcc.ll
index fab4f413725..a1d27d38fc5 100644
--- a/test/CodeGen/X86/setcc.ll
+++ b/test/CodeGen/X86/setcc.ll
@@ -7,7 +7,7 @@
define zeroext i16 @t1(i16 zeroext %x) nounwind readnone ssp {
; CHECK-LABEL: t1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $26, %edi
; CHECK-NEXT: seta %al
@@ -20,7 +20,7 @@ define zeroext i16 @t1(i16 zeroext %x) nounwind readnone ssp {
define zeroext i16 @t2(i16 zeroext %x) nounwind readnone ssp {
; CHECK-LABEL: t2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $26, %edi
; CHECK-NEXT: setb %al
@@ -33,7 +33,7 @@ define zeroext i16 @t2(i16 zeroext %x) nounwind readnone ssp {
define i64 @t3(i64 %x) nounwind readnone ssp {
; CHECK-LABEL: t3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $18, %rdi
; CHECK-NEXT: setb %al
@@ -48,7 +48,7 @@ define i64 @t3(i64 %x) nounwind readnone ssp {
define i32 @t4(i32 %a) {
; CHECK-LABEL: t4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movq _v4@{{.*}}(%rip), %rax
; CHECK-NEXT: cmpl $1, (%rax)
; CHECK-NEXT: movw $1, %ax
@@ -67,7 +67,7 @@ define i32 @t4(i32 %a) {
define i8 @t5(i32 %a) #0 {
; CHECK-LABEL: t5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setns %al
; CHECK-NEXT: retq
@@ -79,7 +79,7 @@ define i8 @t5(i32 %a) #0 {
define zeroext i1 @t6(i32 %a) #0 {
; CHECK-LABEL: t6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setns %al
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/sext-i1.ll b/test/CodeGen/X86/sext-i1.ll
index 5aa51bcd721..5b3897df32a 100644
--- a/test/CodeGen/X86/sext-i1.ll
+++ b/test/CodeGen/X86/sext-i1.ll
@@ -7,13 +7,13 @@
define i32 @t1(i32 %x) nounwind readnone ssp {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpl $1, {{[0-9]+}}(%esp)
; X32-NEXT: sbbl %eax, %eax
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl $1, %edi
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: retq
@@ -24,13 +24,13 @@ define i32 @t1(i32 %x) nounwind readnone ssp {
define i32 @t2(i32 %x) nounwind readnone ssp {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpl $1, {{[0-9]+}}(%esp)
; X32-NEXT: sbbl %eax, %eax
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl $1, %edi
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: retq
@@ -41,7 +41,7 @@ define i32 @t2(i32 %x) nounwind readnone ssp {
define i32 @t3() nounwind readonly {
; X32-LABEL: t3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: cmpl $1, %eax
; X32-NEXT: sbbl %eax, %eax
; X32-NEXT: cmpl %eax, %eax
@@ -50,7 +50,7 @@ define i32 @t3() nounwind readonly {
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testl %eax, %eax
; X64-NEXT: sete %al
@@ -76,7 +76,7 @@ if.end:
define i32 @t4(i64 %x) nounwind readnone ssp {
; X32-LABEL: t4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
@@ -85,7 +85,7 @@ define i32 @t4(i64 %x) nounwind readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpq $1, %rdi
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: retq
@@ -96,14 +96,14 @@ define i32 @t4(i64 %x) nounwind readnone ssp {
define i64 @t5(i32 %x) nounwind readnone ssp {
; X32-LABEL: t5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpl $1, {{[0-9]+}}(%esp)
; X32-NEXT: sbbl %eax, %eax
; X32-NEXT: movl %eax, %edx
; X32-NEXT: retl
;
; X64-LABEL: t5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpl $1, %edi
; X64-NEXT: sbbq %rax, %rax
; X64-NEXT: retq
@@ -116,14 +116,14 @@ define i64 @t5(i32 %x) nounwind readnone ssp {
define i32 @select_0_or_1s(i1 %cond) {
; X32-LABEL: select_0_or_1s:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: decl %eax
; X32-NEXT: retl
;
; X64-LABEL: select_0_or_1s:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: andl $1, %edi
; X64-NEXT: leal -1(%rdi), %eax
@@ -137,13 +137,13 @@ define i32 @select_0_or_1s(i1 %cond) {
define i32 @select_0_or_1s_zeroext(i1 zeroext %cond) {
; X32-LABEL: select_0_or_1s_zeroext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: decl %eax
; X32-NEXT: retl
;
; X64-LABEL: select_0_or_1s_zeroext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: retq
@@ -156,7 +156,7 @@ define i32 @select_0_or_1s_zeroext(i1 zeroext %cond) {
define i32 @select_0_or_1s_signext(i1 signext %cond) {
; X32-LABEL: select_0_or_1s_signext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $1, %al
; X32-NEXT: movzbl %al, %eax
@@ -164,7 +164,7 @@ define i32 @select_0_or_1s_signext(i1 signext %cond) {
; X32-NEXT: retl
;
; X64-LABEL: select_0_or_1s_signext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: notl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/sext-setcc-self.ll b/test/CodeGen/X86/sext-setcc-self.ll
index 9cbd3d85b38..452b600ffb5 100644
--- a/test/CodeGen/X86/sext-setcc-self.ll
+++ b/test/CodeGen/X86/sext-setcc-self.ll
@@ -3,7 +3,7 @@
define <4 x i32> @test_ueq(<4 x float> %in) {
; CHECK-LABEL: test_ueq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: retq
%t0 = fcmp ueq <4 x float> %in, %in
@@ -13,7 +13,7 @@ define <4 x i32> @test_ueq(<4 x float> %in) {
define <4 x i32> @test_uge(<4 x float> %in) {
; CHECK-LABEL: test_uge:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: retq
%t0 = fcmp uge <4 x float> %in, %in
@@ -23,7 +23,7 @@ define <4 x i32> @test_uge(<4 x float> %in) {
define <4 x i32> @test_ule(<4 x float> %in) {
; CHECK-LABEL: test_ule:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: retq
%t0 = fcmp ule <4 x float> %in, %in
@@ -33,7 +33,7 @@ define <4 x i32> @test_ule(<4 x float> %in) {
define <4 x i32> @test_one(<4 x float> %in) {
; CHECK-LABEL: test_one:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
%t0 = fcmp one <4 x float> %in, %in
@@ -43,7 +43,7 @@ define <4 x i32> @test_one(<4 x float> %in) {
define <4 x i32> @test_ogt(<4 x float> %in) {
; CHECK-LABEL: test_ogt:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
%t0 = fcmp ogt <4 x float> %in, %in
@@ -53,7 +53,7 @@ define <4 x i32> @test_ogt(<4 x float> %in) {
define <4 x i32> @test_olt(<4 x float> %in) {
; CHECK-LABEL: test_olt:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
%t0 = fcmp olt <4 x float> %in, %in
diff --git a/test/CodeGen/X86/sha-schedule.ll b/test/CodeGen/X86/sha-schedule.ll
index 3f1cad276bf..7dd492d5584 100644
--- a/test/CodeGen/X86/sha-schedule.ll
+++ b/test/CodeGen/X86/sha-schedule.ll
@@ -10,25 +10,25 @@
define <4 x i32> @test_sha1msg1(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_sha1msg1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sha1msg1 %xmm1, %xmm0
; GENERIC-NEXT: sha1msg1 (%rdi), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; GOLDMONT-LABEL: test_sha1msg1:
-; GOLDMONT: # BB#0:
+; GOLDMONT: # %bb.0:
; GOLDMONT-NEXT: sha1msg1 %xmm1, %xmm0
; GOLDMONT-NEXT: sha1msg1 (%rdi), %xmm0
; GOLDMONT-NEXT: retq # sched: [4:1.00]
;
; CANNONLAKE-LABEL: test_sha1msg1:
-; CANNONLAKE: # BB#0:
+; CANNONLAKE: # %bb.0:
; CANNONLAKE-NEXT: sha1msg1 %xmm1, %xmm0
; CANNONLAKE-NEXT: sha1msg1 (%rdi), %xmm0
; CANNONLAKE-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_sha1msg1:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sha1msg1 %xmm1, %xmm0 # sched: [2:1.00]
; ZNVER1-NEXT: sha1msg1 (%rdi), %xmm0 # sched: [9:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -41,25 +41,25 @@ declare <4 x i32> @llvm.x86.sha1msg1(<4 x i32>, <4 x i32>)
define <4 x i32> @test_sha1msg2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_sha1msg2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sha1msg2 %xmm1, %xmm0
; GENERIC-NEXT: sha1msg2 (%rdi), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; GOLDMONT-LABEL: test_sha1msg2:
-; GOLDMONT: # BB#0:
+; GOLDMONT: # %bb.0:
; GOLDMONT-NEXT: sha1msg2 %xmm1, %xmm0
; GOLDMONT-NEXT: sha1msg2 (%rdi), %xmm0
; GOLDMONT-NEXT: retq # sched: [4:1.00]
;
; CANNONLAKE-LABEL: test_sha1msg2:
-; CANNONLAKE: # BB#0:
+; CANNONLAKE: # %bb.0:
; CANNONLAKE-NEXT: sha1msg2 %xmm1, %xmm0
; CANNONLAKE-NEXT: sha1msg2 (%rdi), %xmm0
; CANNONLAKE-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_sha1msg2:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sha1msg2 %xmm1, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: sha1msg2 (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -72,25 +72,25 @@ declare <4 x i32> @llvm.x86.sha1msg2(<4 x i32>, <4 x i32>)
define <4 x i32> @test_sha1nexte(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_sha1nexte:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sha1nexte %xmm1, %xmm0
; GENERIC-NEXT: sha1nexte (%rdi), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; GOLDMONT-LABEL: test_sha1nexte:
-; GOLDMONT: # BB#0:
+; GOLDMONT: # %bb.0:
; GOLDMONT-NEXT: sha1nexte %xmm1, %xmm0
; GOLDMONT-NEXT: sha1nexte (%rdi), %xmm0
; GOLDMONT-NEXT: retq # sched: [4:1.00]
;
; CANNONLAKE-LABEL: test_sha1nexte:
-; CANNONLAKE: # BB#0:
+; CANNONLAKE: # %bb.0:
; CANNONLAKE-NEXT: sha1nexte %xmm1, %xmm0
; CANNONLAKE-NEXT: sha1nexte (%rdi), %xmm0
; CANNONLAKE-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_sha1nexte:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sha1nexte %xmm1, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: sha1nexte (%rdi), %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -103,25 +103,25 @@ declare <4 x i32> @llvm.x86.sha1nexte(<4 x i32>, <4 x i32>)
define <4 x i32> @test_sha1rnds4(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_sha1rnds4:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sha1rnds4 $3, %xmm1, %xmm0
; GENERIC-NEXT: sha1rnds4 $3, (%rdi), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; GOLDMONT-LABEL: test_sha1rnds4:
-; GOLDMONT: # BB#0:
+; GOLDMONT: # %bb.0:
; GOLDMONT-NEXT: sha1rnds4 $3, %xmm1, %xmm0
; GOLDMONT-NEXT: sha1rnds4 $3, (%rdi), %xmm0
; GOLDMONT-NEXT: retq # sched: [4:1.00]
;
; CANNONLAKE-LABEL: test_sha1rnds4:
-; CANNONLAKE: # BB#0:
+; CANNONLAKE: # %bb.0:
; CANNONLAKE-NEXT: sha1rnds4 $3, %xmm1, %xmm0
; CANNONLAKE-NEXT: sha1rnds4 $3, (%rdi), %xmm0
; CANNONLAKE-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_sha1rnds4:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sha1rnds4 $3, %xmm1, %xmm0 # sched: [6:1.00]
; ZNVER1-NEXT: sha1rnds4 $3, (%rdi), %xmm0 # sched: [13:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -138,25 +138,25 @@ declare <4 x i32> @llvm.x86.sha1rnds4(<4 x i32>, <4 x i32>, i8)
define <4 x i32> @test_sha256msg1(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_sha256msg1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sha256msg1 %xmm1, %xmm0
; GENERIC-NEXT: sha256msg1 (%rdi), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; GOLDMONT-LABEL: test_sha256msg1:
-; GOLDMONT: # BB#0:
+; GOLDMONT: # %bb.0:
; GOLDMONT-NEXT: sha256msg1 %xmm1, %xmm0
; GOLDMONT-NEXT: sha256msg1 (%rdi), %xmm0
; GOLDMONT-NEXT: retq # sched: [4:1.00]
;
; CANNONLAKE-LABEL: test_sha256msg1:
-; CANNONLAKE: # BB#0:
+; CANNONLAKE: # %bb.0:
; CANNONLAKE-NEXT: sha256msg1 %xmm1, %xmm0
; CANNONLAKE-NEXT: sha256msg1 (%rdi), %xmm0
; CANNONLAKE-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_sha256msg1:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sha256msg1 %xmm1, %xmm0 # sched: [2:1.00]
; ZNVER1-NEXT: sha256msg1 (%rdi), %xmm0 # sched: [9:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -169,25 +169,25 @@ declare <4 x i32> @llvm.x86.sha256msg1(<4 x i32>, <4 x i32>)
define <4 x i32> @test_sha256msg2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_sha256msg2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sha256msg2 %xmm1, %xmm0
; GENERIC-NEXT: sha256msg2 (%rdi), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; GOLDMONT-LABEL: test_sha256msg2:
-; GOLDMONT: # BB#0:
+; GOLDMONT: # %bb.0:
; GOLDMONT-NEXT: sha256msg2 %xmm1, %xmm0
; GOLDMONT-NEXT: sha256msg2 (%rdi), %xmm0
; GOLDMONT-NEXT: retq # sched: [4:1.00]
;
; CANNONLAKE-LABEL: test_sha256msg2:
-; CANNONLAKE: # BB#0:
+; CANNONLAKE: # %bb.0:
; CANNONLAKE-NEXT: sha256msg2 %xmm1, %xmm0
; CANNONLAKE-NEXT: sha256msg2 (%rdi), %xmm0
; CANNONLAKE-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_sha256msg2:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sha256msg2 %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: sha256msg2 (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -200,7 +200,7 @@ declare <4 x i32> @llvm.x86.sha256msg2(<4 x i32>, <4 x i32>)
define <4 x i32> @test_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> *%a3) {
; GENERIC-LABEL: test_sha256rnds2:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movaps %xmm0, %xmm3 # sched: [1:1.00]
; GENERIC-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3
@@ -209,7 +209,7 @@ define <4 x i32> @test_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2,
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; GOLDMONT-LABEL: test_sha256rnds2:
-; GOLDMONT: # BB#0:
+; GOLDMONT: # %bb.0:
; GOLDMONT-NEXT: movaps %xmm0, %xmm3 # sched: [1:1.00]
; GOLDMONT-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00]
; GOLDMONT-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3
@@ -218,7 +218,7 @@ define <4 x i32> @test_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2,
; GOLDMONT-NEXT: retq # sched: [4:1.00]
;
; CANNONLAKE-LABEL: test_sha256rnds2:
-; CANNONLAKE: # BB#0:
+; CANNONLAKE: # %bb.0:
; CANNONLAKE-NEXT: vmovaps %xmm0, %xmm3 # sched: [1:1.00]
; CANNONLAKE-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:1.00]
; CANNONLAKE-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3
@@ -227,7 +227,7 @@ define <4 x i32> @test_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2,
; CANNONLAKE-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_sha256rnds2:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovaps %xmm0, %xmm3 # sched: [1:0.50]
; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3 # sched: [4:1.00]
diff --git a/test/CodeGen/X86/shift-and.ll b/test/CodeGen/X86/shift-and.ll
index f1f508c225d..1e448d39f77 100644
--- a/test/CodeGen/X86/shift-and.ll
+++ b/test/CodeGen/X86/shift-and.ll
@@ -4,14 +4,14 @@
define i32 @t1(i32 %t, i32 %val) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shll %cl, %eax
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shll %cl, %esi
; X64-NEXT: movl %esi, %eax
@@ -23,14 +23,14 @@ define i32 @t1(i32 %t, i32 %val) nounwind {
define i32 @t2(i32 %t, i32 %val) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shll %cl, %eax
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shll %cl, %esi
; X64-NEXT: movl %esi, %eax
@@ -44,13 +44,13 @@ define i32 @t2(i32 %t, i32 %val) nounwind {
define void @t3(i16 %t) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: sarw %cl, X
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: sarw %cl, {{.*}}(%rip)
; X64-NEXT: retq
@@ -63,7 +63,7 @@ define void @t3(i16 %t) nounwind {
define i64 @t4(i64 %t, i64 %val) nounwind {
; X32-LABEL: t4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -73,7 +73,7 @@ define i64 @t4(i64 %t, i64 %val) nounwind {
; X32-NEXT: shrdl %cl, %esi, %eax
; X32-NEXT: testb $32, %cl
; X32-NEXT: je .LBB3_2
-; X32-NEXT: # BB#1:
+; X32-NEXT: # %bb.1:
; X32-NEXT: movl %edx, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: .LBB3_2:
@@ -81,7 +81,7 @@ define i64 @t4(i64 %t, i64 %val) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shrq %cl, %rsi
; X64-NEXT: movq %rsi, %rax
@@ -93,7 +93,7 @@ define i64 @t4(i64 %t, i64 %val) nounwind {
define i64 @t5(i64 %t, i64 %val) nounwind {
; X32-LABEL: t5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -103,7 +103,7 @@ define i64 @t5(i64 %t, i64 %val) nounwind {
; X32-NEXT: shrdl %cl, %esi, %eax
; X32-NEXT: testb $32, %cl
; X32-NEXT: je .LBB4_2
-; X32-NEXT: # BB#1:
+; X32-NEXT: # %bb.1:
; X32-NEXT: movl %edx, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: .LBB4_2:
@@ -111,7 +111,7 @@ define i64 @t5(i64 %t, i64 %val) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shrq %cl, %rsi
; X64-NEXT: movq %rsi, %rax
@@ -123,7 +123,7 @@ define i64 @t5(i64 %t, i64 %val) nounwind {
define void @t5ptr(i64 %t, i64* %ptr) nounwind {
; X32-LABEL: t5ptr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
@@ -135,7 +135,7 @@ define void @t5ptr(i64 %t, i64* %ptr) nounwind {
; X32-NEXT: shrdl %cl, %edi, %edx
; X32-NEXT: testb $32, %cl
; X32-NEXT: je .LBB5_2
-; X32-NEXT: # BB#1:
+; X32-NEXT: # %bb.1:
; X32-NEXT: movl %esi, %edx
; X32-NEXT: xorl %esi, %esi
; X32-NEXT: .LBB5_2:
@@ -146,7 +146,7 @@ define void @t5ptr(i64 %t, i64* %ptr) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t5ptr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shrq %cl, (%rsi)
; X64-NEXT: retq
@@ -161,7 +161,7 @@ define void @t5ptr(i64 %t, i64* %ptr) nounwind {
; rdar://11866926
define i64 @t6(i64 %key, i64* nocapture %val) nounwind {
; X32-LABEL: t6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -181,7 +181,7 @@ define i64 @t6(i64 %key, i64* nocapture %val) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shrq $3, %rdi
; X64-NEXT: movq (%rsi), %rax
; X64-NEXT: decq %rax
@@ -196,7 +196,7 @@ define i64 @t6(i64 %key, i64* nocapture %val) nounwind {
define i64 @big_mask_constant(i64 %x) nounwind {
; X32-LABEL: big_mask_constant:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $4, %eax
; X32-NEXT: shll $25, %eax
@@ -204,7 +204,7 @@ define i64 @big_mask_constant(i64 %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: big_mask_constant:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shrq $7, %rdi
; X64-NEXT: andl $134217728, %edi # imm = 0x8000000
; X64-NEXT: movq %rdi, %rax
diff --git a/test/CodeGen/X86/shift-bmi2.ll b/test/CodeGen/X86/shift-bmi2.ll
index 008dce7bb60..07e60e345c5 100644
--- a/test/CodeGen/X86/shift-bmi2.ll
+++ b/test/CodeGen/X86/shift-bmi2.ll
@@ -4,13 +4,13 @@
define i32 @shl32(i32 %x, i32 %shamt) nounwind uwtable readnone {
; BMI2-LABEL: shl32:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: shl32:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shlxl %esi, %edi, %eax
; BMI264-NEXT: retq
%shl = shl i32 %x, %shamt
@@ -19,13 +19,13 @@ define i32 @shl32(i32 %x, i32 %shamt) nounwind uwtable readnone {
define i32 @shl32i(i32 %x) nounwind uwtable readnone {
; BMI2-LABEL: shl32i:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: shll $5, %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: shl32i:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shll $5, %edi
; BMI264-NEXT: movl %edi, %eax
; BMI264-NEXT: retq
@@ -35,14 +35,14 @@ define i32 @shl32i(i32 %x) nounwind uwtable readnone {
define i32 @shl32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
; BMI2-LABEL: shl32p:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
; BMI2-NEXT: shlxl %ecx, (%eax), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: shl32p:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shlxl %esi, (%rdi), %eax
; BMI264-NEXT: retq
%x = load i32, i32* %p
@@ -52,14 +52,14 @@ define i32 @shl32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
define i32 @shl32pi(i32* %p) nounwind uwtable readnone {
; BMI2-LABEL: shl32pi:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: movl (%eax), %eax
; BMI2-NEXT: shll $5, %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: shl32pi:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: movl (%rdi), %eax
; BMI264-NEXT: shll $5, %eax
; BMI264-NEXT: retq
@@ -70,7 +70,7 @@ define i32 @shl32pi(i32* %p) nounwind uwtable readnone {
define i64 @shl64(i64 %x, i64 %shamt) nounwind uwtable readnone {
; BMI264-LABEL: shl64:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shlxq %rsi, %rdi, %rax
; BMI264-NEXT: retq
%shl = shl i64 %x, %shamt
@@ -79,7 +79,7 @@ define i64 @shl64(i64 %x, i64 %shamt) nounwind uwtable readnone {
define i64 @shl64i(i64 %x) nounwind uwtable readnone {
; BMI264-LABEL: shl64i:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shlq $7, %rdi
; BMI264-NEXT: movq %rdi, %rax
; BMI264-NEXT: retq
@@ -89,7 +89,7 @@ define i64 @shl64i(i64 %x) nounwind uwtable readnone {
define i64 @shl64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
; BMI264-LABEL: shl64p:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shlxq %rsi, (%rdi), %rax
; BMI264-NEXT: retq
%x = load i64, i64* %p
@@ -99,7 +99,7 @@ define i64 @shl64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
define i64 @shl64pi(i64* %p) nounwind uwtable readnone {
; BMI264-LABEL: shl64pi:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: movq (%rdi), %rax
; BMI264-NEXT: shlq $7, %rax
; BMI264-NEXT: retq
@@ -110,13 +110,13 @@ define i64 @shl64pi(i64* %p) nounwind uwtable readnone {
define i32 @lshr32(i32 %x, i32 %shamt) nounwind uwtable readnone {
; BMI2-LABEL: lshr32:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; BMI2-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: lshr32:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shrxl %esi, %edi, %eax
; BMI264-NEXT: retq
%shl = lshr i32 %x, %shamt
@@ -125,14 +125,14 @@ define i32 @lshr32(i32 %x, i32 %shamt) nounwind uwtable readnone {
define i32 @lshr32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
; BMI2-LABEL: lshr32p:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
; BMI2-NEXT: shrxl %ecx, (%eax), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: lshr32p:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shrxl %esi, (%rdi), %eax
; BMI264-NEXT: retq
%x = load i32, i32* %p
@@ -142,7 +142,7 @@ define i32 @lshr32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
define i64 @lshr64(i64 %x, i64 %shamt) nounwind uwtable readnone {
; BMI264-LABEL: lshr64:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shrxq %rsi, %rdi, %rax
; BMI264-NEXT: retq
%shl = lshr i64 %x, %shamt
@@ -151,7 +151,7 @@ define i64 @lshr64(i64 %x, i64 %shamt) nounwind uwtable readnone {
define i64 @lshr64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
; BMI264-LABEL: lshr64p:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shrxq %rsi, (%rdi), %rax
; BMI264-NEXT: retq
%x = load i64, i64* %p
@@ -161,13 +161,13 @@ define i64 @lshr64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
define i32 @ashr32(i32 %x, i32 %shamt) nounwind uwtable readnone {
; BMI2-LABEL: ashr32:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; BMI2-NEXT: sarxl %eax, {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: ashr32:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: sarxl %esi, %edi, %eax
; BMI264-NEXT: retq
%shl = ashr i32 %x, %shamt
@@ -176,14 +176,14 @@ define i32 @ashr32(i32 %x, i32 %shamt) nounwind uwtable readnone {
define i32 @ashr32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
; BMI2-LABEL: ashr32p:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
; BMI2-NEXT: sarxl %ecx, (%eax), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: ashr32p:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: sarxl %esi, (%rdi), %eax
; BMI264-NEXT: retq
%x = load i32, i32* %p
@@ -193,7 +193,7 @@ define i32 @ashr32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
define i64 @ashr64(i64 %x, i64 %shamt) nounwind uwtable readnone {
; BMI264-LABEL: ashr64:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: sarxq %rsi, %rdi, %rax
; BMI264-NEXT: retq
%shl = ashr i64 %x, %shamt
@@ -202,7 +202,7 @@ define i64 @ashr64(i64 %x, i64 %shamt) nounwind uwtable readnone {
define i64 @ashr64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
; BMI264-LABEL: ashr64p:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: sarxq %rsi, (%rdi), %rax
; BMI264-NEXT: retq
%x = load i64, i64* %p
@@ -212,13 +212,13 @@ define i64 @ashr64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
define i32 @shl32and(i32 %t, i32 %val) nounwind {
; BMI2-LABEL: shl32and:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: shl32and:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shlxl %edi, %esi, %eax
; BMI264-NEXT: retq
%shamt = and i32 %t, 31
@@ -228,7 +228,7 @@ define i32 @shl32and(i32 %t, i32 %val) nounwind {
define i64 @shl64and(i64 %t, i64 %val) nounwind {
; BMI264-LABEL: shl64and:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shlxq %rdi, %rsi, %rax
; BMI264-NEXT: retq
%shamt = and i64 %t, 63
@@ -238,13 +238,13 @@ define i64 @shl64and(i64 %t, i64 %val) nounwind {
define i32 @lshr32and(i32 %t, i32 %val) nounwind {
; BMI2-LABEL: lshr32and:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; BMI2-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: lshr32and:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shrxl %edi, %esi, %eax
; BMI264-NEXT: retq
%shamt = and i32 %t, 31
@@ -254,7 +254,7 @@ define i32 @lshr32and(i32 %t, i32 %val) nounwind {
define i64 @lshr64and(i64 %t, i64 %val) nounwind {
; BMI264-LABEL: lshr64and:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: shrxq %rdi, %rsi, %rax
; BMI264-NEXT: retq
%shamt = and i64 %t, 63
@@ -264,13 +264,13 @@ define i64 @lshr64and(i64 %t, i64 %val) nounwind {
define i32 @ashr32and(i32 %t, i32 %val) nounwind {
; BMI2-LABEL: ashr32and:
-; BMI2: # BB#0:
+; BMI2: # %bb.0:
; BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
; BMI2-NEXT: sarxl %eax, {{[0-9]+}}(%esp), %eax
; BMI2-NEXT: retl
;
; BMI264-LABEL: ashr32and:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: sarxl %edi, %esi, %eax
; BMI264-NEXT: retq
%shamt = and i32 %t, 31
@@ -280,7 +280,7 @@ define i32 @ashr32and(i32 %t, i32 %val) nounwind {
define i64 @ashr64and(i64 %t, i64 %val) nounwind {
; BMI264-LABEL: ashr64and:
-; BMI264: # BB#0:
+; BMI264: # %bb.0:
; BMI264-NEXT: sarxq %rdi, %rsi, %rax
; BMI264-NEXT: retq
%shamt = and i64 %t, 63
diff --git a/test/CodeGen/X86/shift-codegen.ll b/test/CodeGen/X86/shift-codegen.ll
index 295a55d86a0..838ec789db5 100644
--- a/test/CodeGen/X86/shift-codegen.ll
+++ b/test/CodeGen/X86/shift-codegen.ll
@@ -9,7 +9,7 @@ target triple = "i686-apple-darwin8"
define void @fn1() {
; CHECK-LABEL: fn1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl Y, %eax
; CHECK-NEXT: shll $3, %eax
; CHECK-NEXT: orl %eax, X
@@ -24,7 +24,7 @@ define void @fn1() {
define i32 @fn2(i32 %X, i32 %Y) {
; CHECK-LABEL: fn2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: shll $3, %eax
; CHECK-NEXT: orl {{[0-9]+}}(%esp), %eax
diff --git a/test/CodeGen/X86/shift-combine.ll b/test/CodeGen/X86/shift-combine.ll
index 7b8a1fc2dff..772dc583d81 100644
--- a/test/CodeGen/X86/shift-combine.ll
+++ b/test/CodeGen/X86/shift-combine.ll
@@ -6,14 +6,14 @@
define i32 @test_lshr_and(i32 %x) {
; X32-LABEL: test_lshr_and:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $12, %eax
; X32-NEXT: movl array(%eax), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_lshr_and:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shrl $2, %edi
; X64-NEXT: andl $3, %edi
@@ -28,7 +28,7 @@ define i32 @test_lshr_and(i32 %x) {
define i32* @test_exact1(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: sarl %eax
@@ -36,7 +36,7 @@ define i32* @test_exact1(i32 %a, i32 %b, i32* %x) {
; X32-NEXT: retl
;
; X64-LABEL: test_exact1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $3, %esi
; X64-NEXT: movslq %esi, %rax
@@ -50,7 +50,7 @@ define i32* @test_exact1(i32 %a, i32 %b, i32* %x) {
define i32* @test_exact2(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: sarl %eax
@@ -58,7 +58,7 @@ define i32* @test_exact2(i32 %a, i32 %b, i32* %x) {
; X32-NEXT: retl
;
; X64-LABEL: test_exact2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $3, %esi
; X64-NEXT: movslq %esi, %rax
@@ -72,14 +72,14 @@ define i32* @test_exact2(i32 %a, i32 %b, i32* %x) {
define i32* @test_exact3(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $2, %esi
; X64-NEXT: movslq %esi, %rax
@@ -93,7 +93,7 @@ define i32* @test_exact3(i32 %a, i32 %b, i32* %x) {
define i32* @test_exact4(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl %eax
@@ -101,7 +101,7 @@ define i32* @test_exact4(i32 %a, i32 %b, i32* %x) {
; X32-NEXT: retl
;
; X64-LABEL: test_exact4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
@@ -115,7 +115,7 @@ define i32* @test_exact4(i32 %a, i32 %b, i32* %x) {
define i32* @test_exact5(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl %eax
@@ -123,7 +123,7 @@ define i32* @test_exact5(i32 %a, i32 %b, i32* %x) {
; X32-NEXT: retl
;
; X64-LABEL: test_exact5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
@@ -137,14 +137,14 @@ define i32* @test_exact5(i32 %a, i32 %b, i32* %x) {
define i32* @test_exact6(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: leaq (%rsi,%rdx), %rax
diff --git a/test/CodeGen/X86/shift-double-x86_64.ll b/test/CodeGen/X86/shift-double-x86_64.ll
index 28f6731e25e..0d5d9498fda 100644
--- a/test/CodeGen/X86/shift-double-x86_64.ll
+++ b/test/CodeGen/X86/shift-double-x86_64.ll
@@ -5,7 +5,7 @@
define i64 @test1(i64 %hi, i64 %lo, i64 %bits) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $63, %edx
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: shldq %cl, %rsi, %rdi
@@ -21,7 +21,7 @@ define i64 @test1(i64 %hi, i64 %lo, i64 %bits) nounwind {
define i64 @test2(i64 %hi, i64 %lo, i64 %bits) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $63, %edx
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: shrdq %cl, %rdi, %rsi
@@ -37,7 +37,7 @@ define i64 @test2(i64 %hi, i64 %lo, i64 %bits) nounwind {
define i64 @test3(i64 %hi, i64 %lo, i64 %bits) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: shldq %cl, %rsi, %rdi
; CHECK-NEXT: movq %rdi, %rax
@@ -51,7 +51,7 @@ define i64 @test3(i64 %hi, i64 %lo, i64 %bits) nounwind {
define i64 @test4(i64 %hi, i64 %lo, i64 %bits) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: shrdq %cl, %rdi, %rsi
; CHECK-NEXT: movq %rsi, %rax
@@ -65,7 +65,7 @@ define i64 @test4(i64 %hi, i64 %lo, i64 %bits) nounwind {
define i64 @test5(i64 %hi, i64 %lo, i64 %bits) nounwind {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: shldq %cl, %rsi, %rdi
; CHECK-NEXT: movq %rdi, %rax
@@ -80,7 +80,7 @@ define i64 @test5(i64 %hi, i64 %lo, i64 %bits) nounwind {
define i64 @test6(i64 %hi, i64 %lo, i64 %bits) nounwind {
; CHECK-LABEL: test6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: shrdq %cl, %rsi, %rdi
; CHECK-NEXT: movq %rdi, %rax
@@ -95,7 +95,7 @@ define i64 @test6(i64 %hi, i64 %lo, i64 %bits) nounwind {
define i64 @test7(i64 %hi, i64 %lo, i64 %bits) nounwind {
; CHECK-LABEL: test7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: shrdq %cl, %rsi, %rdi
; CHECK-NEXT: movq %rdi, %rax
diff --git a/test/CodeGen/X86/shift-double.ll b/test/CodeGen/X86/shift-double.ll
index 3d0b755d56f..0ca04eff661 100644
--- a/test/CodeGen/X86/shift-double.ll
+++ b/test/CodeGen/X86/shift-double.ll
@@ -6,7 +6,7 @@
define i64 @test1(i64 %X, i8 %C) nounwind {
; X86-LABEL: test1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -16,7 +16,7 @@ define i64 @test1(i64 %X, i8 %C) nounwind {
; X86-NEXT: shldl %cl, %esi, %edx
; X86-NEXT: testb $32, %cl
; X86-NEXT: je .LBB0_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: movl %eax, %edx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: .LBB0_2:
@@ -24,7 +24,7 @@ define i64 @test1(i64 %X, i8 %C) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shlq %cl, %rdi
; X64-NEXT: movq %rdi, %rax
@@ -36,7 +36,7 @@ define i64 @test1(i64 %X, i8 %C) nounwind {
define i64 @test2(i64 %X, i8 %C) nounwind {
; X86-LABEL: test2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -46,7 +46,7 @@ define i64 @test2(i64 %X, i8 %C) nounwind {
; X86-NEXT: shrdl %cl, %esi, %eax
; X86-NEXT: testb $32, %cl
; X86-NEXT: je .LBB1_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: sarl $31, %esi
; X86-NEXT: movl %edx, %eax
; X86-NEXT: movl %esi, %edx
@@ -55,7 +55,7 @@ define i64 @test2(i64 %X, i8 %C) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: sarq %cl, %rdi
; X64-NEXT: movq %rdi, %rax
@@ -67,7 +67,7 @@ define i64 @test2(i64 %X, i8 %C) nounwind {
define i64 @test3(i64 %X, i8 %C) nounwind {
; X86-LABEL: test3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -77,7 +77,7 @@ define i64 @test3(i64 %X, i8 %C) nounwind {
; X86-NEXT: shrdl %cl, %esi, %eax
; X86-NEXT: testb $32, %cl
; X86-NEXT: je .LBB2_2
-; X86-NEXT: # BB#1:
+; X86-NEXT: # %bb.1:
; X86-NEXT: movl %edx, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: .LBB2_2:
@@ -85,7 +85,7 @@ define i64 @test3(i64 %X, i8 %C) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shrq %cl, %rdi
; X64-NEXT: movq %rdi, %rax
@@ -99,7 +99,7 @@ define i64 @test3(i64 %X, i8 %C) nounwind {
define i32 @test4(i32 %A, i32 %B, i8 %C) nounwind {
; X86-LABEL: test4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -107,7 +107,7 @@ define i32 @test4(i32 %A, i32 %B, i8 %C) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shldl %cl, %esi, %edi
; X64-NEXT: movl %edi, %eax
@@ -123,7 +123,7 @@ define i32 @test4(i32 %A, i32 %B, i8 %C) nounwind {
define i16 @test5(i16 %A, i16 %B, i8 %C) nounwind {
; X86-LABEL: test5:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
@@ -131,7 +131,7 @@ define i16 @test5(i16 %A, i16 %B, i8 %C) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shldw %cl, %si, %di
; X64-NEXT: movl %edi, %eax
@@ -149,7 +149,7 @@ define i16 @test5(i16 %A, i16 %B, i8 %C) nounwind {
define i32 @test6(i32 %A, i32 %B, i8 %C) nounwind {
; X86-LABEL: test6:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -157,7 +157,7 @@ define i32 @test6(i32 %A, i32 %B, i8 %C) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shrdl %cl, %esi, %edi
; X64-NEXT: movl %edi, %eax
@@ -173,7 +173,7 @@ define i32 @test6(i32 %A, i32 %B, i8 %C) nounwind {
define i16 @test7(i16 %A, i16 %B, i8 %C) nounwind {
; X86-LABEL: test7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
@@ -181,7 +181,7 @@ define i16 @test7(i16 %A, i16 %B, i8 %C) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shrdw %cl, %si, %di
; X64-NEXT: movl %edi, %eax
@@ -199,7 +199,7 @@ define i16 @test7(i16 %A, i16 %B, i8 %C) nounwind {
define i64 @test8(i64 %val, i32 %bits) nounwind {
; X86-LABEL: test8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -211,7 +211,7 @@ define i64 @test8(i64 %val, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $31, %sil
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shlq %cl, %rdi
@@ -225,7 +225,7 @@ define i64 @test8(i64 %val, i32 %bits) nounwind {
define i64 @test9(i64 %val, i32 %bits) nounwind {
; X86-LABEL: test9:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -234,7 +234,7 @@ define i64 @test9(i64 %val, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $31, %sil
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: sarq %cl, %rdi
@@ -248,7 +248,7 @@ define i64 @test9(i64 %val, i32 %bits) nounwind {
define i64 @test10(i64 %val, i32 %bits) nounwind {
; X86-LABEL: test10:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -257,7 +257,7 @@ define i64 @test10(i64 %val, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test10:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $31, %sil
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shrq %cl, %rdi
@@ -273,7 +273,7 @@ define i64 @test10(i64 %val, i32 %bits) nounwind {
define i32 @test11(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-LABEL: test11:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -283,7 +283,7 @@ define i32 @test11(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test11:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $31, %edx
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shldl %cl, %esi, %edi
@@ -299,7 +299,7 @@ define i32 @test11(i32 %hi, i32 %lo, i32 %bits) nounwind {
define i32 @test12(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-LABEL: test12:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -309,7 +309,7 @@ define i32 @test12(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test12:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $31, %edx
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shrdl %cl, %edi, %esi
@@ -325,7 +325,7 @@ define i32 @test12(i32 %hi, i32 %lo, i32 %bits) nounwind {
define i32 @test13(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-LABEL: test13:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -333,7 +333,7 @@ define i32 @test13(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test13:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shldl %cl, %esi, %edi
; X64-NEXT: movl %edi, %eax
@@ -347,7 +347,7 @@ define i32 @test13(i32 %hi, i32 %lo, i32 %bits) nounwind {
define i32 @test14(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-LABEL: test14:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -355,7 +355,7 @@ define i32 @test14(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test14:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shrdl %cl, %edi, %esi
; X64-NEXT: movl %esi, %eax
@@ -369,7 +369,7 @@ define i32 @test14(i32 %hi, i32 %lo, i32 %bits) nounwind {
define i32 @test15(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-LABEL: test15:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -377,7 +377,7 @@ define i32 @test15(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test15:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shldl %cl, %esi, %edi
; X64-NEXT: movl %edi, %eax
@@ -392,7 +392,7 @@ define i32 @test15(i32 %hi, i32 %lo, i32 %bits) nounwind {
define i32 @test16(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-LABEL: test16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -400,7 +400,7 @@ define i32 @test16(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shrdl %cl, %esi, %edi
; X64-NEXT: movl %edi, %eax
@@ -415,7 +415,7 @@ define i32 @test16(i32 %hi, i32 %lo, i32 %bits) nounwind {
define i32 @test17(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-LABEL: test17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -423,7 +423,7 @@ define i32 @test17(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edx, %ecx
; X64-NEXT: shrdl %cl, %esi, %edi
; X64-NEXT: movl %edi, %eax
diff --git a/test/CodeGen/X86/shift-folding.ll b/test/CodeGen/X86/shift-folding.ll
index 76cf4a41a6c..d8cc50cb01d 100644
--- a/test/CodeGen/X86/shift-folding.ll
+++ b/test/CodeGen/X86/shift-folding.ll
@@ -3,7 +3,7 @@
define i32* @test1(i32* %P, i32 %X) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: andl $-4, %eax
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -16,7 +16,7 @@ define i32* @test1(i32* %P, i32 %X) {
define i32* @test2(i32* %P, i32 %X) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: shll $4, %eax
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -29,7 +29,7 @@ define i32* @test2(i32* %P, i32 %X) {
define i32* @test3(i32* %P, i32 %X) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: andl $-4, %eax
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -41,7 +41,7 @@ define i32* @test3(i32* %P, i32 %X) {
define fastcc i32 @test4(i32* %d) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl 3(%ecx), %eax
; CHECK-NEXT: retl
%tmp4 = load i32, i32* %d
@@ -54,7 +54,7 @@ define fastcc i32 @test4(i32* %d) {
define i64 @test5(i16 %i, i32* %arr) {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: shrl $11, %eax
diff --git a/test/CodeGen/X86/shift-pcmp.ll b/test/CodeGen/X86/shift-pcmp.ll
index f509da2674b..e3ca10353cd 100644
--- a/test/CodeGen/X86/shift-pcmp.ll
+++ b/test/CodeGen/X86/shift-pcmp.ll
@@ -4,13 +4,13 @@
define <8 x i16> @foo(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: foo:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: foo:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -23,13 +23,13 @@ define <8 x i16> @foo(<8 x i16> %a, <8 x i16> %b) {
; Don't fail with an assert due to an undef in the buildvector
define <8 x i16> @bar(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: bar:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: bar:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/shl-crash-on-legalize.ll b/test/CodeGen/X86/shl-crash-on-legalize.ll
index 2029bae8c46..22735f07b0a 100644
--- a/test/CodeGen/X86/shl-crash-on-legalize.ll
+++ b/test/CodeGen/X86/shl-crash-on-legalize.ll
@@ -11,7 +11,7 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: norecurse nounwind uwtable
define i32 @_Z3foov() local_unnamed_addr #0 {
; CHECK-LABEL: _Z3foov:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/shrink-compare.ll b/test/CodeGen/X86/shrink-compare.ll
index 0cecf9c0d1d..251d68f296b 100644
--- a/test/CodeGen/X86/shrink-compare.ll
+++ b/test/CodeGen/X86/shrink-compare.ll
@@ -5,10 +5,10 @@ declare void @bar()
define void @test1(i32* nocapture %X) nounwind minsize {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $47, (%rdi)
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%tmp1 = load i32, i32* %X, align 4
@@ -26,10 +26,10 @@ if.end:
define void @test2(i32 %X) nounwind minsize {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $47, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%and = and i32 %X, 255
@@ -46,10 +46,10 @@ if.end:
define void @test3(i32 %X) nounwind minsize {
; CHECK-LABEL: test3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $-1, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%and = and i32 %X, 255
@@ -67,11 +67,11 @@ if.end:
; PR16083
define i1 @test4(i64 %a, i32 %b) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB3_1
-; CHECK-NEXT: # BB#2: # %lor.end
+; CHECK-NEXT: # %bb.2: # %lor.end
; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB3_1: # %lor.rhs
@@ -97,14 +97,14 @@ lor.end: ; preds = %lor.rhs, %entry
; PR16551
define void @test5(i32 %X) nounwind minsize {
; CHECK-LABEL: test5:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movzbl x+{{.*}}(%rip), %eax
; CHECK-NEXT: shll $16, %eax
; CHECK-NEXT: movzwl x+{{.*}}(%rip), %ecx
; CHECK-NEXT: orl %eax, %ecx
; CHECK-NEXT: cmpl $1, %ecx
; CHECK-NEXT: jne bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%bf.load = load i56, i56* bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @x to i56*), align 4
@@ -123,11 +123,11 @@ if.end:
define void @test2_1(i32 %X) nounwind minsize {
; CHECK-LABEL: test2_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: cmpl $256, %eax # imm = 0x100
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%and = and i32 %X, 255
@@ -144,10 +144,10 @@ if.end:
define void @test_sext_i8_icmp_1(i8 %x) nounwind minsize {
; CHECK-LABEL: test_sext_i8_icmp_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $1, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%sext = sext i8 %x to i32
@@ -164,10 +164,10 @@ if.end:
define void @test_sext_i8_icmp_47(i8 %x) nounwind minsize {
; CHECK-LABEL: test_sext_i8_icmp_47:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $47, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%sext = sext i8 %x to i32
@@ -184,10 +184,10 @@ if.end:
define void @test_sext_i8_icmp_127(i8 %x) nounwind minsize {
; CHECK-LABEL: test_sext_i8_icmp_127:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $127, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%sext = sext i8 %x to i32
@@ -204,10 +204,10 @@ if.end:
define void @test_sext_i8_icmp_neg1(i8 %x) nounwind minsize {
; CHECK-LABEL: test_sext_i8_icmp_neg1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $-1, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%sext = sext i8 %x to i32
@@ -224,10 +224,10 @@ if.end:
define void @test_sext_i8_icmp_neg2(i8 %x) nounwind minsize {
; CHECK-LABEL: test_sext_i8_icmp_neg2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $-2, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%sext = sext i8 %x to i32
@@ -244,10 +244,10 @@ if.end:
define void @test_sext_i8_icmp_neg127(i8 %x) nounwind minsize {
; CHECK-LABEL: test_sext_i8_icmp_neg127:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $-127, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%sext = sext i8 %x to i32
@@ -264,10 +264,10 @@ if.end:
define void @test_sext_i8_icmp_neg128(i8 %x) nounwind minsize {
; CHECK-LABEL: test_sext_i8_icmp_neg128:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpb $-128, %dil
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%sext = sext i8 %x to i32
@@ -284,11 +284,11 @@ if.end:
define void @test_sext_i8_icmp_255(i8 %x) nounwind minsize {
; CHECK-LABEL: test_sext_i8_icmp_255:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je bar # TAILCALL
-; CHECK-NEXT: # BB#1: # %if.end
+; CHECK-NEXT: # %bb.1: # %if.end
; CHECK-NEXT: retq
entry:
%sext = sext i8 %x to i32
diff --git a/test/CodeGen/X86/shrink_vmul.ll b/test/CodeGen/X86/shrink_vmul.ll
index 79cf0f2c8f1..5700b1df15b 100644
--- a/test/CodeGen/X86/shrink_vmul.ll
+++ b/test/CodeGen/X86/shrink_vmul.ll
@@ -12,7 +12,7 @@
;
define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_2xi8:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -34,7 +34,7 @@ define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rdx), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -72,7 +72,7 @@ entry:
;
define void @mul_4xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_4xi8:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -92,7 +92,7 @@ define void @mul_4xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; X86-NEXT: retl
;
; X64-LABEL: mul_4xi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -128,7 +128,7 @@ entry:
;
define void @mul_8xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_8xi8:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -151,7 +151,7 @@ define void @mul_8xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; X86-NEXT: retl
;
; X64-LABEL: mul_8xi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
@@ -190,7 +190,7 @@ entry:
;
define void @mul_16xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_16xi8:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -223,7 +223,7 @@ define void @mul_16xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; X86-NEXT: retl
;
; X64-LABEL: mul_16xi8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movdqu (%rdi,%rdx), %xmm0
; X64-NEXT: movdqu (%rsi,%rdx), %xmm1
@@ -272,7 +272,7 @@ entry:
;
define void @mul_2xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_2xi16:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -291,7 +291,7 @@ define void @mul_2xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -326,7 +326,7 @@ entry:
;
define void @mul_4xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_4xi16:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -345,7 +345,7 @@ define void @mul_4xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; X86-NEXT: retl
;
; X64-LABEL: mul_4xi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
@@ -380,7 +380,7 @@ entry:
;
define void @mul_8xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_8xi16:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -402,7 +402,7 @@ define void @mul_8xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; X86-NEXT: retl
;
; X64-LABEL: mul_8xi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movdqu (%rdi,%rdx), %xmm0
; X64-NEXT: movdqu (%rsi,%rdx), %xmm1
@@ -440,7 +440,7 @@ entry:
;
define void @mul_16xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_16xi16:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -472,7 +472,7 @@ define void @mul_16xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i6
; X86-NEXT: retl
;
; X64-LABEL: mul_16xi16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movdqu (%rdi,%rdx), %xmm0
; X64-NEXT: movdqu 16(%rdi,%rdx), %xmm1
@@ -520,7 +520,7 @@ entry:
;
define void @mul_2xi8_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_2xi8_sext:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -544,7 +544,7 @@ define void @mul_2xi8_sext(i8* nocapture readonly %a, i8* nocapture readonly %b,
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8_sext:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rdx), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -584,7 +584,7 @@ entry:
;
define void @mul_2xi8_sext_zext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_2xi8_sext_zext:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -609,7 +609,7 @@ define void @mul_2xi8_sext_zext(i8* nocapture readonly %a, i8* nocapture readonl
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8_sext_zext:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rdx), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -650,7 +650,7 @@ entry:
;
define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_2xi16_sext:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -669,7 +669,7 @@ define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi16_sext:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -704,7 +704,7 @@ entry:
;
define void @mul_2xi16_sext_zext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_2xi16_sext_zext:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -736,7 +736,7 @@ define void @mul_2xi16_sext_zext(i8* nocapture readonly %a, i8* nocapture readon
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi16_sext_zext:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
@@ -784,7 +784,7 @@ entry:
;
define void @mul_16xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
; X86-LABEL: mul_16xi16_sext:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
@@ -816,7 +816,7 @@ define void @mul_16xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %
; X86-NEXT: retl
;
; X64-LABEL: mul_16xi16_sext:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movdqu (%rdi,%rdx), %xmm0
; X64-NEXT: movdqu 16(%rdi,%rdx), %xmm1
@@ -863,7 +863,7 @@ entry:
;
define void @mul_2xi8_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi8_varconst1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -877,7 +877,7 @@ define void @mul_2xi8_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8_varconst1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rsi), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -907,7 +907,7 @@ entry:
;
define void @mul_2xi8_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi8_varconst2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -922,7 +922,7 @@ define void @mul_2xi8_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8_varconst2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rsi), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -953,7 +953,7 @@ entry:
;
define void @mul_2xi8_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi8_varconst3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -970,7 +970,7 @@ define void @mul_2xi8_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8_varconst3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rsi), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -1003,7 +1003,7 @@ entry:
;
define void @mul_2xi8_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi8_varconst4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -1020,7 +1020,7 @@ define void @mul_2xi8_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8_varconst4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rsi), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -1053,7 +1053,7 @@ entry:
;
define void @mul_2xi8_varconst5(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi8_varconst5:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -1070,7 +1070,7 @@ define void @mul_2xi8_varconst5(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8_varconst5:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rsi), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -1103,7 +1103,7 @@ entry:
;
define void @mul_2xi8_varconst6(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi8_varconst6:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -1120,7 +1120,7 @@ define void @mul_2xi8_varconst6(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi8_varconst6:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movzwl (%rdi,%rsi), %ecx
; X64-NEXT: movd %ecx, %xmm0
@@ -1153,7 +1153,7 @@ entry:
;
define void @mul_2xi16_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi16_varconst1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -1167,7 +1167,7 @@ define void @mul_2xi16_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi16_varconst1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movdqa {{.*#+}} xmm1 = <0,65535,u,u,u,u,u,u>
@@ -1197,7 +1197,7 @@ entry:
;
define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi16_varconst2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -1211,7 +1211,7 @@ define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi16_varconst2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movdqa {{.*#+}} xmm1 = <32768,32767,u,u,u,u,u,u>
@@ -1241,7 +1241,7 @@ entry:
;
define void @mul_2xi16_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi16_varconst3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -1261,7 +1261,7 @@ define void @mul_2xi16_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi16_varconst3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: pxor %xmm1, %xmm1
@@ -1299,7 +1299,7 @@ entry:
;
define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-LABEL: mul_2xi16_varconst4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl c, %edx
@@ -1319,7 +1319,7 @@ define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-NEXT: retl
;
; X64-LABEL: mul_2xi16_varconst4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
@@ -1356,7 +1356,7 @@ entry:
define void @PR34947() {
; X86-LABEL: PR34947:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa (%eax), %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; X86-NEXT: movd %xmm1, %ecx
@@ -1403,7 +1403,7 @@ define void @PR34947() {
; X86-NEXT: retl
;
; X64-LABEL: PR34947:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa (%rax), %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; X64-NEXT: movd %xmm1, %ecx
diff --git a/test/CodeGen/X86/shrink_vmul_sse.ll b/test/CodeGen/X86/shrink_vmul_sse.ll
index 6701c247e6f..93bb2a4b1cd 100644
--- a/test/CodeGen/X86/shrink_vmul_sse.ll
+++ b/test/CodeGen/X86/shrink_vmul_sse.ll
@@ -9,7 +9,7 @@
define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
; CHECK-LABEL: mul_2xi8:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
diff --git a/test/CodeGen/X86/shuffle-combine-crash-2.ll b/test/CodeGen/X86/shuffle-combine-crash-2.ll
index ea37d5b4853..c449ec5d3f1 100644
--- a/test/CodeGen/X86/shuffle-combine-crash-2.ll
+++ b/test/CodeGen/X86/shuffle-combine-crash-2.ll
@@ -4,13 +4,13 @@
define <4 x i64> @fold_movsd_zero() {
; X86-LABEL: fold_movsd_zero:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: xorps %xmm0, %xmm0
; X86-NEXT: xorps %xmm1, %xmm1
; X86-NEXT: retl
;
; X64-LABEL: fold_movsd_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/shuffle-of-insert.ll b/test/CodeGen/X86/shuffle-of-insert.ll
index 251b4821d9c..f663f954744 100644
--- a/test/CodeGen/X86/shuffle-of-insert.ll
+++ b/test/CodeGen/X86/shuffle-of-insert.ll
@@ -5,20 +5,20 @@
define <4 x i32> @ins_elt_0(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: ins_elt_0:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: ins_elt_0:
-; SSE4: # BB#0:
+; SSE4: # %bb.0:
; SSE4-NEXT: pinsrd $0, %edi, %xmm0
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE4-NEXT: retq
;
; AVX-LABEL: ins_elt_0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm0
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX-NEXT: retq
@@ -29,7 +29,7 @@ define <4 x i32> @ins_elt_0(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
define <4 x i32> @ins_elt_1(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: ins_elt_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,0]
@@ -38,13 +38,13 @@ define <4 x i32> @ins_elt_1(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: ins_elt_1:
-; SSE4: # BB#0:
+; SSE4: # %bb.0:
; SSE4-NEXT: pinsrd $1, %edi, %xmm0
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; SSE4-NEXT: retq
;
; AVX-LABEL: ins_elt_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX-NEXT: retq
@@ -57,7 +57,7 @@ define <4 x i32> @ins_elt_1(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
define <4 x i32> @ins_elt_2_commute(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: ins_elt_2_commute:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,2]
@@ -67,13 +67,13 @@ define <4 x i32> @ins_elt_2_commute(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: ins_elt_2_commute:
-; SSE4: # BB#0:
+; SSE4: # %bb.0:
; SSE4-NEXT: pinsrd $2, %edi, %xmm0
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
; SSE4-NEXT: retq
;
; AVX-LABEL: ins_elt_2_commute:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $2, %edi, %xmm0, %xmm0
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
; AVX-NEXT: retq
@@ -84,7 +84,7 @@ define <4 x i32> @ins_elt_2_commute(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
define <4 x i32> @ins_elt_3_commute(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: ins_elt_3_commute:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
@@ -94,13 +94,13 @@ define <4 x i32> @ins_elt_3_commute(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: ins_elt_3_commute:
-; SSE4: # BB#0:
+; SSE4: # %bb.0:
; SSE4-NEXT: pinsrd $3, %edi, %xmm0
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; SSE4-NEXT: retq
;
; AVX-LABEL: ins_elt_3_commute:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; AVX-NEXT: retq
@@ -113,7 +113,7 @@ define <4 x i32> @ins_elt_3_commute(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
define <4 x i32> @ins_elt_0_to_2(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: ins_elt_0_to_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -121,14 +121,14 @@ define <4 x i32> @ins_elt_0_to_2(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: ins_elt_0_to_2:
-; SSE4: # BB#0:
+; SSE4: # %bb.0:
; SSE4-NEXT: pinsrd $0, %edi, %xmm0
; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
; SSE4-NEXT: retq
;
; AVX-LABEL: ins_elt_0_to_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
@@ -140,21 +140,21 @@ define <4 x i32> @ins_elt_0_to_2(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
define <4 x i32> @ins_elt_1_to_0(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: ins_elt_1_to_0:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: ins_elt_1_to_0:
-; SSE4: # BB#0:
+; SSE4: # %bb.0:
; SSE4-NEXT: pinsrd $1, %edi, %xmm0
; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE4-NEXT: retq
;
; AVX-LABEL: ins_elt_1_to_0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
@@ -166,7 +166,7 @@ define <4 x i32> @ins_elt_1_to_0(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
define <4 x i32> @ins_elt_2_to_3(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: ins_elt_2_to_3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,2]
@@ -176,14 +176,14 @@ define <4 x i32> @ins_elt_2_to_3(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: ins_elt_2_to_3:
-; SSE4: # BB#0:
+; SSE4: # %bb.0:
; SSE4-NEXT: pinsrd $2, %edi, %xmm0
; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; SSE4-NEXT: retq
;
; AVX-LABEL: ins_elt_2_to_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $2, %edi, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
@@ -195,7 +195,7 @@ define <4 x i32> @ins_elt_2_to_3(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
define <4 x i32> @ins_elt_3_to_1(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: ins_elt_3_to_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
@@ -204,14 +204,14 @@ define <4 x i32> @ins_elt_3_to_1(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: ins_elt_3_to_1:
-; SSE4: # BB#0:
+; SSE4: # %bb.0:
; SSE4-NEXT: pinsrd $3, %edi, %xmm0
; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; SSE4-NEXT: retq
;
; AVX-LABEL: ins_elt_3_to_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
diff --git a/test/CodeGen/X86/shuffle-of-splat-multiuses.ll b/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
index d16ebd5405f..bbdff971c2f 100644
--- a/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
+++ b/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
@@ -4,7 +4,7 @@
define <2 x double> @foo2(<2 x double> %v, <2 x double> *%p) nounwind {
; AVX2-LABEL: foo2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
; AVX2-NEXT: vmovapd %xmm0, (%rdi)
; AVX2-NEXT: retq
@@ -16,7 +16,7 @@ define <2 x double> @foo2(<2 x double> %v, <2 x double> *%p) nounwind {
define <4 x double> @foo4(<4 x double> %v, <4 x double> *%p) nounwind {
; AVX2-LABEL: foo4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-NEXT: vmovaps %ymm0, (%rdi)
; AVX2-NEXT: retq
@@ -28,7 +28,7 @@ define <4 x double> @foo4(<4 x double> %v, <4 x double> *%p) nounwind {
define <8 x float> @foo8(<8 x float> %v, <8 x float> *%p) nounwind {
; AVX2-LABEL: foo8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-NEXT: vmovaps %ymm0, (%rdi)
@@ -41,7 +41,7 @@ define <8 x float> @foo8(<8 x float> %v, <8 x float> *%p) nounwind {
define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
@@ -51,7 +51,7 @@ define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 2, i32 undef>
@@ -61,7 +61,7 @@ define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
; AVX2-LABEL: undef_splatmask3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: retq
%res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
@@ -71,7 +71,7 @@ define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
define <4 x i32> @undef_splatmask4(<4 x i32> %v, <4 x i32>* %p) nounwind {
; AVX2-LABEL: undef_splatmask4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,2,3,3]
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX2-NEXT: vmovaps %xmm0, (%rdi)
@@ -85,7 +85,7 @@ define <4 x i32> @undef_splatmask4(<4 x i32> %v, <4 x i32>* %p) nounwind {
define <4 x i32> @undef_splatmask5(<4 x i32> %v, <4 x i32>* %p) nounwind {
; AVX2-LABEL: undef_splatmask5:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd %xmm0, %xmm1
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
diff --git a/test/CodeGen/X86/shuffle-strided-with-offset-128.ll b/test/CodeGen/X86/shuffle-strided-with-offset-128.ll
index 0641e9df6e6..0f1f818e250 100644
--- a/test/CodeGen/X86/shuffle-strided-with-offset-128.ll
+++ b/test/CodeGen/X86/shuffle-strided-with-offset-128.ll
@@ -10,7 +10,7 @@
define void @shuffle_v16i8_to_v8i8_1(<16 x i8>* %L, <8 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v8i8_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -30,42 +30,42 @@ define void @shuffle_v16i8_to_v8i8_1(<16 x i8>* %L, <8 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v8i8_1:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $8, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -77,7 +77,7 @@ define void @shuffle_v16i8_to_v8i8_1(<16 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v8i16_to_v4i16_1(<8 x i16>* %L, <4 x i16>* %S) nounwind {
; SSE2-LABEL: shuffle_v8i16_to_v4i16_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = mem[3,1,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -86,41 +86,41 @@ define void @shuffle_v8i16_to_v4i16_1(<8 x i16>* %L, <4 x i16>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v8i16_to_v4i16_1:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $16, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrld $16, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -132,37 +132,37 @@ define void @shuffle_v8i16_to_v4i16_1(<8 x i16>* %L, <4 x i16>* %S) nounwind {
define void @shuffle_v4i32_to_v2i32_1(<4 x i32>* %L, <2 x i32>* %S) nounwind {
; SSE-LABEL: shuffle_v4i32_to_v2i32_1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
; SSE-NEXT: movq %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,3,2,3]
; AVX-NEXT: vmovlps %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,3,2,3]
; AVX512F-NEXT: vmovlps %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
; AVX512VL-NEXT: vpmovqd %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,3,2,3]
; AVX512BW-NEXT: vmovlps %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
; AVX512BWVL-NEXT: vpmovqd %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -174,7 +174,7 @@ define void @shuffle_v4i32_to_v2i32_1(<4 x i32>* %L, <2 x i32>* %S) nounwind {
define void @shuffle_v16i8_to_v4i8_1(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v4i8_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -190,42 +190,42 @@ define void @shuffle_v16i8_to_v4i8_1(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v4i8_1:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movd %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $8, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -237,7 +237,7 @@ define void @shuffle_v16i8_to_v4i8_1(<16 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v16i8_to_v4i8_2(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v4i8_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
@@ -249,41 +249,41 @@ define void @shuffle_v16i8_to_v4i8_2(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v4i8_2:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movd %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $16, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrld $16, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -295,7 +295,7 @@ define void @shuffle_v16i8_to_v4i8_2(<16 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v16i8_to_v4i8_3(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v4i8_3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -311,41 +311,41 @@ define void @shuffle_v16i8_to_v4i8_3(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v4i8_3:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movd %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $24, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrld $24, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -357,41 +357,41 @@ define void @shuffle_v16i8_to_v4i8_3(<16 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v8i16_to_v2i16_1(<8 x i16>* %L, <2 x i16>* %S) nounwind {
; SSE-LABEL: shuffle_v8i16_to_v2i16_1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
; SSE-NEXT: movd %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $16, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrld $16, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -403,41 +403,41 @@ define void @shuffle_v8i16_to_v2i16_1(<8 x i16>* %L, <2 x i16>* %S) nounwind {
define void @shuffle_v8i16_to_v2i16_2(<8 x i16>* %L, <2 x i16>* %S) nounwind {
; SSE-LABEL: shuffle_v8i16_to_v2i16_2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
; SSE-NEXT: movd %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
; AVX512VL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
; AVX512BWVL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -449,41 +449,41 @@ define void @shuffle_v8i16_to_v2i16_2(<8 x i16>* %L, <2 x i16>* %S) nounwind {
define void @shuffle_v8i16_to_v2i16_3(<8 x i16>* %L, <2 x i16>* %S) nounwind {
; SSE-LABEL: shuffle_v8i16_to_v2i16_3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE-NEXT: movd %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $48, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlq $48, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -495,7 +495,7 @@ define void @shuffle_v8i16_to_v2i16_3(<8 x i16>* %L, <2 x i16>* %S) nounwind {
define void @shuffle_v16i8_to_v2i8_1(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8_1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -509,42 +509,42 @@ define void @shuffle_v16i8_to_v2i8_1(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8_1:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $8, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -556,7 +556,7 @@ define void @shuffle_v16i8_to_v2i8_1(<16 x i8>* %L, <2 x i8>* %S) nounwind {
define void @shuffle_v16i8_to_v2i8_2(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8_2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -567,41 +567,41 @@ define void @shuffle_v16i8_to_v2i8_2(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8_2:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $16, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrld $16, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -613,7 +613,7 @@ define void @shuffle_v16i8_to_v2i8_2(<16 x i8>* %L, <2 x i8>* %S) nounwind {
define void @shuffle_v16i8_to_v2i8_3(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8_3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -627,41 +627,41 @@ define void @shuffle_v16i8_to_v2i8_3(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8_3:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $24, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrld $24, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -673,7 +673,7 @@ define void @shuffle_v16i8_to_v2i8_3(<16 x i8>* %L, <2 x i8>* %S) nounwind {
define void @shuffle_v16i8_to_v2i8_4(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8_4:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
@@ -684,41 +684,41 @@ define void @shuffle_v16i8_to_v2i8_4(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8_4:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -730,7 +730,7 @@ define void @shuffle_v16i8_to_v2i8_4(<16 x i8>* %L, <2 x i8>* %S) nounwind {
define void @shuffle_v16i8_to_v2i8_5(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8_5:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -744,41 +744,41 @@ define void @shuffle_v16i8_to_v2i8_5(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8_5:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $40, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlq $40, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -790,7 +790,7 @@ define void @shuffle_v16i8_to_v2i8_5(<16 x i8>* %L, <2 x i8>* %S) nounwind {
define void @shuffle_v16i8_to_v2i8_6(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8_6:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
@@ -801,41 +801,41 @@ define void @shuffle_v16i8_to_v2i8_6(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8_6:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $48, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlq $48, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -847,7 +847,7 @@ define void @shuffle_v16i8_to_v2i8_6(<16 x i8>* %L, <2 x i8>* %S) nounwind {
define void @shuffle_v16i8_to_v2i8_7(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8_7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -861,41 +861,41 @@ define void @shuffle_v16i8_to_v2i8_7(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8_7:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $56, (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlq $56, (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
diff --git a/test/CodeGen/X86/shuffle-strided-with-offset-256.ll b/test/CodeGen/X86/shuffle-strided-with-offset-256.ll
index 4192029a6b7..7cef269ebc2 100644
--- a/test/CodeGen/X86/shuffle-strided-with-offset-256.ll
+++ b/test/CodeGen/X86/shuffle-strided-with-offset-256.ll
@@ -8,7 +8,7 @@
define void @shuffle_v32i8_to_v16i8_1(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v16i8_1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
@@ -20,7 +20,7 @@ define void @shuffle_v32i8_to_v16i8_1(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v16i8_1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
@@ -32,7 +32,7 @@ define void @shuffle_v32i8_to_v16i8_1(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuffle_v32i8_to_v16i8_1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
@@ -50,7 +50,7 @@ define void @shuffle_v32i8_to_v16i8_1(<32 x i8>* %L, <16 x i8>* %S) nounwind {
define void @shuffle_v16i16_to_v8i16_1(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX1-LABEL: shuffle_v16i16_to_v8i16_1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
@@ -62,7 +62,7 @@ define void @shuffle_v16i16_to_v8i16_1(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_to_v8i16_1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
@@ -74,7 +74,7 @@ define void @shuffle_v16i16_to_v8i16_1(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuffle_v16i16_to_v8i16_1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
@@ -92,7 +92,7 @@ define void @shuffle_v16i16_to_v8i16_1(<16 x i16>* %L, <8 x i16>* %S) nounwind {
define void @shuffle_v8i32_to_v4i32_1(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX-LABEL: shuffle_v8i32_to_v4i32_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -101,7 +101,7 @@ define void @shuffle_v8i32_to_v4i32_1(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX-NEXT: retq
;
; AVX512-LABEL: shuffle_v8i32_to_v4i32_1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %ymm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -116,7 +116,7 @@ define void @shuffle_v8i32_to_v4i32_1(<8 x i32>* %L, <4 x i32>* %S) nounwind {
define void @shuffle_v32i8_to_v8i8_1(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -128,7 +128,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -140,7 +140,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -152,7 +152,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -164,7 +164,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -176,7 +176,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,5,5,9,9,13,13,13,13,5,5,12,12,13,13]
@@ -194,7 +194,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v32i8_to_v8i8_2(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -206,7 +206,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -218,7 +218,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -230,7 +230,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -242,7 +242,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -254,7 +254,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
@@ -272,7 +272,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v32i8_to_v8i8_3(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -284,7 +284,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -296,7 +296,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -308,7 +308,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -320,7 +320,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -332,7 +332,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = [3,3,7,7,11,11,15,15,7,7,15,15,6,6,7,7]
@@ -350,7 +350,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v16i16_to_v4i16_1(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -363,7 +363,7 @@ define void @shuffle_v16i16_to_v4i16_1(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -376,7 +376,7 @@ define void @shuffle_v16i16_to_v4i16_1(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -389,7 +389,7 @@ define void @shuffle_v16i16_to_v4i16_1(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -402,7 +402,7 @@ define void @shuffle_v16i16_to_v4i16_1(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -415,7 +415,7 @@ define void @shuffle_v16i16_to_v4i16_1(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -434,7 +434,7 @@ define void @shuffle_v16i16_to_v4i16_1(<16 x i16>* %L, <4 x i16>* %S) nounwind {
define void @shuffle_v16i16_to_v4i16_2(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -447,7 +447,7 @@ define void @shuffle_v16i16_to_v4i16_2(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -460,7 +460,7 @@ define void @shuffle_v16i16_to_v4i16_2(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -473,7 +473,7 @@ define void @shuffle_v16i16_to_v4i16_2(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovaps (%rdi), %ymm0
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -482,7 +482,7 @@ define void @shuffle_v16i16_to_v4i16_2(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -495,7 +495,7 @@ define void @shuffle_v16i16_to_v4i16_2(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovaps (%rdi), %ymm0
; AVX512BWVL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -510,7 +510,7 @@ define void @shuffle_v16i16_to_v4i16_2(<16 x i16>* %L, <4 x i16>* %S) nounwind {
define void @shuffle_v16i16_to_v4i16_3(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -523,7 +523,7 @@ define void @shuffle_v16i16_to_v4i16_3(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -536,7 +536,7 @@ define void @shuffle_v16i16_to_v4i16_3(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -549,7 +549,7 @@ define void @shuffle_v16i16_to_v4i16_3(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -562,7 +562,7 @@ define void @shuffle_v16i16_to_v4i16_3(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -575,7 +575,7 @@ define void @shuffle_v16i16_to_v4i16_3(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -594,7 +594,7 @@ define void @shuffle_v16i16_to_v4i16_3(<16 x i16>* %L, <4 x i16>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8_1(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -606,7 +606,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -618,7 +618,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -630,7 +630,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,1,1,8,8,9,9,8,8,9,9,10,10,11,11]
@@ -644,7 +644,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -656,7 +656,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,1,1,8,8,9,9,8,8,9,9,10,10,11,11]
@@ -676,7 +676,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8_2(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -688,7 +688,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -700,7 +700,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -712,7 +712,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -725,7 +725,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -737,7 +737,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -756,7 +756,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8_3(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -768,7 +768,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -780,7 +780,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -792,7 +792,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [10,10,11,11,2,2,3,3,8,8,9,9,10,10,11,11]
@@ -806,7 +806,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -818,7 +818,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = [10,10,11,11,2,2,3,3,8,8,9,9,10,10,11,11]
@@ -838,7 +838,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8_4(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -850,7 +850,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -862,7 +862,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -874,7 +874,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovaps (%rdi), %ymm0
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -883,7 +883,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -895,7 +895,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovaps (%rdi), %ymm0
; AVX512BWVL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -910,7 +910,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8_5(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -922,7 +922,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -934,7 +934,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -946,7 +946,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -963,7 +963,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -975,7 +975,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -998,7 +998,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8_6(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1010,7 +1010,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1022,7 +1022,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1034,7 +1034,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -1047,7 +1047,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1059,7 +1059,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -1078,7 +1078,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8_7(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1090,7 +1090,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1102,7 +1102,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1114,7 +1114,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [7,7,14,14,15,15,14,14,15,15,4,4,5,5,6,6]
@@ -1126,7 +1126,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1138,7 +1138,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = [7,7,14,14,15,15,14,14,15,15,4,4,5,5,6,6]
diff --git a/test/CodeGen/X86/shuffle-strided-with-offset-512.ll b/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
index a4698a51ba1..7f3431fabed 100644
--- a/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
+++ b/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
@@ -6,7 +6,7 @@
define void @shuffle_v64i8_to_v32i8_1(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v32i8_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
@@ -18,7 +18,7 @@ define void @shuffle_v64i8_to_v32i8_1(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v32i8_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
@@ -30,7 +30,7 @@ define void @shuffle_v64i8_to_v32i8_1(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v32i8_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
@@ -42,7 +42,7 @@ define void @shuffle_v64i8_to_v32i8_1(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
@@ -60,7 +60,7 @@ define void @shuffle_v64i8_to_v32i8_1(<64 x i8>* %L, <32 x i8>* %S) nounwind {
define void @shuffle_v32i16_to_v16i16_1(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i16_to_v16i16_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15,22,23,18,19,20,21,22,23,18,19,22,23,26,27,30,31]
@@ -72,7 +72,7 @@ define void @shuffle_v32i16_to_v16i16_1(<32 x i16>* %L, <16 x i16>* %S) nounwind
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i16_to_v16i16_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15,22,23,18,19,20,21,22,23,18,19,22,23,26,27,30,31]
@@ -84,7 +84,7 @@ define void @shuffle_v32i16_to_v16i16_1(<32 x i16>* %L, <16 x i16>* %S) nounwind
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i16_to_v16i16_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15,22,23,18,19,20,21,22,23,18,19,22,23,26,27,30,31]
@@ -96,7 +96,7 @@ define void @shuffle_v32i16_to_v16i16_1(<32 x i16>* %L, <16 x i16>* %S) nounwind
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i16_to_v16i16_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,3,5,7,17,19,21,23,9,11,13,15,25,27,29,31]
@@ -113,7 +113,7 @@ define void @shuffle_v32i16_to_v16i16_1(<32 x i16>* %L, <16 x i16>* %S) nounwind
define void @shuffle_v16i32_to_v8i32_1(<16 x i32>* %L, <8 x i32>* %S) nounwind {
; AVX512-LABEL: shuffle_v16i32_to_v8i32_1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %zmm0
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
@@ -129,7 +129,7 @@ define void @shuffle_v16i32_to_v8i32_1(<16 x i32>* %L, <8 x i32>* %S) nounwind {
define void @shuffle_v64i8_to_v16i8_1(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v16i8_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -148,7 +148,7 @@ define void @shuffle_v64i8_to_v16i8_1(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v16i8_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -167,7 +167,7 @@ define void @shuffle_v64i8_to_v16i8_1(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v16i8_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -186,7 +186,7 @@ define void @shuffle_v64i8_to_v16i8_1(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -211,7 +211,7 @@ define void @shuffle_v64i8_to_v16i8_1(<64 x i8>* %L, <16 x i8>* %S) nounwind {
define void @shuffle_v64i8_to_v16i8_2(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v16i8_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -230,7 +230,7 @@ define void @shuffle_v64i8_to_v16i8_2(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v16i8_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -249,7 +249,7 @@ define void @shuffle_v64i8_to_v16i8_2(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v16i8_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -268,7 +268,7 @@ define void @shuffle_v64i8_to_v16i8_2(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -293,7 +293,7 @@ define void @shuffle_v64i8_to_v16i8_2(<64 x i8>* %L, <16 x i8>* %S) nounwind {
define void @shuffle_v64i8_to_v16i8_3(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v16i8_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -312,7 +312,7 @@ define void @shuffle_v64i8_to_v16i8_3(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v16i8_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -331,7 +331,7 @@ define void @shuffle_v64i8_to_v16i8_3(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v16i8_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -350,7 +350,7 @@ define void @shuffle_v64i8_to_v16i8_3(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -375,7 +375,7 @@ define void @shuffle_v64i8_to_v16i8_3(<64 x i8>* %L, <16 x i8>* %S) nounwind {
define void @shuffle_v32i16_to_v8i16_1(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i16_to_v8i16_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -396,7 +396,7 @@ define void @shuffle_v32i16_to_v8i16_1(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i16_to_v8i16_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -417,7 +417,7 @@ define void @shuffle_v32i16_to_v8i16_1(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i16_to_v8i16_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -438,7 +438,7 @@ define void @shuffle_v32i16_to_v8i16_1(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <1,5,9,13,17,21,25,29,u,u,u,u,u,u,u,u>
@@ -454,7 +454,7 @@ define void @shuffle_v32i16_to_v8i16_1(<32 x i16>* %L, <8 x i16>* %S) nounwind {
define void @shuffle_v32i16_to_v8i16_2(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i16_to_v8i16_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -475,7 +475,7 @@ define void @shuffle_v32i16_to_v8i16_2(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i16_to_v8i16_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -496,7 +496,7 @@ define void @shuffle_v32i16_to_v8i16_2(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i16_to_v8i16_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -517,7 +517,7 @@ define void @shuffle_v32i16_to_v8i16_2(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <2,6,10,14,18,22,26,30,u,u,u,u,u,u,u,u>
@@ -533,7 +533,7 @@ define void @shuffle_v32i16_to_v8i16_2(<32 x i16>* %L, <8 x i16>* %S) nounwind {
define void @shuffle_v32i16_to_v8i16_3(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i16_to_v8i16_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -554,7 +554,7 @@ define void @shuffle_v32i16_to_v8i16_3(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i16_to_v8i16_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -575,7 +575,7 @@ define void @shuffle_v32i16_to_v8i16_3(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i16_to_v8i16_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -596,7 +596,7 @@ define void @shuffle_v32i16_to_v8i16_3(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <3,7,11,15,19,23,27,31,u,u,u,u,u,u,u,u>
@@ -612,7 +612,7 @@ define void @shuffle_v32i16_to_v8i16_3(<32 x i16>* %L, <8 x i16>* %S) nounwind {
define void @shuffle_v64i8_to_v8i8_1(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v8i8_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -631,7 +631,7 @@ define void @shuffle_v64i8_to_v8i8_1(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -650,7 +650,7 @@ define void @shuffle_v64i8_to_v8i8_1(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_1:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -669,7 +669,7 @@ define void @shuffle_v64i8_to_v8i8_1(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_1:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -697,7 +697,7 @@ define void @shuffle_v64i8_to_v8i8_1(<64 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v64i8_to_v8i8_2(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v8i8_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -716,7 +716,7 @@ define void @shuffle_v64i8_to_v8i8_2(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -735,7 +735,7 @@ define void @shuffle_v64i8_to_v8i8_2(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_2:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -754,7 +754,7 @@ define void @shuffle_v64i8_to_v8i8_2(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_2:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <1,5,9,13,17,21,25,29,u,u,u,u,u,u,u,u>
@@ -770,7 +770,7 @@ define void @shuffle_v64i8_to_v8i8_2(<64 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v64i8_to_v8i8_3(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v8i8_3:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -789,7 +789,7 @@ define void @shuffle_v64i8_to_v8i8_3(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -808,7 +808,7 @@ define void @shuffle_v64i8_to_v8i8_3(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_3:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -827,7 +827,7 @@ define void @shuffle_v64i8_to_v8i8_3(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_3:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -855,7 +855,7 @@ define void @shuffle_v64i8_to_v8i8_3(<64 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v64i8_to_v8i8_4(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v8i8_4:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -874,7 +874,7 @@ define void @shuffle_v64i8_to_v8i8_4(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_4:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -893,7 +893,7 @@ define void @shuffle_v64i8_to_v8i8_4(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_4:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -912,7 +912,7 @@ define void @shuffle_v64i8_to_v8i8_4(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_4:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <2,6,10,14,18,22,26,30,u,u,u,u,u,u,u,u>
@@ -928,7 +928,7 @@ define void @shuffle_v64i8_to_v8i8_4(<64 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v64i8_to_v8i8_5(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v8i8_5:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -947,7 +947,7 @@ define void @shuffle_v64i8_to_v8i8_5(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_5:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -966,7 +966,7 @@ define void @shuffle_v64i8_to_v8i8_5(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_5:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -985,7 +985,7 @@ define void @shuffle_v64i8_to_v8i8_5(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_5:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1020,7 +1020,7 @@ define void @shuffle_v64i8_to_v8i8_5(<64 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v64i8_to_v8i8_6(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v8i8_6:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1039,7 +1039,7 @@ define void @shuffle_v64i8_to_v8i8_6(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_6:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1058,7 +1058,7 @@ define void @shuffle_v64i8_to_v8i8_6(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_6:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1077,7 +1077,7 @@ define void @shuffle_v64i8_to_v8i8_6(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_6:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <3,7,11,15,19,23,27,31,u,u,u,u,u,u,u,u>
@@ -1093,7 +1093,7 @@ define void @shuffle_v64i8_to_v8i8_6(<64 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v64i8_to_v8i8_7(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v8i8_7:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1112,7 +1112,7 @@ define void @shuffle_v64i8_to_v8i8_7(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_7:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1131,7 +1131,7 @@ define void @shuffle_v64i8_to_v8i8_7(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_7:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1150,7 +1150,7 @@ define void @shuffle_v64i8_to_v8i8_7(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_7:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-128.ll b/test/CodeGen/X86/shuffle-vs-trunc-128.ll
index 3dcad711a73..1bfe37b1497 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-128.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-128.ll
@@ -14,7 +14,7 @@
define void @shuffle_v16i8_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v8i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
@@ -22,42 +22,42 @@ define void @shuffle_v16i8_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v8i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v8i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v8i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v8i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v8i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v8i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -69,7 +69,7 @@ define void @shuffle_v16i8_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
define void @trunc_v8i16_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
; SSE2-LABEL: trunc_v8i16_to_v8i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
@@ -77,42 +77,42 @@ define void @trunc_v8i16_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: trunc_v8i16_to_v8i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: trunc_v8i16_to_v8i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v8i16_to_v8i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v8i16_to_v8i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v8i16_to_v8i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v8i16_to_v8i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -125,7 +125,7 @@ define void @trunc_v8i16_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v8i16_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
; SSE2-LABEL: shuffle_v8i16_to_v4i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -133,41 +133,41 @@ define void @shuffle_v8i16_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v8i16_to_v4i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_to_v4i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v4i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v4i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v4i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v4i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -179,7 +179,7 @@ define void @shuffle_v8i16_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
define void @trunc_v4i32_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
; SSE2-LABEL: trunc_v4i32_to_v4i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -187,41 +187,41 @@ define void @trunc_v4i32_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: trunc_v4i32_to_v4i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: trunc_v4i32_to_v4i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v4i32_to_v4i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i32_to_v4i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i32_to_v4i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i32_to_v4i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -234,37 +234,37 @@ define void @trunc_v4i32_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
define void @shuffle_v4i32_to_v2i32(<4 x i32>* %L, <2 x i32>* %S) nounwind {
; SSE-LABEL: shuffle_v4i32_to_v2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: movq %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_to_v2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT: vmovlps %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v4i32_to_v2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT: vmovlps %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i32_to_v2i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqd %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v4i32_to_v2i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512BW-NEXT: vmovlps %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v4i32_to_v2i32:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqd %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -276,37 +276,37 @@ define void @shuffle_v4i32_to_v2i32(<4 x i32>* %L, <2 x i32>* %S) nounwind {
define void @trunc_v2i64_to_v2i32(<4 x i32>* %L, <2 x i32>* %S) nounwind {
; SSE-LABEL: trunc_v2i64_to_v2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: movq %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: trunc_v2i64_to_v2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT: vmovlps %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT: vmovlps %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqd %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512BW-NEXT: vmovlps %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i32:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqd %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -319,7 +319,7 @@ define void @trunc_v2i64_to_v2i32(<4 x i32>* %L, <2 x i32>* %S) nounwind {
define void @shuffle_v16i8_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v4i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
@@ -328,41 +328,41 @@ define void @shuffle_v16i8_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v4i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movd %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v4i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v4i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v4i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v4i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -374,7 +374,7 @@ define void @shuffle_v16i8_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
define void @trunc_v4i32_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-LABEL: trunc_v4i32_to_v4i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
@@ -383,41 +383,41 @@ define void @trunc_v4i32_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: trunc_v4i32_to_v4i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movd %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: trunc_v4i32_to_v4i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v4i32_to_v4i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i32_to_v4i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i32_to_v4i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i32_to_v4i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -430,41 +430,41 @@ define void @trunc_v4i32_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
define void @shuffle_v8i16_to_v2i16(<8 x i16>* %L, <2 x i16>* %S) nounwind {
; SSE-LABEL: shuffle_v8i16_to_v2i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: movd %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_to_v2i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v2i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v2i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v2i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -476,41 +476,41 @@ define void @shuffle_v8i16_to_v2i16(<8 x i16>* %L, <2 x i16>* %S) nounwind {
define void @trunc_v2i64_to_v2i16(<8 x i16>* %L, <2 x i16>* %S) nounwind {
; SSE-LABEL: trunc_v2i64_to_v2i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: movd %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: trunc_v2i64_to_v2i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -523,7 +523,7 @@ define void @trunc_v2i64_to_v2i16(<8 x i16>* %L, <2 x i16>* %S) nounwind {
define void @shuffle_v16i8_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
@@ -534,41 +534,41 @@ define void @shuffle_v16i8_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
@@ -580,7 +580,7 @@ define void @shuffle_v16i8_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
define void @trunc_v2i64_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: trunc_v2i64_to_v2i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
@@ -591,41 +591,41 @@ define void @trunc_v2i64_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: trunc_v2i64_to_v2i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: trunc_v2i64_to_v2i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-256.ll b/test/CodeGen/X86/shuffle-vs-trunc-256.ll
index e986e1af2fb..f4008dcbcf1 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -12,7 +12,7 @@
define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -24,7 +24,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -36,7 +36,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuffle_v32i8_to_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -54,7 +54,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX1-LABEL: trunc_v16i16_to_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -66,7 +66,7 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v16i16_to_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -78,7 +78,7 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_v16i16_to_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwd (%rdi), %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
@@ -86,7 +86,7 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v16i16_to_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovsxwd (%rdi), %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
@@ -94,7 +94,7 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v16i16_to_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
@@ -102,7 +102,7 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v16i16_to_v16i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
@@ -116,7 +116,7 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX1-LABEL: shuffle_v16i16_to_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -128,7 +128,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_to_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -140,7 +140,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i16_to_v8i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -152,7 +152,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_to_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -167,7 +167,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -179,7 +179,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -200,7 +200,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX1-LABEL: trunc_v8i32_to_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -212,7 +212,7 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v8i32_to_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -221,7 +221,7 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_v8i32_to_v8i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
@@ -229,14 +229,14 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v8i32_to_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovdw %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v8i32_to_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
@@ -244,7 +244,7 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v8i32_to_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovdw %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
@@ -258,7 +258,7 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX-LABEL: shuffle_v8i32_to_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -267,7 +267,7 @@ define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX-NEXT: retq
;
; AVX512-LABEL: shuffle_v8i32_to_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %ymm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -282,7 +282,7 @@ define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX1-LABEL: trunc_v4i64_to_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -291,7 +291,7 @@ define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v4i64_to_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vmovaps %xmm0, (%rsi)
@@ -299,7 +299,7 @@ define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_v4i64_to_v4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
@@ -307,14 +307,14 @@ define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i64_to_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovqd %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i64_to_v4i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
@@ -322,7 +322,7 @@ define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i64_to_v4i32:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovqd %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
@@ -336,7 +336,7 @@ define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v8i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -348,7 +348,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v8i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -360,7 +360,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v8i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -372,7 +372,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v8i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -384,7 +384,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v8i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -396,7 +396,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -417,7 +417,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-LABEL: trunc_v8i32_to_v8i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -429,7 +429,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v8i32_to_v8i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -439,7 +439,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_v8i32_to_v8i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -448,14 +448,14 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v8i32_to_v8i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovdb %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v8i32_to_v8i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -464,7 +464,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
@@ -478,7 +478,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-LABEL: shuffle_v16i16_to_v4i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -491,7 +491,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_to_v4i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -504,7 +504,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i16_to_v4i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -517,7 +517,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_to_v4i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovaps (%rdi), %ymm0
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -526,7 +526,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v4i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -539,7 +539,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovaps (%rdi), %ymm0
; AVX512BWVL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -554,7 +554,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-LABEL: trunc_v4i64_to_v4i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -564,7 +564,7 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v4i64_to_v4i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -573,7 +573,7 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_v4i64_to_v4i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -582,14 +582,14 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i64_to_v4i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovqw %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i64_to_v4i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -598,7 +598,7 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovqw %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
@@ -612,7 +612,7 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: shuffle_v32i8_to_v4i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -624,7 +624,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_to_v4i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -636,7 +636,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: shuffle_v32i8_to_v4i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -648,7 +648,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovaps (%rdi), %ymm0
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -657,7 +657,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -669,7 +669,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovaps (%rdi), %ymm0
; AVX512BWVL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -684,7 +684,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-LABEL: trunc_v4i64_to_v4i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -694,7 +694,7 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v4i64_to_v4i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -703,7 +703,7 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_v4i64_to_v4i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -712,14 +712,14 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i64_to_v4i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovqb %ymm0, (%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i64_to_v4i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -728,7 +728,7 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i64_to_v4i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovqb %ymm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
@@ -744,7 +744,7 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; the resulting BUILD_VECTOR should not be combined to a truncate.
define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX1-LABEL: negative:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[u,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u],zero,zero,zero,zero,zero,zero,zero,xmm0[0,2,4,6,8,10,12,14]
@@ -755,7 +755,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: negative:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -765,7 +765,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: negative:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -775,7 +775,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: negative:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -785,7 +785,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: negative:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -795,7 +795,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: negative:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
; AVX512BWVL-NEXT: movl $65537, %eax # imm = 0x10001
; AVX512BWVL-NEXT: kmovd %eax, %k1
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index 8d62194926b..3fa148405f6 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -10,7 +10,7 @@
define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
@@ -22,7 +22,7 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
@@ -34,7 +34,7 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
@@ -46,7 +46,7 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
@@ -64,7 +64,7 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-LABEL: trunc_v32i16_to_v32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwd (%rdi), %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpmovsxwd 32(%rdi), %zmm1
@@ -75,7 +75,7 @@ define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v32i16_to_v32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovsxwd (%rdi), %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vpmovsxwd 32(%rdi), %zmm1
@@ -86,14 +86,14 @@ define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v32i16_to_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, (%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v32i16_to_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rsi)
; AVX512BWVL-NEXT: vzeroupper
@@ -107,7 +107,7 @@ define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX512F-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX512F-NEXT: vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
@@ -119,7 +119,7 @@ define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
@@ -131,7 +131,7 @@ define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
@@ -145,7 +145,7 @@ define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,16,18,20,22,8,10,12,14,24,26,28,30]
@@ -162,7 +162,7 @@ define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
define void @trunc_v16i32_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512-LABEL: trunc_v16i32_to_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
; AVX512-NEXT: vpmovdw %zmm0, (%rsi)
; AVX512-NEXT: vzeroupper
@@ -176,7 +176,7 @@ define void @trunc_v16i32_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
define void @shuffle_v16i32_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
; AVX512-LABEL: shuffle_v16i32_to_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %zmm0
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
@@ -192,7 +192,7 @@ define void @shuffle_v16i32_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
define void @trunc_v8i64_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
; AVX512-LABEL: trunc_v8i64_to_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vpmovqd %zmm0, (%rsi)
; AVX512-NEXT: vzeroupper
@@ -206,7 +206,7 @@ define void @trunc_v8i64_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -225,7 +225,7 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -244,7 +244,7 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -263,7 +263,7 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -288,7 +288,7 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
define void @trunc_v16i32_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512-LABEL: trunc_v16i32_to_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
; AVX512-NEXT: vpmovdb %zmm0, (%rsi)
; AVX512-NEXT: vzeroupper
@@ -302,7 +302,7 @@ define void @trunc_v16i32_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -323,7 +323,7 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -344,7 +344,7 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -365,7 +365,7 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,4,8,12,16,20,24,28,u,u,u,u,u,u,u,u>
@@ -381,7 +381,7 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
define void @trunc_v8i64_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512-LABEL: trunc_v8i64_to_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vpmovqw %zmm0, (%rsi)
; AVX512-NEXT: vzeroupper
@@ -395,7 +395,7 @@ define void @trunc_v8i64_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -414,7 +414,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -433,7 +433,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -452,7 +452,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,4,8,12,16,20,24,28,u,u,u,u,u,u,u,u>
@@ -468,7 +468,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
define void @trunc_v8i64_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512-LABEL: trunc_v8i64_to_v8i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vpmovqb %zmm0, (%rsi)
; AVX512-NEXT: vzeroupper
@@ -482,7 +482,7 @@ define void @trunc_v8i64_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61(<64 x i8> %x) {
; AVX512F-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -498,7 +498,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -514,7 +514,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
@@ -531,7 +531,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
@@ -552,7 +552,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62(<64 x i8> %x) {
; AVX512F-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -567,7 +567,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -582,7 +582,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -598,7 +598,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -618,7 +618,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
define <4 x double> @PR34175(<32 x i16>* %p) {
; AVX512F-LABEL: PR34175:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqu (%rdi), %ymm0
; AVX512F-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
@@ -632,7 +632,7 @@ define <4 x double> @PR34175(<32 x i16>* %p) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: PR34175:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqu (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
@@ -646,7 +646,7 @@ define <4 x double> @PR34175(<32 x i16>* %p) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: PR34175:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -660,7 +660,7 @@ define <4 x double> @PR34175(<32 x i16>* %p) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: PR34175:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqu64 (%rdi), %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,8,16,24,u,u,u,u,u,u,u,u,u,u,u,u>
diff --git a/test/CodeGen/X86/sincos.ll b/test/CodeGen/X86/sincos.ll
index 63e7b0d11a3..c6c995f1a56 100644
--- a/test/CodeGen/X86/sincos.ll
+++ b/test/CodeGen/X86/sincos.ll
@@ -11,7 +11,7 @@ declare x86_fp80 @sinl(x86_fp80) readonly
define float @test1(float %X) {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
@@ -25,7 +25,7 @@ define float @test1(float %X) {
define double @test2(double %X) {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
@@ -39,7 +39,7 @@ define double @test2(double %X) {
define x86_fp80 @test3(x86_fp80 %X) {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
@@ -60,7 +60,7 @@ declare x86_fp80 @cosl(x86_fp80) readonly
define float @test4(float %X) {
; CHECK-LABEL: test4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
@@ -74,7 +74,7 @@ define float @test4(float %X) {
define double @test5(double %X) {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
@@ -88,7 +88,7 @@ define double @test5(double %X) {
define x86_fp80 @test6(x86_fp80 %X) {
; CHECK-LABEL: test6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: fldt {{[0-9]+}}(%esp)
diff --git a/test/CodeGen/X86/sink-blockfreq.ll b/test/CodeGen/X86/sink-blockfreq.ll
index d0b8972cee5..cad9cf81905 100644
--- a/test/CodeGen/X86/sink-blockfreq.ll
+++ b/test/CodeGen/X86/sink-blockfreq.ll
@@ -9,7 +9,7 @@
define i32 @sink_freqinfo(i32 %a, i32 %b) nounwind uwtable ssp {
; MSINK_BFI-LABEL: sink_freqinfo
; MSINK_BFI: jl
-; MSINK_BFI-NEXT: ## BB#
+; MSINK_BFI-NEXT: ## %bb.
; MSINK_BFI-NEXT: imull
; MSINK_NOBFI-LABEL: sink_freqinfo
diff --git a/test/CodeGen/X86/sink-out-of-loop.ll b/test/CodeGen/X86/sink-out-of-loop.ll
index 4bf829a0273..e7b721d36a0 100644
--- a/test/CodeGen/X86/sink-out-of-loop.ll
+++ b/test/CodeGen/X86/sink-out-of-loop.ll
@@ -68,7 +68,7 @@ loop:
br i1 %exit_cond, label %exit, label %loop
exit:
-; CHECK: BB#2
+; CHECK: %bb.2
; CHECK: imull %eax, %eax
; CHECK: retq
ret i32 %j
diff --git a/test/CodeGen/X86/slow-incdec.ll b/test/CodeGen/X86/slow-incdec.ll
index 5c406c77aa8..5e466f99a38 100644
--- a/test/CodeGen/X86/slow-incdec.ll
+++ b/test/CodeGen/X86/slow-incdec.ll
@@ -4,13 +4,13 @@
define i32 @inc(i32 %x) {
; INCDEC-LABEL: inc:
-; INCDEC: # BB#0:
+; INCDEC: # %bb.0:
; INCDEC-NEXT: movl {{[0-9]+}}(%esp), %eax
; INCDEC-NEXT: incl %eax
; INCDEC-NEXT: retl
;
; ADD-LABEL: inc:
-; ADD: # BB#0:
+; ADD: # %bb.0:
; ADD-NEXT: movl {{[0-9]+}}(%esp), %eax
; ADD-NEXT: addl $1, %eax
; ADD-NEXT: retl
@@ -20,13 +20,13 @@ define i32 @inc(i32 %x) {
define i32 @dec(i32 %x) {
; INCDEC-LABEL: dec:
-; INCDEC: # BB#0:
+; INCDEC: # %bb.0:
; INCDEC-NEXT: movl {{[0-9]+}}(%esp), %eax
; INCDEC-NEXT: decl %eax
; INCDEC-NEXT: retl
;
; ADD-LABEL: dec:
-; ADD: # BB#0:
+; ADD: # %bb.0:
; ADD-NEXT: movl {{[0-9]+}}(%esp), %eax
; ADD-NEXT: addl $-1, %eax
; ADD-NEXT: retl
@@ -36,7 +36,7 @@ define i32 @dec(i32 %x) {
define i32 @inc_size(i32 %x) optsize {
; CHECK-LABEL: inc_size:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: incl %eax
; CHECK-NEXT: retl
@@ -46,7 +46,7 @@ define i32 @inc_size(i32 %x) optsize {
define i32 @dec_size(i32 %x) optsize {
; CHECK-LABEL: dec_size:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: decl %eax
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/slow-pmulld.ll b/test/CodeGen/X86/slow-pmulld.ll
index 1de19d2334d..4d73b11349f 100644
--- a/test/CodeGen/X86/slow-pmulld.ll
+++ b/test/CodeGen/X86/slow-pmulld.ll
@@ -9,7 +9,7 @@
define <4 x i32> @foo(<4 x i8> %A) {
; CHECK32-LABEL: foo:
-; CHECK32: # BB#0:
+; CHECK32: # %bb.0:
; CHECK32-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
; CHECK32-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
; CHECK32-NEXT: movdqa %xmm0, %xmm2
@@ -19,7 +19,7 @@ define <4 x i32> @foo(<4 x i8> %A) {
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: foo:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
; CHECK64-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
; CHECK64-NEXT: movdqa %xmm0, %xmm2
@@ -29,13 +29,13 @@ define <4 x i32> @foo(<4 x i8> %A) {
; CHECK64-NEXT: retq
;
; SSE4-32-LABEL: foo:
-; SSE4-32: # BB#0:
+; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: foo:
-; SSE4-64: # BB#0:
+; SSE4-64: # %bb.0:
; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE4-64-NEXT: retq
@@ -46,25 +46,25 @@ define <4 x i32> @foo(<4 x i8> %A) {
define <4 x i32> @foo_os(<4 x i8> %A) minsize {
; CHECK32-LABEL: foo_os:
-; CHECK32: # BB#0:
+; CHECK32: # %bb.0:
; CHECK32-NEXT: pand {{\.LCPI.*}}, %xmm0
; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: foo_os:
-; CHECK64: # BB#0:
+; CHECK64: # %bb.0:
; CHECK64-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK64-NEXT: pmulld {{.*}}(%rip), %xmm0
; CHECK64-NEXT: retq
;
; SSE4-32-LABEL: foo_os:
-; SSE4-32: # BB#0:
+; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: foo_os:
-; SSE4-64: # BB#0:
+; SSE4-64: # %bb.0:
; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE4-64-NEXT: retq
diff --git a/test/CodeGen/X86/slow-unaligned-mem.ll b/test/CodeGen/X86/slow-unaligned-mem.ll
index 8251eb324a7..a3a21892339 100644
--- a/test/CodeGen/X86/slow-unaligned-mem.ll
+++ b/test/CodeGen/X86/slow-unaligned-mem.ll
@@ -64,7 +64,7 @@
define void @store_zeros(i8* %a) {
; SLOW-NOT: not a recognized processor
; SLOW-LABEL: store_zeros:
-; SLOW: # BB#0:
+; SLOW: # %bb.0:
; SLOW-NEXT: movl
; SLOW-NEXT: movl
; SLOW-NEXT: movl
@@ -85,7 +85,7 @@ define void @store_zeros(i8* %a) {
;
; FAST-NOT: not a recognized processor
; FAST-LABEL: store_zeros:
-; FAST: # BB#0:
+; FAST: # %bb.0:
; FAST-NEXT: movl {{[0-9]+}}(%esp), %eax
; FAST-NOT: movl
call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 64, i32 1, i1 false)
diff --git a/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll b/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll
index 0461ee809ef..ae516c3bf93 100644
--- a/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll
+++ b/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll
@@ -17,7 +17,7 @@ define fp128 @TestSelect(fp128 %a, fp128 %b) {
; CHECK-NEXT callq __subtf3
; CHECK-NEXT testl %ebx, %ebx
; CHECK-NEXT jg .LBB0_2
-; CHECK-NEXT # BB#1:
+; CHECK-NEXT # %bb.1:
; CHECK-NEXT movaps .LCPI0_0(%rip), %xmm0
; CHECK-NEXT .LBB0_2:
; CHECK-NEXT addq $32, %rsp
diff --git a/test/CodeGen/X86/splat-for-size.ll b/test/CodeGen/X86/splat-for-size.ll
index a43e7b76732..5a98a00338b 100644
--- a/test/CodeGen/X86/splat-for-size.ll
+++ b/test/CodeGen/X86/splat-for-size.ll
@@ -8,7 +8,7 @@
; There is no AVX broadcast from double to 128-bit vector because movddup has been around since SSE3 (grrr).
define <2 x double> @splat_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: splat_v2f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -18,7 +18,7 @@ define <2 x double> @splat_v2f64(<2 x double> %x) #0 {
define <4 x double> @splat_v4f64(<4 x double> %x) #1 {
; CHECK-LABEL: splat_v4f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd {{.*}}(%rip), %ymm1
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -28,7 +28,7 @@ define <4 x double> @splat_v4f64(<4 x double> %x) #1 {
define <4 x float> @splat_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: splat_v4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -38,7 +38,7 @@ define <4 x float> @splat_v4f32(<4 x float> %x) #0 {
define <8 x float> @splat_v8f32(<8 x float> %x) #1 {
; CHECK-LABEL: splat_v8f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
@@ -50,13 +50,13 @@ define <8 x float> @splat_v8f32(<8 x float> %x) #1 {
; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
define <2 x i64> @splat_v2i64(<2 x i64> %x) #1 {
; AVX-LABEL: splat_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX2-LABEL: splat_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %xmm1
; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -68,7 +68,7 @@ define <2 x i64> @splat_v2i64(<2 x i64> %x) #1 {
; and then we fake it: use vmovddup to splat 64-bit value.
define <4 x i64> @splat_v4i64(<4 x i64> %x) #0 {
; AVX-LABEL: splat_v4i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX-NEXT: vpaddq %xmm2, %xmm1, %xmm1
@@ -77,7 +77,7 @@ define <4 x i64> @splat_v4i64(<4 x i64> %x) #0 {
; AVX-NEXT: retq
;
; AVX2-LABEL: splat_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -88,13 +88,13 @@ define <4 x i64> @splat_v4i64(<4 x i64> %x) #0 {
; AVX can't do integer splats, so fake it: use vbroadcastss to splat 32-bit value.
define <4 x i32> @splat_v4i32(<4 x i32> %x) #1 {
; AVX-LABEL: splat_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX2-LABEL: splat_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -105,7 +105,7 @@ define <4 x i32> @splat_v4i32(<4 x i32> %x) #1 {
; AVX can't do integer splats, so fake it: use vbroadcastss to splat 32-bit value.
define <8 x i32> @splat_v8i32(<8 x i32> %x) #0 {
; AVX-LABEL: splat_v8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -114,7 +114,7 @@ define <8 x i32> @splat_v8i32(<8 x i32> %x) #0 {
; AVX-NEXT: retq
;
; AVX2-LABEL: splat_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -125,12 +125,12 @@ define <8 x i32> @splat_v8i32(<8 x i32> %x) #0 {
; AVX can't do integer splats, and there's no broadcast fakery for 16-bit. Could use pshuflw, etc?
define <8 x i16> @splat_v8i16(<8 x i16> %x) #1 {
; AVX-LABEL: splat_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX2-LABEL: splat_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastw {{.*}}(%rip), %xmm1
; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -141,7 +141,7 @@ define <8 x i16> @splat_v8i16(<8 x i16> %x) #1 {
; AVX can't do integer splats, and there's no broadcast fakery for 16-bit. Could use pshuflw, etc?
define <16 x i16> @splat_v16i16(<16 x i16> %x) #0 {
; AVX-LABEL: splat_v16i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2]
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -150,7 +150,7 @@ define <16 x i16> @splat_v16i16(<16 x i16> %x) #0 {
; AVX-NEXT: retq
;
; AVX2-LABEL: splat_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastw {{.*}}(%rip), %ymm1
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -161,12 +161,12 @@ define <16 x i16> @splat_v16i16(<16 x i16> %x) #0 {
; AVX can't do integer splats, and there's no broadcast fakery for 8-bit. Could use pshufb, etc?
define <16 x i8> @splat_v16i8(<16 x i8> %x) #1 {
; AVX-LABEL: splat_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX2-LABEL: splat_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb {{.*}}(%rip), %xmm1
; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -177,7 +177,7 @@ define <16 x i8> @splat_v16i8(<16 x i8> %x) #1 {
; AVX can't do integer splats, and there's no broadcast fakery for 8-bit. Could use pshufb, etc?
define <32 x i8> @splat_v32i8(<32 x i8> %x) #0 {
; AVX-LABEL: splat_v32i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -186,7 +186,7 @@ define <32 x i8> @splat_v32i8(<32 x i8> %x) #0 {
; AVX-NEXT: retq
;
; AVX2-LABEL: splat_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb {{.*}}(%rip), %ymm1
; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/split-extend-vector-inreg.ll b/test/CodeGen/X86/split-extend-vector-inreg.ll
index 973395d76c8..b477b29ac54 100644
--- a/test/CodeGen/X86/split-extend-vector-inreg.ll
+++ b/test/CodeGen/X86/split-extend-vector-inreg.ll
@@ -4,7 +4,7 @@
define <4 x i64> @autogen_SD88863() {
; X32-LABEL: autogen_SD88863:
-; X32: # BB#0: # %BB
+; X32: # %bb.0: # %BB
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -15,11 +15,11 @@ define <4 x i64> @autogen_SD88863() {
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: testb %al, %al
; X32-NEXT: jne .LBB0_1
-; X32-NEXT: # BB#2: # %CF240
+; X32-NEXT: # %bb.2: # %CF240
; X32-NEXT: retl
;
; X64-LABEL: autogen_SD88863:
-; X64: # BB#0: # %BB
+; X64: # %bb.0: # %BB
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -30,7 +30,7 @@ define <4 x i64> @autogen_SD88863() {
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: testb %al, %al
; X64-NEXT: jne .LBB0_1
-; X64-NEXT: # BB#2: # %CF240
+; X64-NEXT: # %bb.2: # %CF240
; X64-NEXT: retq
BB:
%I26 = insertelement <4 x i64> undef, i64 undef, i32 2
diff --git a/test/CodeGen/X86/split-store.ll b/test/CodeGen/X86/split-store.ll
index 04dafae94ba..64238901d10 100644
--- a/test/CodeGen/X86/split-store.ll
+++ b/test/CodeGen/X86/split-store.ll
@@ -3,7 +3,7 @@
define void @int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) {
; CHECK-LABEL: int32_float_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, (%rsi)
; CHECK-NEXT: movss %xmm0, 4(%rsi)
; CHECK-NEXT: retq
@@ -18,7 +18,7 @@ define void @int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) {
define void @float_int32_pair(float %tmp1, i32 %tmp2, i64* %ref.tmp) {
; CHECK-LABEL: float_int32_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movss %xmm0, (%rsi)
; CHECK-NEXT: movl %edi, 4(%rsi)
; CHECK-NEXT: retq
@@ -33,7 +33,7 @@ define void @float_int32_pair(float %tmp1, i32 %tmp2, i64* %ref.tmp) {
define void @int16_float_pair(i16 signext %tmp1, float %tmp2, i64* %ref.tmp) {
; CHECK-LABEL: int16_float_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzwl %di, %eax
; CHECK-NEXT: movl %eax, (%rsi)
; CHECK-NEXT: movss %xmm0, 4(%rsi)
@@ -49,7 +49,7 @@ define void @int16_float_pair(i16 signext %tmp1, float %tmp2, i64* %ref.tmp) {
define void @int8_float_pair(i8 signext %tmp1, float %tmp2, i64* %ref.tmp) {
; CHECK-LABEL: int8_float_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: movl %eax, (%rsi)
; CHECK-NEXT: movss %xmm0, 4(%rsi)
@@ -65,7 +65,7 @@ define void @int8_float_pair(i8 signext %tmp1, float %tmp2, i64* %ref.tmp) {
define void @int32_int32_pair(i32 %tmp1, i32 %tmp2, i64* %ref.tmp) {
; CHECK-LABEL: int32_int32_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, (%rdx)
; CHECK-NEXT: movl %esi, 4(%rdx)
; CHECK-NEXT: retq
@@ -79,7 +79,7 @@ define void @int32_int32_pair(i32 %tmp1, i32 %tmp2, i64* %ref.tmp) {
define void @int16_int16_pair(i16 signext %tmp1, i16 signext %tmp2, i32* %ref.tmp) {
; CHECK-LABEL: int16_int16_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movw %di, (%rdx)
; CHECK-NEXT: movw %si, 2(%rdx)
; CHECK-NEXT: retq
@@ -93,7 +93,7 @@ define void @int16_int16_pair(i16 signext %tmp1, i16 signext %tmp2, i32* %ref.tm
define void @int8_int8_pair(i8 signext %tmp1, i8 signext %tmp2, i16* %ref.tmp) {
; CHECK-LABEL: int8_int8_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movb %dil, (%rdx)
; CHECK-NEXT: movb %sil, 1(%rdx)
; CHECK-NEXT: retq
@@ -107,7 +107,7 @@ define void @int8_int8_pair(i8 signext %tmp1, i8 signext %tmp2, i16* %ref.tmp) {
define void @int31_int31_pair(i31 %tmp1, i31 %tmp2, i64* %ref.tmp) {
; CHECK-LABEL: int31_int31_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $2147483647, %edi # imm = 0x7FFFFFFF
; CHECK-NEXT: movl %edi, (%rdx)
; CHECK-NEXT: andl $2147483647, %esi # imm = 0x7FFFFFFF
@@ -123,7 +123,7 @@ define void @int31_int31_pair(i31 %tmp1, i31 %tmp2, i64* %ref.tmp) {
define void @int31_int17_pair(i31 %tmp1, i17 %tmp2, i64* %ref.tmp) {
; CHECK-LABEL: int31_int17_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andl $2147483647, %edi # imm = 0x7FFFFFFF
; CHECK-NEXT: movl %edi, (%rdx)
; CHECK-NEXT: andl $131071, %esi # imm = 0x1FFFF
@@ -139,7 +139,7 @@ define void @int31_int17_pair(i31 %tmp1, i17 %tmp2, i64* %ref.tmp) {
define void @int7_int3_pair(i7 signext %tmp1, i3 signext %tmp2, i16* %ref.tmp) {
; CHECK-LABEL: int7_int3_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andb $127, %dil
; CHECK-NEXT: movb %dil, (%rdx)
; CHECK-NEXT: andb $7, %sil
@@ -155,7 +155,7 @@ define void @int7_int3_pair(i7 signext %tmp1, i3 signext %tmp2, i16* %ref.tmp) {
define void @int24_int24_pair(i24 signext %tmp1, i24 signext %tmp2, i48* %ref.tmp) {
; CHECK-LABEL: int24_int24_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movw %di, (%rdx)
; CHECK-NEXT: shrl $16, %edi
; CHECK-NEXT: movb %dil, 2(%rdx)
@@ -175,7 +175,7 @@ define void @int24_int24_pair(i24 signext %tmp1, i24 signext %tmp2, i48* %ref.tm
define void @int12_int12_pair(i12 signext %tmp1, i12 signext %tmp2, i24* %ref.tmp) {
; CHECK-LABEL: int12_int12_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: shll $12, %eax
; CHECK-NEXT: andl $4095, %edi # imm = 0xFFF
@@ -196,7 +196,7 @@ define void @int12_int12_pair(i12 signext %tmp1, i12 signext %tmp2, i24* %ref.tm
define void @int7_int7_pair(i7 signext %tmp1, i7 signext %tmp2, i14* %ref.tmp) {
; CHECK-LABEL: int7_int7_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shll $7, %esi
; CHECK-NEXT: andl $127, %edi
; CHECK-NEXT: orl %esi, %edi
@@ -215,7 +215,7 @@ define void @int7_int7_pair(i7 signext %tmp1, i7 signext %tmp2, i14* %ref.tmp) {
define void @int1_int1_pair(i1 signext %tmp1, i1 signext %tmp2, i2* %ref.tmp) {
; CHECK-LABEL: int1_int1_pair:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: addb %sil, %sil
; CHECK-NEXT: andb $1, %dil
; CHECK-NEXT: orb %sil, %dil
@@ -232,7 +232,7 @@ define void @int1_int1_pair(i1 signext %tmp1, i1 signext %tmp2, i2* %ref.tmp) {
define void @mbb_int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) {
; CHECK-LABEL: mbb_int32_float_pair:
-; CHECK: # BB#0: # %next
+; CHECK: # %bb.0: # %next
; CHECK-NEXT: movl %edi, (%rsi)
; CHECK-NEXT: movss %xmm0, 4(%rsi)
; CHECK-NEXT: retq
@@ -250,12 +250,12 @@ next:
define void @mbb_int32_float_multi_stores(i32 %tmp1, float %tmp2, i64* %ref.tmp, i64* %ref.tmp1, i1 %cmp) {
; CHECK-LABEL: mbb_int32_float_multi_stores:
-; CHECK: # BB#0: # %bb1
+; CHECK: # %bb.0: # %bb1
; CHECK-NEXT: movl %edi, (%rsi)
; CHECK-NEXT: movss %xmm0, 4(%rsi)
; CHECK-NEXT: testb $1, %cl
; CHECK-NEXT: je .LBB15_2
-; CHECK-NEXT: # BB#1: # %bb2
+; CHECK-NEXT: # %bb.1: # %bb2
; CHECK-NEXT: movl %edi, (%rdx)
; CHECK-NEXT: movss %xmm0, 4(%rdx)
; CHECK-NEXT: .LBB15_2: # %exitbb
diff --git a/test/CodeGen/X86/sqrt-fastmath-tune.ll b/test/CodeGen/X86/sqrt-fastmath-tune.ll
index afa01b674a6..65befee085c 100644
--- a/test/CodeGen/X86/sqrt-fastmath-tune.ll
+++ b/test/CodeGen/X86/sqrt-fastmath-tune.ll
@@ -12,12 +12,12 @@ declare <8 x float> @llvm.sqrt.v8f32(<8 x float>) #0
define float @foo_x1(float %f) #0 {
; SCALAR-EST-LABEL: foo_x1:
-; SCALAR-EST: # BB#0:
+; SCALAR-EST: # %bb.0:
; SCALAR-EST-NEXT: rsqrtss %xmm0
; SCALAR-EST: retq
;
; SCALAR-ACC-LABEL: foo_x1:
-; SCALAR-ACC: # BB#0:
+; SCALAR-ACC: # %bb.0:
; SCALAR-ACC-NEXT: {{^ *v?sqrtss %xmm0}}
; SCALAR-ACC-NEXT: retq
%call = tail call float @llvm.sqrt.f32(float %f) #1
@@ -26,12 +26,12 @@ define float @foo_x1(float %f) #0 {
define <4 x float> @foo_x4(<4 x float> %f) #0 {
; VECTOR-EST-LABEL: foo_x4:
-; VECTOR-EST: # BB#0:
+; VECTOR-EST: # %bb.0:
; VECTOR-EST-NEXT: rsqrtps %xmm0
; VECTOR-EST: retq
;
; VECTOR-ACC-LABEL: foo_x4:
-; VECTOR-ACC: # BB#0:
+; VECTOR-ACC: # %bb.0:
; VECTOR-ACC-NEXT: {{^ *v?sqrtps %xmm0}}
; VECTOR-ACC-NEXT: retq
%call = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %f) #1
@@ -40,12 +40,12 @@ define <4 x float> @foo_x4(<4 x float> %f) #0 {
define <8 x float> @foo_x8(<8 x float> %f) #0 {
; VECTOR-EST-LABEL: foo_x8:
-; VECTOR-EST: # BB#0:
+; VECTOR-EST: # %bb.0:
; VECTOR-EST-NEXT: rsqrtps
; VECTOR-EST: retq
;
; VECTOR-ACC-LABEL: foo_x8:
-; VECTOR-ACC: # BB#0:
+; VECTOR-ACC: # %bb.0:
; VECTOR-ACC-NEXT: {{^ *v?sqrtps %[xy]mm0}}
; VECTOR-ACC-NOT: rsqrt
; VECTOR-ACC: retq
diff --git a/test/CodeGen/X86/sqrt-fastmath.ll b/test/CodeGen/X86/sqrt-fastmath.ll
index af2dcc495f5..ede954d92d3 100644
--- a/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/test/CodeGen/X86/sqrt-fastmath.ll
@@ -12,12 +12,12 @@ declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)
define double @finite_f64_no_estimate(double %d) #0 {
; SSE-LABEL: finite_f64_no_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtsd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: finite_f64_no_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%call = tail call double @__sqrt_finite(double %d) #2
@@ -28,12 +28,12 @@ define double @finite_f64_no_estimate(double %d) #0 {
define double @finite_f64_estimate(double %d) #1 {
; SSE-LABEL: finite_f64_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtsd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: finite_f64_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%call = tail call double @__sqrt_finite(double %d) #2
@@ -42,12 +42,12 @@ define double @finite_f64_estimate(double %d) #1 {
define float @finite_f32_no_estimate(float %f) #0 {
; SSE-LABEL: finite_f32_no_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: finite_f32_no_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%call = tail call float @__sqrtf_finite(float %f) #2
@@ -56,7 +56,7 @@ define float @finite_f32_no_estimate(float %f) #0 {
define float @finite_f32_estimate(float %f) #1 {
; SSE-LABEL: finite_f32_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rsqrtss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
@@ -71,7 +71,7 @@ define float @finite_f32_estimate(float %f) #1 {
; SSE-NEXT: retq
;
; AVX-LABEL: finite_f32_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
@@ -88,7 +88,7 @@ define float @finite_f32_estimate(float %f) #1 {
define x86_fp80 @finite_f80_no_estimate(x86_fp80 %ld) #0 {
; CHECK-LABEL: finite_f80_no_estimate:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fsqrt
; CHECK-NEXT: retq
@@ -100,7 +100,7 @@ define x86_fp80 @finite_f80_no_estimate(x86_fp80 %ld) #0 {
define x86_fp80 @finite_f80_estimate_but_no(x86_fp80 %ld) #1 {
; CHECK-LABEL: finite_f80_estimate_but_no:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fsqrt
; CHECK-NEXT: retq
@@ -110,14 +110,14 @@ define x86_fp80 @finite_f80_estimate_but_no(x86_fp80 %ld) #1 {
define float @f32_no_estimate(float %x) #0 {
; SSE-LABEL: f32_no_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtss %xmm0, %xmm1
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f32_no_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
@@ -129,7 +129,7 @@ define float @f32_no_estimate(float %x) #0 {
define float @f32_estimate(float %x) #1 {
; SSE-LABEL: f32_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rsqrtss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm2
@@ -141,7 +141,7 @@ define float @f32_estimate(float %x) #1 {
; SSE-NEXT: retq
;
; AVX-LABEL: f32_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vmulss %xmm2, %xmm0, %xmm0
@@ -156,14 +156,14 @@ define float @f32_estimate(float %x) #1 {
define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
; SSE-LABEL: v4f32_no_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtps %xmm0, %xmm1
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; SSE-NEXT: divps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: v4f32_no_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtps %xmm0, %xmm0
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
@@ -175,7 +175,7 @@ define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
; SSE-LABEL: v4f32_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rsqrtps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm2
@@ -187,7 +187,7 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
; SSE-NEXT: retq
;
; AVX-LABEL: v4f32_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrsqrtps %xmm0, %xmm1
; AVX-NEXT: vmulps %xmm1, %xmm1, %xmm2
; AVX-NEXT: vmulps %xmm2, %xmm0, %xmm0
@@ -202,7 +202,7 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
; SSE-LABEL: v8f32_no_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtps %xmm1, %xmm2
; SSE-NEXT: sqrtps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -212,7 +212,7 @@ define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
; SSE-NEXT: retq
;
; AVX-LABEL: v8f32_no_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtps %ymm0, %ymm0
; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; AVX-NEXT: vdivps %ymm0, %ymm1, %ymm0
@@ -224,7 +224,7 @@ define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
; SSE-LABEL: v8f32_estimate:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rsqrtps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
; SSE-NEXT: movaps %xmm3, %xmm2
@@ -246,7 +246,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
; SSE-NEXT: retq
;
; AVX-LABEL: v8f32_estimate:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrsqrtps %ymm0, %ymm1
; AVX-NEXT: vmulps %ymm1, %ymm1, %ymm2
; AVX-NEXT: vmulps %ymm2, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/sqrt-partial.ll b/test/CodeGen/X86/sqrt-partial.ll
index a7d4ef29c52..6f0d5249078 100644
--- a/test/CodeGen/X86/sqrt-partial.ll
+++ b/test/CodeGen/X86/sqrt-partial.ll
@@ -10,11 +10,11 @@
define float @f(float %val) nounwind {
; CHECK-LABEL: f:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: ucomiss %xmm1, %xmm0
; CHECK-NEXT: jb .LBB0_2
-; CHECK-NEXT: # BB#1: # %.split
+; CHECK-NEXT: # %bb.1: # %.split
; CHECK-NEXT: sqrtss %xmm0, %xmm0
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB0_2: # %call.sqrt
@@ -25,11 +25,11 @@ define float @f(float %val) nounwind {
define double @d(double %val) nounwind {
; CHECK-LABEL: d:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: ucomisd %xmm1, %xmm0
; CHECK-NEXT: jb .LBB1_2
-; CHECK-NEXT: # BB#1: # %.split
+; CHECK-NEXT: # %bb.1: # %.split
; CHECK-NEXT: sqrtsd %xmm0, %xmm0
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB1_2: # %call.sqrt
diff --git a/test/CodeGen/X86/sse-align-12.ll b/test/CodeGen/X86/sse-align-12.ll
index 688dd56cc00..15c3cb014ab 100644
--- a/test/CodeGen/X86/sse-align-12.ll
+++ b/test/CodeGen/X86/sse-align-12.ll
@@ -3,7 +3,7 @@
define <4 x float> @a(<4 x float>* %y) nounwind {
; CHECK-LABEL: a:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movups (%rdi), %xmm0
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT: retq
@@ -21,7 +21,7 @@ define <4 x float> @a(<4 x float>* %y) nounwind {
define <4 x float> @b(<4 x float>* %y, <4 x float> %z) nounwind {
; CHECK-LABEL: b:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movups (%rdi), %xmm1
; CHECK-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: retq
@@ -39,7 +39,7 @@ define <4 x float> @b(<4 x float>* %y, <4 x float> %z) nounwind {
define <2 x double> @c(<2 x double>* %y) nounwind {
; CHECK-LABEL: c:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movupd (%rdi), %xmm0
; CHECK-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: retq
@@ -53,7 +53,7 @@ define <2 x double> @c(<2 x double>* %y) nounwind {
define <2 x double> @d(<2 x double>* %y, <2 x double> %z) nounwind {
; CHECK-LABEL: d:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movups (%rdi), %xmm1
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/sse-fcopysign.ll b/test/CodeGen/X86/sse-fcopysign.ll
index 6805334140f..883fb5290f0 100644
--- a/test/CodeGen/X86/sse-fcopysign.ll
+++ b/test/CodeGen/X86/sse-fcopysign.ll
@@ -8,7 +8,7 @@
define float @tst1(float %a, float %b) nounwind {
; X32-LABEL: tst1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $8, %esp
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -19,7 +19,7 @@ define float @tst1(float %a, float %b) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: tst1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, %xmm2
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: movaps %xmm2, %xmm1
@@ -30,7 +30,7 @@ define float @tst1(float %a, float %b) nounwind {
define double @tst2(double %a, float %b, float %c) nounwind {
; X32-LABEL: tst2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subl $16, %esp
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -43,7 +43,7 @@ define double @tst2(double %a, float %b, float %c) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: tst2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addss %xmm2, %xmm1
; X64-NEXT: cvtss2sd %xmm1, %xmm1
; X64-NEXT: jmp copysign # TAILCALL
@@ -62,7 +62,7 @@ declare double @copysign(double, double)
define float @int1(float %a, float %b) nounwind {
; X32-LABEL: int1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: andps {{\.LCPI.*}}, %xmm0
@@ -75,7 +75,7 @@ define float @int1(float %a, float %b) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: int1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: andps {{.*}}(%rip), %xmm1
; X64-NEXT: orps %xmm1, %xmm0
@@ -86,7 +86,7 @@ define float @int1(float %a, float %b) nounwind {
define double @int2(double %a, float %b, float %c) nounwind {
; X32-LABEL: int2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
@@ -105,7 +105,7 @@ define double @int2(double %a, float %b, float %c) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: int2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addss %xmm2, %xmm1
; X64-NEXT: cvtss2sd %xmm1, %xmm1
; X64-NEXT: andps {{.*}}(%rip), %xmm1
@@ -120,13 +120,13 @@ define double @int2(double %a, float %b, float %c) nounwind {
define float @cst1() nounwind {
; X32-LABEL: cst1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: fld1
; X32-NEXT: fchs
; X32-NEXT: retl
;
; X64-LABEL: cst1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: retq
%tmp = tail call float @llvm.copysign.f32( float 1.0, float -2.0 )
@@ -135,13 +135,13 @@ define float @cst1() nounwind {
define double @cst2() nounwind {
; X32-LABEL: cst2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: fldz
; X32-NEXT: fchs
; X32-NEXT: retl
;
; X64-LABEL: cst2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
%tmp1 = fadd float -1.0, -1.0
diff --git a/test/CodeGen/X86/sse-fsignum.ll b/test/CodeGen/X86/sse-fsignum.ll
index d58bec2727f..6712c0dccc5 100644
--- a/test/CodeGen/X86/sse-fsignum.ll
+++ b/test/CodeGen/X86/sse-fsignum.ll
@@ -11,7 +11,7 @@
define void @signum32a(<4 x float>*) {
; AVX-LABEL: signum32a:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vcmpltps %xmm1, %xmm0, %xmm2
@@ -34,7 +34,7 @@ entry:
define void @signum64a(<2 x double>*) {
; AVX-LABEL: signum64a:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd (%rdi), %xmm0
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vcmpltpd %xmm1, %xmm0, %xmm2
@@ -63,7 +63,7 @@ entry:
define void @signum32b(<8 x float>*) {
; AVX1-LABEL: signum32b:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovaps (%rdi), %ymm0
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
@@ -76,7 +76,7 @@ define void @signum32b(<8 x float>*) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: signum32b:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
@@ -89,7 +89,7 @@ define void @signum32b(<8 x float>*) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: signum32b:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovaps (%rdi), %ymm0
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vcmpltps %zmm1, %zmm0, %k1
@@ -117,7 +117,7 @@ entry:
define void @signum64b(<4 x double>*) {
; AVX1-LABEL: signum64b:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovapd (%rdi), %ymm0
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
@@ -134,7 +134,7 @@ define void @signum64b(<4 x double>*) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: signum64b:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovapd (%rdi), %ymm0
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
@@ -151,7 +151,7 @@ define void @signum64b(<4 x double>*) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: signum64b:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovapd (%rdi), %ymm0
; AVX512F-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
@@ -181,7 +181,7 @@ entry:
define void @signum32c(<8 x float>*) {
; AVX-LABEL: signum32c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
@@ -207,7 +207,7 @@ entry:
define void @signum64c(<4 x double>*) {
; AVX1-LABEL: signum64c:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovapd (%rdi), %ymm0
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
@@ -223,7 +223,7 @@ define void @signum64c(<4 x double>*) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: signum64c:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovapd (%rdi), %ymm0
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
@@ -237,7 +237,7 @@ define void @signum64c(<4 x double>*) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: signum64c:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovapd (%rdi), %ymm0
; AVX512F-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
diff --git a/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll b/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll
index aad00e71dda..753f787e2d9 100644
--- a/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll
+++ b/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll
@@ -5,7 +5,7 @@
define <4 x float> @test_mm_cvtsi64_ss(<4 x float> %a0, i64 %a1) nounwind {
; X64-LABEL: test_mm_cvtsi64_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtsi2ssq %rdi, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1)
@@ -15,7 +15,7 @@ declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
define i64 @test_mm_cvtss_si64(<4 x float> %a0) nounwind {
; X64-LABEL: test_mm_cvtss_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtss2si %xmm0, %rax
; X64-NEXT: retq
%res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0)
@@ -25,7 +25,7 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
define i64 @test_mm_cvttss_si64(<4 x float> %a0) nounwind {
; X64-LABEL: test_mm_cvttss_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttss2si %xmm0, %rax
; X64-NEXT: retq
%res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0)
diff --git a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
index 9f738aa9a0e..649a86dc1fc 100644
--- a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define <4 x float> @test_mm_add_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_add_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_add_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: retq
%res = fadd <4 x float> %a0, %a1
@@ -20,12 +20,12 @@ define <4 x float> @test_mm_add_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
define <4 x float> @test_mm_add_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_add_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_add_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addss %xmm1, %xmm0
; X64-NEXT: retq
%ext0 = extractelement <4 x float> %a0, i32 0
@@ -37,12 +37,12 @@ define <4 x float> @test_mm_add_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
define <4 x float> @test_mm_and_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_and_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: andps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_and_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <4 x float> %a0 to <4 x i32>
@@ -54,12 +54,12 @@ define <4 x float> @test_mm_and_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
define <4 x float> @test_mm_andnot_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_andnot_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: andnps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_andnot_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andnps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <4 x float> %a0 to <4 x i32>
@@ -72,12 +72,12 @@ define <4 x float> @test_mm_andnot_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpeq_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpeqps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpeq_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpeqps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp oeq <4 x float> %a0, %a1
@@ -88,12 +88,12 @@ define <4 x float> @test_mm_cmpeq_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpeq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpeqss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpeq_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpeqss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 0)
@@ -103,13 +103,13 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
define <4 x float> @test_mm_cmpge_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpge_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpleps %xmm0, %xmm1
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpge_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpleps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -121,13 +121,13 @@ define <4 x float> @test_mm_cmpge_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpge_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpge_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpless %xmm0, %xmm1
; X32-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpge_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpless %xmm0, %xmm1
; X64-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
@@ -138,13 +138,13 @@ define <4 x float> @test_mm_cmpge_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpgt_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpgt_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpltps %xmm0, %xmm1
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpgt_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpltps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -156,13 +156,13 @@ define <4 x float> @test_mm_cmpgt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpgt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpgt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpltss %xmm0, %xmm1
; X32-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpgt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpltss %xmm0, %xmm1
; X64-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
@@ -173,12 +173,12 @@ define <4 x float> @test_mm_cmpgt_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmple_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmple_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpleps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmple_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpleps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp ole <4 x float> %a0, %a1
@@ -189,12 +189,12 @@ define <4 x float> @test_mm_cmple_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmple_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmple_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpless %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmple_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpless %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 2)
@@ -203,12 +203,12 @@ define <4 x float> @test_mm_cmple_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmplt_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmplt_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpltps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmplt_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpltps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp olt <4 x float> %a0, %a1
@@ -219,12 +219,12 @@ define <4 x float> @test_mm_cmplt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmplt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmplt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpltss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmplt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpltss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 1)
@@ -233,12 +233,12 @@ define <4 x float> @test_mm_cmplt_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpneq_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpneq_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpneqps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpneq_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpneqps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp une <4 x float> %a0, %a1
@@ -249,12 +249,12 @@ define <4 x float> @test_mm_cmpneq_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpneq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpneq_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpneqss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpneq_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpneqss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 4)
@@ -263,13 +263,13 @@ define <4 x float> @test_mm_cmpneq_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpnge_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpnge_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnleps %xmm0, %xmm1
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnge_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnleps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -281,13 +281,13 @@ define <4 x float> @test_mm_cmpnge_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpnge_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpnge_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnless %xmm0, %xmm1
; X32-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnge_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnless %xmm0, %xmm1
; X64-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
@@ -298,13 +298,13 @@ define <4 x float> @test_mm_cmpnge_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpngt_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpngt_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnltps %xmm0, %xmm1
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpngt_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnltps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -316,13 +316,13 @@ define <4 x float> @test_mm_cmpngt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpngt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpngt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnltss %xmm0, %xmm1
; X32-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpngt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnltss %xmm0, %xmm1
; X64-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
@@ -333,12 +333,12 @@ define <4 x float> @test_mm_cmpngt_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpnle_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpnle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnleps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnleps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp ugt <4 x float> %a0, %a1
@@ -349,12 +349,12 @@ define <4 x float> @test_mm_cmpnle_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpnle_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpnle_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnless %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnle_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnless %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 6)
@@ -363,12 +363,12 @@ define <4 x float> @test_mm_cmpnle_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpnlt_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpnlt_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnltps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnlt_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnltps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp uge <4 x float> %a0, %a1
@@ -379,12 +379,12 @@ define <4 x float> @test_mm_cmpnlt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpnlt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpnlt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnltss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnlt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnltss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 5)
@@ -393,12 +393,12 @@ define <4 x float> @test_mm_cmpnlt_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpord_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpord_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpordps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpord_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpordps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp ord <4 x float> %a0, %a1
@@ -409,12 +409,12 @@ define <4 x float> @test_mm_cmpord_ps(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpord_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpord_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpordss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpord_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpordss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 7)
@@ -423,12 +423,12 @@ define <4 x float> @test_mm_cmpord_ss(<4 x float> %a0, <4 x float> %a1) nounwind
define <4 x float> @test_mm_cmpunord_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpunord_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpunordps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpunord_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpunordps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp uno <4 x float> %a0, %a1
@@ -439,12 +439,12 @@ define <4 x float> @test_mm_cmpunord_ps(<4 x float> %a0, <4 x float> %a1) nounwi
define <4 x float> @test_mm_cmpunord_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpunord_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpunordss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpunord_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpunordss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 3)
@@ -453,7 +453,7 @@ define <4 x float> @test_mm_cmpunord_ss(<4 x float> %a0, <4 x float> %a1) nounwi
define i32 @test_mm_comieq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_comieq_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: comiss %xmm1, %xmm0
; X32-NEXT: setnp %al
; X32-NEXT: sete %cl
@@ -462,7 +462,7 @@ define i32 @test_mm_comieq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comieq_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: comiss %xmm1, %xmm0
; X64-NEXT: setnp %al
; X64-NEXT: sete %cl
@@ -476,14 +476,14 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_comige_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_comige_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: comiss %xmm1, %xmm0
; X32-NEXT: setae %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comige_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: comiss %xmm1, %xmm0
; X64-NEXT: setae %al
@@ -495,14 +495,14 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_comigt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_comigt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: comiss %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comigt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: comiss %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -514,14 +514,14 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_comile_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_comile_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: comiss %xmm0, %xmm1
; X32-NEXT: setae %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comile_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: comiss %xmm0, %xmm1
; X64-NEXT: setae %al
@@ -533,14 +533,14 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_comilt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_comilt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: comiss %xmm0, %xmm1
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comilt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: comiss %xmm0, %xmm1
; X64-NEXT: seta %al
@@ -552,7 +552,7 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_comineq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_comineq_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: comiss %xmm1, %xmm0
; X32-NEXT: setp %al
; X32-NEXT: setne %cl
@@ -561,7 +561,7 @@ define i32 @test_mm_comineq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comineq_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: comiss %xmm1, %xmm0
; X64-NEXT: setp %al
; X64-NEXT: setne %cl
@@ -575,12 +575,12 @@ declare i32 @llvm.x86.sse.comineq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_cvt_ss2si(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvt_ss2si:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtss2si %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvt_ss2si:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtss2si %xmm0, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0)
@@ -590,12 +590,12 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
define <4 x float> @test_mm_cvtsi32_ss(<4 x float> %a0, i32 %a1) nounwind {
; X32-LABEL: test_mm_cvtsi32_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtsi32_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtsi2ssl %edi, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a0, i32 %a1)
@@ -605,7 +605,7 @@ declare <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float>, i32) nounwind readnone
define float @test_mm_cvtss_f32(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvtss_f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movss %xmm0, (%esp)
; X32-NEXT: flds (%esp)
@@ -613,7 +613,7 @@ define float @test_mm_cvtss_f32(<4 x float> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtss_f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = extractelement <4 x float> %a0, i32 0
ret float %res
@@ -621,12 +621,12 @@ define float @test_mm_cvtss_f32(<4 x float> %a0) nounwind {
define i32 @test_mm_cvtss_si32(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvtss_si32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtss2si %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtss_si32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtss2si %xmm0, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0)
@@ -635,12 +635,12 @@ define i32 @test_mm_cvtss_si32(<4 x float> %a0) nounwind {
define i32 @test_mm_cvttss_si(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvttss_si:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvttss2si %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvttss_si:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0)
@@ -650,12 +650,12 @@ declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
define i32 @test_mm_cvttss_si32(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvttss_si32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvttss2si %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvttss_si32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0)
@@ -664,12 +664,12 @@ define i32 @test_mm_cvttss_si32(<4 x float> %a0) nounwind {
define <4 x float> @test_mm_div_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_div_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: divps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_div_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: divps %xmm1, %xmm0
; X64-NEXT: retq
%res = fdiv <4 x float> %a0, %a1
@@ -678,12 +678,12 @@ define <4 x float> @test_mm_div_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
define <4 x float> @test_mm_div_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_div_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: divss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_div_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: divss %xmm1, %xmm0
; X64-NEXT: retq
%ext0 = extractelement <4 x float> %a0, i32 0
@@ -695,7 +695,7 @@ define <4 x float> @test_mm_div_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
define i32 @test_MM_GET_EXCEPTION_MASK() nounwind {
; X32-LABEL: test_MM_GET_EXCEPTION_MASK:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl %esp, %eax
; X32-NEXT: stmxcsr (%eax)
@@ -705,7 +705,7 @@ define i32 @test_MM_GET_EXCEPTION_MASK() nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_MM_GET_EXCEPTION_MASK:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %eax
@@ -722,7 +722,7 @@ declare void @llvm.x86.sse.stmxcsr(i8*) nounwind readnone
define i32 @test_MM_GET_EXCEPTION_STATE() nounwind {
; X32-LABEL: test_MM_GET_EXCEPTION_STATE:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl %esp, %eax
; X32-NEXT: stmxcsr (%eax)
@@ -732,7 +732,7 @@ define i32 @test_MM_GET_EXCEPTION_STATE() nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_MM_GET_EXCEPTION_STATE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %eax
@@ -748,7 +748,7 @@ define i32 @test_MM_GET_EXCEPTION_STATE() nounwind {
define i32 @test_MM_GET_FLUSH_ZERO_MODE() nounwind {
; X32-LABEL: test_MM_GET_FLUSH_ZERO_MODE:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl %esp, %eax
; X32-NEXT: stmxcsr (%eax)
@@ -758,7 +758,7 @@ define i32 @test_MM_GET_FLUSH_ZERO_MODE() nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_MM_GET_FLUSH_ZERO_MODE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %eax
@@ -774,7 +774,7 @@ define i32 @test_MM_GET_FLUSH_ZERO_MODE() nounwind {
define i32 @test_MM_GET_ROUNDING_MODE() nounwind {
; X32-LABEL: test_MM_GET_ROUNDING_MODE:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl %esp, %eax
; X32-NEXT: stmxcsr (%eax)
@@ -784,7 +784,7 @@ define i32 @test_MM_GET_ROUNDING_MODE() nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_MM_GET_ROUNDING_MODE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %eax
@@ -800,7 +800,7 @@ define i32 @test_MM_GET_ROUNDING_MODE() nounwind {
define i32 @test_mm_getcsr() nounwind {
; X32-LABEL: test_mm_getcsr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl %esp, %eax
; X32-NEXT: stmxcsr (%eax)
@@ -809,7 +809,7 @@ define i32 @test_mm_getcsr() nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_getcsr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %eax
@@ -823,13 +823,13 @@ define i32 @test_mm_getcsr() nounwind {
define <4 x float> @test_mm_load_ps(float* %a0) nounwind {
; X32-LABEL: test_mm_load_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_load_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to <4 x float>*
@@ -839,14 +839,14 @@ define <4 x float> @test_mm_load_ps(float* %a0) nounwind {
define <4 x float> @test_mm_load_ps1(float* %a0) nounwind {
; X32-LABEL: test_mm_load_ps1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_load_ps1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: retq
@@ -860,13 +860,13 @@ define <4 x float> @test_mm_load_ps1(float* %a0) nounwind {
define <4 x float> @test_mm_load_ss(float* %a0) nounwind {
; X32-LABEL: test_mm_load_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_load_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: retq
%ld = load float, float* %a0, align 1
@@ -879,14 +879,14 @@ define <4 x float> @test_mm_load_ss(float* %a0) nounwind {
define <4 x float> @test_mm_load1_ps(float* %a0) nounwind {
; X32-LABEL: test_mm_load1_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_load1_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: retq
@@ -900,7 +900,7 @@ define <4 x float> @test_mm_load1_ps(float* %a0) nounwind {
define <4 x float> @test_mm_loadh_pi(<4 x float> %a0, x86_mmx* %a1) {
; X32-LABEL: test_mm_loadh_pi:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -909,7 +909,7 @@ define <4 x float> @test_mm_loadh_pi(<4 x float> %a0, x86_mmx* %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadh_pi:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT: shrq $32, %rax
@@ -930,7 +930,7 @@ define <4 x float> @test_mm_loadh_pi(<4 x float> %a0, x86_mmx* %a1) {
define <4 x float> @test_mm_loadl_pi(<4 x float> %a0, x86_mmx* %a1) {
; X32-LABEL: test_mm_loadl_pi:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -940,7 +940,7 @@ define <4 x float> @test_mm_loadl_pi(<4 x float> %a0, x86_mmx* %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadl_pi:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT: shrq $32, %rax
@@ -962,14 +962,14 @@ define <4 x float> @test_mm_loadl_pi(<4 x float> %a0, x86_mmx* %a1) {
define <4 x float> @test_mm_loadr_ps(float* %a0) nounwind {
; X32-LABEL: test_mm_loadr_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadr_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; X64-NEXT: retq
@@ -981,13 +981,13 @@ define <4 x float> @test_mm_loadr_ps(float* %a0) nounwind {
define <4 x float> @test_mm_loadu_ps(float* %a0) nounwind {
; X32-LABEL: test_mm_loadu_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movups (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadu_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movups (%rdi), %xmm0
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to <4 x float>*
@@ -997,12 +997,12 @@ define <4 x float> @test_mm_loadu_ps(float* %a0) nounwind {
define <4 x float> @test_mm_max_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_max_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: maxps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: maxps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
@@ -1012,12 +1012,12 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_mm_max_ss(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_max_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
@@ -1027,12 +1027,12 @@ declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_mm_min_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_min_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: minps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: minps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
@@ -1042,12 +1042,12 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_mm_min_ss(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_min_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: minss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: minss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
@@ -1057,12 +1057,12 @@ declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_mm_move_ss(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_move_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_move_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -1071,12 +1071,12 @@ define <4 x float> @test_mm_move_ss(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mm_movehl_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_movehl_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movehl_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
@@ -1085,12 +1085,12 @@ define <4 x float> @test_mm_movehl_ps(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mm_movelh_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_movelh_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movelh_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1099,12 +1099,12 @@ define <4 x float> @test_mm_movelh_ps(<4 x float> %a0, <4 x float> %a1) {
define i32 @test_mm_movemask_ps(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_movemask_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movmskps %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movemask_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movmskps %xmm0, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
@@ -1114,12 +1114,12 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
define <4 x float> @test_mm_mul_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_mul_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: mulps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mul_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: mulps %xmm1, %xmm0
; X64-NEXT: retq
%res = fmul <4 x float> %a0, %a1
@@ -1128,12 +1128,12 @@ define <4 x float> @test_mm_mul_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
define <4 x float> @test_mm_mul_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_mul_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: mulss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mul_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: retq
%ext0 = extractelement <4 x float> %a0, i32 0
@@ -1145,12 +1145,12 @@ define <4 x float> @test_mm_mul_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
define <4 x float> @test_mm_or_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_or_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: orps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_or_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <4 x float> %a0 to <4 x i32>
@@ -1162,13 +1162,13 @@ define <4 x float> @test_mm_or_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
define void @test_mm_prefetch(i8* %a0) {
; X32-LABEL: test_mm_prefetch:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: prefetchnta (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_prefetch:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: prefetchnta (%rdi)
; X64-NEXT: retq
call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
@@ -1178,12 +1178,12 @@ declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind readnone
define <4 x float> @test_mm_rcp_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_rcp_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: rcpps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_rcp_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rcpps %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
@@ -1193,12 +1193,12 @@ declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
define <4 x float> @test_mm_rcp_ss(<4 x float> %a0) {
; X32-LABEL: test_mm_rcp_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: rcpss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_rcp_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rcpss %xmm0, %xmm0
; X64-NEXT: retq
%rcp = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0)
@@ -1216,12 +1216,12 @@ declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
define <4 x float> @test_mm_rsqrt_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_rsqrt_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: rsqrtps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_rsqrt_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rsqrtps %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
@@ -1231,12 +1231,12 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_mm_rsqrt_ss(<4 x float> %a0) {
; X32-LABEL: test_mm_rsqrt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: rsqrtss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_rsqrt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: rsqrtss %xmm0, %xmm0
; X64-NEXT: retq
%rsqrt = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0)
@@ -1254,7 +1254,7 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
define void @test_MM_SET_EXCEPTION_MASK(i32 %a0) nounwind {
; X32-LABEL: test_MM_SET_EXCEPTION_MASK:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %esp, %ecx
@@ -1268,7 +1268,7 @@ define void @test_MM_SET_EXCEPTION_MASK(i32 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_MM_SET_EXCEPTION_MASK:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
@@ -1291,7 +1291,7 @@ declare void @llvm.x86.sse.ldmxcsr(i8*) nounwind readnone
define void @test_MM_SET_EXCEPTION_STATE(i32 %a0) nounwind {
; X32-LABEL: test_MM_SET_EXCEPTION_STATE:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %esp, %ecx
@@ -1305,7 +1305,7 @@ define void @test_MM_SET_EXCEPTION_STATE(i32 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_MM_SET_EXCEPTION_STATE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
@@ -1327,7 +1327,7 @@ define void @test_MM_SET_EXCEPTION_STATE(i32 %a0) nounwind {
define void @test_MM_SET_FLUSH_ZERO_MODE(i32 %a0) nounwind {
; X32-LABEL: test_MM_SET_FLUSH_ZERO_MODE:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %esp, %ecx
@@ -1341,7 +1341,7 @@ define void @test_MM_SET_FLUSH_ZERO_MODE(i32 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_MM_SET_FLUSH_ZERO_MODE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
@@ -1363,7 +1363,7 @@ define void @test_MM_SET_FLUSH_ZERO_MODE(i32 %a0) nounwind {
define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) nounwind {
; X32-LABEL: test_mm_set_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -1374,7 +1374,7 @@ define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) n
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X64-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0]
@@ -1389,13 +1389,13 @@ define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) n
define <4 x float> @test_mm_set_ps1(float %a0) nounwind {
; X32-LABEL: test_mm_set_ps1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_ps1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: retq
%res0 = insertelement <4 x float> undef, float %a0, i32 0
@@ -1407,7 +1407,7 @@ define <4 x float> @test_mm_set_ps1(float %a0) nounwind {
define void @test_MM_SET_ROUNDING_MODE(i32 %a0) nounwind {
; X32-LABEL: test_MM_SET_ROUNDING_MODE:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %esp, %ecx
@@ -1421,7 +1421,7 @@ define void @test_MM_SET_ROUNDING_MODE(i32 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_MM_SET_ROUNDING_MODE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
@@ -1443,14 +1443,14 @@ define void @test_MM_SET_ROUNDING_MODE(i32 %a0) nounwind {
define <4 x float> @test_mm_set_ss(float %a0) nounwind {
; X32-LABEL: test_mm_set_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: movaps %xmm1, %xmm0
@@ -1464,13 +1464,13 @@ define <4 x float> @test_mm_set_ss(float %a0) nounwind {
define <4 x float> @test_mm_set1_ps(float %a0) nounwind {
; X32-LABEL: test_mm_set1_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set1_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: retq
%res0 = insertelement <4 x float> undef, float %a0, i32 0
@@ -1482,13 +1482,13 @@ define <4 x float> @test_mm_set1_ps(float %a0) nounwind {
define void @test_mm_setcsr(i32 %a0) nounwind {
; X32-LABEL: test_mm_setcsr:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: ldmxcsr (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setcsr:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: movl %edi, -{{[0-9]+}}(%rsp)
; X64-NEXT: ldmxcsr (%rax)
@@ -1502,7 +1502,7 @@ define void @test_mm_setcsr(i32 %a0) nounwind {
define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3) nounwind {
; X32-LABEL: test_mm_setr_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
@@ -1513,7 +1513,7 @@ define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setr_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -1527,12 +1527,12 @@ define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3)
define <4 x float> @test_mm_setzero_ps() {
; X32-LABEL: test_mm_setzero_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setzero_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: retq
ret <4 x float> zeroinitializer
@@ -1540,12 +1540,12 @@ define <4 x float> @test_mm_setzero_ps() {
define void @test_mm_sfence() nounwind {
; X32-LABEL: test_mm_sfence:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: sfence
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sfence:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sfence
; X64-NEXT: retq
call void @llvm.x86.sse.sfence()
@@ -1555,12 +1555,12 @@ declare void @llvm.x86.sse.sfence() nounwind readnone
define <4 x float> @test_mm_shuffle_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_shuffle_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shuffle_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 0, i32 4, i32 4>
@@ -1569,12 +1569,12 @@ define <4 x float> @test_mm_shuffle_ps(<4 x float> %a0, <4 x float> %a1) nounwin
define <4 x float> @test_mm_sqrt_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_sqrt_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: sqrtps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sqrt_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sqrtps %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
@@ -1584,12 +1584,12 @@ declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_mm_sqrt_ss(<4 x float> %a0) {
; X32-LABEL: test_mm_sqrt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: sqrtss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sqrt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sqrtss %xmm0, %xmm0
; X64-NEXT: retq
%sqrt = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0)
@@ -1607,13 +1607,13 @@ declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
define void @test_mm_store_ps(float *%a0, <4 x float> %a1) {
; X32-LABEL: test_mm_store_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to <4 x float>*
@@ -1623,14 +1623,14 @@ define void @test_mm_store_ps(float *%a0, <4 x float> %a1) {
define void @test_mm_store_ps1(float *%a0, <4 x float> %a1) {
; X32-LABEL: test_mm_store_ps1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store_ps1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -1642,13 +1642,13 @@ define void @test_mm_store_ps1(float *%a0, <4 x float> %a1) {
define void @test_mm_store_ss(float *%a0, <4 x float> %a1) {
; X32-LABEL: test_mm_store_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss %xmm0, (%rdi)
; X64-NEXT: retq
%ext = extractelement <4 x float> %a1, i32 0
@@ -1658,14 +1658,14 @@ define void @test_mm_store_ss(float *%a0, <4 x float> %a1) {
define void @test_mm_store1_ps(float *%a0, <4 x float> %a1) {
; X32-LABEL: test_mm_store1_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store1_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -1677,7 +1677,7 @@ define void @test_mm_store1_ps(float *%a0, <4 x float> %a1) {
define void @test_mm_storeh_ps(x86_mmx *%a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_storeh_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -1693,7 +1693,7 @@ define void @test_mm_storeh_ps(x86_mmx *%a0, <4 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storeh_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: movq %rax, (%rdi)
@@ -1707,7 +1707,7 @@ define void @test_mm_storeh_ps(x86_mmx *%a0, <4 x float> %a1) nounwind {
define void @test_mm_storel_ps(x86_mmx *%a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_storel_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -1723,7 +1723,7 @@ define void @test_mm_storel_ps(x86_mmx *%a0, <4 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storel_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: movq %rax, (%rdi)
@@ -1737,14 +1737,14 @@ define void @test_mm_storel_ps(x86_mmx *%a0, <4 x float> %a1) nounwind {
define void @test_mm_storer_ps(float *%a0, <4 x float> %a1) {
; X32-LABEL: test_mm_storer_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storer_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -1756,13 +1756,13 @@ define void @test_mm_storer_ps(float *%a0, <4 x float> %a1) {
define void @test_mm_storeu_ps(float *%a0, <4 x float> %a1) {
; X32-LABEL: test_mm_storeu_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movups %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storeu_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movups %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to <4 x float>*
@@ -1772,13 +1772,13 @@ define void @test_mm_storeu_ps(float *%a0, <4 x float> %a1) {
define void @test_mm_stream_ps(float *%a0, <4 x float> %a1) {
; X32-LABEL: test_mm_stream_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movntps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_stream_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntps %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast float* %a0 to <4 x float>*
@@ -1788,12 +1788,12 @@ define void @test_mm_stream_ps(float *%a0, <4 x float> %a1) {
define <4 x float> @test_mm_sub_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_sub_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sub_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subps %xmm1, %xmm0
; X64-NEXT: retq
%res = fsub <4 x float> %a0, %a1
@@ -1802,12 +1802,12 @@ define <4 x float> @test_mm_sub_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
define <4 x float> @test_mm_sub_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_sub_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sub_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subss %xmm1, %xmm0
; X64-NEXT: retq
%ext0 = extractelement <4 x float> %a0, i32 0
@@ -1819,7 +1819,7 @@ define <4 x float> @test_mm_sub_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
define void @test_MM_TRANSPOSE4_PS(<4 x float>* %a0, <4 x float>* %a1, <4 x float>* %a2, <4 x float>* %a3) nounwind {
; X32-LABEL: test_MM_TRANSPOSE4_PS:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -1849,7 +1849,7 @@ define void @test_MM_TRANSPOSE4_PS(<4 x float>* %a0, <4 x float>* %a1, <4 x floa
; X32-NEXT: retl
;
; X64-LABEL: test_MM_TRANSPOSE4_PS:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: movaps (%rsi), %xmm1
; X64-NEXT: movaps (%rdx), %xmm2
@@ -1892,7 +1892,7 @@ define void @test_MM_TRANSPOSE4_PS(<4 x float>* %a0, <4 x float>* %a1, <4 x floa
define i32 @test_mm_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_ucomieq_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: ucomiss %xmm1, %xmm0
; X32-NEXT: setnp %al
; X32-NEXT: sete %cl
@@ -1901,7 +1901,7 @@ define i32 @test_mm_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomieq_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: ucomiss %xmm1, %xmm0
; X64-NEXT: setnp %al
; X64-NEXT: sete %cl
@@ -1915,14 +1915,14 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_ucomige_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_ucomige_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ucomiss %xmm1, %xmm0
; X32-NEXT: setae %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomige_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ucomiss %xmm1, %xmm0
; X64-NEXT: setae %al
@@ -1934,14 +1934,14 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_ucomigt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ucomiss %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomigt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ucomiss %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -1953,14 +1953,14 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_ucomile_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_ucomile_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ucomiss %xmm0, %xmm1
; X32-NEXT: setae %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomile_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ucomiss %xmm0, %xmm1
; X64-NEXT: setae %al
@@ -1972,14 +1972,14 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_ucomilt_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ucomiss %xmm0, %xmm1
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomilt_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ucomiss %xmm0, %xmm1
; X64-NEXT: seta %al
@@ -1991,7 +1991,7 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_ucomineq_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: ucomiss %xmm1, %xmm0
; X32-NEXT: setp %al
; X32-NEXT: setne %cl
@@ -2000,7 +2000,7 @@ define i32 @test_mm_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomineq_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: ucomiss %xmm1, %xmm0
; X64-NEXT: setp %al
; X64-NEXT: setne %cl
@@ -2014,23 +2014,23 @@ declare i32 @llvm.x86.sse.ucomineq.ss(<4 x float>, <4 x float>) nounwind readnon
define <4 x float> @test_mm_undefined_ps() {
; X32-LABEL: test_mm_undefined_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_undefined_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
ret <4 x float> undef
}
define <4 x float> @test_mm_unpackhi_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_unpackhi_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpackhi_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -2039,12 +2039,12 @@ define <4 x float> @test_mm_unpackhi_ps(<4 x float> %a0, <4 x float> %a1) nounwi
define <4 x float> @test_mm_unpacklo_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_unpacklo_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpacklo_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -2053,12 +2053,12 @@ define <4 x float> @test_mm_unpacklo_ps(<4 x float> %a0, <4 x float> %a1) nounwi
define <4 x float> @test_mm_xor_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_xor_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_xor_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <4 x float> %a0 to <4 x i32>
diff --git a/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
index 2ecba887f7c..a65c1d312aa 100644
--- a/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
@@ -3,18 +3,18 @@
define void @test_x86_sse_storeu_ps(i8* %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_storeu_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: movups %xmm0, (%eax)
; SSE-NEXT: retl
;
; KNL-LABEL: test_x86_sse_storeu_ps:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL-NEXT: vmovups %xmm0, (%eax)
; KNL-NEXT: retl
; CHECK-LABEL: test_x86_sse_storeu_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movups %xmm0, (%eax)
; CHECK-NEXT: retl
@@ -26,21 +26,21 @@ declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
define <4 x float> @test_x86_sse_add_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_add_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x58,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_add_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_add_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vaddss %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x58,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
; CHECK-LABEL: test_x86_sse_add_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: addss %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x float> @llvm.x86.sse.add.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -51,21 +51,21 @@ declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_sub_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_sub_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: subss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5c,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_sub_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5c,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_sub_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vsubss %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x5c,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
; CHECK-LABEL: test_x86_sse_sub_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subss %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x float> @llvm.x86.sse.sub.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -76,21 +76,21 @@ declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_mul_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_mul_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: mulss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x59,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_mul_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmulss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x59,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_mul_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmulss %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x59,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
; CHECK-LABEL: test_x86_sse_mul_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: mulss %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x float> @llvm.x86.sse.mul.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -101,21 +101,21 @@ declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_div_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_div_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5e,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_div_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vdivss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5e,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_div_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vdivss %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x5e,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
; CHECK-LABEL: test_x86_sse_div_ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: divss %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x float> @llvm.x86.sse.div.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
diff --git a/test/CodeGen/X86/sse-intrinsics-x86.ll b/test/CodeGen/X86/sse-intrinsics-x86.ll
index ca74ee5732d..04a4352acca 100644
--- a/test/CodeGen/X86/sse-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse-intrinsics-x86.ll
@@ -5,12 +5,12 @@
define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_cmp_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpordps %xmm1, %xmm0 ## encoding: [0x0f,0xc2,0xc1,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_cmp_ps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vcmpordps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0xc2,0xc1,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -21,12 +21,12 @@ declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind
define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_cmp_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpordss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0xc2,0xc1,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_cmp_ss:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vcmpordss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0xc2,0xc1,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -37,7 +37,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_comieq_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: comiss %xmm1, %xmm0 ## encoding: [0x0f,0x2f,0xc1]
; SSE-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; SSE-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -46,7 +46,7 @@ define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_comieq_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX2-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; AVX2-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -55,7 +55,7 @@ define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_comieq_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
; SKX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; SKX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -70,21 +70,21 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_comige_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: comiss %xmm1, %xmm0 ## encoding: [0x0f,0x2f,0xc1]
; SSE-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_comige_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX2-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_comige_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
; SKX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -97,21 +97,21 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_comigt_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: comiss %xmm1, %xmm0 ## encoding: [0x0f,0x2f,0xc1]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_comigt_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX2-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_comigt_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
; SKX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -124,21 +124,21 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_comile_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: comiss %xmm0, %xmm1 ## encoding: [0x0f,0x2f,0xc8]
; SSE-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_comile_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vcomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2f,0xc8]
; AVX2-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_comile_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vcomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc8]
; SKX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -151,21 +151,21 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_comilt_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: comiss %xmm0, %xmm1 ## encoding: [0x0f,0x2f,0xc8]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_comilt_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vcomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2f,0xc8]
; AVX2-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_comilt_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vcomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc8]
; SKX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -178,7 +178,7 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_comineq_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: comiss %xmm1, %xmm0 ## encoding: [0x0f,0x2f,0xc1]
; SSE-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; SSE-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -187,7 +187,7 @@ define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_comineq_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX2-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; AVX2-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -196,7 +196,7 @@ define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_comineq_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
; SKX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; SKX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -211,19 +211,19 @@ declare i32 @llvm.x86.sse.comineq.ss(<4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_cvtsi2ss(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_cvtsi2ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE-NEXT: cvtsi2ssl %eax, %xmm0 ## encoding: [0xf3,0x0f,0x2a,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvtsi2ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; AVX2-NEXT: vcvtsi2ssl %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x2a,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvtsi2ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SKX-NEXT: vcvtsi2ssl %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2a,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
@@ -235,17 +235,17 @@ declare <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float>, i32) nounwind readnone
define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_cvtss2si:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %eax ## encoding: [0xf3,0x0f,0x2d,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvtss2si:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtss2si %xmm0, %eax ## encoding: [0xc5,0xfa,0x2d,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvtss2si:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtss2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2d,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0) ; <i32> [#uses=1]
@@ -256,17 +256,17 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_cvttss2si:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %eax ## encoding: [0xf3,0x0f,0x2c,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvttss2si:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvttss2si %xmm0, %eax ## encoding: [0xc5,0xfa,0x2c,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvttss2si:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvttss2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2c,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0) ; <i32> [#uses=1]
@@ -277,13 +277,13 @@ declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
define void @test_x86_sse_ldmxcsr(i8* %a0) {
; SSE-LABEL: test_x86_sse_ldmxcsr:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: ldmxcsr (%eax) ## encoding: [0x0f,0xae,0x10]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_ldmxcsr:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; VCHECK-NEXT: vldmxcsr (%eax) ## encoding: [0xc5,0xf8,0xae,0x10]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -296,17 +296,17 @@ declare void @llvm.x86.sse.ldmxcsr(i8*) nounwind
define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_max_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: maxps %xmm1, %xmm0 ## encoding: [0x0f,0x5f,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_max_ps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5f,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_max_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -317,17 +317,17 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_max_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: maxss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5f,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_max_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5f,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_max_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5f,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -338,17 +338,17 @@ declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_min_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: minps %xmm1, %xmm0 ## encoding: [0x0f,0x5d,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_min_ps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vminps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5d,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_min_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vminps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5d,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -359,17 +359,17 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_min_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: minss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5d,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_min_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vminss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5d,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_min_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vminss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5d,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -380,12 +380,12 @@ declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind read
define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_movmsk_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movmskps %xmm0, %eax ## encoding: [0x0f,0x50,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_movmsk_ps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vmovmskps %xmm0, %eax ## encoding: [0xc5,0xf8,0x50,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0) ; <i32> [#uses=1]
@@ -397,12 +397,12 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rcp_ps(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_rcp_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: rcpps %xmm0, %xmm0 ## encoding: [0x0f,0x53,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_rcp_ps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vrcpps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x53,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -413,12 +413,12 @@ declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rcp_ss(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_rcp_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x53,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_rcp_ss:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vrcpss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x53,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -429,12 +429,12 @@ declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rsqrt_ps(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_rsqrt_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: rsqrtps %xmm0, %xmm0 ## encoding: [0x0f,0x52,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_rsqrt_ps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vrsqrtps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x52,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -445,12 +445,12 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rsqrt_ss(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_rsqrt_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: rsqrtss %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x52,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_rsqrt_ss:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x52,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -461,17 +461,17 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_sqrt_ps(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_sqrt_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: sqrtps %xmm0, %xmm0 ## encoding: [0x0f,0x51,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_sqrt_ps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vsqrtps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x51,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_sqrt_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vsqrtps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x51,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -482,17 +482,17 @@ declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_sqrt_ss(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse_sqrt_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: sqrtss %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x51,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_sqrt_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x51,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_sqrt_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x51,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -503,13 +503,13 @@ declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
define void @test_x86_sse_stmxcsr(i8* %a0) {
; SSE-LABEL: test_x86_sse_stmxcsr:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: stmxcsr (%eax) ## encoding: [0x0f,0xae,0x18]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse_stmxcsr:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; VCHECK-NEXT: vstmxcsr (%eax) ## encoding: [0xc5,0xf8,0xae,0x18]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -521,7 +521,7 @@ declare void @llvm.x86.sse.stmxcsr(i8*) nounwind
define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_ucomieq_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: ucomiss %xmm1, %xmm0 ## encoding: [0x0f,0x2e,0xc1]
; SSE-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; SSE-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -530,7 +530,7 @@ define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_ucomieq_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX2-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; AVX2-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -539,7 +539,7 @@ define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_ucomieq_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
; SKX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; SKX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -554,21 +554,21 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_ucomige_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ucomiss %xmm1, %xmm0 ## encoding: [0x0f,0x2e,0xc1]
; SSE-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_ucomige_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX2-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_ucomige_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
; SKX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -581,21 +581,21 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_ucomigt_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ucomiss %xmm1, %xmm0 ## encoding: [0x0f,0x2e,0xc1]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_ucomigt_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX2-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_ucomigt_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
; SKX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -608,21 +608,21 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_ucomile_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ucomiss %xmm0, %xmm1 ## encoding: [0x0f,0x2e,0xc8]
; SSE-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_ucomile_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vucomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2e,0xc8]
; AVX2-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_ucomile_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vucomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc8]
; SKX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -635,21 +635,21 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_ucomilt_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ucomiss %xmm0, %xmm1 ## encoding: [0x0f,0x2e,0xc8]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_ucomilt_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vucomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2e,0xc8]
; AVX2-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_ucomilt_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vucomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc8]
; SKX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -662,7 +662,7 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_ucomineq_ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: ucomiss %xmm1, %xmm0 ## encoding: [0x0f,0x2e,0xc1]
; SSE-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; SSE-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -671,7 +671,7 @@ define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_ucomineq_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX2-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; AVX2-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -680,7 +680,7 @@ define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_ucomineq_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
; SKX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; SKX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -695,12 +695,12 @@ declare i32 @llvm.x86.sse.ucomineq.ss(<4 x float>, <4 x float>) nounwind readnon
define void @sfence() nounwind {
; SSE-LABEL: sfence:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: sfence ## encoding: [0x0f,0xae,0xf8]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: sfence:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: sfence ## encoding: [0x0f,0xae,0xf8]
; VCHECK-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.sse.sfence()
diff --git a/test/CodeGen/X86/sse-intrinsics-x86_64.ll b/test/CodeGen/X86/sse-intrinsics-x86_64.ll
index 61d0cae9acf..6f95b8d9ea8 100644
--- a/test/CodeGen/X86/sse-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/sse-intrinsics-x86_64.ll
@@ -5,21 +5,21 @@
define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_cvtss2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtss2si %xmm0, %rax
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse_cvtss2si64:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax ## encoding: [0xf3,0x48,0x0f,0x2d,0xc0]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvtss2si64:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtss2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfa,0x2d,0xc0]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvtss2si64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtss2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2d,0xc0]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
@@ -30,21 +30,21 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
; CHECK-LABEL: test_x86_sse_cvtsi642ss:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse_cvtsi642ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtsi2ssq %rdi, %xmm0 ## encoding: [0xf3,0x48,0x0f,0x2a,0xc7]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvtsi642ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvtsi642ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
@@ -55,21 +55,21 @@ declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_cvttss2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttss2si %xmm0, %rax
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse_cvttss2si64:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax ## encoding: [0xf3,0x48,0x0f,0x2c,0xc0]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvttss2si64:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvttss2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfa,0x2c,0xc0]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvttss2si64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvttss2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2c,0xc0]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0) ; <i64> [#uses=1]
diff --git a/test/CodeGen/X86/sse-minmax.ll b/test/CodeGen/X86/sse-minmax.ll
index 2944001ed7e..f79749169c0 100644
--- a/test/CodeGen/X86/sse-minmax.ll
+++ b/test/CodeGen/X86/sse-minmax.ll
@@ -15,7 +15,7 @@
define double @ogt(double %x, double %y) {
; ALL-LABEL: ogt:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: maxsd %xmm1, %xmm0
; ALL-NEXT: retq
%c = fcmp ogt double %x, %y
@@ -25,7 +25,7 @@ define double @ogt(double %x, double %y) {
define double @olt(double %x, double %y) {
; ALL-LABEL: olt:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: minsd %xmm1, %xmm0
; ALL-NEXT: retq
%c = fcmp olt double %x, %y
@@ -35,18 +35,18 @@ define double @olt(double %x, double %y) {
define double @ogt_inverse(double %x, double %y) {
; STRICT-LABEL: ogt_inverse:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ogt_inverse:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ogt_inverse:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -57,18 +57,18 @@ define double @ogt_inverse(double %x, double %y) {
define double @olt_inverse(double %x, double %y) {
; STRICT-LABEL: olt_inverse:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: olt_inverse:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: olt_inverse:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -79,7 +79,7 @@ define double @olt_inverse(double %x, double %y) {
define double @oge(double %x, double %y) {
; STRICT-LABEL: oge:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmplesd %xmm0, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm0
@@ -88,7 +88,7 @@ define double @oge(double %x, double %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: oge:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
%c = fcmp oge double %x, %y
@@ -98,7 +98,7 @@ define double @oge(double %x, double %y) {
define double @ole(double %x, double %y) {
; STRICT-LABEL: ole:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmplesd %xmm1, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm0
@@ -108,7 +108,7 @@ define double @ole(double %x, double %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: ole:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
%c = fcmp ole double %x, %y
@@ -118,7 +118,7 @@ define double @ole(double %x, double %y) {
define double @oge_inverse(double %x, double %y) {
; STRICT-LABEL: oge_inverse:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmplesd %xmm0, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm1
@@ -128,12 +128,12 @@ define double @oge_inverse(double %x, double %y) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: oge_inverse:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: oge_inverse:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -144,7 +144,7 @@ define double @oge_inverse(double %x, double %y) {
define double @ole_inverse(double %x, double %y) {
; STRICT-LABEL: ole_inverse:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmplesd %xmm1, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm1
@@ -154,12 +154,12 @@ define double @ole_inverse(double %x, double %y) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ole_inverse:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ole_inverse:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -170,7 +170,7 @@ define double @ole_inverse(double %x, double %y) {
define double @ogt_x(double %x) {
; ALL-LABEL: ogt_x:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: xorpd %xmm1, %xmm1
; ALL-NEXT: maxsd %xmm1, %xmm0
; ALL-NEXT: retq
@@ -181,7 +181,7 @@ define double @ogt_x(double %x) {
define double @olt_x(double %x) {
; ALL-LABEL: olt_x:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: xorpd %xmm1, %xmm1
; ALL-NEXT: minsd %xmm1, %xmm0
; ALL-NEXT: retq
@@ -192,20 +192,20 @@ define double @olt_x(double %x) {
define double @ogt_inverse_x(double %x) {
; STRICT-LABEL: ogt_inverse_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ogt_inverse_x:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: xorpd %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ogt_inverse_x:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: xorpd %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -217,20 +217,20 @@ define double @ogt_inverse_x(double %x) {
define double @olt_inverse_x(double %x) {
; STRICT-LABEL: olt_inverse_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: olt_inverse_x:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: xorpd %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: olt_inverse_x:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: xorpd %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -242,14 +242,14 @@ define double @olt_inverse_x(double %x) {
define double @oge_x(double %x) {
; STRICT-LABEL: oge_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: cmplesd %xmm0, %xmm1
; STRICT-NEXT: andpd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: oge_x:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
@@ -260,7 +260,7 @@ define double @oge_x(double %x) {
define double @ole_x(double %x) {
; STRICT-LABEL: ole_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm2, %xmm2
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
@@ -269,7 +269,7 @@ define double @ole_x(double %x) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: ole_x:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
@@ -280,7 +280,7 @@ define double @ole_x(double %x) {
define double @oge_inverse_x(double %x) {
; STRICT-LABEL: oge_inverse_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: cmplesd %xmm0, %xmm1
; STRICT-NEXT: andnpd %xmm0, %xmm1
@@ -288,13 +288,13 @@ define double @oge_inverse_x(double %x) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: oge_inverse_x:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: xorpd %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: oge_inverse_x:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: xorpd %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -306,7 +306,7 @@ define double @oge_inverse_x(double %x) {
define double @ole_inverse_x(double %x) {
; STRICT-LABEL: ole_inverse_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm2, %xmm2
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
@@ -315,13 +315,13 @@ define double @ole_inverse_x(double %x) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ole_inverse_x:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: xorpd %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ole_inverse_x:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: xorpd %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -333,7 +333,7 @@ define double @ole_inverse_x(double %x) {
define double @ugt(double %x, double %y) {
; STRICT-LABEL: ugt:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmpnlesd %xmm1, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm0
@@ -343,7 +343,7 @@ define double @ugt(double %x, double %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: ugt:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
%c = fcmp ugt double %x, %y
@@ -353,7 +353,7 @@ define double @ugt(double %x, double %y) {
define double @ult(double %x, double %y) {
; STRICT-LABEL: ult:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm0
@@ -362,7 +362,7 @@ define double @ult(double %x, double %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: ult:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
%c = fcmp ult double %x, %y
@@ -372,7 +372,7 @@ define double @ult(double %x, double %y) {
define double @ugt_inverse(double %x, double %y) {
; STRICT-LABEL: ugt_inverse:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmpnlesd %xmm1, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm1
@@ -382,12 +382,12 @@ define double @ugt_inverse(double %x, double %y) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ugt_inverse:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ugt_inverse:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -398,7 +398,7 @@ define double @ugt_inverse(double %x, double %y) {
define double @ult_inverse(double %x, double %y) {
; STRICT-LABEL: ult_inverse:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm1
@@ -408,12 +408,12 @@ define double @ult_inverse(double %x, double %y) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ult_inverse:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ult_inverse:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -424,13 +424,13 @@ define double @ult_inverse(double %x, double %y) {
define double @uge(double %x, double %y) {
; STRICT-LABEL: uge:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: uge:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
%c = fcmp uge double %x, %y
@@ -440,13 +440,13 @@ define double @uge(double %x, double %y) {
define double @ule(double %x, double %y) {
; STRICT-LABEL: ule:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ule:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
%c = fcmp ule double %x, %y
@@ -456,17 +456,17 @@ define double @ule(double %x, double %y) {
define double @uge_inverse(double %x, double %y) {
; STRICT-LABEL: uge_inverse:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: minsd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: uge_inverse:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: uge_inverse:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -477,17 +477,17 @@ define double @uge_inverse(double %x, double %y) {
define double @ule_inverse(double %x, double %y) {
; STRICT-LABEL: ule_inverse:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: maxsd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ule_inverse:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ule_inverse:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -498,7 +498,7 @@ define double @ule_inverse(double %x, double %y) {
define double @ugt_x(double %x) {
; STRICT-LABEL: ugt_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm2, %xmm2
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
@@ -507,7 +507,7 @@ define double @ugt_x(double %x) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: ugt_x:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
@@ -518,14 +518,14 @@ define double @ugt_x(double %x) {
define double @ult_x(double %x) {
; STRICT-LABEL: ult_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
; STRICT-NEXT: andpd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ult_x:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
@@ -536,7 +536,7 @@ define double @ult_x(double %x) {
define double @ugt_inverse_x(double %x) {
; STRICT-LABEL: ugt_inverse_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm2, %xmm2
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
@@ -545,13 +545,13 @@ define double @ugt_inverse_x(double %x) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ugt_inverse_x:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: xorpd %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ugt_inverse_x:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: xorpd %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -563,7 +563,7 @@ define double @ugt_inverse_x(double %x) {
define double @ult_inverse_x(double %x) {
; STRICT-LABEL: ult_inverse_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
; STRICT-NEXT: andnpd %xmm0, %xmm1
@@ -571,13 +571,13 @@ define double @ult_inverse_x(double %x) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ult_inverse_x:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: xorpd %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ult_inverse_x:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: xorpd %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -589,14 +589,14 @@ define double @ult_inverse_x(double %x) {
define double @uge_x(double %x) {
; STRICT-LABEL: uge_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: uge_x:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
@@ -607,14 +607,14 @@ define double @uge_x(double %x) {
define double @ule_x(double %x) {
; STRICT-LABEL: ule_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ule_x:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
@@ -625,19 +625,19 @@ define double @ule_x(double %x) {
define double @uge_inverse_x(double %x) {
; STRICT-LABEL: uge_inverse_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: minsd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: uge_inverse_x:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: xorpd %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: uge_inverse_x:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: xorpd %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -649,19 +649,19 @@ define double @uge_inverse_x(double %x) {
define double @ule_inverse_x(double %x) {
; STRICT-LABEL: ule_inverse_x:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: maxsd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ule_inverse_x:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: xorpd %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ule_inverse_x:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: xorpd %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -673,7 +673,7 @@ define double @ule_inverse_x(double %x) {
define double @ogt_y(double %x) {
; ALL-LABEL: ogt_y:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: maxsd {{.*}}(%rip), %xmm0
; ALL-NEXT: retq
%c = fcmp ogt double %x, -0.000000e+00
@@ -683,7 +683,7 @@ define double @ogt_y(double %x) {
define double @olt_y(double %x) {
; ALL-LABEL: olt_y:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: minsd {{.*}}(%rip), %xmm0
; ALL-NEXT: retq
%c = fcmp olt double %x, -0.000000e+00
@@ -693,19 +693,19 @@ define double @olt_y(double %x) {
define double @ogt_inverse_y(double %x) {
; STRICT-LABEL: ogt_inverse_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ogt_inverse_y:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ogt_inverse_y:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -717,19 +717,19 @@ define double @ogt_inverse_y(double %x) {
define double @olt_inverse_y(double %x) {
; STRICT-LABEL: olt_inverse_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: olt_inverse_y:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: olt_inverse_y:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -741,7 +741,7 @@ define double @olt_inverse_y(double %x) {
define double @oge_y(double %x) {
; STRICT-LABEL: oge_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmplesd %xmm0, %xmm2
@@ -751,7 +751,7 @@ define double @oge_y(double %x) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: oge_y:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
%c = fcmp oge double %x, -0.000000e+00
@@ -761,7 +761,7 @@ define double @oge_y(double %x) {
define double @ole_y(double %x) {
; STRICT-LABEL: ole_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
@@ -772,7 +772,7 @@ define double @ole_y(double %x) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: ole_y:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
%c = fcmp ole double %x, -0.000000e+00
@@ -782,7 +782,7 @@ define double @ole_y(double %x) {
define double @oge_inverse_y(double %x) {
; STRICT-LABEL: oge_inverse_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; STRICT-NEXT: movapd %xmm2, %xmm1
; STRICT-NEXT: cmplesd %xmm0, %xmm1
@@ -793,12 +793,12 @@ define double @oge_inverse_y(double %x) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: oge_inverse_y:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: oge_inverse_y:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -810,7 +810,7 @@ define double @oge_inverse_y(double %x) {
define double @ole_inverse_y(double %x) {
; STRICT-LABEL: ole_inverse_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
@@ -821,12 +821,12 @@ define double @ole_inverse_y(double %x) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ole_inverse_y:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ole_inverse_y:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -838,7 +838,7 @@ define double @ole_inverse_y(double %x) {
define double @ugt_y(double %x) {
; STRICT-LABEL: ugt_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
@@ -849,7 +849,7 @@ define double @ugt_y(double %x) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: ugt_y:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
%c = fcmp ugt double %x, -0.000000e+00
@@ -859,7 +859,7 @@ define double @ugt_y(double %x) {
define double @ult_y(double %x) {
; STRICT-LABEL: ult_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
@@ -869,7 +869,7 @@ define double @ult_y(double %x) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: ult_y:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
%c = fcmp ult double %x, -0.000000e+00
@@ -879,7 +879,7 @@ define double @ult_y(double %x) {
define double @ugt_inverse_y(double %x) {
; STRICT-LABEL: ugt_inverse_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
@@ -890,12 +890,12 @@ define double @ugt_inverse_y(double %x) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ugt_inverse_y:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ugt_inverse_y:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -907,7 +907,7 @@ define double @ugt_inverse_y(double %x) {
define double @ult_inverse_y(double %x) {
; STRICT-LABEL: ult_inverse_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; STRICT-NEXT: movapd %xmm2, %xmm1
; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
@@ -918,12 +918,12 @@ define double @ult_inverse_y(double %x) {
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ult_inverse_y:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ult_inverse_y:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -935,14 +935,14 @@ define double @ult_inverse_y(double %x) {
define double @uge_y(double %x) {
; STRICT-LABEL: uge_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: uge_y:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
%c = fcmp uge double %x, -0.000000e+00
@@ -952,14 +952,14 @@ define double @uge_y(double %x) {
define double @ule_y(double %x) {
; STRICT-LABEL: ule_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ule_y:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
%c = fcmp ule double %x, -0.000000e+00
@@ -969,17 +969,17 @@ define double @ule_y(double %x) {
define double @uge_inverse_y(double %x) {
; STRICT-LABEL: uge_inverse_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: minsd {{.*}}(%rip), %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: uge_inverse_y:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: uge_inverse_y:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -991,17 +991,17 @@ define double @uge_inverse_y(double %x) {
define double @ule_inverse_y(double %x) {
; STRICT-LABEL: ule_inverse_y:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: maxsd {{.*}}(%rip), %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ule_inverse_y:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: ule_inverse_y:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1015,19 +1015,19 @@ define double @ule_inverse_y(double %x) {
define double @clampTo3k_a(double %x) {
; STRICT-LABEL: clampTo3k_a:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: clampTo3k_a:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: clampTo3k_a:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1039,17 +1039,17 @@ define double @clampTo3k_a(double %x) {
define double @clampTo3k_b(double %x) {
; STRICT-LABEL: clampTo3k_b:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: minsd {{.*}}(%rip), %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: clampTo3k_b:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: clampTo3k_b:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1061,19 +1061,19 @@ define double @clampTo3k_b(double %x) {
define double @clampTo3k_c(double %x) {
; STRICT-LABEL: clampTo3k_c:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: clampTo3k_c:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: clampTo3k_c:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1085,17 +1085,17 @@ define double @clampTo3k_c(double %x) {
define double @clampTo3k_d(double %x) {
; STRICT-LABEL: clampTo3k_d:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: maxsd {{.*}}(%rip), %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: clampTo3k_d:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: clampTo3k_d:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1107,19 +1107,19 @@ define double @clampTo3k_d(double %x) {
define double @clampTo3k_e(double %x) {
; STRICT-LABEL: clampTo3k_e:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: clampTo3k_e:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: clampTo3k_e:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1131,17 +1131,17 @@ define double @clampTo3k_e(double %x) {
define double @clampTo3k_f(double %x) {
; STRICT-LABEL: clampTo3k_f:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: maxsd {{.*}}(%rip), %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: clampTo3k_f:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: maxsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: clampTo3k_f:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1153,19 +1153,19 @@ define double @clampTo3k_f(double %x) {
define double @clampTo3k_g(double %x) {
; STRICT-LABEL: clampTo3k_g:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: clampTo3k_g:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: clampTo3k_g:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1177,17 +1177,17 @@ define double @clampTo3k_g(double %x) {
define double @clampTo3k_h(double %x) {
; STRICT-LABEL: clampTo3k_h:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: minsd {{.*}}(%rip), %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: clampTo3k_h:
-; UNSAFE: # BB#0:
+; UNSAFE: # %bb.0:
; UNSAFE-NEXT: minsd {{.*}}(%rip), %xmm0
; UNSAFE-NEXT: retq
;
; FINITE-LABEL: clampTo3k_h:
-; FINITE: # BB#0:
+; FINITE: # %bb.0:
; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
@@ -1199,7 +1199,7 @@ define double @clampTo3k_h(double %x) {
define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
; STRICT-LABEL: test_maxpd:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: cmplepd %xmm2, %xmm0
@@ -1208,7 +1208,7 @@ define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: test_maxpd:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxpd %xmm1, %xmm0
; RELAX-NEXT: retq
%max_is_x = fcmp oge <2 x double> %x, %y
@@ -1218,7 +1218,7 @@ define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
; STRICT-LABEL: test_minpd:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmplepd %xmm1, %xmm0
; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -1226,7 +1226,7 @@ define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: test_minpd:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minpd %xmm1, %xmm0
; RELAX-NEXT: retq
%min_is_x = fcmp ole <2 x double> %x, %y
@@ -1236,7 +1236,7 @@ define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
; STRICT-LABEL: test_maxps:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: cmpleps %xmm2, %xmm0
@@ -1245,7 +1245,7 @@ define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: test_maxps:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxps %xmm1, %xmm0
; RELAX-NEXT: retq
%max_is_x = fcmp oge <4 x float> %x, %y
@@ -1255,7 +1255,7 @@ define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
; STRICT-LABEL: test_minps:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: cmpleps %xmm1, %xmm0
; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
@@ -1263,7 +1263,7 @@ define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: test_minps:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minps %xmm1, %xmm0
; RELAX-NEXT: retq
%min_is_x = fcmp ole <4 x float> %x, %y
@@ -1273,7 +1273,7 @@ define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; STRICT-LABEL: test_maxps_illegal_v2f32:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: cmpleps %xmm2, %xmm0
@@ -1282,7 +1282,7 @@ define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: test_maxps_illegal_v2f32:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxps %xmm1, %xmm0
; RELAX-NEXT: retq
%max_is_x = fcmp oge <2 x float> %x, %y
@@ -1292,7 +1292,7 @@ define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; STRICT-LABEL: test_minps_illegal_v2f32:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: cmpleps %xmm1, %xmm0
; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
@@ -1300,7 +1300,7 @@ define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: test_minps_illegal_v2f32:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minps %xmm1, %xmm0
; RELAX-NEXT: retq
%min_is_x = fcmp ole <2 x float> %x, %y
@@ -1310,7 +1310,7 @@ define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; STRICT-LABEL: test_maxps_illegal_v3f32:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: cmpleps %xmm2, %xmm0
@@ -1319,7 +1319,7 @@ define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: test_maxps_illegal_v3f32:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: maxps %xmm1, %xmm0
; RELAX-NEXT: retq
%max_is_x = fcmp oge <3 x float> %x, %y
@@ -1329,7 +1329,7 @@ define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; STRICT-LABEL: test_minps_illegal_v3f32:
-; STRICT: # BB#0:
+; STRICT: # %bb.0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: cmpleps %xmm1, %xmm0
; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
@@ -1337,7 +1337,7 @@ define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; STRICT-NEXT: retq
;
; RELAX-LABEL: test_minps_illegal_v3f32:
-; RELAX: # BB#0:
+; RELAX: # %bb.0:
; RELAX-NEXT: minps %xmm1, %xmm0
; RELAX-NEXT: retq
%min_is_x = fcmp ole <3 x float> %x, %y
diff --git a/test/CodeGen/X86/sse-only.ll b/test/CodeGen/X86/sse-only.ll
index 9c4574365b4..5cc09c52004 100644
--- a/test/CodeGen/X86/sse-only.ll
+++ b/test/CodeGen/X86/sse-only.ll
@@ -5,7 +5,7 @@
define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movapd (%ecx), %xmm0
diff --git a/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll b/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll
index 63751e1ab7e..1ed4d3401ca 100644
--- a/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll
+++ b/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll
@@ -9,12 +9,12 @@
define <4 x float> @recip(<4 x float> %x) {
; SSE-LABEL: recip:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: recip:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%y = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %x)
@@ -24,12 +24,12 @@ define <4 x float> @recip(<4 x float> %x) {
define <4 x float> @recip_square_root(<4 x float> %x) {
; SSE-LABEL: recip_square_root:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rsqrtss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: recip_square_root:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%y = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %x)
@@ -39,12 +39,12 @@ define <4 x float> @recip_square_root(<4 x float> %x) {
define <4 x float> @square_root(<4 x float> %x) {
; SSE-LABEL: square_root:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: square_root:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%y = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %x)
@@ -54,12 +54,12 @@ define <4 x float> @square_root(<4 x float> %x) {
define <2 x double> @square_root_double(<2 x double> %x) {
; SSE-LABEL: square_root_double:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: sqrtsd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: square_root_double:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%y = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %x)
diff --git a/test/CodeGen/X86/sse-scalar-fp-arith.ll b/test/CodeGen/X86/sse-scalar-fp-arith.ll
index ebc29b1393b..8761920bb16 100644
--- a/test/CodeGen/X86/sse-scalar-fp-arith.ll
+++ b/test/CodeGen/X86/sse-scalar-fp-arith.ll
@@ -10,12 +10,12 @@
define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test_add_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_add_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %b, i32 0
@@ -27,12 +27,12 @@ define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test_sub_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_sub_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %b, i32 0
@@ -44,12 +44,12 @@ define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test_mul_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_mul_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %b, i32 0
@@ -61,12 +61,12 @@ define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test_div_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_div_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %b, i32 0
@@ -78,25 +78,25 @@ define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_sqrt_ss(<4 x float> %a) {
; SSE2-LABEL: test_sqrt_ss:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: sqrtss %xmm0, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_sqrt_ss:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: sqrtss %xmm0, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_sqrt_ss:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vsqrtss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_sqrt_ss:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vsqrtss %xmm0, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: retq
@@ -109,12 +109,12 @@ declare float @llvm.sqrt.f32(float)
define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test_add_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_add_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %b, i32 0
@@ -126,12 +126,12 @@ define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test_sub_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_sub_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %b, i32 0
@@ -143,12 +143,12 @@ define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test_mul_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_mul_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %b, i32 0
@@ -160,12 +160,12 @@ define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test_div_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_div_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %b, i32 0
@@ -177,25 +177,25 @@ define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_sqrt_sd(<2 x double> %a) {
; SSE2-LABEL: test_sqrt_sd:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: sqrtsd %xmm0, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_sqrt_sd:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: sqrtsd %xmm0, %xmm1
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_sqrt_sd:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_sqrt_sd:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vsqrtsd %xmm0, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: retq
@@ -208,13 +208,13 @@ declare double @llvm.sqrt.f64(double)
define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test2_add_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_add_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %a, i32 0
@@ -226,13 +226,13 @@ define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test2_sub_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_sub_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %a, i32 0
@@ -244,13 +244,13 @@ define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test2_mul_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_mul_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %a, i32 0
@@ -262,13 +262,13 @@ define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test2_div_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_div_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %a, i32 0
@@ -280,13 +280,13 @@ define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) {
define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test2_add_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_add_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %a, i32 0
@@ -298,13 +298,13 @@ define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test2_sub_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_sub_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %a, i32 0
@@ -316,13 +316,13 @@ define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test2_mul_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_mul_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %a, i32 0
@@ -334,13 +334,13 @@ define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test2_div_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2_div_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %a, i32 0
@@ -352,13 +352,13 @@ define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) {
define <4 x float> @test_multiple_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test_multiple_add_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_multiple_add_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -372,14 +372,14 @@ define <4 x float> @test_multiple_add_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_multiple_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test_multiple_sub_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm1, %xmm2
; SSE-NEXT: subss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_multiple_sub_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -393,13 +393,13 @@ define <4 x float> @test_multiple_sub_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_multiple_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test_multiple_mul_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss %xmm0, %xmm1
; SSE-NEXT: mulss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_multiple_mul_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -413,14 +413,14 @@ define <4 x float> @test_multiple_mul_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test_multiple_div_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: divss %xmm1, %xmm2
; SSE-NEXT: divss %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_multiple_div_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -437,12 +437,12 @@ define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @blend_add_ss(<4 x float> %a, float %b) {
; SSE-LABEL: blend_add_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_add_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -455,12 +455,12 @@ define <4 x float> @blend_add_ss(<4 x float> %a, float %b) {
define <4 x float> @blend_sub_ss(<4 x float> %a, float %b) {
; SSE-LABEL: blend_sub_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_sub_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -473,12 +473,12 @@ define <4 x float> @blend_sub_ss(<4 x float> %a, float %b) {
define <4 x float> @blend_mul_ss(<4 x float> %a, float %b) {
; SSE-LABEL: blend_mul_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_mul_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -491,12 +491,12 @@ define <4 x float> @blend_mul_ss(<4 x float> %a, float %b) {
define <4 x float> @blend_div_ss(<4 x float> %a, float %b) {
; SSE-LABEL: blend_div_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_div_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -509,12 +509,12 @@ define <4 x float> @blend_div_ss(<4 x float> %a, float %b) {
define <2 x double> @blend_add_sd(<2 x double> %a, double %b) {
; SSE-LABEL: blend_add_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_add_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -527,12 +527,12 @@ define <2 x double> @blend_add_sd(<2 x double> %a, double %b) {
define <2 x double> @blend_sub_sd(<2 x double> %a, double %b) {
; SSE-LABEL: blend_sub_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_sub_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -545,12 +545,12 @@ define <2 x double> @blend_sub_sd(<2 x double> %a, double %b) {
define <2 x double> @blend_mul_sd(<2 x double> %a, double %b) {
; SSE-LABEL: blend_mul_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_mul_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -563,12 +563,12 @@ define <2 x double> @blend_mul_sd(<2 x double> %a, double %b) {
define <2 x double> @blend_div_sd(<2 x double> %a, double %b) {
; SSE-LABEL: blend_div_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_div_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -584,12 +584,12 @@ define <2 x double> @blend_div_sd(<2 x double> %a, double %b) {
define <4 x float> @insert_test_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test_add_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test_add_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fadd <4 x float> %a, %b
@@ -599,12 +599,12 @@ define <4 x float> @insert_test_add_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test_sub_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test_sub_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fsub <4 x float> %a, %b
@@ -614,12 +614,12 @@ define <4 x float> @insert_test_sub_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test_mul_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test_mul_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fmul <4 x float> %a, %b
@@ -629,12 +629,12 @@ define <4 x float> @insert_test_mul_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test_div_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test_div_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fdiv <4 x float> %a, %b
@@ -644,12 +644,12 @@ define <4 x float> @insert_test_div_ss(<4 x float> %a, <4 x float> %b) {
define <2 x double> @insert_test_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test_add_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test_add_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fadd <2 x double> %a, %b
@@ -659,12 +659,12 @@ define <2 x double> @insert_test_add_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test_sub_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test_sub_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fsub <2 x double> %a, %b
@@ -674,12 +674,12 @@ define <2 x double> @insert_test_sub_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test_mul_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test_mul_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fmul <2 x double> %a, %b
@@ -689,12 +689,12 @@ define <2 x double> @insert_test_mul_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test_div_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test_div_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fdiv <2 x double> %a, %b
@@ -704,13 +704,13 @@ define <2 x double> @insert_test_div_sd(<2 x double> %a, <2 x double> %b) {
define <4 x float> @insert_test2_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test2_add_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test2_add_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fadd <4 x float> %b, %a
@@ -720,13 +720,13 @@ define <4 x float> @insert_test2_add_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test2_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test2_sub_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test2_sub_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fsub <4 x float> %b, %a
@@ -736,13 +736,13 @@ define <4 x float> @insert_test2_sub_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test2_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test2_mul_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test2_mul_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fmul <4 x float> %b, %a
@@ -752,13 +752,13 @@ define <4 x float> @insert_test2_mul_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test2_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test2_div_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test2_div_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fdiv <4 x float> %b, %a
@@ -768,13 +768,13 @@ define <4 x float> @insert_test2_div_ss(<4 x float> %a, <4 x float> %b) {
define <2 x double> @insert_test2_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test2_add_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test2_add_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fadd <2 x double> %b, %a
@@ -784,13 +784,13 @@ define <2 x double> @insert_test2_add_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test2_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test2_sub_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test2_sub_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fsub <2 x double> %b, %a
@@ -800,13 +800,13 @@ define <2 x double> @insert_test2_sub_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test2_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test2_mul_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test2_mul_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fmul <2 x double> %b, %a
@@ -816,13 +816,13 @@ define <2 x double> @insert_test2_mul_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test2_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test2_div_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test2_div_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fdiv <2 x double> %b, %a
@@ -832,12 +832,12 @@ define <2 x double> @insert_test2_div_sd(<2 x double> %a, <2 x double> %b) {
define <4 x float> @insert_test3_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test3_add_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test3_add_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fadd <4 x float> %a, %b
@@ -847,12 +847,12 @@ define <4 x float> @insert_test3_add_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test3_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test3_sub_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test3_sub_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fsub <4 x float> %a, %b
@@ -862,12 +862,12 @@ define <4 x float> @insert_test3_sub_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test3_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test3_mul_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test3_mul_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fmul <4 x float> %a, %b
@@ -877,12 +877,12 @@ define <4 x float> @insert_test3_mul_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test3_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test3_div_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test3_div_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fdiv <4 x float> %a, %b
@@ -892,12 +892,12 @@ define <4 x float> @insert_test3_div_ss(<4 x float> %a, <4 x float> %b) {
define <2 x double> @insert_test3_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test3_add_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test3_add_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fadd <2 x double> %a, %b
@@ -907,12 +907,12 @@ define <2 x double> @insert_test3_add_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test3_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test3_sub_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test3_sub_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fsub <2 x double> %a, %b
@@ -922,12 +922,12 @@ define <2 x double> @insert_test3_sub_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test3_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test3_mul_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test3_mul_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fmul <2 x double> %a, %b
@@ -937,12 +937,12 @@ define <2 x double> @insert_test3_mul_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test3_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test3_div_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test3_div_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = fdiv <2 x double> %a, %b
@@ -952,13 +952,13 @@ define <2 x double> @insert_test3_div_sd(<2 x double> %a, <2 x double> %b) {
define <4 x float> @insert_test4_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test4_add_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test4_add_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fadd <4 x float> %b, %a
@@ -968,13 +968,13 @@ define <4 x float> @insert_test4_add_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test4_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test4_sub_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test4_sub_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fsub <4 x float> %b, %a
@@ -984,13 +984,13 @@ define <4 x float> @insert_test4_sub_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test4_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test4_mul_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test4_mul_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fmul <4 x float> %b, %a
@@ -1000,13 +1000,13 @@ define <4 x float> @insert_test4_mul_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @insert_test4_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test4_div_ss:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test4_div_ss:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fdiv <4 x float> %b, %a
@@ -1016,13 +1016,13 @@ define <4 x float> @insert_test4_div_ss(<4 x float> %a, <4 x float> %b) {
define <2 x double> @insert_test4_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test4_add_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test4_add_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fadd <2 x double> %b, %a
@@ -1032,13 +1032,13 @@ define <2 x double> @insert_test4_add_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test4_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test4_sub_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test4_sub_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fsub <2 x double> %b, %a
@@ -1048,13 +1048,13 @@ define <2 x double> @insert_test4_sub_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test4_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test4_mul_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: mulsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test4_mul_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fmul <2 x double> %b, %a
@@ -1064,13 +1064,13 @@ define <2 x double> @insert_test4_mul_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @insert_test4_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: insert_test4_div_sd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: divsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_test4_div_sd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fdiv <2 x double> %b, %a
@@ -1080,10 +1080,10 @@ define <2 x double> @insert_test4_div_sd(<2 x double> %a, <2 x double> %b) {
define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; SSE2-LABEL: add_ss_mask:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: testb $1, %dil
; SSE2-NEXT: jne .LBB62_1
-; SSE2-NEXT: # BB#2:
+; SSE2-NEXT: # %bb.2:
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq
@@ -1093,10 +1093,10 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
; SSE2-NEXT: retq
;
; SSE41-LABEL: add_ss_mask:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: testb $1, %dil
; SSE41-NEXT: jne .LBB62_1
-; SSE41-NEXT: # BB#2:
+; SSE41-NEXT: # %bb.2:
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: retq
@@ -1106,17 +1106,17 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
; SSE41-NEXT: retq
;
; AVX1-LABEL: add_ss_mask:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: testb $1, %dil
; AVX1-NEXT: je .LBB62_2
-; AVX1-NEXT: # BB#1:
+; AVX1-NEXT: # %bb.1:
; AVX1-NEXT: vaddss %xmm1, %xmm0, %xmm2
; AVX1-NEXT: .LBB62_2:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; AVX1-NEXT: retq
;
; AVX512-LABEL: add_ss_mask:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: kmovw %edi, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
@@ -1135,10 +1135,10 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; SSE2-LABEL: add_sd_mask:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: testb $1, %dil
; SSE2-NEXT: jne .LBB63_1
-; SSE2-NEXT: # BB#2:
+; SSE2-NEXT: # %bb.2:
; SSE2-NEXT: movapd %xmm2, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
@@ -1148,10 +1148,10 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
; SSE2-NEXT: retq
;
; SSE41-LABEL: add_sd_mask:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: testb $1, %dil
; SSE41-NEXT: jne .LBB63_1
-; SSE41-NEXT: # BB#2:
+; SSE41-NEXT: # %bb.2:
; SSE41-NEXT: movapd %xmm2, %xmm1
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
@@ -1161,17 +1161,17 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
; SSE41-NEXT: retq
;
; AVX1-LABEL: add_sd_mask:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: testb $1, %dil
; AVX1-NEXT: je .LBB63_2
-; AVX1-NEXT: # BB#1:
+; AVX1-NEXT: # %bb.1:
; AVX1-NEXT: vaddsd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: .LBB63_2:
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; AVX1-NEXT: retq
;
; AVX512-LABEL: add_sd_mask:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: kmovw %edi, %k1
; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
diff --git a/test/CodeGen/X86/sse-schedule.ll b/test/CodeGen/X86/sse-schedule.ll
index 04e5f523f79..b2429cc465a 100644
--- a/test/CodeGen/X86/sse-schedule.ll
+++ b/test/CodeGen/X86/sse-schedule.ll
@@ -13,61 +13,61 @@
define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_addps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: addps (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_addps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: addps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: addps (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_addps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: addps (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_addps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -79,61 +79,61 @@ define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
define float @test_addss(float %a0, float %a1, float *%a2) {
; GENERIC-LABEL: test_addss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: addss (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_addss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: addss %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: addss (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_addss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: addss (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_addss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -145,13 +145,13 @@ define float @test_addss(float %a0, float %a1, float *%a2) {
define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_andps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andps %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: andps (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_andps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: andps %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: andps (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -161,49 +161,49 @@ define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_andps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: andps %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: andps (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_andps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_andps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -219,13 +219,13 @@ define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_andnotps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andnps %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: andnps (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_andnotps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: andnps %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: andnps (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -235,49 +235,49 @@ define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_andnotps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: andnps %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: andnps (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_andnotps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andnotps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andnotps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andnotps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_andnotps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andnotps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andnotps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -295,56 +295,56 @@ define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float>
define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_cmpps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cmpeqps %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: cmpeqps (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: orps %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cmpps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cmpeqps %xmm0, %xmm1 # sched: [5:5.00]
; ATOM-NEXT: cmpeqps (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: orps %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cmpps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cmpeqps %xmm0, %xmm1 # sched: [3:1.00]
; SLM-NEXT: cmpeqps (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: orps %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cmpps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; SANDY-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmpps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; HASWELL-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cmpps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; BROADWELL-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cmpps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [4:0.33]
; SKYLAKE-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cmpps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vcmpeqps (%rdi), %xmm0, %k1 # sched: [9:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
@@ -352,14 +352,14 @@ define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cmpps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; BTVER2-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cmpps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -375,61 +375,61 @@ define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
define float @test_cmpss(float %a0, float %a1, float *%a2) {
; GENERIC-LABEL: test_cmpss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cmpeqss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: cmpeqss (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cmpss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cmpeqss %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: cmpeqss (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cmpss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cmpeqss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: cmpeqss (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cmpss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmpss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cmpss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cmpss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cmpss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cmpss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cmpss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -446,7 +446,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_comiss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: comiss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: setnp %al # sched: [1:0.50]
; GENERIC-NEXT: sete %cl # sched: [1:0.50]
@@ -460,7 +460,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_comiss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: comiss %xmm1, %xmm0 # sched: [9:4.50]
; ATOM-NEXT: setnp %al # sched: [1:0.50]
; ATOM-NEXT: sete %cl # sched: [1:0.50]
@@ -474,7 +474,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_comiss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: comiss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: setnp %al # sched: [1:0.50]
; SLM-NEXT: sete %cl # sched: [1:0.50]
@@ -488,7 +488,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_comiss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: setnp %al # sched: [1:0.50]
; SANDY-NEXT: sete %cl # sched: [1:0.50]
@@ -502,7 +502,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_comiss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: setnp %al # sched: [1:0.50]
; HASWELL-NEXT: sete %cl # sched: [1:0.50]
@@ -516,7 +516,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_comiss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: setnp %al # sched: [1:0.50]
; BROADWELL-NEXT: sete %cl # sched: [1:0.50]
@@ -530,7 +530,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_comiss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: setnp %al # sched: [1:0.50]
; SKYLAKE-NEXT: sete %cl # sched: [1:0.50]
@@ -544,7 +544,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_comiss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
; SKX-NEXT: setnp %al # sched: [1:0.50]
; SKX-NEXT: sete %cl # sched: [1:0.50]
@@ -558,7 +558,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_comiss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setnp %al # sched: [1:0.50]
; BTVER2-NEXT: sete %cl # sched: [1:0.50]
@@ -572,7 +572,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_comiss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
@@ -594,70 +594,70 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
define float @test_cvtsi2ss(i32 %a0, i32 *%a1) {
; GENERIC-LABEL: test_cvtsi2ss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtsi2ssl %edi, %xmm1 # sched: [5:2.00]
; GENERIC-NEXT: cvtsi2ssl (%rsi), %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtsi2ss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtsi2ssl (%rsi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: cvtsi2ssl %edi, %xmm1 # sched: [6:3.00]
; ATOM-NEXT: addss %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtsi2ss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtsi2ssl (%rsi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: cvtsi2ssl %edi, %xmm1 # sched: [4:0.50]
; SLM-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtsi2ss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:2.00]
; SANDY-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsi2ss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; HASWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtsi2ss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtsi2ss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtsi2ss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SKX-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtsi2ss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtsi2ss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -671,70 +671,70 @@ define float @test_cvtsi2ss(i32 %a0, i32 *%a1) {
define float @test_cvtsi2ssq(i64 %a0, i64 *%a1) {
; GENERIC-LABEL: test_cvtsi2ssq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtsi2ssq %rdi, %xmm1 # sched: [5:2.00]
; GENERIC-NEXT: cvtsi2ssq (%rsi), %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtsi2ssq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtsi2ssq (%rsi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: cvtsi2ssq %rdi, %xmm1 # sched: [6:3.00]
; ATOM-NEXT: addss %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtsi2ssq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtsi2ssq (%rsi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: cvtsi2ssq %rdi, %xmm1 # sched: [4:0.50]
; SLM-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtsi2ssq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:2.00]
; SANDY-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsi2ssq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; HASWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtsi2ssq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:2.00]
; BROADWELL-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtsi2ssq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtsi2ssq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [6:2.00]
; SKX-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SKX-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtsi2ssq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtsi2ssq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -748,70 +748,70 @@ define float @test_cvtsi2ssq(i64 %a0, i64 *%a1) {
define i32 @test_cvtss2si(float %a0, float *%a1) {
; GENERIC-LABEL: test_cvtss2si:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtss2si %xmm0, %ecx # sched: [5:1.00]
; GENERIC-NEXT: cvtss2si (%rdi), %eax # sched: [9:1.00]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtss2si:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtss2si (%rdi), %eax # sched: [9:4.50]
; ATOM-NEXT: cvtss2si %xmm0, %ecx # sched: [8:4.00]
; ATOM-NEXT: addl %ecx, %eax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtss2si:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtss2si (%rdi), %eax # sched: [7:1.00]
; SLM-NEXT: cvtss2si %xmm0, %ecx # sched: [4:0.50]
; SLM-NEXT: addl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtss2si:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtss2si %xmm0, %ecx # sched: [5:1.00]
; SANDY-NEXT: vcvtss2si (%rdi), %eax # sched: [10:1.00]
; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtss2si:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtss2si %xmm0, %ecx # sched: [4:1.00]
; HASWELL-NEXT: vcvtss2si (%rdi), %eax # sched: [4:1.00]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtss2si:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtss2si %xmm0, %ecx # sched: [4:1.00]
; BROADWELL-NEXT: vcvtss2si (%rdi), %eax # sched: [9:1.00]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtss2si:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtss2si %xmm0, %ecx # sched: [6:1.00]
; SKYLAKE-NEXT: vcvtss2si (%rdi), %eax # sched: [11:1.00]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtss2si:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtss2si %xmm0, %ecx # sched: [6:1.00]
; SKX-NEXT: vcvtss2si (%rdi), %eax # sched: [11:1.00]
; SKX-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtss2si:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtss2si (%rdi), %eax # sched: [8:1.00]
; BTVER2-NEXT: vcvtss2si %xmm0, %ecx # sched: [3:1.00]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtss2si:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtss2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtss2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -828,70 +828,70 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
define i64 @test_cvtss2siq(float %a0, float *%a1) {
; GENERIC-LABEL: test_cvtss2siq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtss2si %xmm0, %rcx # sched: [5:1.00]
; GENERIC-NEXT: cvtss2si (%rdi), %rax # sched: [9:1.00]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtss2siq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtss2si (%rdi), %rax # sched: [10:5.00]
; ATOM-NEXT: cvtss2si %xmm0, %rcx # sched: [9:4.50]
; ATOM-NEXT: addq %rcx, %rax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtss2siq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtss2si (%rdi), %rax # sched: [7:1.00]
; SLM-NEXT: cvtss2si %xmm0, %rcx # sched: [4:0.50]
; SLM-NEXT: addq %rcx, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtss2siq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtss2si %xmm0, %rcx # sched: [5:1.00]
; SANDY-NEXT: vcvtss2si (%rdi), %rax # sched: [10:1.00]
; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtss2siq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtss2si %xmm0, %rcx # sched: [4:1.00]
; HASWELL-NEXT: vcvtss2si (%rdi), %rax # sched: [4:1.00]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtss2siq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtss2si %xmm0, %rcx # sched: [4:1.00]
; BROADWELL-NEXT: vcvtss2si (%rdi), %rax # sched: [9:1.00]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtss2siq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtss2si %xmm0, %rcx # sched: [6:1.00]
; SKYLAKE-NEXT: vcvtss2si (%rdi), %rax # sched: [11:1.00]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtss2siq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtss2si %xmm0, %rcx # sched: [6:1.00]
; SKX-NEXT: vcvtss2si (%rdi), %rax # sched: [11:1.00]
; SKX-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtss2siq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtss2si (%rdi), %rax # sched: [8:1.00]
; BTVER2-NEXT: vcvtss2si %xmm0, %rcx # sched: [3:1.00]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtss2siq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtss2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtss2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -908,70 +908,70 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
define i32 @test_cvttss2si(float %a0, float *%a1) {
; GENERIC-LABEL: test_cvttss2si:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvttss2si %xmm0, %ecx # sched: [5:1.00]
; GENERIC-NEXT: cvttss2si (%rdi), %eax # sched: [9:1.00]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvttss2si:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvttss2si (%rdi), %eax # sched: [9:4.50]
; ATOM-NEXT: cvttss2si %xmm0, %ecx # sched: [8:4.00]
; ATOM-NEXT: addl %ecx, %eax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvttss2si:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvttss2si (%rdi), %eax # sched: [7:1.00]
; SLM-NEXT: cvttss2si %xmm0, %ecx # sched: [4:0.50]
; SLM-NEXT: addl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvttss2si:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvttss2si %xmm0, %ecx # sched: [5:1.00]
; SANDY-NEXT: vcvttss2si (%rdi), %eax # sched: [10:1.00]
; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttss2si:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvttss2si %xmm0, %ecx # sched: [4:1.00]
; HASWELL-NEXT: vcvttss2si (%rdi), %eax # sched: [4:1.00]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttss2si:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvttss2si %xmm0, %ecx # sched: [4:1.00]
; BROADWELL-NEXT: vcvttss2si (%rdi), %eax # sched: [9:1.00]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttss2si:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvttss2si %xmm0, %ecx # sched: [7:1.00]
; SKYLAKE-NEXT: vcvttss2si (%rdi), %eax # sched: [11:1.00]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttss2si:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttss2si %xmm0, %ecx # sched: [7:1.00]
; SKX-NEXT: vcvttss2si (%rdi), %eax # sched: [11:1.00]
; SKX-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttss2si:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvttss2si (%rdi), %eax # sched: [8:1.00]
; BTVER2-NEXT: vcvttss2si %xmm0, %ecx # sched: [3:1.00]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttss2si:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvttss2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttss2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -985,70 +985,70 @@ define i32 @test_cvttss2si(float %a0, float *%a1) {
define i64 @test_cvttss2siq(float %a0, float *%a1) {
; GENERIC-LABEL: test_cvttss2siq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvttss2si %xmm0, %rcx # sched: [5:1.00]
; GENERIC-NEXT: cvttss2si (%rdi), %rax # sched: [9:1.00]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvttss2siq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvttss2si (%rdi), %rax # sched: [10:5.00]
; ATOM-NEXT: cvttss2si %xmm0, %rcx # sched: [9:4.50]
; ATOM-NEXT: addq %rcx, %rax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvttss2siq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvttss2si (%rdi), %rax # sched: [7:1.00]
; SLM-NEXT: cvttss2si %xmm0, %rcx # sched: [4:0.50]
; SLM-NEXT: addq %rcx, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvttss2siq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvttss2si %xmm0, %rcx # sched: [5:1.00]
; SANDY-NEXT: vcvttss2si (%rdi), %rax # sched: [10:1.00]
; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttss2siq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvttss2si %xmm0, %rcx # sched: [4:1.00]
; HASWELL-NEXT: vcvttss2si (%rdi), %rax # sched: [4:1.00]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttss2siq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvttss2si %xmm0, %rcx # sched: [4:1.00]
; BROADWELL-NEXT: vcvttss2si (%rdi), %rax # sched: [9:1.00]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttss2siq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvttss2si %xmm0, %rcx # sched: [7:1.00]
; SKYLAKE-NEXT: vcvttss2si (%rdi), %rax # sched: [11:1.00]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttss2siq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttss2si %xmm0, %rcx # sched: [7:1.00]
; SKX-NEXT: vcvttss2si (%rdi), %rax # sched: [11:1.00]
; SKX-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttss2siq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvttss2si (%rdi), %rax # sched: [8:1.00]
; BTVER2-NEXT: vcvttss2si %xmm0, %rcx # sched: [3:1.00]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttss2siq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvttss2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttss2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -1062,61 +1062,61 @@ define i64 @test_cvttss2siq(float %a0, float *%a1) {
define <4 x float> @test_divps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_divps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: divps %xmm1, %xmm0 # sched: [14:1.00]
; GENERIC-NEXT: divps (%rdi), %xmm0 # sched: [20:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_divps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: divps %xmm1, %xmm0 # sched: [70:35.00]
; ATOM-NEXT: divps (%rdi), %xmm0 # sched: [125:62.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_divps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: divps %xmm1, %xmm0 # sched: [34:34.00]
; SLM-NEXT: divps (%rdi), %xmm0 # sched: [37:34.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_divps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
; SANDY-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [13:1.00]
; HASWELL-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_divps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
; BROADWELL-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_divps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
; SKYLAKE-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [17:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_divps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
; SKX-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [17:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_divps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_divps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1128,61 +1128,61 @@ define <4 x float> @test_divps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
define float @test_divss(float %a0, float %a1, float *%a2) {
; GENERIC-LABEL: test_divss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: divss %xmm1, %xmm0 # sched: [14:1.00]
; GENERIC-NEXT: divss (%rdi), %xmm0 # sched: [20:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_divss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: divss %xmm1, %xmm0 # sched: [34:17.00]
; ATOM-NEXT: divss (%rdi), %xmm0 # sched: [62:31.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_divss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: divss %xmm1, %xmm0 # sched: [34:34.00]
; SLM-NEXT: divss (%rdi), %xmm0 # sched: [37:34.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_divss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
; SANDY-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [13:1.00]
; HASWELL-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_divss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
; BROADWELL-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_divss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
; SKYLAKE-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_divss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
; SKX-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_divss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_divss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1194,61 +1194,61 @@ define float @test_divss(float %a0, float %a1, float *%a2) {
define void @test_ldmxcsr(i32 %a0) {
; GENERIC-LABEL: test_ldmxcsr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; GENERIC-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_ldmxcsr:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; ATOM-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:2.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_ldmxcsr:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; SLM-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ldmxcsr:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; SANDY-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ldmxcsr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; HASWELL-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [2:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ldmxcsr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; BROADWELL-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ldmxcsr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; SKYLAKE-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_ldmxcsr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; SKX-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ldmxcsr:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; BTVER2-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ldmxcsr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50]
; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1262,61 +1262,61 @@ declare void @llvm.x86.sse.ldmxcsr(i8*) nounwind readnone
define <4 x float> @test_maxps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_maxps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: maxps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: maxps (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_maxps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: maxps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: maxps (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_maxps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: maxps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: maxps (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_maxps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maxps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maxps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maxps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maxps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maxps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1329,61 +1329,61 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_maxss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_maxss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: maxss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: maxss (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_maxss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: maxss %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: maxss (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_maxss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: maxss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: maxss (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_maxss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maxss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maxss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maxss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maxss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maxss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1396,61 +1396,61 @@ declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_minps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_minps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: minps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: minps (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_minps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: minps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: minps (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_minps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: minps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: minps (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_minps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_minps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_minps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_minps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_minps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_minps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1463,61 +1463,61 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_minss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_minss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: minss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: minss (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_minss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: minss %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: minss (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_minss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: minss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: minss (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_minss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_minss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_minss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_minss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_minss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_minss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1530,70 +1530,70 @@ declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind read
define void @test_movaps(<4 x float> *%a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_movaps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movaps (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: addps %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: movaps %xmm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movaps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movaps (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: addps %xmm0, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: movaps %xmm0, (%rsi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movaps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movaps (%rdi), %xmm0 # sched: [3:1.00]
; SLM-NEXT: addps %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: movaps %xmm0, (%rsi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movaps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovaps (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovaps %xmm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movaps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovaps (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movaps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovaps (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movaps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovaps (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movaps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovaps (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movaps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movaps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovaps (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:0.50]
@@ -1608,12 +1608,12 @@ define void @test_movaps(<4 x float> *%a0, <4 x float> *%a1) {
define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) {
; GENERIC-LABEL: test_movhlps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movhlps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -1624,42 +1624,42 @@ define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movhlps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movhlps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movhlps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movhlps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movhlps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movhlps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movhlps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movhlps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
@@ -1670,7 +1670,7 @@ define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) {
define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
; GENERIC-LABEL: test_movhps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
; GENERIC-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] sched: [1:1.00]
@@ -1678,7 +1678,7 @@ define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movhps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
; ATOM-NEXT: addps %xmm0, %xmm1 # sched: [5:5.00]
; ATOM-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] sched: [1:1.00]
@@ -1686,56 +1686,56 @@ define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movhps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00]
; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00]
; SLM-NEXT: pextrq $1, %xmm1, (%rdi) # sched: [4:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movhps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movhps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movhps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movhps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movhps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movhps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movhps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
@@ -1754,61 +1754,61 @@ define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
define <4 x float> @test_movlhps(<4 x float> %a0, <4 x float> %a1) {
; GENERIC-LABEL: test_movlhps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movlhps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; ATOM-NEXT: addps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movlhps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movlhps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movlhps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; HASWELL-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movlhps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; BROADWELL-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movlhps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SKYLAKE-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movlhps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SKX-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movlhps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movlhps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1819,70 +1819,70 @@ define <4 x float> @test_movlhps(<4 x float> %a0, <4 x float> %a1) {
define void @test_movlps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
; GENERIC-LABEL: test_movlps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [7:1.00]
; GENERIC-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: movlps %xmm1, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movlps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [1:1.00]
; ATOM-NEXT: addps %xmm0, %xmm1 # sched: [5:5.00]
; ATOM-NEXT: movlps %xmm1, (%rdi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movlps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [4:1.00]
; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00]
; SLM-NEXT: movlps %xmm1, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movlps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [7:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovlps %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movlps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [1:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movlps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movlps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movlps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movlps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movlps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:0.50]
@@ -1899,54 +1899,54 @@ define void @test_movlps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
define i32 @test_movmskps(<4 x float> %a0) {
; GENERIC-LABEL: test_movmskps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movmskps %xmm0, %eax # sched: [2:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movmskps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movmskps %xmm0, %eax # sched: [3:3.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movmskps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movmskps %xmm0, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movmskps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovmskps %xmm0, %eax # sched: [2:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movmskps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovmskps %xmm0, %eax # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movmskps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovmskps %xmm0, %eax # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movmskps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovmskps %xmm0, %eax # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movmskps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovmskps %xmm0, %eax # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movmskps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovmskps %xmm0, %eax # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movmskps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovmskps %xmm0, %eax # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
@@ -1956,12 +1956,12 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_movntps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movntps %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movntps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movntps %xmm0, (%rdi) # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -1972,42 +1972,42 @@ define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movntps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movntps %xmm0, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movntps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovntps %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movntps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0
@@ -2016,70 +2016,70 @@ define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) {
define void @test_movss_mem(float* %a0, float* %a1) {
; GENERIC-LABEL: test_movss_mem:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: addss %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: movss %xmm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movss_mem:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [1:1.00]
; ATOM-NEXT: addss %xmm0, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: movss %xmm0, (%rsi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movss_mem:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [3:1.00]
; SLM-NEXT: addss %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: movss %xmm0, (%rsi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movss_mem:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovss %xmm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movss_mem:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [1:0.50]
; HASWELL-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movss_mem:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; BROADWELL-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movss_mem:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; SKYLAKE-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movss_mem:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movss_mem:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movss_mem:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovss %xmm0, (%rsi) # sched: [1:0.50]
@@ -2092,12 +2092,12 @@ define void @test_movss_mem(float* %a0, float* %a1) {
define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) {
; GENERIC-LABEL: test_movss_reg:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movss_reg:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -2108,42 +2108,42 @@ define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movss_reg:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movss_reg:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movss_reg:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movss_reg:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movss_reg:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movss_reg:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movss_reg:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movss_reg:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -2152,70 +2152,70 @@ define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) {
define void @test_movups(<4 x float> *%a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_movups:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movups (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: addps %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: movups %xmm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movups:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movups (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: addps %xmm0, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: movups %xmm0, (%rsi) # sched: [2:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movups:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movups (%rdi), %xmm0 # sched: [3:1.00]
; SLM-NEXT: addps %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: movups %xmm0, (%rsi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movups:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovups (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovups %xmm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movups:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovups (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movups:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovups (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movups:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovups (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movups:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovups (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movups:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovups (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movups:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovups (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovups %xmm0, (%rsi) # sched: [1:0.50]
@@ -2228,61 +2228,61 @@ define void @test_movups(<4 x float> *%a0, <4 x float> *%a1) {
define <4 x float> @test_mulps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_mulps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: mulps %xmm1, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: mulps (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_mulps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: mulps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: mulps (%rdi), %xmm0 # sched: [10:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_mulps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: mulps %xmm1, %xmm0 # sched: [5:2.00]
; SLM-NEXT: mulps (%rdi), %xmm0 # sched: [8:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_mulps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mulps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; BROADWELL-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mulps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mulps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mulps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mulps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; ZNVER1-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2294,61 +2294,61 @@ define <4 x float> @test_mulps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
define float @test_mulss(float %a0, float %a1, float *%a2) {
; GENERIC-LABEL: test_mulss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: mulss %xmm1, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: mulss (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_mulss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: mulss %xmm1, %xmm0 # sched: [4:4.00]
; ATOM-NEXT: mulss (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_mulss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: mulss %xmm1, %xmm0 # sched: [5:2.00]
; SLM-NEXT: mulss (%rdi), %xmm0 # sched: [8:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_mulss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mulss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; BROADWELL-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mulss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mulss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mulss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mulss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; ZNVER1-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2360,13 +2360,13 @@ define float @test_mulss(float %a0, float %a1, float *%a2) {
define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_orps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: orps %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: orps (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_orps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: orps %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: orps (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -2376,49 +2376,49 @@ define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_orps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: orps %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: orps (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_orps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_orps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_orps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_orps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_orps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_orps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_orps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2434,12 +2434,12 @@ define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
define void @test_prefetchnta(i8* %a0) {
; GENERIC-LABEL: test_prefetchnta:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: prefetchnta (%rdi) # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_prefetchnta:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: prefetchnta (%rdi) # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -2450,42 +2450,42 @@ define void @test_prefetchnta(i8* %a0) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_prefetchnta:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: prefetchnta (%rdi) # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_prefetchnta:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: prefetchnta (%rdi) # sched: [5:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_prefetchnta:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: prefetchnta (%rdi) # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_prefetchnta:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: prefetchnta (%rdi) # sched: [5:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_prefetchnta:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: prefetchnta (%rdi) # sched: [5:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_prefetchnta:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: prefetchnta (%rdi) # sched: [5:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_prefetchnta:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: prefetchnta (%rdi) # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_prefetchnta:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: prefetchnta (%rdi) # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
@@ -2495,14 +2495,14 @@ declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind readnone
define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_rcpps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: rcpps %xmm0, %xmm1 # sched: [5:1.00]
; GENERIC-NEXT: rcpps (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_rcpps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: rcpps (%rdi), %xmm1 # sched: [10:5.00]
; ATOM-NEXT: rcpps %xmm0, %xmm0 # sched: [9:4.50]
; ATOM-NEXT: addps %xmm0, %xmm1 # sched: [5:5.00]
@@ -2510,7 +2510,7 @@ define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_rcpps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: rcpps (%rdi), %xmm1 # sched: [8:1.00]
; SLM-NEXT: rcpps %xmm0, %xmm0 # sched: [5:1.00]
; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00]
@@ -2518,49 +2518,49 @@ define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_rcpps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vrcpps (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rcpps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vrcpps (%rdi), %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_rcpps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vrcpps (%rdi), %xmm1 # sched: [10:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_rcpps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vrcpps %xmm0, %xmm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vrcpps (%rdi), %xmm1 # sched: [10:1.00]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_rcpps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpps %xmm0, %xmm0 # sched: [4:1.00]
; SKX-NEXT: vrcpps (%rdi), %xmm1 # sched: [10:1.00]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_rcpps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vrcpps (%rdi), %xmm1 # sched: [7:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rcpps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vrcpps (%rdi), %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -2577,7 +2577,7 @@ declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
define <4 x float> @test_rcpss(float %a0, float *%a1) {
; GENERIC-LABEL: test_rcpss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: rcpss %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: rcpss %xmm1, %xmm1 # sched: [5:1.00]
@@ -2585,7 +2585,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_rcpss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:1.00]
; ATOM-NEXT: rcpss %xmm0, %xmm0
; ATOM-NEXT: rcpss %xmm1, %xmm1
@@ -2593,7 +2593,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_rcpss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [3:1.00]
; SLM-NEXT: rcpss %xmm0, %xmm0 # sched: [8:1.00]
; SLM-NEXT: rcpss %xmm1, %xmm1 # sched: [8:1.00]
@@ -2601,7 +2601,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_rcpss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2609,7 +2609,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rcpss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:0.50]
; HASWELL-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2617,7 +2617,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_rcpss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; BROADWELL-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2625,7 +2625,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_rcpss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKYLAKE-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -2633,7 +2633,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_rcpss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; SKX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -2641,7 +2641,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_rcpss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [7:1.00]
@@ -2649,7 +2649,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rcpss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
; ZNVER1-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
@@ -2667,14 +2667,14 @@ declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_rsqrtps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: rsqrtps %xmm0, %xmm1 # sched: [5:1.00]
; GENERIC-NEXT: rsqrtps (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_rsqrtps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: rsqrtps (%rdi), %xmm1 # sched: [10:5.00]
; ATOM-NEXT: rsqrtps %xmm0, %xmm0 # sched: [9:4.50]
; ATOM-NEXT: addps %xmm0, %xmm1 # sched: [5:5.00]
@@ -2682,7 +2682,7 @@ define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_rsqrtps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: rsqrtps (%rdi), %xmm1 # sched: [8:1.00]
; SLM-NEXT: rsqrtps %xmm0, %xmm0 # sched: [5:1.00]
; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00]
@@ -2690,49 +2690,49 @@ define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_rsqrtps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [11:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rsqrtps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [5:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_rsqrtps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [10:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_rsqrtps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [10:1.00]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_rsqrtps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [4:1.00]
; SKX-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [10:1.00]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_rsqrtps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [7:1.00]
; BTVER2-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rsqrtps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -2749,7 +2749,7 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; GENERIC-LABEL: test_rsqrtss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: rsqrtss %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: rsqrtss %xmm1, %xmm1 # sched: [5:1.00]
@@ -2757,7 +2757,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_rsqrtss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:1.00]
; ATOM-NEXT: rsqrtss %xmm0, %xmm0
; ATOM-NEXT: rsqrtss %xmm1, %xmm1
@@ -2765,7 +2765,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_rsqrtss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [3:1.00]
; SLM-NEXT: rsqrtss %xmm0, %xmm0 # sched: [8:1.00]
; SLM-NEXT: rsqrtss %xmm1, %xmm1 # sched: [8:1.00]
@@ -2773,7 +2773,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_rsqrtss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2781,7 +2781,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_rsqrtss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:0.50]
; HASWELL-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2789,7 +2789,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_rsqrtss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; BROADWELL-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2797,7 +2797,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_rsqrtss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKYLAKE-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -2805,7 +2805,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_rsqrtss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; SKX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -2813,7 +2813,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_rsqrtss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [7:1.00]
@@ -2821,7 +2821,7 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rsqrtss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:0.50]
; ZNVER1-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:0.50]
@@ -2839,12 +2839,12 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
define void @test_sfence() {
; GENERIC-LABEL: test_sfence:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sfence # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_sfence:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: sfence # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -2855,42 +2855,42 @@ define void @test_sfence() {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_sfence:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: sfence # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_sfence:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: sfence # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sfence:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: sfence # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sfence:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: sfence # sched: [2:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sfence:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: sfence # sched: [2:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_sfence:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: sfence # sched: [2:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_sfence:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: sfence # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sfence:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: sfence # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse.sfence()
@@ -2900,13 +2900,13 @@ declare void @llvm.x86.sse.sfence() nounwind readnone
define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) nounwind {
; GENERIC-LABEL: test_shufps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
; GENERIC-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_shufps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
; ATOM-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -2916,49 +2916,49 @@ define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_shufps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
; SLM-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_shufps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
; SANDY-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shufps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
; HASWELL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shufps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
; BROADWELL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shufps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
; SKYLAKE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_shufps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
; SKX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_shufps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
; BTVER2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shufps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2970,21 +2970,21 @@ define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_sqrtps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sqrtps %xmm0, %xmm1 # sched: [14:1.00]
; GENERIC-NEXT: sqrtps (%rdi), %xmm0 # sched: [20:1.00]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_sqrtps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: sqrtps %xmm0, %xmm1 # sched: [70:35.00]
; ATOM-NEXT: sqrtps (%rdi), %xmm0 # sched: [70:35.00]
; ATOM-NEXT: addps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_sqrtps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: sqrtps (%rdi), %xmm1 # sched: [18:1.00]
; SLM-NEXT: sqrtps %xmm0, %xmm0 # sched: [15:1.00]
; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00]
@@ -2992,49 +2992,49 @@ define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_sqrtps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsqrtps %xmm0, %xmm0 # sched: [14:1.00]
; SANDY-NEXT: vsqrtps (%rdi), %xmm1 # sched: [20:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsqrtps %xmm0, %xmm0 # sched: [14:1.00]
; HASWELL-NEXT: vsqrtps (%rdi), %xmm1 # sched: [14:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sqrtps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsqrtps %xmm0, %xmm0 # sched: [14:1.00]
; BROADWELL-NEXT: vsqrtps (%rdi), %xmm1 # sched: [19:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sqrtps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsqrtps %xmm0, %xmm0 # sched: [12:1.00]
; SKYLAKE-NEXT: vsqrtps (%rdi), %xmm1 # sched: [18:1.00]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_sqrtps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtps %xmm0, %xmm0 # sched: [12:1.00]
; SKX-NEXT: vsqrtps (%rdi), %xmm1 # sched: [18:1.00]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_sqrtps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsqrtps (%rdi), %xmm1 # sched: [26:21.00]
; BTVER2-NEXT: vsqrtps %xmm0, %xmm0 # sched: [21:21.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsqrtps (%rdi), %xmm1 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtps %xmm0, %xmm0 # sched: [20:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -3051,7 +3051,7 @@ declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_sqrtss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sqrtss %xmm0, %xmm0 # sched: [14:1.00]
; GENERIC-NEXT: movaps (%rdi), %xmm1 # sched: [6:0.50]
; GENERIC-NEXT: sqrtss %xmm1, %xmm1 # sched: [14:1.00]
@@ -3059,7 +3059,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_sqrtss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movaps (%rdi), %xmm1 # sched: [1:1.00]
; ATOM-NEXT: sqrtss %xmm0, %xmm0
; ATOM-NEXT: sqrtss %xmm1, %xmm1
@@ -3067,7 +3067,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_sqrtss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movaps (%rdi), %xmm1 # sched: [3:1.00]
; SLM-NEXT: sqrtss %xmm0, %xmm0 # sched: [18:1.00]
; SLM-NEXT: sqrtss %xmm1, %xmm1 # sched: [18:1.00]
@@ -3075,7 +3075,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_sqrtss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [114:1.00]
; SANDY-NEXT: vmovaps (%rdi), %xmm1 # sched: [6:0.50]
; SANDY-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [114:1.00]
@@ -3083,7 +3083,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [14:1.00]
; HASWELL-NEXT: vmovaps (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [14:1.00]
@@ -3091,7 +3091,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sqrtss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [14:1.00]
; BROADWELL-NEXT: vmovaps (%rdi), %xmm1 # sched: [5:0.50]
; BROADWELL-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [14:1.00]
@@ -3099,7 +3099,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sqrtss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:1.00]
; SKYLAKE-NEXT: vmovaps (%rdi), %xmm1 # sched: [6:0.50]
; SKYLAKE-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:1.00]
@@ -3107,7 +3107,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_sqrtss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:1.00]
; SKX-NEXT: vmovaps (%rdi), %xmm1 # sched: [6:0.50]
; SKX-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:1.00]
@@ -3115,7 +3115,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_sqrtss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps (%rdi), %xmm1 # sched: [5:1.00]
; BTVER2-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [26:21.00]
; BTVER2-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [26:21.00]
@@ -3123,7 +3123,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovaps (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
@@ -3139,61 +3139,61 @@ declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
define i32 @test_stmxcsr() {
; GENERIC-LABEL: test_stmxcsr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: stmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; GENERIC-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_stmxcsr:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: stmxcsr -{{[0-9]+}}(%rsp) # sched: [15:7.50]
; ATOM-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_stmxcsr:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: stmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; SLM-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_stmxcsr:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; SANDY-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_stmxcsr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; HASWELL-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_stmxcsr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [2:1.00]
; BROADWELL-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_stmxcsr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [2:1.00]
; SKYLAKE-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_stmxcsr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [2:1.00]
; SKX-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_stmxcsr:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; BTVER2-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_stmxcsr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
; ZNVER1-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3207,61 +3207,61 @@ declare void @llvm.x86.sse.stmxcsr(i8*) nounwind readnone
define <4 x float> @test_subps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_subps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: subps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: subps (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_subps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: subps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: subps (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_subps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: subps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: subps (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_subps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_subps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_subps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_subps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_subps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3273,61 +3273,61 @@ define <4 x float> @test_subps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
define float @test_subss(float %a0, float %a1, float *%a2) {
; GENERIC-LABEL: test_subss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: subss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: subss (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_subss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: subss %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: subss (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_subss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: subss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: subss (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_subss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_subss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_subss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_subss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_subss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3339,7 +3339,7 @@ define float @test_subss(float %a0, float %a1, float *%a2) {
define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_ucomiss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: ucomiss %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: setnp %al # sched: [1:0.50]
; GENERIC-NEXT: sete %cl # sched: [1:0.50]
@@ -3353,7 +3353,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_ucomiss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: ucomiss %xmm1, %xmm0 # sched: [9:4.50]
; ATOM-NEXT: setnp %al # sched: [1:0.50]
; ATOM-NEXT: sete %cl # sched: [1:0.50]
@@ -3367,7 +3367,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_ucomiss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: ucomiss %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: setnp %al # sched: [1:0.50]
; SLM-NEXT: sete %cl # sched: [1:0.50]
@@ -3381,7 +3381,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ucomiss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: setnp %al # sched: [1:0.50]
; SANDY-NEXT: sete %cl # sched: [1:0.50]
@@ -3395,7 +3395,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ucomiss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: setnp %al # sched: [1:0.50]
; HASWELL-NEXT: sete %cl # sched: [1:0.50]
@@ -3409,7 +3409,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ucomiss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: setnp %al # sched: [1:0.50]
; BROADWELL-NEXT: sete %cl # sched: [1:0.50]
@@ -3423,7 +3423,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ucomiss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: setnp %al # sched: [1:0.50]
; SKYLAKE-NEXT: sete %cl # sched: [1:0.50]
@@ -3437,7 +3437,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_ucomiss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
; SKX-NEXT: setnp %al # sched: [1:0.50]
; SKX-NEXT: sete %cl # sched: [1:0.50]
@@ -3451,7 +3451,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ucomiss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setnp %al # sched: [1:0.50]
; BTVER2-NEXT: sete %cl # sched: [1:0.50]
@@ -3465,7 +3465,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ucomiss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
@@ -3487,13 +3487,13 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_unpckhps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; GENERIC-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_unpckhps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; ATOM-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -3503,49 +3503,49 @@ define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_unpckhps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SLM-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_unpckhps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SANDY-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpckhps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; HASWELL-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_unpckhps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; BROADWELL-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_unpckhps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_unpckhps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SKX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_unpckhps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpckhps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3557,13 +3557,13 @@ define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float>
define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_unpcklps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; GENERIC-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_unpcklps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; ATOM-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -3573,49 +3573,49 @@ define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_unpcklps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SLM-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_unpcklps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SANDY-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpcklps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; HASWELL-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_unpcklps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; BROADWELL-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_unpcklps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SKYLAKE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_unpcklps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SKX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_unpcklps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpcklps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3627,13 +3627,13 @@ define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x float>
define <4 x float> @test_xorps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_xorps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: xorps %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: xorps (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_xorps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: xorps %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: xorps (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -3643,49 +3643,49 @@ define <4 x float> @test_xorps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_xorps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: xorps %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: xorps (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_xorps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_xorps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_xorps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_xorps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_xorps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_xorps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_xorps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
diff --git a/test/CodeGen/X86/sse1.ll b/test/CodeGen/X86/sse1.ll
index b29fc55e0b2..7222a27c826 100644
--- a/test/CodeGen/X86/sse1.ll
+++ b/test/CodeGen/X86/sse1.ll
@@ -14,7 +14,7 @@
; rdar://8368414
define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X32-LABEL: test4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movaps %xmm0, %xmm2
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X32-NEXT: addss %xmm1, %xmm0
@@ -24,7 +24,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps %xmm0, %xmm2
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X64-NEXT: addss %xmm1, %xmm0
@@ -52,11 +52,11 @@ entry:
define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X32-LABEL: vselect:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: je .LBB1_1
-; X32-NEXT: # BB#2: # %entry
+; X32-NEXT: # %bb.2: # %entry
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: jne .LBB1_5
@@ -91,11 +91,11 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X32-NEXT: retl
;
; X64-LABEL: vselect:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: testl %edx, %edx
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: je .LBB1_1
-; X64-NEXT: # BB#2: # %entry
+; X64-NEXT: # %bb.2: # %entry
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: testl %ecx, %ecx
; X64-NEXT: jne .LBB1_5
@@ -138,12 +138,12 @@ entry:
define <4 x float> @PR28044(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: PR28044:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpeqps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: PR28044:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpeqps %xmm1, %xmm0
; X64-NEXT: retq
%cmp = fcmp oeq <4 x float> %a0, %a1
@@ -157,7 +157,7 @@ define <4 x float> @PR28044(<4 x float> %a0, <4 x float> %a1) nounwind {
define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
; X32-LABEL: PR30512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
@@ -203,7 +203,7 @@ define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
; X32-NEXT: retl $4
;
; X64-LABEL: PR30512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl {{[0-9]+}}(%rsp), %r8d
; X64-NEXT: sete %al
@@ -251,12 +251,12 @@ define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
define <2 x float> @PR31672() #0 {
; X32-LABEL: PR31672:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: sqrtps {{\.LCPI.*}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: PR31672:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sqrtps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
%t0 = call fast <2 x float> @llvm.sqrt.v2f32(<2 x float> <float 42.0, float 3.0>)
diff --git a/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll b/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
index 54de15c292f..bfbcf250c7b 100644
--- a/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
@@ -5,7 +5,7 @@
define i64 @test_mm_cvtsd_si64(<2 x double> %a0) nounwind {
; X64-LABEL: test_mm_cvtsd_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtsd2si %xmm0, %rax
; X64-NEXT: retq
%res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0)
@@ -15,7 +15,7 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
define i64 @test_mm_cvtsi128_si64(<2 x i64> %a0) nounwind {
; X64-LABEL: test_mm_cvtsi128_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: retq
%res = extractelement <2 x i64> %a0, i32 0
@@ -24,7 +24,7 @@ define i64 @test_mm_cvtsi128_si64(<2 x i64> %a0) nounwind {
define <2 x double> @test_mm_cvtsi64_sd(<2 x double> %a0, i64 %a1) nounwind {
; X64-LABEL: test_mm_cvtsi64_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtsi2sdq %rdi, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1)
@@ -34,7 +34,7 @@ declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readn
define <2 x i64> @test_mm_cvtsi64_si128(i64 %a0) nounwind {
; X64-LABEL: test_mm_cvtsi64_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
%res0 = insertelement <2 x i64> undef, i64 %a0, i32 0
@@ -44,7 +44,7 @@ define <2 x i64> @test_mm_cvtsi64_si128(i64 %a0) nounwind {
define i64 @test_mm_cvttsd_si64(<2 x double> %a0) nounwind {
; X64-LABEL: test_mm_cvttsd_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttsd2si %xmm0, %rax
; X64-NEXT: retq
%res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0)
@@ -54,7 +54,7 @@ declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_loadu_si64(i64* %a0) nounwind {
; X64-LABEL: test_mm_loadu_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
%ld = load i64, i64* %a0, align 1
@@ -65,7 +65,7 @@ define <2 x i64> @test_mm_loadu_si64(i64* %a0) nounwind {
define void @test_mm_stream_si64(i64 *%a0, i64 %a1) {
; X64-LABEL: test_mm_stream_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntiq %rsi, (%rdi)
; X64-NEXT: retq
store i64 %a1, i64* %a0, align 1, !nontemporal !0
diff --git a/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
index d355925ca73..a75a0597325 100644
--- a/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define <2 x i64> @test_mm_add_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_add_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: paddb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_add_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -23,12 +23,12 @@ define <2 x i64> @test_mm_add_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_add_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_add_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: paddw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_add_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -40,12 +40,12 @@ define <2 x i64> @test_mm_add_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_add_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_add_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_add_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -57,12 +57,12 @@ define <2 x i64> @test_mm_add_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_add_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_add_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: paddq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_add_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddq %xmm1, %xmm0
; X64-NEXT: retq
%res = add <2 x i64> %a0, %a1
@@ -71,12 +71,12 @@ define <2 x i64> @test_mm_add_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x double> @test_mm_add_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_add_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_add_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addpd %xmm1, %xmm0
; X64-NEXT: retq
%res = fadd <2 x double> %a0, %a1
@@ -85,12 +85,12 @@ define <2 x double> @test_mm_add_pd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x double> @test_mm_add_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_add_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_add_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addsd %xmm1, %xmm0
; X64-NEXT: retq
%ext0 = extractelement <2 x double> %a0, i32 0
@@ -102,12 +102,12 @@ define <2 x double> @test_mm_add_sd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x i64> @test_mm_adds_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_adds_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: paddsb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_adds_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddsb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -120,12 +120,12 @@ declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
define <2 x i64> @test_mm_adds_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_adds_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: paddsw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_adds_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddsw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -138,12 +138,12 @@ declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_adds_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_adds_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: paddusb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_adds_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddusb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -156,12 +156,12 @@ declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnon
define <2 x i64> @test_mm_adds_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_adds_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: paddusw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_adds_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddusw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -174,12 +174,12 @@ declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnon
define <2 x double> @test_mm_and_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_and_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: andps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_and_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x i32>
@@ -191,12 +191,12 @@ define <2 x double> @test_mm_and_pd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x i64> @test_mm_and_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_and_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: andps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_and_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps %xmm1, %xmm0
; X64-NEXT: retq
%res = and <2 x i64> %a0, %a1
@@ -205,12 +205,12 @@ define <2 x i64> @test_mm_and_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x double> @test_mm_andnot_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_andnot_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: andnps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_andnot_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andnps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x i32>
@@ -223,14 +223,14 @@ define <2 x double> @test_mm_andnot_pd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x i64> @test_mm_andnot_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_andnot_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpeqd %xmm2, %xmm2
; X32-NEXT: pxor %xmm2, %xmm0
; X32-NEXT: pand %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_andnot_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqd %xmm2, %xmm2
; X64-NEXT: pxor %xmm2, %xmm0
; X64-NEXT: pand %xmm1, %xmm0
@@ -242,12 +242,12 @@ define <2 x i64> @test_mm_andnot_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_avg_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_avg_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pavgb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_avg_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pavgb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -264,12 +264,12 @@ define <2 x i64> @test_mm_avg_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_avg_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_avg_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pavgw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_avg_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pavgw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -286,12 +286,12 @@ define <2 x i64> @test_mm_avg_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_bslli_si128(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_bslli_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_bslli_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -302,12 +302,12 @@ define <2 x i64> @test_mm_bslli_si128(<2 x i64> %a0) nounwind {
define <2 x i64> @test_mm_bsrli_si128(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_bsrli_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_bsrli_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -318,11 +318,11 @@ define <2 x i64> @test_mm_bsrli_si128(<2 x i64> %a0) nounwind {
define <4 x float> @test_mm_castpd_ps(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_castpd_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_castpd_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <2 x double> %a0 to <4 x float>
ret <4 x float> %res
@@ -330,11 +330,11 @@ define <4 x float> @test_mm_castpd_ps(<2 x double> %a0) nounwind {
define <2 x i64> @test_mm_castpd_si128(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_castpd_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_castpd_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <2 x double> %a0 to <2 x i64>
ret <2 x i64> %res
@@ -342,11 +342,11 @@ define <2 x i64> @test_mm_castpd_si128(<2 x double> %a0) nounwind {
define <2 x double> @test_mm_castps_pd(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_castps_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_castps_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <4 x float> %a0 to <2 x double>
ret <2 x double> %res
@@ -354,11 +354,11 @@ define <2 x double> @test_mm_castps_pd(<4 x float> %a0) nounwind {
define <2 x i64> @test_mm_castps_si128(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_castps_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_castps_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <4 x float> %a0 to <2 x i64>
ret <2 x i64> %res
@@ -366,11 +366,11 @@ define <2 x i64> @test_mm_castps_si128(<4 x float> %a0) nounwind {
define <2 x double> @test_mm_castsi128_pd(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_castsi128_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_castsi128_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <2 x i64> %a0 to <2 x double>
ret <2 x double> %res
@@ -378,11 +378,11 @@ define <2 x double> @test_mm_castsi128_pd(<2 x i64> %a0) nounwind {
define <4 x float> @test_mm_castsi128_ps(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_castsi128_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_castsi128_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = bitcast <2 x i64> %a0 to <4 x float>
ret <4 x float> %res
@@ -390,13 +390,13 @@ define <4 x float> @test_mm_castsi128_ps(<2 x i64> %a0) nounwind {
define void @test_mm_clflush(i8* %a0) nounwind {
; X32-LABEL: test_mm_clflush:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: clflush (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_clflush:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: clflush (%rdi)
; X64-NEXT: retq
call void @llvm.x86.sse2.clflush(i8* %a0)
@@ -406,12 +406,12 @@ declare void @llvm.x86.sse2.clflush(i8*) nounwind readnone
define <2 x i64> @test_mm_cmpeq_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpeqb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpeq_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -424,12 +424,12 @@ define <2 x i64> @test_mm_cmpeq_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_cmpeq_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpeqw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpeq_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -442,12 +442,12 @@ define <2 x i64> @test_mm_cmpeq_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_cmpeq_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpeqd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpeq_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -460,12 +460,12 @@ define <2 x i64> @test_mm_cmpeq_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x double> @test_mm_cmpeq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpeqpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpeq_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpeqpd %xmm1, %xmm0
; X64-NEXT: retq
%fcmp = fcmp oeq <2 x double> %a0, %a1
@@ -476,12 +476,12 @@ define <2 x double> @test_mm_cmpeq_pd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x double> @test_mm_cmpeq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpeqsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpeq_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpeqsd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
@@ -491,13 +491,13 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw
define <2 x double> @test_mm_cmpge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpge_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmplepd %xmm0, %xmm1
; X32-NEXT: movapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpge_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmplepd %xmm0, %xmm1
; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
@@ -509,13 +509,13 @@ define <2 x double> @test_mm_cmpge_pd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x double> @test_mm_cmpge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpge_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmplesd %xmm0, %xmm1
; X32-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpge_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmplesd %xmm0, %xmm1
; X64-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X64-NEXT: retq
@@ -529,12 +529,12 @@ define <2 x double> @test_mm_cmpge_sd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x i64> @test_mm_cmpgt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmpgt_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpgtb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpgt_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpgtb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -547,12 +547,12 @@ define <2 x i64> @test_mm_cmpgt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_cmpgt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmpgt_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpgtw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpgt_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpgtw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -565,12 +565,12 @@ define <2 x i64> @test_mm_cmpgt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_cmpgt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmpgt_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpgtd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpgt_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpgtd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -583,13 +583,13 @@ define <2 x i64> @test_mm_cmpgt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x double> @test_mm_cmpgt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpgt_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpltpd %xmm0, %xmm1
; X32-NEXT: movapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpgt_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpltpd %xmm0, %xmm1
; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
@@ -601,13 +601,13 @@ define <2 x double> @test_mm_cmpgt_pd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x double> @test_mm_cmpgt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpgt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpltsd %xmm0, %xmm1
; X32-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpgt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpltsd %xmm0, %xmm1
; X64-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X64-NEXT: retq
@@ -621,12 +621,12 @@ define <2 x double> @test_mm_cmpgt_sd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x double> @test_mm_cmple_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmple_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmplepd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmple_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmplepd %xmm1, %xmm0
; X64-NEXT: retq
%fcmp = fcmp ole <2 x double> %a0, %a1
@@ -637,12 +637,12 @@ define <2 x double> @test_mm_cmple_pd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x double> @test_mm_cmple_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmple_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmplesd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmple_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmplesd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 2)
@@ -651,13 +651,13 @@ define <2 x double> @test_mm_cmple_sd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x i64> @test_mm_cmplt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmplt_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpgtb %xmm0, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmplt_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpgtb %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -671,13 +671,13 @@ define <2 x i64> @test_mm_cmplt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_cmplt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmplt_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpgtw %xmm0, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmplt_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpgtw %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -691,13 +691,13 @@ define <2 x i64> @test_mm_cmplt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_cmplt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_cmplt_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpgtd %xmm0, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmplt_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpgtd %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -711,12 +711,12 @@ define <2 x i64> @test_mm_cmplt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x double> @test_mm_cmplt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmplt_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpltpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmplt_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpltpd %xmm1, %xmm0
; X64-NEXT: retq
%fcmp = fcmp olt <2 x double> %a0, %a1
@@ -727,12 +727,12 @@ define <2 x double> @test_mm_cmplt_pd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x double> @test_mm_cmplt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmplt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpltsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmplt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpltsd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 1)
@@ -741,12 +741,12 @@ define <2 x double> @test_mm_cmplt_sd(<2 x double> %a0, <2 x double> %a1) nounwi
define <2 x double> @test_mm_cmpneq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpneq_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpneqpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpneq_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpneqpd %xmm1, %xmm0
; X64-NEXT: retq
%fcmp = fcmp une <2 x double> %a0, %a1
@@ -757,12 +757,12 @@ define <2 x double> @test_mm_cmpneq_pd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpneq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpneq_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpneqsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpneq_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpneqsd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 4)
@@ -771,13 +771,13 @@ define <2 x double> @test_mm_cmpneq_sd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpnge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpnge_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnlepd %xmm0, %xmm1
; X32-NEXT: movapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnge_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnlepd %xmm0, %xmm1
; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
@@ -789,13 +789,13 @@ define <2 x double> @test_mm_cmpnge_pd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpnge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpnge_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnlesd %xmm0, %xmm1
; X32-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnge_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnlesd %xmm0, %xmm1
; X64-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X64-NEXT: retq
@@ -809,13 +809,13 @@ define <2 x double> @test_mm_cmpnge_sd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpngt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpngt_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnltpd %xmm0, %xmm1
; X32-NEXT: movapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpngt_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnltpd %xmm0, %xmm1
; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
@@ -827,13 +827,13 @@ define <2 x double> @test_mm_cmpngt_pd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpngt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpngt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnltsd %xmm0, %xmm1
; X32-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpngt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnltsd %xmm0, %xmm1
; X64-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X64-NEXT: retq
@@ -847,12 +847,12 @@ define <2 x double> @test_mm_cmpngt_sd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpnle_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpnle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnlepd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnlepd %xmm1, %xmm0
; X64-NEXT: retq
%fcmp = fcmp ugt <2 x double> %a0, %a1
@@ -863,12 +863,12 @@ define <2 x double> @test_mm_cmpnle_pd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpnle_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpnle_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnlesd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnle_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnlesd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 6)
@@ -877,12 +877,12 @@ define <2 x double> @test_mm_cmpnle_sd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpnlt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpnlt_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnltpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnlt_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnltpd %xmm1, %xmm0
; X64-NEXT: retq
%fcmp = fcmp uge <2 x double> %a0, %a1
@@ -893,12 +893,12 @@ define <2 x double> @test_mm_cmpnlt_pd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpnlt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpnlt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpnltsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpnlt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpnltsd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 5)
@@ -907,12 +907,12 @@ define <2 x double> @test_mm_cmpnlt_sd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpord_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpordpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpord_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpordpd %xmm1, %xmm0
; X64-NEXT: retq
%fcmp = fcmp ord <2 x double> %a0, %a1
@@ -923,12 +923,12 @@ define <2 x double> @test_mm_cmpord_pd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpord_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpordsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpord_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpordsd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7)
@@ -937,12 +937,12 @@ define <2 x double> @test_mm_cmpord_sd(<2 x double> %a0, <2 x double> %a1) nounw
define <2 x double> @test_mm_cmpunord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpunord_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpunordpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpunord_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpunordpd %xmm1, %xmm0
; X64-NEXT: retq
%fcmp = fcmp uno <2 x double> %a0, %a1
@@ -953,12 +953,12 @@ define <2 x double> @test_mm_cmpunord_pd(<2 x double> %a0, <2 x double> %a1) nou
define <2 x double> @test_mm_cmpunord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_cmpunord_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cmpunordsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpunord_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cmpunordsd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 3)
@@ -967,7 +967,7 @@ define <2 x double> @test_mm_cmpunord_sd(<2 x double> %a0, <2 x double> %a1) nou
define i32 @test_mm_comieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_comieq_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: comisd %xmm1, %xmm0
; X32-NEXT: setnp %al
; X32-NEXT: sete %cl
@@ -976,7 +976,7 @@ define i32 @test_mm_comieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comieq_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: comisd %xmm1, %xmm0
; X64-NEXT: setnp %al
; X64-NEXT: sete %cl
@@ -990,14 +990,14 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_mm_comige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_comige_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: comisd %xmm1, %xmm0
; X32-NEXT: setae %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comige_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: comisd %xmm1, %xmm0
; X64-NEXT: setae %al
@@ -1009,14 +1009,14 @@ declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_mm_comigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_comigt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: comisd %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comigt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: comisd %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -1028,14 +1028,14 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_mm_comile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_comile_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: comisd %xmm0, %xmm1
; X32-NEXT: setae %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comile_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: comisd %xmm0, %xmm1
; X64-NEXT: setae %al
@@ -1047,14 +1047,14 @@ declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_mm_comilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_comilt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: comisd %xmm0, %xmm1
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comilt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: comisd %xmm0, %xmm1
; X64-NEXT: seta %al
@@ -1066,7 +1066,7 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_mm_comineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_comineq_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: comisd %xmm1, %xmm0
; X32-NEXT: setp %al
; X32-NEXT: setne %cl
@@ -1075,7 +1075,7 @@ define i32 @test_mm_comineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_comineq_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: comisd %xmm1, %xmm0
; X64-NEXT: setp %al
; X64-NEXT: setne %cl
@@ -1089,12 +1089,12 @@ declare i32 @llvm.x86.sse2.comineq.sd(<2 x double>, <2 x double>) nounwind readn
define <2 x double> @test_mm_cvtepi32_pd(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_cvtepi32_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtdq2pd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepi32_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtdq2pd %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1105,12 +1105,12 @@ define <2 x double> @test_mm_cvtepi32_pd(<2 x i64> %a0) nounwind {
define <4 x float> @test_mm_cvtepi32_ps(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_cvtepi32_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepi32_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1121,12 +1121,12 @@ declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
define <2 x i64> @test_mm_cvtpd_epi32(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_cvtpd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtpd2dq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtpd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtpd2dq %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
@@ -1137,12 +1137,12 @@ declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
define <4 x float> @test_mm_cvtpd_ps(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_cvtpd_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtpd2ps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtpd_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtpd2ps %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
@@ -1152,12 +1152,12 @@ declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_cvtps_epi32(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvtps_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtps2dq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtps_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtps2dq %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
@@ -1168,12 +1168,12 @@ declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
define <2 x double> @test_mm_cvtps_pd(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvtps_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtps2pd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtps_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtps2pd %xmm0, %xmm0
; X64-NEXT: retq
%ext = shufflevector <4 x float> %a0, <4 x float> %a0, <2 x i32> <i32 0, i32 1>
@@ -1183,7 +1183,7 @@ define <2 x double> @test_mm_cvtps_pd(<4 x float> %a0) nounwind {
define double @test_mm_cvtsd_f64(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_cvtsd_f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
@@ -1195,7 +1195,7 @@ define double @test_mm_cvtsd_f64(<2 x double> %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtsd_f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res = extractelement <2 x double> %a0, i32 0
ret double %res
@@ -1203,12 +1203,12 @@ define double @test_mm_cvtsd_f64(<2 x double> %a0) nounwind {
define i32 @test_mm_cvtsd_si32(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_cvtsd_si32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtsd2si %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtsd_si32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtsd2si %xmm0, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
@@ -1218,12 +1218,12 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
define <4 x float> @test_mm_cvtsd_ss(<4 x float> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_cvtsd_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtsd2ss %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtsd_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtsd2ss %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1)
@@ -1233,13 +1233,13 @@ declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind
define <4 x float> @test_mm_cvtsd_ss_load(<4 x float> %a0, <2 x double>* %p1) {
; X32-LABEL: test_mm_cvtsd_ss_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: cvtsd2ss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtsd_ss_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtsd2ss (%rdi), %xmm0
; X64-NEXT: retq
%a1 = load <2 x double>, <2 x double>* %p1
@@ -1249,12 +1249,12 @@ define <4 x float> @test_mm_cvtsd_ss_load(<4 x float> %a0, <2 x double>* %p1) {
define i32 @test_mm_cvtsi128_si32(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_cvtsi128_si32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movd %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtsi128_si32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %xmm0, %eax
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1264,12 +1264,12 @@ define i32 @test_mm_cvtsi128_si32(<2 x i64> %a0) nounwind {
define <2 x double> @test_mm_cvtsi32_sd(<2 x double> %a0, i32 %a1) nounwind {
; X32-LABEL: test_mm_cvtsi32_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtsi2sdl {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtsi32_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtsi2sdl %edi, %xmm0
; X64-NEXT: retq
%cvt = sitofp i32 %a1 to double
@@ -1279,12 +1279,12 @@ define <2 x double> @test_mm_cvtsi32_sd(<2 x double> %a0, i32 %a1) nounwind {
define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
; X32-LABEL: test_mm_cvtsi32_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtsi32_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: retq
%res0 = insertelement <4 x i32> undef, i32 %a0, i32 0
@@ -1297,12 +1297,12 @@ define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
define <2 x double> @test_mm_cvtss_sd(<2 x double> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cvtss_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvtss2sd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtss_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvtss2sd %xmm1, %xmm0
; X64-NEXT: retq
%ext = extractelement <4 x float> %a1, i32 0
@@ -1313,12 +1313,12 @@ define <2 x double> @test_mm_cvtss_sd(<2 x double> %a0, <4 x float> %a1) nounwin
define <2 x i64> @test_mm_cvttpd_epi32(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_cvttpd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvttpd2dq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvttpd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttpd2dq %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
@@ -1329,12 +1329,12 @@ declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_cvttps_epi32(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm_cvttps_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvttps2dq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvttps_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttps2dq %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0)
@@ -1345,12 +1345,12 @@ declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
define i32 @test_mm_cvttsd_si32(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_cvttsd_si32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: cvttsd2si %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvttsd_si32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: cvttsd2si %xmm0, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
@@ -1360,12 +1360,12 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
define <2 x double> @test_mm_div_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_div_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: divpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_div_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: divpd %xmm1, %xmm0
; X64-NEXT: retq
%res = fdiv <2 x double> %a0, %a1
@@ -1374,12 +1374,12 @@ define <2 x double> @test_mm_div_pd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x double> @test_mm_div_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_div_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: divsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_div_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: divsd %xmm1, %xmm0
; X64-NEXT: retq
%ext0 = extractelement <2 x double> %a0, i32 0
@@ -1391,13 +1391,13 @@ define <2 x double> @test_mm_div_sd(<2 x double> %a0, <2 x double> %a1) nounwind
define i32 @test_mm_extract_epi16(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_extract_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pextrw $1, %xmm0, %eax
; X32-NEXT: movzwl %ax, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_extract_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pextrw $1, %xmm0, %eax
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: retq
@@ -1409,13 +1409,13 @@ define i32 @test_mm_extract_epi16(<2 x i64> %a0) nounwind {
define <2 x i64> @test_mm_insert_epi16(<2 x i64> %a0, i16 %a1) nounwind {
; X32-LABEL: test_mm_insert_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pinsrw $1, %eax, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_insert_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pinsrw $1, %edi, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1426,12 +1426,12 @@ define <2 x i64> @test_mm_insert_epi16(<2 x i64> %a0, i16 %a1) nounwind {
define void @test_mm_lfence() nounwind {
; X32-LABEL: test_mm_lfence:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: lfence
; X32-NEXT: retl
;
; X64-LABEL: test_mm_lfence:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: lfence
; X64-NEXT: retq
call void @llvm.x86.sse2.lfence()
@@ -1441,13 +1441,13 @@ declare void @llvm.x86.sse2.lfence() nounwind readnone
define <2 x double> @test_mm_load_pd(double* %a0) nounwind {
; X32-LABEL: test_mm_load_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_load_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to <2 x double>*
@@ -1457,13 +1457,13 @@ define <2 x double> @test_mm_load_pd(double* %a0) nounwind {
define <2 x double> @test_mm_load_sd(double* %a0) nounwind {
; X32-LABEL: test_mm_load_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_load_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
%ld = load double, double* %a0, align 1
@@ -1474,13 +1474,13 @@ define <2 x double> @test_mm_load_sd(double* %a0) nounwind {
define <2 x i64> @test_mm_load_si128(<2 x i64>* %a0) nounwind {
; X32-LABEL: test_mm_load_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_load_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: retq
%res = load <2 x i64>, <2 x i64>* %a0, align 16
@@ -1489,14 +1489,14 @@ define <2 x i64> @test_mm_load_si128(<2 x i64>* %a0) nounwind {
define <2 x double> @test_mm_load1_pd(double* %a0) nounwind {
; X32-LABEL: test_mm_load1_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_load1_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
@@ -1508,13 +1508,13 @@ define <2 x double> @test_mm_load1_pd(double* %a0) nounwind {
define <2 x double> @test_mm_loadh_pd(<2 x double> %a0, double* %a1) nounwind {
; X32-LABEL: test_mm_loadh_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadh_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X64-NEXT: retq
%ld = load double, double* %a1, align 8
@@ -1524,13 +1524,13 @@ define <2 x double> @test_mm_loadh_pd(<2 x double> %a0, double* %a1) nounwind {
define <2 x i64> @test_mm_loadl_epi64(<2 x i64> %a0, <2 x i64>* %a1) nounwind {
; X32-LABEL: test_mm_loadl_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadl_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
%bc = bitcast <2 x i64>* %a1 to i64*
@@ -1542,13 +1542,13 @@ define <2 x i64> @test_mm_loadl_epi64(<2 x i64> %a0, <2 x i64>* %a1) nounwind {
define <2 x double> @test_mm_loadl_pd(<2 x double> %a0, double* %a1) nounwind {
; X32-LABEL: test_mm_loadl_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadl_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; X64-NEXT: retq
%ld = load double, double* %a1, align 8
@@ -1558,14 +1558,14 @@ define <2 x double> @test_mm_loadl_pd(<2 x double> %a0, double* %a1) nounwind {
define <2 x double> @test_mm_loadr_pd(double* %a0) nounwind {
; X32-LABEL: test_mm_loadr_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movapd (%eax), %xmm0
; X32-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadr_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movapd (%rdi), %xmm0
; X64-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; X64-NEXT: retq
@@ -1577,13 +1577,13 @@ define <2 x double> @test_mm_loadr_pd(double* %a0) nounwind {
define <2 x double> @test_mm_loadu_pd(double* %a0) nounwind {
; X32-LABEL: test_mm_loadu_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movups (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadu_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movups (%rdi), %xmm0
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to <2 x double>*
@@ -1593,13 +1593,13 @@ define <2 x double> @test_mm_loadu_pd(double* %a0) nounwind {
define <2 x i64> @test_mm_loadu_si128(<2 x i64>* %a0) nounwind {
; X32-LABEL: test_mm_loadu_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movups (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loadu_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movups (%rdi), %xmm0
; X64-NEXT: retq
%res = load <2 x i64>, <2 x i64>* %a0, align 1
@@ -1608,12 +1608,12 @@ define <2 x i64> @test_mm_loadu_si128(<2 x i64>* %a0) nounwind {
define <2 x i64> @test_mm_madd_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_madd_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmaddwd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_madd_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmaddwd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1626,7 +1626,7 @@ declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnon
define void @test_mm_maskmoveu_si128(<2 x i64> %a0, <2 x i64> %a1, i8* %a2) nounwind {
; X32-LABEL: test_mm_maskmoveu_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: maskmovdqu %xmm1, %xmm0
@@ -1634,7 +1634,7 @@ define void @test_mm_maskmoveu_si128(<2 x i64> %a0, <2 x i64> %a1, i8* %a2) noun
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskmoveu_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: maskmovdqu %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1646,12 +1646,12 @@ declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*) nounwind
define <2 x i64> @test_mm_max_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_max_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmaxsw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmaxsw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1664,12 +1664,12 @@ define <2 x i64> @test_mm_max_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_max_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_max_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmaxub %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmaxub %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1682,12 +1682,12 @@ define <2 x i64> @test_mm_max_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x double> @test_mm_max_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_max_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: maxpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: maxpd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
@@ -1697,12 +1697,12 @@ declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_mm_max_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_max_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: maxsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: maxsd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
@@ -1712,12 +1712,12 @@ declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind
define void @test_mm_mfence() nounwind {
; X32-LABEL: test_mm_mfence:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: mfence
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mfence:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: mfence
; X64-NEXT: retq
call void @llvm.x86.sse2.mfence()
@@ -1727,12 +1727,12 @@ declare void @llvm.x86.sse2.mfence() nounwind readnone
define <2 x i64> @test_mm_min_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_min_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pminsw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pminsw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1745,12 +1745,12 @@ define <2 x i64> @test_mm_min_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_min_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_min_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pminub %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pminub %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1763,12 +1763,12 @@ define <2 x i64> @test_mm_min_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x double> @test_mm_min_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_min_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: minpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: minpd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
@@ -1778,12 +1778,12 @@ declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_mm_min_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_min_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: minsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
@@ -1793,12 +1793,12 @@ declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind
define <2 x i64> @test_mm_move_epi64(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_move_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_move_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
@@ -1807,12 +1807,12 @@ define <2 x i64> @test_mm_move_epi64(<2 x i64> %a0) nounwind {
define <2 x double> @test_mm_move_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_move_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_move_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X64-NEXT: retq
%ext0 = extractelement <2 x double> %a1, i32 0
@@ -1824,12 +1824,12 @@ define <2 x double> @test_mm_move_sd(<2 x double> %a0, <2 x double> %a1) nounwin
define i32 @test_mm_movemask_epi8(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_movemask_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovmskb %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movemask_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovmskb %xmm0, %eax
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1840,12 +1840,12 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
define i32 @test_mm_movemask_pd(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_movemask_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movmskpd %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movemask_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movmskpd %xmm0, %eax
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
@@ -1855,12 +1855,12 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_mul_epu32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_mul_epu32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmuludq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mul_epu32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmuludq %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1872,12 +1872,12 @@ declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x double> @test_mm_mul_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_mul_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: mulpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mul_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: mulpd %xmm1, %xmm0
; X64-NEXT: retq
%res = fmul <2 x double> %a0, %a1
@@ -1886,12 +1886,12 @@ define <2 x double> @test_mm_mul_pd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x double> @test_mm_mul_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_mul_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: mulsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mul_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: mulsd %xmm1, %xmm0
; X64-NEXT: retq
%ext0 = extractelement <2 x double> %a0, i32 0
@@ -1903,12 +1903,12 @@ define <2 x double> @test_mm_mul_sd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x i64> @test_mm_mulhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_mulhi_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmulhw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mulhi_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulhw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1921,12 +1921,12 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_mulhi_epu16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_mulhi_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmulhuw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mulhi_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulhuw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1939,12 +1939,12 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnon
define <2 x i64> @test_mm_mullo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_mullo_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmullw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mullo_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1956,12 +1956,12 @@ define <2 x i64> @test_mm_mullo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x double> @test_mm_or_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_or_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: orps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_or_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x i32>
@@ -1973,12 +1973,12 @@ define <2 x double> @test_mm_or_pd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x i64> @test_mm_or_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_or_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: orps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_or_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: retq
%res = or <2 x i64> %a0, %a1
@@ -1987,12 +1987,12 @@ define <2 x i64> @test_mm_or_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_packs_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_packs_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: packsswb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_packs_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: packsswb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2005,12 +2005,12 @@ declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <2 x i64> @test_mm_packs_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_packs_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: packssdw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_packs_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: packssdw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2023,12 +2023,12 @@ declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind rea
define <2 x i64> @test_mm_packus_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_packus_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: packuswb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_packus_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: packuswb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2041,12 +2041,12 @@ declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind rea
define void @test_mm_pause() nounwind {
; X32-LABEL: test_mm_pause:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pause
; X32-NEXT: retl
;
; X64-LABEL: test_mm_pause:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pause
; X64-NEXT: retq
call void @llvm.x86.sse2.pause()
@@ -2056,12 +2056,12 @@ declare void @llvm.x86.sse2.pause() nounwind readnone
define <2 x i64> @test_mm_sad_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_sad_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psadbw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sad_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psadbw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -2073,7 +2073,7 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
define <2 x i64> @test_mm_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
; X32-LABEL: test_mm_set_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm0
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
@@ -2124,7 +2124,7 @@ define <2 x i64> @test_mm_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: movd %eax, %xmm0
; X64-NEXT: movzbl %sil, %eax
@@ -2195,7 +2195,7 @@ define <2 x i64> @test_mm_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a
define <2 x i64> @test_mm_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
; X32-LABEL: test_mm_set_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm1
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
@@ -2222,7 +2222,7 @@ define <2 x i64> @test_mm_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4,
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d
; X64-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; X64-NEXT: movd %edi, %xmm0
@@ -2255,7 +2255,7 @@ define <2 x i64> @test_mm_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4,
define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
; X32-LABEL: test_mm_set_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2266,7 +2266,7 @@ define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: movd %esi, %xmm1
; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2287,7 +2287,7 @@ define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind
define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
; X32-LABEL: test_mm_set_epi64x:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2298,7 +2298,7 @@ define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_epi64x:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm1
; X64-NEXT: movq %rsi, %xmm0
; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2310,14 +2310,14 @@ define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
; X32-LABEL: test_mm_set_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -2328,13 +2328,13 @@ define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
define <2 x double> @test_mm_set_pd1(double %a0) nounwind {
; X32-LABEL: test_mm_set_pd1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_pd1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%res0 = insertelement <2 x double> undef, double %a0, i32 0
@@ -2344,13 +2344,13 @@ define <2 x double> @test_mm_set_pd1(double %a0) nounwind {
define <2 x double> @test_mm_set_sd(double %a0) nounwind {
; X32-LABEL: test_mm_set_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq
%res0 = insertelement <2 x double> undef, double %a0, i32 0
@@ -2360,7 +2360,7 @@ define <2 x double> @test_mm_set_sd(double %a0) nounwind {
define <2 x i64> @test_mm_set1_epi8(i8 %a0) nounwind {
; X32-LABEL: test_mm_set1_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm0
; X32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2369,7 +2369,7 @@ define <2 x i64> @test_mm_set1_epi8(i8 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set1_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: movd %eax, %xmm0
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2398,7 +2398,7 @@ define <2 x i64> @test_mm_set1_epi8(i8 %a0) nounwind {
define <2 x i64> @test_mm_set1_epi16(i16 %a0) nounwind {
; X32-LABEL: test_mm_set1_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm0
; X32-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -2406,7 +2406,7 @@ define <2 x i64> @test_mm_set1_epi16(i16 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set1_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2425,13 +2425,13 @@ define <2 x i64> @test_mm_set1_epi16(i16 %a0) nounwind {
define <2 x i64> @test_mm_set1_epi32(i32 %a0) nounwind {
; X32-LABEL: test_mm_set1_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set1_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: retq
@@ -2447,7 +2447,7 @@ define <2 x i64> @test_mm_set1_epi32(i32 %a0) nounwind {
define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
; X32-LABEL: test_mm_set1_epi64x:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -2455,7 +2455,7 @@ define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set1_epi64x:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; X64-NEXT: retq
@@ -2466,13 +2466,13 @@ define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
define <2 x double> @test_mm_set1_pd(double %a0) nounwind {
; X32-LABEL: test_mm_set1_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_set1_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%res0 = insertelement <2 x double> undef, double %a0, i32 0
@@ -2482,7 +2482,7 @@ define <2 x double> @test_mm_set1_pd(double %a0) nounwind {
define <2 x i64> @test_mm_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
; X32-LABEL: test_mm_setr_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm0
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
@@ -2533,7 +2533,7 @@ define <2 x i64> @test_mm_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setr_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; X64-NEXT: movd %eax, %xmm0
; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
@@ -2604,7 +2604,7 @@ define <2 x i64> @test_mm_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %
define <2 x i64> @test_mm_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
; X32-LABEL: test_mm_setr_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm1
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
@@ -2631,7 +2631,7 @@ define <2 x i64> @test_mm_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setr_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; X64-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d
; X64-NEXT: movd %eax, %xmm0
@@ -2664,7 +2664,7 @@ define <2 x i64> @test_mm_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4
define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
; X32-LABEL: test_mm_setr_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2675,7 +2675,7 @@ define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwin
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setr_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %ecx, %xmm0
; X64-NEXT: movd %edx, %xmm1
; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2696,7 +2696,7 @@ define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwin
define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
; X32-LABEL: test_mm_setr_epi64x:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2707,7 +2707,7 @@ define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setr_epi64x:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rsi, %xmm1
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2719,14 +2719,14 @@ define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
define <2 x double> @test_mm_setr_pd(double %a0, double %a1) nounwind {
; X32-LABEL: test_mm_setr_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setr_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: retq
%res0 = insertelement <2 x double> undef, double %a0, i32 0
@@ -2736,12 +2736,12 @@ define <2 x double> @test_mm_setr_pd(double %a0, double %a1) nounwind {
define <2 x double> @test_mm_setzero_pd() {
; X32-LABEL: test_mm_setzero_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setzero_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: retq
ret <2 x double> zeroinitializer
@@ -2749,12 +2749,12 @@ define <2 x double> @test_mm_setzero_pd() {
define <2 x i64> @test_mm_setzero_si128() {
; X32-LABEL: test_mm_setzero_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setzero_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: retq
ret <2 x i64> zeroinitializer
@@ -2762,12 +2762,12 @@ define <2 x i64> @test_mm_setzero_si128() {
define <2 x i64> @test_mm_shuffle_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_shuffle_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shuffle_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2778,12 +2778,12 @@ define <2 x i64> @test_mm_shuffle_epi32(<2 x i64> %a0) {
define <2 x double> @test_mm_shuffle_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_shuffle_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shuffle_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
@@ -2792,12 +2792,12 @@ define <2 x double> @test_mm_shuffle_pd(<2 x double> %a0, <2 x double> %a1) {
define <2 x i64> @test_mm_shufflehi_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_shufflehi_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shufflehi_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2808,12 +2808,12 @@ define <2 x i64> @test_mm_shufflehi_epi16(<2 x i64> %a0) {
define <2 x i64> @test_mm_shufflelo_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_shufflelo_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shufflelo_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2824,12 +2824,12 @@ define <2 x i64> @test_mm_shufflelo_epi16(<2 x i64> %a0) {
define <2 x i64> @test_mm_sll_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sll_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psllw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sll_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2842,12 +2842,12 @@ declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_sll_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sll_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pslld %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sll_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pslld %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2860,12 +2860,12 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_mm_sll_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sll_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psllq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sll_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllq %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
@@ -2875,12 +2875,12 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_slli_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psllw $1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_slli_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllw $1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2892,12 +2892,12 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_slli_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pslld $1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_slli_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pslld $1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2909,12 +2909,12 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_slli_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psllq $1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_slli_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllq $1, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1)
@@ -2924,12 +2924,12 @@ declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_mm_slli_si128(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_slli_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_slli_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -2940,12 +2940,12 @@ define <2 x i64> @test_mm_slli_si128(<2 x i64> %a0) nounwind {
define <2 x double> @test_mm_sqrt_pd(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm_sqrt_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: sqrtpd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sqrt_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sqrtpd %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0)
@@ -2955,13 +2955,13 @@ declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
define <2 x double> @test_mm_sqrt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_sqrt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: sqrtsd %xmm0, %xmm1
; X32-NEXT: movapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sqrt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: sqrtsd %xmm0, %xmm1
; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
@@ -2976,12 +2976,12 @@ declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_sra_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sra_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psraw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sra_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psraw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2994,12 +2994,12 @@ declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_sra_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sra_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrad %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sra_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrad %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3012,12 +3012,12 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_mm_srai_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_srai_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psraw $1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srai_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psraw $1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3029,12 +3029,12 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
define <2 x i64> @test_mm_srai_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_srai_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrad $1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srai_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrad $1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3046,12 +3046,12 @@ declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_mm_srl_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_srl_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srl_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3064,12 +3064,12 @@ declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_srl_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_srl_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrld %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srl_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrld %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3082,12 +3082,12 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_mm_srl_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_srl_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srl_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlq %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
@@ -3097,12 +3097,12 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_mm_srli_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_srli_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlw $1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srli_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlw $1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3114,12 +3114,12 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
define <2 x i64> @test_mm_srli_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_srli_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrld $1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srli_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrld $1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3131,12 +3131,12 @@ declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_mm_srli_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_srli_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlq $1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srli_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlq $1, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 1)
@@ -3146,12 +3146,12 @@ declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_mm_srli_si128(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm_srli_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srli_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3162,13 +3162,13 @@ define <2 x i64> @test_mm_srli_si128(<2 x i64> %a0) nounwind {
define void @test_mm_store_pd(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_store_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to <2 x double>*
@@ -3178,14 +3178,14 @@ define void @test_mm_store_pd(double *%a0, <2 x double> %a1) {
define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_store_pd1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store_pd1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -3197,13 +3197,13 @@ define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) {
define void @test_mm_store_sd(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_store_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd %xmm0, (%rdi)
; X64-NEXT: retq
%ext = extractelement <2 x double> %a1, i32 0
@@ -3213,13 +3213,13 @@ define void @test_mm_store_sd(double *%a0, <2 x double> %a1) {
define void @test_mm_store_si128(<2 x i64> *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_store_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
store <2 x i64> %a1, <2 x i64>* %a0, align 16
@@ -3228,14 +3228,14 @@ define void @test_mm_store_si128(<2 x i64> *%a0, <2 x i64> %a1) {
define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_store1_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_store1_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -3247,14 +3247,14 @@ define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) {
define void @test_mm_storeh_sd(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_storeh_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X32-NEXT: movsd %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storeh_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-NEXT: movsd %xmm0, (%rdi)
; X64-NEXT: retq
@@ -3265,13 +3265,13 @@ define void @test_mm_storeh_sd(double *%a0, <2 x double> %a1) {
define void @test_mm_storel_epi64(<2 x i64> *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_storel_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movlps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storel_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: movq %rax, (%rdi)
; X64-NEXT: retq
@@ -3283,13 +3283,13 @@ define void @test_mm_storel_epi64(<2 x i64> *%a0, <2 x i64> %a1) {
define void @test_mm_storel_sd(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_storel_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storel_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd %xmm0, (%rdi)
; X64-NEXT: retq
%ext = extractelement <2 x double> %a1, i32 0
@@ -3299,14 +3299,14 @@ define void @test_mm_storel_sd(double *%a0, <2 x double> %a1) {
define void @test_mm_storer_pd(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_storer_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; X32-NEXT: movapd %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storer_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; X64-NEXT: movapd %xmm0, (%rdi)
; X64-NEXT: retq
@@ -3318,13 +3318,13 @@ define void @test_mm_storer_pd(double *%a0, <2 x double> %a1) {
define void @test_mm_storeu_pd(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_storeu_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movups %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storeu_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movups %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to <2 x double>*
@@ -3334,13 +3334,13 @@ define void @test_mm_storeu_pd(double *%a0, <2 x double> %a1) {
define void @test_mm_storeu_si128(<2 x i64> *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_storeu_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movups %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_storeu_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movups %xmm0, (%rdi)
; X64-NEXT: retq
store <2 x i64> %a1, <2 x i64>* %a0, align 1
@@ -3349,13 +3349,13 @@ define void @test_mm_storeu_si128(<2 x i64> *%a0, <2 x i64> %a1) {
define void @test_mm_stream_pd(double *%a0, <2 x double> %a1) {
; X32-LABEL: test_mm_stream_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movntps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_stream_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntps %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast double* %a0 to <2 x double>*
@@ -3365,14 +3365,14 @@ define void @test_mm_stream_pd(double *%a0, <2 x double> %a1) {
define void @test_mm_stream_si32(i32 *%a0, i32 %a1) {
; X32-LABEL: test_mm_stream_si32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movntil %eax, (%ecx)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_stream_si32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntil %esi, (%rdi)
; X64-NEXT: retq
store i32 %a1, i32* %a0, align 1, !nontemporal !0
@@ -3381,13 +3381,13 @@ define void @test_mm_stream_si32(i32 *%a0, i32 %a1) {
define void @test_mm_stream_si128(<2 x i64> *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_stream_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movntps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_stream_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntps %xmm0, (%rdi)
; X64-NEXT: retq
store <2 x i64> %a1, <2 x i64>* %a0, align 16, !nontemporal !0
@@ -3396,12 +3396,12 @@ define void @test_mm_stream_si128(<2 x i64> *%a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_sub_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_sub_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psubb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sub_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psubb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3413,12 +3413,12 @@ define <2 x i64> @test_mm_sub_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_sub_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_sub_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psubw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sub_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psubw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3430,12 +3430,12 @@ define <2 x i64> @test_mm_sub_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_sub_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_sub_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psubd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sub_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psubd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3447,12 +3447,12 @@ define <2 x i64> @test_mm_sub_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_mm_sub_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_sub_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psubq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sub_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psubq %xmm1, %xmm0
; X64-NEXT: retq
%res = sub <2 x i64> %a0, %a1
@@ -3461,12 +3461,12 @@ define <2 x i64> @test_mm_sub_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x double> @test_mm_sub_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_sub_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sub_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subpd %xmm1, %xmm0
; X64-NEXT: retq
%res = fsub <2 x double> %a0, %a1
@@ -3475,12 +3475,12 @@ define <2 x double> @test_mm_sub_pd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x double> @test_mm_sub_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_sub_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: subsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sub_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: subsd %xmm1, %xmm0
; X64-NEXT: retq
%ext0 = extractelement <2 x double> %a0, i32 0
@@ -3492,12 +3492,12 @@ define <2 x double> @test_mm_sub_sd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x i64> @test_mm_subs_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_subs_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psubsb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_subs_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psubsb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3510,12 +3510,12 @@ declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
define <2 x i64> @test_mm_subs_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_subs_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psubsw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_subs_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psubsw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3528,12 +3528,12 @@ declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_subs_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_subs_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psubusb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_subs_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psubusb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3546,12 +3546,12 @@ declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnon
define <2 x i64> @test_mm_subs_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_subs_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psubusw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_subs_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psubusw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3564,7 +3564,7 @@ declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnon
define i32 @test_mm_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_ucomieq_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: ucomisd %xmm1, %xmm0
; X32-NEXT: setnp %al
; X32-NEXT: sete %cl
@@ -3573,7 +3573,7 @@ define i32 @test_mm_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomieq_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: ucomisd %xmm1, %xmm0
; X64-NEXT: setnp %al
; X64-NEXT: sete %cl
@@ -3587,14 +3587,14 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_mm_ucomige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_ucomige_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ucomisd %xmm1, %xmm0
; X32-NEXT: setae %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomige_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ucomisd %xmm1, %xmm0
; X64-NEXT: setae %al
@@ -3606,14 +3606,14 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_mm_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_ucomigt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ucomisd %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomigt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ucomisd %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -3625,14 +3625,14 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_mm_ucomile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_ucomile_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ucomisd %xmm0, %xmm1
; X32-NEXT: setae %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomile_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ucomisd %xmm0, %xmm1
; X64-NEXT: setae %al
@@ -3644,14 +3644,14 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_mm_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_ucomilt_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ucomisd %xmm0, %xmm1
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomilt_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ucomisd %xmm0, %xmm1
; X64-NEXT: seta %al
@@ -3663,7 +3663,7 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_mm_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_ucomineq_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: ucomisd %xmm1, %xmm0
; X32-NEXT: setp %al
; X32-NEXT: setne %cl
@@ -3672,7 +3672,7 @@ define i32 @test_mm_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ucomineq_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: ucomisd %xmm1, %xmm0
; X64-NEXT: setp %al
; X64-NEXT: setne %cl
@@ -3686,34 +3686,34 @@ declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind read
define <2 x double> @test_mm_undefined_pd() {
; X32-LABEL: test_mm_undefined_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_undefined_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
ret <2 x double> undef
}
define <2 x i64> @test_mm_undefined_si128() {
; X32-LABEL: test_mm_undefined_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: test_mm_undefined_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
ret <2 x i64> undef
}
define <2 x i64> @test_mm_unpackhi_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_unpackhi_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpackhi_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3725,12 +3725,12 @@ define <2 x i64> @test_mm_unpackhi_epi8(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_unpackhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_unpackhi_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpackhi_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3742,12 +3742,12 @@ define <2 x i64> @test_mm_unpackhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_unpackhi_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_unpackhi_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpackhi_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3759,12 +3759,12 @@ define <2 x i64> @test_mm_unpackhi_epi32(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_unpackhi_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_unpackhi_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpackhi_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
@@ -3773,12 +3773,12 @@ define <2 x i64> @test_mm_unpackhi_epi64(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x double> @test_mm_unpackhi_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_unpackhi_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpackhi_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
@@ -3787,12 +3787,12 @@ define <2 x double> @test_mm_unpackhi_pd(<2 x double> %a0, <2 x double> %a1) {
define <2 x i64> @test_mm_unpacklo_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_unpacklo_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpacklo_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3804,12 +3804,12 @@ define <2 x i64> @test_mm_unpacklo_epi8(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_unpacklo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_unpacklo_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpacklo_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3821,12 +3821,12 @@ define <2 x i64> @test_mm_unpacklo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_unpacklo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_unpacklo_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpacklo_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3838,12 +3838,12 @@ define <2 x i64> @test_mm_unpacklo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_unpacklo_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_unpacklo_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpacklo_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
@@ -3852,12 +3852,12 @@ define <2 x i64> @test_mm_unpacklo_epi64(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x double> @test_mm_unpacklo_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_unpacklo_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_unpacklo_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
@@ -3866,12 +3866,12 @@ define <2 x double> @test_mm_unpacklo_pd(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mm_xor_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_xor_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_xor_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x i32>
@@ -3883,12 +3883,12 @@ define <2 x double> @test_mm_xor_pd(<2 x double> %a0, <2 x double> %a1) nounwind
define <2 x i64> @test_mm_xor_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_xor_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_xor_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm0
; X64-NEXT: retq
%res = xor <2 x i64> %a0, %a1
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
index d3e5da2994d..3571e2968bf 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
@@ -3,7 +3,7 @@
define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_sse2_psll_dq_bs:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -14,7 +14,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psrl_dq_bs(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_sse2_psrl_dq_bs:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -24,7 +24,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_sse2_psll_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
@@ -35,7 +35,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_sse2_psrl_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
@@ -46,7 +46,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_sse2_cvtdq2pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cvtdq2pd %xmm0, %xmm0
; CHECK-NEXT: retl
%res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1]
@@ -57,7 +57,7 @@ declare <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32>) nounwind readnone
define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse2_cvtps2pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cvtps2pd %xmm0, %xmm0
; CHECK-NEXT: retl
%res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1]
@@ -68,7 +68,7 @@ declare <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float>) nounwind readnone
define void @test_x86_sse2_storel_dq(i8* %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_sse2_storel_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movlps %xmm0, (%eax)
; CHECK-NEXT: retl
@@ -81,7 +81,7 @@ declare void @llvm.x86.sse2.storel.dq(i8*, <4 x i32>) nounwind
define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
; add operation forces the execution domain.
; CHECK-LABEL: test_x86_sse2_storeu_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: psubb %xmm1, %xmm0
@@ -97,7 +97,7 @@ declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; fadd operation forces the execution domain.
; CHECK-LABEL: test_x86_sse2_storeu_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
@@ -112,7 +112,7 @@ declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
define <4 x i32> @test_x86_sse2_pshuf_d(<4 x i32> %a) {
; CHECK-LABEL: test_x86_sse2_pshuf_d:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT: retl
entry:
@@ -123,7 +123,7 @@ declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8) nounwind readnone
define <8 x i16> @test_x86_sse2_pshufl_w(<8 x i16> %a) {
; CHECK-LABEL: test_x86_sse2_pshufl_w:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; CHECK-NEXT: retl
entry:
@@ -134,7 +134,7 @@ declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8) nounwind readnone
define <8 x i16> @test_x86_sse2_pshufh_w(<8 x i16> %a) {
; CHECK-LABEL: test_x86_sse2_pshufh_w:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; CHECK-NEXT: retl
entry:
@@ -145,7 +145,7 @@ declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8) nounwind readnone
define <16 x i8> @max_epu8(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: max_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmaxub %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
@@ -155,7 +155,7 @@ declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
define <16 x i8> @min_epu8(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: min_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pminub %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
@@ -165,7 +165,7 @@ declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @max_epi16(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: max_epi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmaxsw %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
@@ -175,7 +175,7 @@ declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @min_epi16(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: min_epi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pminsw %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
@@ -185,21 +185,21 @@ declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x double> @test_x86_sse2_add_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_add_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x58,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_add_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x58,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_add_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x58,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
; CHECK-LABEL: test_x86_sse2_add_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -210,21 +210,21 @@ declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_sub_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_sub_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: subsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5c,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_sub_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vsubsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5c,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_sub_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x5c,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
; CHECK-LABEL: test_x86_sse2_sub_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: subsd %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <2 x double> @llvm.x86.sse2.sub.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -235,21 +235,21 @@ declare <2 x double> @llvm.x86.sse2.sub.sd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_mul_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_mul_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: mulsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x59,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_mul_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmulsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x59,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_mul_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x59,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
; CHECK-LABEL: test_x86_sse2_mul_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: mulsd %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <2 x double> @llvm.x86.sse2.mul.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -260,21 +260,21 @@ declare <2 x double> @llvm.x86.sse2.mul.sd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_div_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_div_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: divsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5e,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_div_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vdivsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5e,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_div_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x5e,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
; CHECK-LABEL: test_x86_sse2_div_sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: divsd %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <2 x double> @llvm.x86.sse2.div.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -284,7 +284,7 @@ declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind
define <16 x i8> @mm_avg_epu8(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: mm_avg_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pavgb %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -294,7 +294,7 @@ declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @mm_avg_epu16(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: mm_avg_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pavgw %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86.ll b/test/CodeGen/X86/sse2-intrinsics-x86.ll
index 72c68c56638..e3c02b625fb 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86.ll
@@ -5,12 +5,12 @@
define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_cmp_pd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpordpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xc2,0xc1,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cmp_pd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vcmpordpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc2,0xc1,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
@@ -21,12 +21,12 @@ declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounw
define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_cmp_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cmpordsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0xc2,0xc1,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cmp_sd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vcmpordsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xc2,0xc1,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
@@ -37,7 +37,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw
define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_comieq_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
; SSE-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; SSE-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -46,7 +46,7 @@ define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_comieq_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX2-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; AVX2-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -55,7 +55,7 @@ define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_comieq_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
; SKX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; SKX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -70,21 +70,21 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_comige_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
; SSE-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_comige_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX2-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_comige_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
; SKX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -97,21 +97,21 @@ declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_comigt_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_comigt_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX2-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_comigt_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
; SKX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -124,21 +124,21 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_comile_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: comisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2f,0xc8]
; SSE-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_comile_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
; AVX2-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_comile_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
; SKX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -151,21 +151,21 @@ declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_comilt_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: comisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2f,0xc8]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_comilt_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
; AVX2-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_comilt_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
; SKX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -178,7 +178,7 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_comineq_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
; SSE-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; SSE-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -187,7 +187,7 @@ define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_comineq_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX2-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; AVX2-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -196,7 +196,7 @@ define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_comineq_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
; SKX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; SKX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -211,17 +211,17 @@ declare i32 @llvm.x86.sse2.comineq.sd(<2 x double>, <2 x double>) nounwind readn
define <4 x float> @test_x86_sse2_cvtdq2ps(<4 x i32> %a0) {
; SSE-LABEL: test_x86_sse2_cvtdq2ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 ## encoding: [0x0f,0x5b,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtdq2ps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtdq2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5b,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtdq2ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtdq2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5b,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %a0) ; <<4 x float>> [#uses=1]
@@ -232,17 +232,17 @@ declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
define <4 x i32> @test_x86_sse2_cvtpd2dq(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_cvtpd2dq:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtpd2dq %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0xe6,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtpd2dq:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtpd2dq:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
@@ -253,17 +253,17 @@ declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_cvtpd_epi32_zext(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_cvtpd_epi32_zext:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtpd2dq %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0xe6,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_mm_cvtpd_epi32_zext:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_mm_cvtpd_epi32_zext:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%cvt = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
@@ -275,19 +275,19 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext(<2 x double> %a0) nounwind {
define <2 x i64> @test_mm_cvtpd_epi32_zext_load(<2 x double>* %p0) nounwind {
; SSE-LABEL: test_mm_cvtpd_epi32_zext_load:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: cvtpd2dq (%eax), %xmm0 ## encoding: [0xf2,0x0f,0xe6,0x00]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_mm_cvtpd_epi32_zext_load:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; AVX2-NEXT: vcvtpd2dqx (%eax), %xmm0 ## encoding: [0xc5,0xfb,0xe6,0x00]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_mm_cvtpd_epi32_zext_load:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SKX-NEXT: vcvtpd2dqx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0x00]
; SKX-NEXT: retl ## encoding: [0xc3]
@@ -301,17 +301,17 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext_load(<2 x double>* %p0) nounwind {
define <4 x float> @test_x86_sse2_cvtpd2ps(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_cvtpd2ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtpd2ps %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5a,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtpd2ps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtpd2ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0) ; <<4 x float>> [#uses=1]
@@ -321,17 +321,17 @@ declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone
define <4 x float> @test_x86_sse2_cvtpd2ps_zext(<2 x double> %a0) nounwind {
; SSE-LABEL: test_x86_sse2_cvtpd2ps_zext:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtpd2ps %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5a,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtpd2ps_zext:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtpd2ps_zext:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%cvt = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
@@ -341,19 +341,19 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext(<2 x double> %a0) nounwind {
define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(<2 x double>* %p0) nounwind {
; SSE-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: cvtpd2ps (%eax), %xmm0 ## encoding: [0x66,0x0f,0x5a,0x00]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; AVX2-NEXT: vcvtpd2psx (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x5a,0x00]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SKX-NEXT: vcvtpd2psx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0x00]
; SKX-NEXT: retl ## encoding: [0xc3]
@@ -365,12 +365,12 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(<2 x double>* %p0) nounwind
define <4 x i32> @test_x86_sse2_cvtps2dq(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse2_cvtps2dq:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtps2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5b,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cvtps2dq:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vcvtps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5b,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
@@ -381,17 +381,17 @@ declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
define i32 @test_x86_sse2_cvtsd2si(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_cvtsd2si:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %eax ## encoding: [0xf2,0x0f,0x2d,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtsd2si:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2d,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtsd2si:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2d,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0) ; <i32> [#uses=1]
@@ -402,12 +402,12 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_cvtsd2ss:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtsd2ss %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5a,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cvtsd2ss:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vcvtsd2ss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
@@ -418,13 +418,13 @@ declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind
define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, <2 x double>* %p1) {
; SSE-LABEL: test_x86_sse2_cvtsd2ss_load:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: cvtsd2ss (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x00]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cvtsd2ss_load:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; VCHECK-NEXT: vcvtsd2ss (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x00]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -436,13 +436,13 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, <2 x double>* %
define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, <2 x double>* %p1) optsize {
; SSE-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: cvtsd2ss (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x00]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; VCHECK-NEXT: vcvtsd2ss (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x00]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -454,17 +454,17 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, <2 x do
define <2 x double> @test_x86_sse2_cvtsi2sd(<2 x double> %a0, i32 %a1) {
; SSE-LABEL: test_x86_sse2_cvtsi2sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtsi2sdl {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xf2,0x0f,0x2a,0x44,0x24,0x04]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtsi2sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtsi2sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %a0, i32 %a1) ; <<2 x double>> [#uses=1]
@@ -475,12 +475,12 @@ declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnon
define <2 x double> @test_x86_sse2_cvtss2sd(<2 x double> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse2_cvtss2sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtss2sd %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5a,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cvtss2sd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
@@ -491,13 +491,13 @@ declare <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double>, <4 x float>) nounwind
define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, <4 x float>* %p1) {
; SSE-LABEL: test_x86_sse2_cvtss2sd_load:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: cvtss2sd (%eax), %xmm0 ## encoding: [0xf3,0x0f,0x5a,0x00]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cvtss2sd_load:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; VCHECK-NEXT: vcvtss2sd (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0x00]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -509,13 +509,13 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, <4 x float>*
define <2 x double> @test_x86_sse2_cvtss2sd_load_optsize(<2 x double> %a0, <4 x float>* %p1) optsize {
; SSE-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: cvtss2sd (%eax), %xmm0 ## encoding: [0xf3,0x0f,0x5a,0x00]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; VCHECK-NEXT: vcvtss2sd (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0x00]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -527,17 +527,17 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load_optsize(<2 x double> %a0, <4 x
define <4 x i32> @test_x86_sse2_cvttpd2dq(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_cvttpd2dq:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xe6,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvttpd2dq:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvttpd2dq:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
@@ -548,17 +548,17 @@ declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_cvttpd_epi32_zext(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_cvttpd_epi32_zext:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xe6,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_mm_cvttpd_epi32_zext:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_mm_cvttpd_epi32_zext:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%cvt = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
@@ -570,19 +570,19 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext(<2 x double> %a0) nounwind {
define <2 x i64> @test_mm_cvttpd_epi32_zext_load(<2 x double>* %p0) nounwind {
; SSE-LABEL: test_mm_cvttpd_epi32_zext_load:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: cvttpd2dq (%eax), %xmm0 ## encoding: [0x66,0x0f,0xe6,0x00]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_mm_cvttpd_epi32_zext_load:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; AVX2-NEXT: vcvttpd2dqx (%eax), %xmm0 ## encoding: [0xc5,0xf9,0xe6,0x00]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_mm_cvttpd_epi32_zext_load:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SKX-NEXT: vcvttpd2dqx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0x00]
; SKX-NEXT: retl ## encoding: [0xc3]
@@ -596,17 +596,17 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext_load(<2 x double>* %p0) nounwind {
define <4 x i32> @test_x86_sse2_cvttps2dq(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse2_cvttps2dq:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x5b,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvttps2dq:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5b,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvttps2dq:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvttps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5b,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
@@ -617,17 +617,17 @@ declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
define i32 @test_x86_sse2_cvttsd2si(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_cvttsd2si:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %eax ## encoding: [0xf2,0x0f,0x2c,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvttsd2si:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvttsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2c,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvttsd2si:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvttsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2c,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0) ; <i32> [#uses=1]
@@ -638,17 +638,17 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_max_pd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: maxpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x5f,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_max_pd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5f,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_max_pd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5f,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -659,17 +659,17 @@ declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_max_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: maxsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5f,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_max_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5f,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_max_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5f,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -680,17 +680,17 @@ declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_min_pd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: minpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x5d,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_min_pd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vminpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5d,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_min_pd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vminpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5d,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -701,17 +701,17 @@ declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_min_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: minsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5d,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_min_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5d,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_min_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5d,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -722,12 +722,12 @@ declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind
define i32 @test_x86_sse2_movmsk_pd(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_movmsk_pd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movmskpd %xmm0, %eax ## encoding: [0x66,0x0f,0x50,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_movmsk_pd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vmovmskpd %xmm0, %eax ## encoding: [0xc5,0xf9,0x50,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0) ; <i32> [#uses=1]
@@ -738,17 +738,17 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse2_packssdw_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: packssdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6b,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_packssdw_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x6b,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_packssdw_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
@@ -759,21 +759,21 @@ declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind rea
define <8 x i16> @test_x86_sse2_packssdw_128_fold() {
; SSE-LABEL: test_x86_sse2_packssdw_128_fold:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
; SSE-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
; SSE-NEXT: ## fixup A - offset: 3, value: LCPI35_0, kind: FK_Data_4
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_packssdw_128_fold:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
; AVX2-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI35_0, kind: FK_Data_4
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_packssdw_128_fold:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmovaps LCPI35_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,32767,32767,65535,32768]
; SKX-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; SKX-NEXT: ## fixup A - offset: 4, value: LCPI35_0, kind: FK_Data_4
@@ -785,17 +785,17 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() {
define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_packsswb_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: packsswb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x63,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_packsswb_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x63,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_packsswb_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
@@ -806,21 +806,21 @@ declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <16 x i8> @test_x86_sse2_packsswb_128_fold() {
; SSE-LABEL: test_x86_sse2_packsswb_128_fold:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; SSE-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
; SSE-NEXT: ## fixup A - offset: 3, value: LCPI37_0, kind: FK_Data_4
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_packsswb_128_fold:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; AVX2-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI37_0, kind: FK_Data_4
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_packsswb_128_fold:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmovaps LCPI37_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; SKX-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; SKX-NEXT: ## fixup A - offset: 4, value: LCPI37_0, kind: FK_Data_4
@@ -832,17 +832,17 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() {
define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_packuswb_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: packuswb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x67,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_packuswb_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x67,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_packuswb_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
@@ -853,21 +853,21 @@ declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <16 x i8> @test_x86_sse2_packuswb_128_fold() {
; SSE-LABEL: test_x86_sse2_packuswb_128_fold:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; SSE-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
; SSE-NEXT: ## fixup A - offset: 3, value: LCPI39_0, kind: FK_Data_4
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_packuswb_128_fold:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX2-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI39_0, kind: FK_Data_4
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_packuswb_128_fold:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmovaps LCPI39_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; SKX-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; SKX-NEXT: ## fixup A - offset: 4, value: LCPI39_0, kind: FK_Data_4
@@ -879,17 +879,17 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() {
define <16 x i8> @test_x86_sse2_padds_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_padds_b:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: paddsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xec,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_padds_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xec,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_padds_b:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -900,17 +900,17 @@ declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse2_padds_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_padds_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: paddsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xed,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_padds_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xed,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_padds_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -921,17 +921,17 @@ declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_x86_sse2_paddus_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_b:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_b:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -942,17 +942,17 @@ declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnon
define <8 x i16> @test_x86_sse2_paddus_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -963,17 +963,17 @@ declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnon
define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_pmadd_wd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmaddwd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf5,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pmadd_wd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf5,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pmadd_wd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1) ; <<4 x i32>> [#uses=1]
@@ -984,17 +984,17 @@ declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_x86_sse2_pmaxs_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_pmaxs_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmaxsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xee,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pmaxs_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xee,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pmaxs_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1005,17 +1005,17 @@ declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_x86_sse2_pmaxu_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_pmaxu_b:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmaxub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xde,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pmaxu_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xde,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pmaxu_b:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -1026,17 +1026,17 @@ declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse2_pmins_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_pmins_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pminsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xea,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pmins_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xea,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pmins_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpminsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1047,17 +1047,17 @@ declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_x86_sse2_pminu_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_pminu_b:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pminub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xda,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pminu_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xda,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pminu_b:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpminub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -1068,12 +1068,12 @@ declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
define i32 @test_x86_sse2_pmovmskb_128(<16 x i8> %a0) {
; SSE-LABEL: test_x86_sse2_pmovmskb_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmovmskb %xmm0, %eax ## encoding: [0x66,0x0f,0xd7,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse2_pmovmskb_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vpmovmskb %xmm0, %eax ## encoding: [0xc5,0xf9,0xd7,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0) ; <i32> [#uses=1]
@@ -1084,17 +1084,17 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_pmulh_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmulhw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe5,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pmulh_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe5,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pmulh_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1105,17 +1105,17 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_pmulhu_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmulhuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe4,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pmulhu_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe4,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pmulhu_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1126,17 +1126,17 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnon
define <2 x i64> @test_x86_sse2_pmulu_dq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse2_pmulu_dq:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmuludq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf4,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pmulu_dq:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf4,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pmulu_dq:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf4,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
@@ -1147,17 +1147,17 @@ declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psad_bw:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psadbw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf6,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psad_bw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf6,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psad_bw:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf6,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1) ; <<2 x i64>> [#uses=1]
@@ -1168,17 +1168,17 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse2_psll_d:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pslld %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf2,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psll_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf2,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psll_d:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpslld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf2,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1189,17 +1189,17 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_sse2_psll_q:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psllq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf3,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psll_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf3,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psll_q:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf3,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1210,17 +1210,17 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psll_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psllw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf1,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psll_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf1,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psll_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1231,17 +1231,17 @@ declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse2_pslli_d(<4 x i32> %a0) {
; SSE-LABEL: test_x86_sse2_pslli_d:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pslld $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xf0,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pslli_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xf0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pslli_d:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpslld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -1252,17 +1252,17 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_pslli_q(<2 x i64> %a0) {
; SSE-LABEL: test_x86_sse2_pslli_q:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psllq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xf0,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pslli_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pslli_q:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -1273,17 +1273,17 @@ declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
define <8 x i16> @test_x86_sse2_pslli_w(<8 x i16> %a0) {
; SSE-LABEL: test_x86_sse2_pslli_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psllw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xf0,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_pslli_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_pslli_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -1294,17 +1294,17 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse2_psra_d:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psrad %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe2,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psra_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrad %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe2,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psra_d:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrad %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe2,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1315,17 +1315,17 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psra_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psraw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe1,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psra_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsraw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe1,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psra_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsraw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1336,17 +1336,17 @@ declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse2_psrai_d(<4 x i32> %a0) {
; SSE-LABEL: test_x86_sse2_psrai_d:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psrad $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xe0,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psrai_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrad $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xe0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psrai_d:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrad $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xe0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -1357,17 +1357,17 @@ declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone
define <8 x i16> @test_x86_sse2_psrai_w(<8 x i16> %a0) {
; SSE-LABEL: test_x86_sse2_psrai_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psraw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xe0,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psrai_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsraw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psrai_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsraw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xe0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -1378,17 +1378,17 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse2_psrl_d:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psrld %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd2,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psrl_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd2,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psrl_d:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1399,17 +1399,17 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_sse2_psrl_q:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psrlq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd3,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psrl_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd3,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psrl_q:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1420,17 +1420,17 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psrl_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psrlw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd1,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psrl_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd1,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psrl_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1441,17 +1441,17 @@ declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse2_psrli_d(<4 x i32> %a0) {
; SSE-LABEL: test_x86_sse2_psrli_d:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psrld $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xd0,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psrli_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xd0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psrli_d:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xd0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -1462,17 +1462,17 @@ declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psrli_q(<2 x i64> %a0) {
; SSE-LABEL: test_x86_sse2_psrli_q:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psrlq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xd0,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psrli_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psrli_q:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrlq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -1483,17 +1483,17 @@ declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone
define <8 x i16> @test_x86_sse2_psrli_w(<8 x i16> %a0) {
; SSE-LABEL: test_x86_sse2_psrli_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psrlw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xd0,0x07]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psrli_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xd0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psrli_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrlw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xd0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -1504,17 +1504,17 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
define <16 x i8> @test_x86_sse2_psubs_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psubs_b:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psubsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe8,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubs_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe8,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubs_b:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -1525,17 +1525,17 @@ declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse2_psubs_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psubs_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe9,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubs_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe9,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubs_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1546,17 +1546,17 @@ declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_x86_sse2_psubus_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_b:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psubusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd8,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_b:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -1567,17 +1567,17 @@ declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnon
define <8 x i16> @test_x86_sse2_psubus_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_w:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psubusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd9,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_w:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1588,17 +1588,17 @@ declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnon
define <2 x double> @test_x86_sse2_sqrt_pd(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_sqrt_pd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: sqrtpd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x51,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_sqrt_pd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vsqrtpd %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x51,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_sqrt_pd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vsqrtpd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x51,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
@@ -1609,17 +1609,17 @@ declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
define <2 x double> @test_x86_sse2_sqrt_sd(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_sqrt_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_sqrt_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_sqrt_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
@@ -1630,21 +1630,21 @@ declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
define <2 x double> @test_x86_sse2_sqrt_sd_vec_load(<2 x double>* %a0) {
; SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: movapd (%eax), %xmm0 ## encoding: [0x66,0x0f,0x28,0x00]
; SSE-NEXT: sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; AVX2-NEXT: vmovapd (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x28,0x00]
; AVX2-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SKX-NEXT: vmovapd (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x00]
; SKX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
@@ -1657,7 +1657,7 @@ define <2 x double> @test_x86_sse2_sqrt_sd_vec_load(<2 x double>* %a0) {
define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_ucomieq_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
; SSE-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; SSE-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -1666,7 +1666,7 @@ define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_ucomieq_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX2-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; AVX2-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -1675,7 +1675,7 @@ define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_ucomieq_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
; SKX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; SKX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -1690,21 +1690,21 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_ucomige_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
; SSE-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_ucomige_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX2-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_ucomige_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
; SKX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -1717,21 +1717,21 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_ucomigt_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_ucomigt_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX2-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_ucomigt_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
; SKX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -1744,21 +1744,21 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_ucomile_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ucomisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2e,0xc8]
; SSE-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_ucomile_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
; AVX2-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_ucomile_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
; SKX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -1771,21 +1771,21 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_ucomilt_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ucomisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2e,0xc8]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_ucomilt_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX2-NEXT: vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
; AVX2-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_ucomilt_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SKX-NEXT: vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
; SKX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -1798,7 +1798,7 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_ucomineq_sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
; SSE-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; SSE-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -1807,7 +1807,7 @@ define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_ucomineq_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX2-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; AVX2-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -1816,7 +1816,7 @@ define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_ucomineq_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
; SKX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; SKX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -1830,7 +1830,7 @@ declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind read
define void @test_x86_sse2_pause() {
; CHECK-LABEL: test_x86_sse2_pause:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pause ## encoding: [0xf3,0x90]
; CHECK-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.sse2.pause()
@@ -1840,7 +1840,7 @@ declare void @llvm.x86.sse2.pause() nounwind
define void @lfence() nounwind {
; CHECK-LABEL: lfence:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: lfence ## encoding: [0x0f,0xae,0xe8]
; CHECK-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.sse2.lfence()
@@ -1850,7 +1850,7 @@ declare void @llvm.x86.sse2.lfence() nounwind
define void @mfence() nounwind {
; CHECK-LABEL: mfence:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: mfence ## encoding: [0x0f,0xae,0xf0]
; CHECK-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.sse2.mfence()
@@ -1860,7 +1860,7 @@ declare void @llvm.x86.sse2.mfence() nounwind
define void @clflush(i8* %p) nounwind {
; CHECK-LABEL: clflush:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; CHECK-NEXT: clflush (%eax) ## encoding: [0x0f,0xae,0x38]
; CHECK-NEXT: retl ## encoding: [0xc3]
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86_64.ll b/test/CodeGen/X86/sse2-intrinsics-x86_64.ll
index cd5e11e1279..41b4b2905dc 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86_64.ll
@@ -5,21 +5,21 @@
define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_sse2_cvtsd2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsd2si %xmm0, %rax
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse2_cvtsd2si64:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax ## encoding: [0xf2,0x48,0x0f,0x2d,0xc0]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtsd2si64:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtsd2si64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
@@ -30,21 +30,21 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
; CHECK-LABEL: test_x86_sse2_cvtsi642sd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse2_cvtsi642sd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvtsi2sdq %rdi, %xmm0 ## encoding: [0xf2,0x48,0x0f,0x2a,0xc7]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvtsi642sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvtsi642sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
@@ -55,21 +55,21 @@ declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readn
define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_sse2_cvttsd2si64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttsd2si %xmm0, %rax
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse2_cvttsd2si64:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %rax ## encoding: [0xf2,0x48,0x0f,0x2c,0xc0]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_cvttsd2si64:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vcvttsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_cvttsd2si64:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcvttsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
diff --git a/test/CodeGen/X86/sse2-schedule.ll b/test/CodeGen/X86/sse2-schedule.ll
index b7a4d7be2cd..247317d7161 100644
--- a/test/CodeGen/X86/sse2-schedule.ll
+++ b/test/CodeGen/X86/sse2-schedule.ll
@@ -13,61 +13,61 @@
define <2 x double> @test_addpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_addpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: addpd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_addpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: addpd (%rdi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_addpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: addpd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_addpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -79,61 +79,61 @@ define <2 x double> @test_addpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
define double @test_addsd(double %a0, double %a1, double *%a2) {
; GENERIC-LABEL: test_addsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: addsd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_addsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: addsd %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: addsd (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_addsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: addsd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_addsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -145,70 +145,70 @@ define double @test_addsd(double %a0, double %a1, double *%a2) {
define <2 x double> @test_andpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_andpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andpd %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: andpd (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_andpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: andpd %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: andpd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_andpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: andpd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: andpd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_andpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_andpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -226,70 +226,70 @@ define <2 x double> @test_andpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
define <2 x double> @test_andnotpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_andnotpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: andnpd %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: andnpd (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_andnotpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: andnpd %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: andnpd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_andnotpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: andnpd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: andnpd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_andnotpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andnotpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andnotpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andnotpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_andnotpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andnotpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andnotpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -309,12 +309,12 @@ define <2 x double> @test_andnotpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
define void @test_clflush(i8* %p){
; GENERIC-LABEL: test_clflush:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: clflush (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_clflush:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: clflush (%rdi) # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -325,42 +325,42 @@ define void @test_clflush(i8* %p){
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_clflush:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: clflush (%rdi) # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_clflush:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: clflush (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_clflush:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: clflush (%rdi) # sched: [2:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_clflush:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: clflush (%rdi) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_clflush:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: clflush (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_clflush:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: clflush (%rdi) # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_clflush:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: clflush (%rdi) # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_clflush:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: clflush (%rdi) # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse2.clflush(i8* %p)
@@ -370,56 +370,56 @@ declare void @llvm.x86.sse2.clflush(i8*) nounwind
define <2 x double> @test_cmppd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_cmppd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cmpeqpd %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: cmpeqpd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: orpd %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cmppd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cmpeqpd %xmm0, %xmm1 # sched: [6:3.00]
; ATOM-NEXT: cmpeqpd (%rdi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: orpd %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cmppd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cmpeqpd %xmm0, %xmm1 # sched: [3:1.00]
; SLM-NEXT: cmpeqpd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: orpd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cmppd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; SANDY-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmppd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; HASWELL-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cmppd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; BROADWELL-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cmppd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [4:0.33]
; SKYLAKE-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cmppd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vcmpeqpd (%rdi), %xmm0, %k1 # sched: [9:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
@@ -427,14 +427,14 @@ define <2 x double> @test_cmppd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cmppd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; BTVER2-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cmppd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -450,61 +450,61 @@ define <2 x double> @test_cmppd(<2 x double> %a0, <2 x double> %a1, <2 x double>
define double @test_cmpsd(double %a0, double %a1, double *%a2) {
; GENERIC-LABEL: test_cmpsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cmpeqsd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: cmpeqsd (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cmpsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cmpeqsd %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: cmpeqsd (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cmpsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cmpeqsd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: cmpeqsd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cmpsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cmpsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cmpsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cmpsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cmpsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cmpsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cmpsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -521,7 +521,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw
define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_comisd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: comisd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: setnp %al # sched: [1:0.50]
; GENERIC-NEXT: sete %cl # sched: [1:0.50]
@@ -535,7 +535,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_comisd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: comisd %xmm1, %xmm0 # sched: [9:4.50]
; ATOM-NEXT: setnp %al # sched: [1:0.50]
; ATOM-NEXT: sete %cl # sched: [1:0.50]
@@ -549,7 +549,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_comisd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: comisd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: setnp %al # sched: [1:0.50]
; SLM-NEXT: sete %cl # sched: [1:0.50]
@@ -563,7 +563,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_comisd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: setnp %al # sched: [1:0.50]
; SANDY-NEXT: sete %cl # sched: [1:0.50]
@@ -577,7 +577,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_comisd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: setnp %al # sched: [1:0.50]
; HASWELL-NEXT: sete %cl # sched: [1:0.50]
@@ -591,7 +591,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_comisd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: setnp %al # sched: [1:0.50]
; BROADWELL-NEXT: sete %cl # sched: [1:0.50]
@@ -605,7 +605,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_comisd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: setnp %al # sched: [1:0.50]
; SKYLAKE-NEXT: sete %cl # sched: [1:0.50]
@@ -619,7 +619,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_comisd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
; SKX-NEXT: setnp %al # sched: [1:0.50]
; SKX-NEXT: sete %cl # sched: [1:0.50]
@@ -633,7 +633,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_comisd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setnp %al # sched: [1:0.50]
; BTVER2-NEXT: sete %cl # sched: [1:0.50]
@@ -647,7 +647,7 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_comisd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
@@ -669,70 +669,70 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readno
define <2 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_cvtdq2pd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtdq2pd %xmm0, %xmm1 # sched: [4:1.00]
; GENERIC-NEXT: cvtdq2pd (%rdi), %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtdq2pd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtdq2pd %xmm0, %xmm1 # sched: [8:4.00]
; ATOM-NEXT: cvtdq2pd (%rdi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtdq2pd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtdq2pd %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: cvtdq2pd (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtdq2pd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtdq2pd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [4:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtdq2pd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtdq2pd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [11:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtdq2pd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [11:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtdq2pd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtdq2pd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -748,14 +748,14 @@ define <2 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
define <4 x float> @test_cvtdq2ps(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_cvtdq2ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtdq2ps %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: cvtdq2ps (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtdq2ps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtdq2ps (%rdi), %xmm1 # sched: [7:3.50]
; ATOM-NEXT: cvtdq2ps %xmm0, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: addps %xmm0, %xmm1 # sched: [5:5.00]
@@ -763,56 +763,56 @@ define <4 x float> @test_cvtdq2ps(<4 x i32> %a0, <4 x i32> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtdq2ps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtdq2ps %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: cvtdq2ps (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtdq2ps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtdq2ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [3:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtdq2ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtdq2ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [10:0.50]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtdq2ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [10:0.50]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtdq2ps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtdq2ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -826,14 +826,14 @@ define <4 x float> @test_cvtdq2ps(<4 x i32> %a0, <4 x i32> *%a1) {
define <4 x i32> @test_cvtpd2dq(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_cvtpd2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtpd2dq %xmm0, %xmm1 # sched: [4:1.00]
; GENERIC-NEXT: cvtpd2dq (%rdi), %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtpd2dq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtpd2dq (%rdi), %xmm1 # sched: [8:4.00]
; ATOM-NEXT: cvtpd2dq %xmm0, %xmm0 # sched: [7:3.50]
; ATOM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -841,56 +841,56 @@ define <4 x i32> @test_cvtpd2dq(<2 x double> %a0, <2 x double> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtpd2dq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtpd2dq %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: cvtpd2dq (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtpd2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [7:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtpd2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtpd2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtpd2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [8:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtpd2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtpd2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -905,14 +905,14 @@ declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
define <4 x float> @test_cvtpd2ps(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_cvtpd2ps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtpd2ps %xmm0, %xmm1 # sched: [4:1.00]
; GENERIC-NEXT: cvtpd2ps (%rdi), %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtpd2ps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtpd2ps (%rdi), %xmm1 # sched: [8:4.00]
; ATOM-NEXT: cvtpd2ps %xmm0, %xmm0 # sched: [7:3.50]
; ATOM-NEXT: addps %xmm0, %xmm1 # sched: [5:5.00]
@@ -920,56 +920,56 @@ define <4 x float> @test_cvtpd2ps(<2 x double> %a0, <2 x double> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtpd2ps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtpd2ps %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: cvtpd2ps (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtpd2ps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtpd2ps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [7:1.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtpd2ps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtpd2ps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtpd2ps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [8:1.00]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtpd2ps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtpd2ps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [11:1.00]
; ZNVER1-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -984,14 +984,14 @@ declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone
define <4 x i32> @test_cvtps2dq(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_cvtps2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtps2dq %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: cvtps2dq (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtps2dq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtps2dq (%rdi), %xmm1 # sched: [7:3.50]
; ATOM-NEXT: cvtps2dq %xmm0, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -999,56 +999,56 @@ define <4 x i32> @test_cvtps2dq(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtps2dq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtps2dq %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: cvtps2dq (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtps2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtps2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtps2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtps2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [10:0.50]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtps2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [10:0.50]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtps2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtps2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1063,14 +1063,14 @@ declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
define <2 x double> @test_cvtps2pd(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_cvtps2pd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtps2pd %xmm0, %xmm1 # sched: [2:1.00]
; GENERIC-NEXT: cvtps2pd (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtps2pd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtps2pd (%rdi), %xmm1 # sched: [8:4.00]
; ATOM-NEXT: cvtps2pd %xmm0, %xmm0 # sched: [7:3.50]
; ATOM-NEXT: addpd %xmm0, %xmm1 # sched: [6:3.00]
@@ -1078,56 +1078,56 @@ define <2 x double> @test_cvtps2pd(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtps2pd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtps2pd %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: cvtps2pd (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtps2pd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtps2pd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtps2pd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtps2pd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [9:0.50]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtps2pd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [9:0.50]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtps2pd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtps2pd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [10:1.00]
; ZNVER1-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -1143,70 +1143,70 @@ define <2 x double> @test_cvtps2pd(<4 x float> %a0, <4 x float> *%a1) {
define i32 @test_cvtsd2si(double %a0, double *%a1) {
; GENERIC-LABEL: test_cvtsd2si:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtsd2si %xmm0, %ecx # sched: [5:1.00]
; GENERIC-NEXT: cvtsd2si (%rdi), %eax # sched: [9:1.00]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtsd2si:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtsd2si (%rdi), %eax # sched: [9:4.50]
; ATOM-NEXT: cvtsd2si %xmm0, %ecx # sched: [8:4.00]
; ATOM-NEXT: addl %ecx, %eax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtsd2si:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtsd2si (%rdi), %eax # sched: [7:1.00]
; SLM-NEXT: cvtsd2si %xmm0, %ecx # sched: [4:0.50]
; SLM-NEXT: addl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtsd2si:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtsd2si %xmm0, %ecx # sched: [5:1.00]
; SANDY-NEXT: vcvtsd2si (%rdi), %eax # sched: [10:1.00]
; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsd2si:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtsd2si %xmm0, %ecx # sched: [4:1.00]
; HASWELL-NEXT: vcvtsd2si (%rdi), %eax # sched: [4:1.00]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtsd2si:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtsd2si %xmm0, %ecx # sched: [4:1.00]
; BROADWELL-NEXT: vcvtsd2si (%rdi), %eax # sched: [9:1.00]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtsd2si:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtsd2si %xmm0, %ecx # sched: [6:1.00]
; SKYLAKE-NEXT: vcvtsd2si (%rdi), %eax # sched: [11:1.00]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtsd2si:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtsd2si %xmm0, %ecx # sched: [6:1.00]
; SKX-NEXT: vcvtsd2si (%rdi), %eax # sched: [11:1.00]
; SKX-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtsd2si:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtsd2si (%rdi), %eax # sched: [8:1.00]
; BTVER2-NEXT: vcvtsd2si %xmm0, %ecx # sched: [3:1.00]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtsd2si:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtsd2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtsd2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -1223,70 +1223,70 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
define i64 @test_cvtsd2siq(double %a0, double *%a1) {
; GENERIC-LABEL: test_cvtsd2siq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtsd2si %xmm0, %rcx # sched: [5:1.00]
; GENERIC-NEXT: cvtsd2si (%rdi), %rax # sched: [9:1.00]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtsd2siq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtsd2si (%rdi), %rax # sched: [9:4.50]
; ATOM-NEXT: cvtsd2si %xmm0, %rcx # sched: [8:4.00]
; ATOM-NEXT: addq %rcx, %rax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtsd2siq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtsd2si (%rdi), %rax # sched: [7:1.00]
; SLM-NEXT: cvtsd2si %xmm0, %rcx # sched: [4:0.50]
; SLM-NEXT: addq %rcx, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtsd2siq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtsd2si %xmm0, %rcx # sched: [5:1.00]
; SANDY-NEXT: vcvtsd2si (%rdi), %rax # sched: [10:1.00]
; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsd2siq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtsd2si %xmm0, %rcx # sched: [4:1.00]
; HASWELL-NEXT: vcvtsd2si (%rdi), %rax # sched: [4:1.00]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtsd2siq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtsd2si %xmm0, %rcx # sched: [4:1.00]
; BROADWELL-NEXT: vcvtsd2si (%rdi), %rax # sched: [9:1.00]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtsd2siq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtsd2si %xmm0, %rcx # sched: [6:1.00]
; SKYLAKE-NEXT: vcvtsd2si (%rdi), %rax # sched: [11:1.00]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtsd2siq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtsd2si %xmm0, %rcx # sched: [6:1.00]
; SKX-NEXT: vcvtsd2si (%rdi), %rax # sched: [11:1.00]
; SKX-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtsd2siq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtsd2si (%rdi), %rax # sched: [8:1.00]
; BTVER2-NEXT: vcvtsd2si %xmm0, %rcx # sched: [3:1.00]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtsd2siq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtsd2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtsd2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -1303,7 +1303,7 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
define float @test_cvtsd2ss(double %a0, double *%a1) {
; GENERIC-LABEL: test_cvtsd2ss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtsd2ss %xmm0, %xmm1 # sched: [4:1.00]
; GENERIC-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero sched: [4:0.50]
; GENERIC-NEXT: cvtsd2ss %xmm0, %xmm0 # sched: [4:1.00]
@@ -1311,7 +1311,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtsd2ss:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero sched: [1:1.00]
; ATOM-NEXT: cvtsd2ss %xmm0, %xmm2 # sched: [6:3.00]
; ATOM-NEXT: xorps %xmm0, %xmm0 # sched: [1:0.50]
@@ -1320,7 +1320,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtsd2ss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtsd2ss %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero sched: [3:1.00]
; SLM-NEXT: cvtsd2ss %xmm0, %xmm0 # sched: [4:0.50]
@@ -1328,7 +1328,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtsd2ss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [6:0.50]
; SANDY-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -1336,7 +1336,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsd2ss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [1:0.50]
; HASWELL-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -1344,7 +1344,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtsd2ss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [5:0.50]
; BROADWELL-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -1352,7 +1352,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtsd2ss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [5:0.50]
; SKYLAKE-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -1360,7 +1360,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtsd2ss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [5:0.50]
; SKX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -1368,7 +1368,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtsd2ss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [5:1.00]
; BTVER2-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [3:1.00]
@@ -1376,7 +1376,7 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtsd2ss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50]
; ZNVER1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -1391,70 +1391,70 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
define double @test_cvtsi2sd(i32 %a0, i32 *%a1) {
; GENERIC-LABEL: test_cvtsi2sd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtsi2sdl %edi, %xmm1 # sched: [4:1.00]
; GENERIC-NEXT: cvtsi2sdl (%rsi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtsi2sd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtsi2sdl (%rsi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: cvtsi2sdl %edi, %xmm1 # sched: [6:3.00]
; ATOM-NEXT: addsd %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtsi2sd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtsi2sdl (%rsi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: cvtsi2sdl %edi, %xmm1 # sched: [4:0.50]
; SLM-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtsi2sd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsi2sd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; HASWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtsi2sd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtsi2sd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtsi2sd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SKX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtsi2sd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtsi2sd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -1468,70 +1468,70 @@ define double @test_cvtsi2sd(i32 %a0, i32 *%a1) {
define double @test_cvtsi2sdq(i64 %a0, i64 *%a1) {
; GENERIC-LABEL: test_cvtsi2sdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtsi2sdq %rdi, %xmm1 # sched: [4:1.00]
; GENERIC-NEXT: cvtsi2sdq (%rsi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtsi2sdq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvtsi2sdq (%rsi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: cvtsi2sdq %rdi, %xmm1 # sched: [6:3.00]
; ATOM-NEXT: addsd %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtsi2sdq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtsi2sdq (%rsi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: cvtsi2sdq %rdi, %xmm1 # sched: [4:0.50]
; SLM-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtsi2sdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtsi2sdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; HASWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtsi2sdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtsi2sdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtsi2sdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
; SKX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtsi2sdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtsi2sdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -1547,7 +1547,7 @@ define double @test_cvtsi2sdq(i64 %a0, i64 *%a1) {
define double @test_cvtss2sd(float %a0, float *%a1) {
; GENERIC-LABEL: test_cvtss2sd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvtss2sd %xmm0, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: cvtss2sd %xmm0, %xmm0 # sched: [1:1.00]
@@ -1555,7 +1555,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvtss2sd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:1.00]
; ATOM-NEXT: cvtss2sd %xmm0, %xmm2 # sched: [6:3.00]
; ATOM-NEXT: xorps %xmm0, %xmm0 # sched: [1:0.50]
@@ -1564,7 +1564,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvtss2sd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvtss2sd %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [3:1.00]
; SLM-NEXT: cvtss2sd %xmm0, %xmm0 # sched: [4:0.50]
@@ -1572,7 +1572,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvtss2sd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
@@ -1580,7 +1580,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvtss2sd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:0.50]
; HASWELL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [2:1.00]
@@ -1588,7 +1588,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvtss2sd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; BROADWELL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [2:1.00]
@@ -1596,7 +1596,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvtss2sd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKYLAKE-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -1604,7 +1604,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvtss2sd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -1612,7 +1612,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvtss2sd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [3:1.00]
@@ -1620,7 +1620,7 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvtss2sd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -1635,14 +1635,14 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
define <4 x i32> @test_cvttpd2dq(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_cvttpd2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvttpd2dq %xmm0, %xmm1 # sched: [4:1.00]
; GENERIC-NEXT: cvttpd2dq (%rdi), %xmm0 # sched: [10:1.00]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvttpd2dq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvttpd2dq (%rdi), %xmm1 # sched: [8:4.00]
; ATOM-NEXT: cvttpd2dq %xmm0, %xmm0 # sched: [7:3.50]
; ATOM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -1650,56 +1650,56 @@ define <4 x i32> @test_cvttpd2dq(<2 x double> %a0, <2 x double> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvttpd2dq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvttpd2dq %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: cvttpd2dq (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvttpd2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [4:1.00]
; SANDY-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [10:1.00]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttpd2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [4:1.00]
; HASWELL-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [7:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttpd2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [4:1.00]
; BROADWELL-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttpd2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttpd2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [8:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttpd2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttpd2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1715,14 +1715,14 @@ define <4 x i32> @test_cvttpd2dq(<2 x double> %a0, <2 x double> *%a1) {
define <4 x i32> @test_cvttps2dq(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_cvttps2dq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvttps2dq %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: cvttps2dq (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvttps2dq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvttps2dq (%rdi), %xmm1 # sched: [7:3.50]
; ATOM-NEXT: cvttps2dq %xmm0, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -1730,56 +1730,56 @@ define <4 x i32> @test_cvttps2dq(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvttps2dq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvttps2dq %xmm0, %xmm1 # sched: [4:0.50]
; SLM-NEXT: cvttps2dq (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvttps2dq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttps2dq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttps2dq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttps2dq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [10:0.50]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttps2dq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [10:0.50]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttps2dq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttps2dq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1793,70 +1793,70 @@ define <4 x i32> @test_cvttps2dq(<4 x float> %a0, <4 x float> *%a1) {
define i32 @test_cvttsd2si(double %a0, double *%a1) {
; GENERIC-LABEL: test_cvttsd2si:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvttsd2si %xmm0, %ecx # sched: [5:1.00]
; GENERIC-NEXT: cvttsd2si (%rdi), %eax # sched: [9:1.00]
; GENERIC-NEXT: addl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvttsd2si:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvttsd2si (%rdi), %eax # sched: [9:4.50]
; ATOM-NEXT: cvttsd2si %xmm0, %ecx # sched: [8:4.00]
; ATOM-NEXT: addl %ecx, %eax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvttsd2si:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvttsd2si (%rdi), %eax # sched: [7:1.00]
; SLM-NEXT: cvttsd2si %xmm0, %ecx # sched: [4:0.50]
; SLM-NEXT: addl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvttsd2si:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvttsd2si %xmm0, %ecx # sched: [5:1.00]
; SANDY-NEXT: vcvttsd2si (%rdi), %eax # sched: [10:1.00]
; SANDY-NEXT: addl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttsd2si:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvttsd2si %xmm0, %ecx # sched: [4:1.00]
; HASWELL-NEXT: vcvttsd2si (%rdi), %eax # sched: [4:1.00]
; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttsd2si:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvttsd2si %xmm0, %ecx # sched: [4:1.00]
; BROADWELL-NEXT: vcvttsd2si (%rdi), %eax # sched: [9:1.00]
; BROADWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttsd2si:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvttsd2si %xmm0, %ecx # sched: [6:1.00]
; SKYLAKE-NEXT: vcvttsd2si (%rdi), %eax # sched: [11:1.00]
; SKYLAKE-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttsd2si:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttsd2si %xmm0, %ecx # sched: [6:1.00]
; SKX-NEXT: vcvttsd2si (%rdi), %eax # sched: [11:1.00]
; SKX-NEXT: addl %ecx, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttsd2si:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvttsd2si (%rdi), %eax # sched: [8:1.00]
; BTVER2-NEXT: vcvttsd2si %xmm0, %ecx # sched: [3:1.00]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttsd2si:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvttsd2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttsd2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
@@ -1870,70 +1870,70 @@ define i32 @test_cvttsd2si(double %a0, double *%a1) {
define i64 @test_cvttsd2siq(double %a0, double *%a1) {
; GENERIC-LABEL: test_cvttsd2siq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: cvttsd2si %xmm0, %rcx # sched: [5:1.00]
; GENERIC-NEXT: cvttsd2si (%rdi), %rax # sched: [9:1.00]
; GENERIC-NEXT: addq %rcx, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_cvttsd2siq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: cvttsd2si (%rdi), %rax # sched: [9:4.50]
; ATOM-NEXT: cvttsd2si %xmm0, %rcx # sched: [8:4.00]
; ATOM-NEXT: addq %rcx, %rax # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_cvttsd2siq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: cvttsd2si (%rdi), %rax # sched: [7:1.00]
; SLM-NEXT: cvttsd2si %xmm0, %rcx # sched: [4:0.50]
; SLM-NEXT: addq %rcx, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_cvttsd2siq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vcvttsd2si %xmm0, %rcx # sched: [5:1.00]
; SANDY-NEXT: vcvttsd2si (%rdi), %rax # sched: [10:1.00]
; SANDY-NEXT: addq %rcx, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cvttsd2siq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vcvttsd2si %xmm0, %rcx # sched: [4:1.00]
; HASWELL-NEXT: vcvttsd2si (%rdi), %rax # sched: [4:1.00]
; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cvttsd2siq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vcvttsd2si %xmm0, %rcx # sched: [4:1.00]
; BROADWELL-NEXT: vcvttsd2si (%rdi), %rax # sched: [9:1.00]
; BROADWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cvttsd2siq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vcvttsd2si %xmm0, %rcx # sched: [6:1.00]
; SKYLAKE-NEXT: vcvttsd2si (%rdi), %rax # sched: [11:1.00]
; SKYLAKE-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_cvttsd2siq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vcvttsd2si %xmm0, %rcx # sched: [6:1.00]
; SKX-NEXT: vcvttsd2si (%rdi), %rax # sched: [11:1.00]
; SKX-NEXT: addq %rcx, %rax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cvttsd2siq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vcvttsd2si (%rdi), %rax # sched: [8:1.00]
; BTVER2-NEXT: vcvttsd2si %xmm0, %rcx # sched: [3:1.00]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cvttsd2siq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vcvttsd2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttsd2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
@@ -1947,61 +1947,61 @@ define i64 @test_cvttsd2siq(double %a0, double *%a1) {
define <2 x double> @test_divpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_divpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: divpd %xmm1, %xmm0 # sched: [22:1.00]
; GENERIC-NEXT: divpd (%rdi), %xmm0 # sched: [28:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_divpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: divpd %xmm1, %xmm0 # sched: [125:62.50]
; ATOM-NEXT: divpd (%rdi), %xmm0 # sched: [125:62.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_divpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: divpd %xmm1, %xmm0 # sched: [34:34.00]
; SLM-NEXT: divpd (%rdi), %xmm0 # sched: [37:34.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_divpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [22:1.00]
; SANDY-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [28:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [20:1.00]
; HASWELL-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_divpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
; BROADWELL-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [19:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_divpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
; SKYLAKE-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_divpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
; SKX-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_divpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_divpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2013,61 +2013,61 @@ define <2 x double> @test_divpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
define double @test_divsd(double %a0, double %a1, double *%a2) {
; GENERIC-LABEL: test_divsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: divsd %xmm1, %xmm0 # sched: [22:1.00]
; GENERIC-NEXT: divsd (%rdi), %xmm0 # sched: [28:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_divsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: divsd %xmm1, %xmm0 # sched: [62:31.00]
; ATOM-NEXT: divsd (%rdi), %xmm0 # sched: [62:31.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_divsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: divsd %xmm1, %xmm0 # sched: [34:34.00]
; SLM-NEXT: divsd (%rdi), %xmm0 # sched: [37:34.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_divsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [22:1.00]
; SANDY-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [28:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_divsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [20:1.00]
; HASWELL-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_divsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
; BROADWELL-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [19:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_divsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
; SKYLAKE-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [19:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_divsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
; SKX-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [19:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_divsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_divsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2079,12 +2079,12 @@ define double @test_divsd(double %a0, double %a1, double *%a2) {
define void @test_lfence() {
; GENERIC-LABEL: test_lfence:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: lfence # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lfence:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: lfence # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -2095,42 +2095,42 @@ define void @test_lfence() {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lfence:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: lfence # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lfence:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: lfence # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lfence:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: lfence # sched: [2:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lfence:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: lfence # sched: [2:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lfence:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: lfence # sched: [2:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_lfence:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: lfence # sched: [2:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lfence:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: lfence # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lfence:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: lfence # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse2.lfence()
@@ -2140,12 +2140,12 @@ declare void @llvm.x86.sse2.lfence() nounwind readnone
define void @test_mfence() {
; GENERIC-LABEL: test_mfence:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: mfence # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_mfence:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: mfence # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -2156,42 +2156,42 @@ define void @test_mfence() {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_mfence:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: mfence # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_mfence:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: mfence # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mfence:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: mfence # sched: [2:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mfence:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: mfence # sched: [2:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mfence:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: mfence # sched: [3:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mfence:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: mfence # sched: [3:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mfence:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: mfence # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mfence:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: mfence # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse2.mfence()
@@ -2201,12 +2201,12 @@ declare void @llvm.x86.sse2.mfence() nounwind readnone
define void @test_maskmovdqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2) {
; GENERIC-LABEL: test_maskmovdqu:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: maskmovdqu %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_maskmovdqu:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: maskmovdqu %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -2215,42 +2215,42 @@ define void @test_maskmovdqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_maskmovdqu:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: maskmovdqu %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_maskmovdqu:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maskmovdqu:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maskmovdqu:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maskmovdqu:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maskmovdqu:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maskmovdqu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maskmovdqu:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2)
@@ -2260,61 +2260,61 @@ declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*) nounwind
define <2 x double> @test_maxpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_maxpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: maxpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: maxpd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_maxpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: maxpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: maxpd (%rdi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_maxpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: maxpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: maxpd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_maxpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maxpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maxpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maxpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maxpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maxpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2327,61 +2327,61 @@ declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_maxsd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_maxsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: maxsd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: maxsd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_maxsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: maxsd %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: maxsd (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_maxsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: maxsd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: maxsd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_maxsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_maxsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_maxsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_maxsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_maxsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_maxsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_maxsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2394,61 +2394,61 @@ declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_minpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_minpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: minpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: minpd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_minpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: minpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: minpd (%rdi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_minpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: minpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: minpd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_minpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_minpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_minpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_minpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_minpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_minpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2461,61 +2461,61 @@ declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_minsd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_minsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: minsd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: minsd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_minsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: minsd %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: minsd (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_minsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: minsd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: minsd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_minsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_minsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_minsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_minsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_minsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_minsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_minsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2528,70 +2528,70 @@ declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind
define void @test_movapd(<2 x double> *%a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_movapd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movapd (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: movapd %xmm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movapd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movapd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: addpd %xmm0, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: movapd %xmm0, (%rsi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movapd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movapd (%rdi), %xmm0 # sched: [3:1.00]
; SLM-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: movapd %xmm0, (%rsi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movapd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovapd (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovapd %xmm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movapd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovapd (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movapd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovapd (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movapd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovapd (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movapd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovapd (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movapd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovapd (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movapd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovapd (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:0.50]
@@ -2604,70 +2604,70 @@ define void @test_movapd(<2 x double> *%a0, <2 x double> *%a1) {
define void @test_movdqa(<2 x i64> *%a0, <2 x i64> *%a1) {
; GENERIC-LABEL: test_movdqa:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movdqa (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: movdqa %xmm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movdqa:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movdqa (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: paddq %xmm0, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: movdqa %xmm0, (%rsi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movdqa:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movdqa (%rdi), %xmm0 # sched: [3:1.00]
; SLM-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50]
; SLM-NEXT: movdqa %xmm0, (%rsi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movdqa:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovdqa (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vmovdqa %xmm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movdqa:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovdqa (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movdqa:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovdqa (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movdqa:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovdqa (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movdqa:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movdqa:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovdqa (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movdqa:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovdqa (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:0.50]
@@ -2680,70 +2680,70 @@ define void @test_movdqa(<2 x i64> *%a0, <2 x i64> *%a1) {
define void @test_movdqu(<2 x i64> *%a0, <2 x i64> *%a1) {
; GENERIC-LABEL: test_movdqu:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movdqu (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: movdqu %xmm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movdqu:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movdqu (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: paddq %xmm0, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: movdqu %xmm0, (%rsi) # sched: [2:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movdqu:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movdqu (%rdi), %xmm0 # sched: [3:1.00]
; SLM-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50]
; SLM-NEXT: movdqu %xmm0, (%rsi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movdqu:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovdqu (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vmovdqu %xmm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movdqu:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovdqu (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movdqu:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovdqu (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movdqu:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovdqu (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movdqu:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqu (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movdqu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovdqu (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movdqu:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovdqu (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:0.50]
@@ -2756,7 +2756,7 @@ define void @test_movdqu(<2 x i64> *%a0, <2 x i64> *%a1) {
define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_movd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movd %edi, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
; GENERIC-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -2766,7 +2766,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:1.00]
; ATOM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
; ATOM-NEXT: movd %xmm1, %eax # sched: [3:3.00]
@@ -2776,7 +2776,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [3:1.00]
; SLM-NEXT: movd %edi, %xmm1 # sched: [1:0.50]
; SLM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -2786,7 +2786,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovd %edi, %xmm1 # sched: [1:1.00]
; SANDY-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [6:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
@@ -2796,7 +2796,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovd %edi, %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [1:0.50]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
@@ -2806,7 +2806,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovd %edi, %xmm1 # sched: [1:1.00]
; BROADWELL-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
@@ -2816,7 +2816,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovd %edi, %xmm1 # sched: [1:1.00]
; SKYLAKE-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.33]
@@ -2826,7 +2826,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
; SKX-NEXT: vmovd %edi, %xmm2 # sched: [1:1.00]
; SKX-NEXT: vpaddd %xmm2, %xmm0, %xmm2 # sched: [1:0.33]
@@ -2836,7 +2836,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:1.00]
; BTVER2-NEXT: vmovd %edi, %xmm1 # sched: [1:0.17]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
@@ -2846,7 +2846,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vmovd %edi, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
@@ -2867,7 +2867,7 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: test_movd_64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movq %rdi, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: movq {{.*#+}} xmm2 = mem[0],zero sched: [4:0.50]
; GENERIC-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50]
@@ -2877,7 +2877,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movd_64:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movq {{.*#+}} xmm1 = mem[0],zero sched: [1:1.00]
; ATOM-NEXT: movq %rdi, %xmm2 # sched: [1:1.00]
; ATOM-NEXT: paddq %xmm0, %xmm2 # sched: [2:1.00]
@@ -2887,7 +2887,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movd_64:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movq {{.*#+}} xmm2 = mem[0],zero sched: [3:1.00]
; SLM-NEXT: movq %rdi, %xmm1 # sched: [1:0.50]
; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50]
@@ -2897,7 +2897,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movd_64:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovq %rdi, %xmm1 # sched: [1:1.00]
; SANDY-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [6:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
@@ -2907,7 +2907,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movd_64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovq %rdi, %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
@@ -2917,7 +2917,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movd_64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovq %rdi, %xmm1 # sched: [1:1.00]
; BROADWELL-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [5:0.50]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
@@ -2927,7 +2927,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movd_64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovq %rdi, %xmm1 # sched: [1:1.00]
; SKYLAKE-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [5:0.50]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.33]
@@ -2937,7 +2937,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movd_64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [5:0.50]
; SKX-NEXT: vmovq %rdi, %xmm2 # sched: [1:1.00]
; SKX-NEXT: vpaddq %xmm2, %xmm0, %xmm2 # sched: [1:0.33]
@@ -2947,7 +2947,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movd_64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [5:1.00]
; BTVER2-NEXT: vmovq %rdi, %xmm1 # sched: [1:0.17]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
@@ -2957,7 +2957,7 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movd_64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [8:0.50]
; ZNVER1-NEXT: vmovq %rdi, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
@@ -2978,70 +2978,70 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
define void @test_movhpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) {
; GENERIC-LABEL: test_movhpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: movhpd %xmm1, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movhpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
; ATOM-NEXT: addpd %xmm0, %xmm1 # sched: [6:3.00]
; ATOM-NEXT: movhpd %xmm1, (%rdi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movhpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00]
; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00]
; SLM-NEXT: movhpd %xmm1, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movhpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovhpd %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movhpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movhpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movhpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movhpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movhpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movhpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:0.50]
@@ -3057,70 +3057,70 @@ define void @test_movhpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) {
define void @test_movlpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) {
; GENERIC-LABEL: test_movlpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: movlpd %xmm1, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movlpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [1:1.00]
; ATOM-NEXT: addpd %xmm0, %xmm1 # sched: [6:3.00]
; ATOM-NEXT: movlpd %xmm1, (%rdi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movlpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [4:1.00]
; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00]
; SLM-NEXT: movlpd %xmm1, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movlpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovlpd %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movlpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movlpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movlpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movlpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movlpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movlpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:0.50]
@@ -3136,54 +3136,54 @@ define void @test_movlpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) {
define i32 @test_movmskpd(<2 x double> %a0) {
; GENERIC-LABEL: test_movmskpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movmskpd %xmm0, %eax # sched: [2:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movmskpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movmskpd %xmm0, %eax # sched: [3:3.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movmskpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movmskpd %xmm0, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movmskpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovmskpd %xmm0, %eax # sched: [2:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movmskpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovmskpd %xmm0, %eax # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movmskpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovmskpd %xmm0, %eax # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movmskpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovmskpd %xmm0, %eax # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movmskpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovmskpd %xmm0, %eax # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movmskpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovmskpd %xmm0, %eax # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movmskpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovmskpd %xmm0, %eax # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
@@ -3193,13 +3193,13 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
define void @test_movntdqa(<2 x i64> %a0, <2 x i64> *%a1) {
; GENERIC-LABEL: test_movntdqa:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: movntdq %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movntdqa:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddq %xmm0, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: movntdq %xmm0, (%rdi) # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -3207,49 +3207,49 @@ define void @test_movntdqa(<2 x i64> %a0, <2 x i64> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movntdqa:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddq %xmm0, %xmm0 # sched: [1:0.50]
; SLM-NEXT: movntdq %xmm0, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movntdqa:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vmovntdq %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntdqa:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntdqa:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntdqa:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntdqa:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movntdqa:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntdqa:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3260,61 +3260,61 @@ define void @test_movntdqa(<2 x i64> %a0, <2 x i64> *%a1) {
define void @test_movntpd(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_movntpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: movntpd %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movntpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: addpd %xmm0, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: movntpd %xmm0, (%rdi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movntpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: movntpd %xmm0, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movntpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovntpd %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movntpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3325,70 +3325,70 @@ define void @test_movntpd(<2 x double> %a0, <2 x double> *%a1) {
define <2 x i64> @test_movq_mem(<2 x i64> %a0, i64 *%a1) {
; GENERIC-LABEL: test_movq_mem:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movq {{.*#+}} xmm1 = mem[0],zero sched: [4:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: movq %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movq_mem:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movq {{.*#+}} xmm1 = mem[0],zero sched: [1:1.00]
; ATOM-NEXT: paddq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: movq %xmm0, (%rdi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movq_mem:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movq {{.*#+}} xmm1 = mem[0],zero sched: [3:1.00]
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: movq %xmm0, (%rdi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movq_mem:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [6:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vmovq %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movq_mem:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movq_mem:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [5:0.50]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movq_mem:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [5:0.50]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movq_mem:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [5:0.50]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movq_mem:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [5:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movq_mem:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vmovq %xmm0, (%rdi) # sched: [1:0.50]
@@ -3403,13 +3403,13 @@ define <2 x i64> @test_movq_mem(<2 x i64> %a0, i64 *%a1) {
define <2 x i64> @test_movq_reg(<2 x i64> %a0, <2 x i64> %a1) {
; GENERIC-LABEL: test_movq_reg:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:1.00]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movq_reg:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.50]
; ATOM-NEXT: paddq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -3417,49 +3417,49 @@ define <2 x i64> @test_movq_reg(<2 x i64> %a0, <2 x i64> %a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movq_reg:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.50]
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movq_reg:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.33]
; SANDY-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movq_reg:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.33]
; HASWELL-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movq_reg:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.33]
; BROADWELL-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movq_reg:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.33]
; SKYLAKE-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movq_reg:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.33]
; SKX-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movq_reg:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movq_reg:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3470,70 +3470,70 @@ define <2 x i64> @test_movq_reg(<2 x i64> %a0, <2 x i64> %a1) {
define void @test_movsd_mem(double* %a0, double* %a1) {
; GENERIC-LABEL: test_movsd_mem:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero sched: [4:0.50]
; GENERIC-NEXT: addsd %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: movsd %xmm0, (%rsi) # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movsd_mem:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero sched: [1:1.00]
; ATOM-NEXT: addsd %xmm0, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: movsd %xmm0, (%rsi) # sched: [1:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movsd_mem:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero sched: [3:1.00]
; SLM-NEXT: addsd %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: movsd %xmm0, (%rsi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movsd_mem:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [6:0.50]
; SANDY-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovsd %xmm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movsd_mem:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [1:0.50]
; HASWELL-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movsd_mem:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:0.50]
; BROADWELL-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movsd_mem:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:0.50]
; SKYLAKE-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movsd_mem:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:0.50]
; SKX-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movsd_mem:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:1.00]
; BTVER2-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movsd_mem:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [8:0.50]
; ZNVER1-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:0.50]
@@ -3546,13 +3546,13 @@ define void @test_movsd_mem(double* %a0, double* %a1) {
define <2 x double> @test_movsd_reg(<2 x double> %a0, <2 x double> %a1) {
; GENERIC-LABEL: test_movsd_reg:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] sched: [1:1.00]
; GENERIC-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movsd_reg:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] sched: [1:1.00]
; ATOM-NEXT: movaps %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -3562,43 +3562,43 @@ define <2 x double> @test_movsd_reg(<2 x double> %a0, <2 x double> %a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movsd_reg:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] sched: [1:1.00]
; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movsd_reg:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movsd_reg:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movsd_reg:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movsd_reg:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movsd_reg:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movsd_reg:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movsd_reg:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 0>
@@ -3607,70 +3607,70 @@ define <2 x double> @test_movsd_reg(<2 x double> %a0, <2 x double> %a1) {
define void @test_movupd(<2 x double> *%a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_movupd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movupd (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: movupd %xmm0, (%rsi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movupd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movupd (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: addpd %xmm0, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: movupd %xmm0, (%rsi) # sched: [2:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movupd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movupd (%rdi), %xmm0 # sched: [3:1.00]
; SLM-NEXT: addpd %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: movupd %xmm0, (%rsi) # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movupd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovupd (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vmovupd %xmm0, (%rsi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movupd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovupd (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movupd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovupd (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movupd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovupd (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movupd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovupd (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movupd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovupd (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movupd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovupd (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:0.50]
@@ -3683,61 +3683,61 @@ define void @test_movupd(<2 x double> *%a0, <2 x double> *%a1) {
define <2 x double> @test_mulpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_mulpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: mulpd %xmm1, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: mulpd (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_mulpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: mulpd %xmm1, %xmm0 # sched: [9:4.50]
; ATOM-NEXT: mulpd (%rdi), %xmm0 # sched: [10:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_mulpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: mulpd %xmm1, %xmm0 # sched: [5:2.00]
; SLM-NEXT: mulpd (%rdi), %xmm0 # sched: [8:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_mulpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mulpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; BROADWELL-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mulpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mulpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mulpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mulpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; ZNVER1-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3749,61 +3749,61 @@ define <2 x double> @test_mulpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
define double @test_mulsd(double %a0, double %a1, double *%a2) {
; GENERIC-LABEL: test_mulsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: mulsd %xmm1, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: mulsd (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_mulsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: mulsd %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: mulsd (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_mulsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: mulsd %xmm1, %xmm0 # sched: [5:2.00]
; SLM-NEXT: mulsd (%rdi), %xmm0 # sched: [8:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_mulsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mulsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mulsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; BROADWELL-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mulsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mulsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mulsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mulsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; ZNVER1-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3815,70 +3815,70 @@ define double @test_mulsd(double %a0, double %a1, double *%a2) {
define <2 x double> @test_orpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_orpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: orpd %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: orpd (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_orpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: orpd %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: orpd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_orpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: orpd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: orpd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_orpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_orpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_orpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_orpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_orpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_orpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_orpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -3896,13 +3896,13 @@ define <2 x double> @test_orpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
define <8 x i16> @test_packssdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_packssdw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: packssdw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: packssdw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_packssdw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: packssdw %xmm1, %xmm0 # sched: [1:1.00]
; ATOM-NEXT: packssdw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -3912,49 +3912,49 @@ define <8 x i16> @test_packssdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_packssdw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: packssdw %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: packssdw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_packssdw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packssdw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packssdw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packssdw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packssdw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_packssdw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_packssdw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3968,13 +3968,13 @@ declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind rea
define <16 x i8> @test_packsswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_packsswb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: packsswb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: packsswb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_packsswb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: packsswb %xmm1, %xmm0 # sched: [1:1.00]
; ATOM-NEXT: packsswb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -3984,49 +3984,49 @@ define <16 x i8> @test_packsswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_packsswb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: packsswb %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: packsswb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_packsswb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packsswb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packsswb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packsswb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packsswb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_packsswb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_packsswb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4040,13 +4040,13 @@ declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <16 x i8> @test_packuswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_packuswb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: packuswb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: packuswb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_packuswb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: packuswb %xmm1, %xmm0 # sched: [1:1.00]
; ATOM-NEXT: packuswb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4056,49 +4056,49 @@ define <16 x i8> @test_packuswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_packuswb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: packuswb %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: packuswb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_packuswb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packuswb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packuswb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packuswb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packuswb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_packuswb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_packuswb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4112,13 +4112,13 @@ declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <16 x i8> @test_paddb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_paddb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: paddb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddb %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: paddb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4128,49 +4128,49 @@ define <16 x i8> @test_paddb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: paddb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4182,13 +4182,13 @@ define <16 x i8> @test_paddb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
define <4 x i32> @test_paddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_paddd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: paddd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: paddd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4198,49 +4198,49 @@ define <4 x i32> @test_paddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: paddd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4252,61 +4252,61 @@ define <4 x i32> @test_paddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
define <2 x i64> @test_paddq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_paddq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: paddq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: paddq (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: paddq (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4318,13 +4318,13 @@ define <2 x i64> @test_paddq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
define <16 x i8> @test_paddsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_paddsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddsb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: paddsb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddsb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddsb %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: paddsb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4334,49 +4334,49 @@ define <16 x i8> @test_paddsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddsb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddsb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: paddsb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddsb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddsb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4389,13 +4389,13 @@ declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_paddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_paddsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddsw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: paddsw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddsw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: paddsw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4405,49 +4405,49 @@ define <8 x i16> @test_paddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddsw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: paddsw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4460,13 +4460,13 @@ declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_paddusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_paddusb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddusb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: paddusb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddusb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddusb %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: paddusb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4476,49 +4476,49 @@ define <16 x i8> @test_paddusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddusb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddusb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: paddusb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddusb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddusb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddusb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddusb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddusb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddusb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4531,13 +4531,13 @@ declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnon
define <8 x i16> @test_paddusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_paddusw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddusw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: paddusw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddusw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddusw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: paddusw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4547,49 +4547,49 @@ define <8 x i16> @test_paddusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddusw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddusw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: paddusw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddusw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddusw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddusw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddusw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddusw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddusw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4602,13 +4602,13 @@ declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_paddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_paddw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: paddw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_paddw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: paddw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: paddw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4618,49 +4618,49 @@ define <8 x i16> @test_paddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_paddw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: paddw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_paddw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_paddw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_paddw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4672,70 +4672,70 @@ define <8 x i16> @test_paddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <2 x i64> @test_pand(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_pand:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pand %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: pand (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pand:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pand %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: pand (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: paddq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pand:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pand %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pand (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pand:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pand:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pand:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pand:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pand:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pand:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pand:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -4749,7 +4749,7 @@ define <2 x i64> @test_pand(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
define <2 x i64> @test_pandn(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_pandn:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pandn %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: movdqa %xmm0, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: pandn (%rdi), %xmm1 # sched: [7:0.50]
@@ -4758,7 +4758,7 @@ define <2 x i64> @test_pandn(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pandn:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pandn %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: movdqa %xmm0, %xmm1 # sched: [1:0.50]
; ATOM-NEXT: pandn (%rdi), %xmm1 # sched: [1:1.00]
@@ -4767,7 +4767,7 @@ define <2 x i64> @test_pandn(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pandn:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pandn %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: movdqa %xmm0, %xmm1 # sched: [1:0.50]
; SLM-NEXT: pandn (%rdi), %xmm1 # sched: [4:1.00]
@@ -4776,49 +4776,49 @@ define <2 x i64> @test_pandn(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pandn:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pandn:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pandn:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pandn:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pandn:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [7:0.50]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pandn:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pandn:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -4834,13 +4834,13 @@ define <2 x i64> @test_pandn(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
define <16 x i8> @test_pavgb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pavgb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pavgb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pavgb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pavgb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pavgb %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: pavgb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4850,49 +4850,49 @@ define <16 x i8> @test_pavgb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pavgb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pavgb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pavgb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pavgb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pavgb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pavgb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pavgb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pavgb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pavgb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4914,13 +4914,13 @@ define <16 x i8> @test_pavgb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
define <8 x i16> @test_pavgw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pavgw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pavgw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pavgw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pavgw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pavgw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: pavgw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -4930,49 +4930,49 @@ define <8 x i16> @test_pavgw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pavgw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pavgw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pavgw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pavgw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pavgw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pavgw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pavgw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pavgw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pavgw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4994,14 +4994,14 @@ define <8 x i16> @test_pavgw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <16 x i8> @test_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpeqb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpeqb %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pcmpeqb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpeqb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpeqb %xmm0, %xmm1 # sched: [1:0.50]
; ATOM-NEXT: pcmpeqb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
@@ -5010,42 +5010,42 @@ define <16 x i8> @test_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpeqb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpeqb %xmm0, %xmm1 # sched: [1:0.50]
; SLM-NEXT: pcmpeqb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpeqb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SANDY-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpeqb (%rdi), %xmm0, %k1 # sched: [9:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
@@ -5053,14 +5053,14 @@ define <16 x i8> @test_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpeqb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpeqb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -5075,14 +5075,14 @@ define <16 x i8> @test_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
define <4 x i32> @test_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pcmpeqd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpeqd %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pcmpeqd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpeqd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpeqd %xmm0, %xmm1 # sched: [1:0.50]
; ATOM-NEXT: pcmpeqd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
@@ -5091,42 +5091,42 @@ define <4 x i32> @test_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpeqd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpeqd %xmm0, %xmm1 # sched: [1:0.50]
; SLM-NEXT: pcmpeqd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpeqd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SANDY-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpeqd (%rdi), %xmm0, %k1 # sched: [9:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
@@ -5134,14 +5134,14 @@ define <4 x i32> @test_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpeqd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpeqd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -5156,14 +5156,14 @@ define <4 x i32> @test_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
define <8 x i16> @test_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pcmpeqw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpeqw %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pcmpeqw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpeqw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pcmpeqw %xmm0, %xmm1 # sched: [1:0.50]
; ATOM-NEXT: pcmpeqw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
@@ -5172,42 +5172,42 @@ define <8 x i16> @test_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpeqw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpeqw %xmm0, %xmm1 # sched: [1:0.50]
; SLM-NEXT: pcmpeqw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpeqw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SANDY-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpeqw (%rdi), %xmm0, %k1 # sched: [9:1.00]
; SKX-NEXT: korb %k1, %k0, %k0 # sched: [1:1.00]
@@ -5215,14 +5215,14 @@ define <8 x i16> @test_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpeqw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpeqw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -5237,7 +5237,7 @@ define <8 x i16> @test_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpgtb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: pcmpgtb %xmm1, %xmm2 # sched: [1:0.50]
; GENERIC-NEXT: pcmpgtb (%rdi), %xmm0 # sched: [7:0.50]
@@ -5245,7 +5245,7 @@ define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpgtb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.50]
; ATOM-NEXT: pcmpgtb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: pcmpgtb %xmm1, %xmm2 # sched: [1:0.50]
@@ -5253,7 +5253,7 @@ define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpgtb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.50]
; SLM-NEXT: pcmpgtb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: pcmpgtb %xmm1, %xmm2 # sched: [1:0.50]
@@ -5261,35 +5261,35 @@ define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpgtb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SANDY-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpgtb (%rdi), %xmm0, %k1 # sched: [9:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
@@ -5297,14 +5297,14 @@ define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpgtb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpgtb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -5319,7 +5319,7 @@ define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pcmpgtd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: pcmpgtd %xmm1, %xmm2 # sched: [1:0.50]
; GENERIC-NEXT: pcmpeqd (%rdi), %xmm0 # sched: [7:0.50]
@@ -5327,7 +5327,7 @@ define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpgtd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.50]
; ATOM-NEXT: pcmpeqd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: pcmpgtd %xmm1, %xmm2 # sched: [1:0.50]
@@ -5335,7 +5335,7 @@ define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpgtd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.50]
; SLM-NEXT: pcmpeqd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: pcmpgtd %xmm1, %xmm2 # sched: [1:0.50]
@@ -5343,35 +5343,35 @@ define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpgtd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SANDY-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpeqd (%rdi), %xmm0, %k1 # sched: [9:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
@@ -5379,14 +5379,14 @@ define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpgtd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpgtd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -5401,7 +5401,7 @@ define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pcmpgtw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.33]
; GENERIC-NEXT: pcmpgtw %xmm1, %xmm2 # sched: [1:0.50]
; GENERIC-NEXT: pcmpgtw (%rdi), %xmm0 # sched: [7:0.50]
@@ -5409,7 +5409,7 @@ define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pcmpgtw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.50]
; ATOM-NEXT: pcmpgtw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: pcmpgtw %xmm1, %xmm2 # sched: [1:0.50]
@@ -5417,7 +5417,7 @@ define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pcmpgtw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movdqa %xmm0, %xmm2 # sched: [1:0.50]
; SLM-NEXT: pcmpgtw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: pcmpgtw %xmm1, %xmm2 # sched: [1:0.50]
@@ -5425,35 +5425,35 @@ define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpgtw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SANDY-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpgtw (%rdi), %xmm0, %k1 # sched: [9:1.00]
; SKX-NEXT: korb %k1, %k0, %k0 # sched: [1:1.00]
@@ -5461,14 +5461,14 @@ define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpgtw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpgtw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -5483,61 +5483,61 @@ define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define i16 @test_pextrw(<8 x i16> %a0) {
; GENERIC-LABEL: test_pextrw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pextrw $6, %xmm0, %eax # sched: [3:1.00]
; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pextrw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:2.00]
; ATOM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pextrw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:1.00]
; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pextrw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5547,13 +5547,13 @@ define i16 @test_pextrw(<8 x i16> %a0) {
define <8 x i16> @test_pinsrw(<8 x i16> %a0, i16 %a1, i16 *%a2) {
; GENERIC-LABEL: test_pinsrw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pinsrw $1, %edi, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: pinsrw $3, (%rsi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pinsrw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pinsrw $1, %edi, %xmm0 # sched: [1:1.00]
; ATOM-NEXT: pinsrw $3, (%rsi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -5563,49 +5563,49 @@ define <8 x i16> @test_pinsrw(<8 x i16> %a0, i16 %a1, i16 *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pinsrw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pinsrw $1, %edi, %xmm0 # sched: [1:1.00]
; SLM-NEXT: pinsrw $3, (%rsi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pinsrw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pinsrw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; BROADWELL-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pinsrw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; SKYLAKE-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pinsrw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; SKX-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pinsrw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pinsrw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5617,13 +5617,13 @@ define <8 x i16> @test_pinsrw(<8 x i16> %a0, i16 %a1, i16 *%a2) {
define <4 x i32> @test_pmaddwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pmaddwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaddwd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmaddwd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmaddwd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmaddwd %xmm1, %xmm0
; ATOM-NEXT: pmaddwd (%rdi), %xmm0
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -5637,49 +5637,49 @@ define <4 x i32> @test_pmaddwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmaddwd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaddwd %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmaddwd (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaddwd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaddwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaddwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaddwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaddwd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaddwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5693,13 +5693,13 @@ declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_pmaxsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pmaxsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaxsw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pmaxsw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmaxsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmaxsw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: pmaxsw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -5709,49 +5709,49 @@ define <8 x i16> @test_pmaxsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmaxsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaxsw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pmaxsw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaxsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaxsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaxsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5764,13 +5764,13 @@ declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_pmaxub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pmaxub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaxub %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pmaxub (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmaxub:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmaxub %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: pmaxub (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -5780,49 +5780,49 @@ define <16 x i8> @test_pmaxub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmaxub:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaxub %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pmaxub (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaxub:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaxub:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaxub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5835,13 +5835,13 @@ declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_pminsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pminsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pminsw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pminsw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pminsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pminsw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: pminsw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -5851,49 +5851,49 @@ define <8 x i16> @test_pminsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pminsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pminsw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pminsw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pminsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pminsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pminsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5906,13 +5906,13 @@ declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_pminub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pminub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pminub %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pminub (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pminub:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pminub %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: pminub (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -5922,49 +5922,49 @@ define <16 x i8> @test_pminub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pminub:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pminub %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pminub (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pminub:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pminub:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pminub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5977,54 +5977,54 @@ declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
define i32 @test_pmovmskb(<16 x i8> %a0) {
; GENERIC-LABEL: test_pmovmskb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovmskb %xmm0, %eax # sched: [2:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmovmskb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmovmskb %xmm0, %eax # sched: [3:3.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmovmskb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovmskb %xmm0, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovmskb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovmskb %xmm0, %eax # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovmskb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovmskb %xmm0, %eax # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovmskb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovmskb %xmm0, %eax # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovmskb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovmskb %xmm0, %eax # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovmskb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovmskb %xmm0, %eax # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovmskb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovmskb %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovmskb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovmskb %xmm0, %eax # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
@@ -6034,61 +6034,61 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
define <8 x i16> @test_pmulhuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmulhuw %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmulhuw (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmulhuw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmulhuw %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: pmulhuw (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmulhuw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmulhuw %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmulhuw (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmulhuw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmulhuw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmulhuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6101,61 +6101,61 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_pmulhw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmulhw %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmulhw (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmulhw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmulhw %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: pmulhw (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmulhw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmulhw %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmulhw (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmulhw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmulhw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmulhw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6168,61 +6168,61 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_pmullw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pmullw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmullw %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmullw (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmullw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmullw %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: pmullw (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmullw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmullw %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmullw (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmullw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmullw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmullw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmullw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmullw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmullw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmullw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6234,13 +6234,13 @@ define <8 x i16> @test_pmullw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <2 x i64> @test_pmuludq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pmuludq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmuludq %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmuludq (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmuludq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmuludq %xmm1, %xmm0
; ATOM-NEXT: pmuludq (%rdi), %xmm0
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -6254,49 +6254,49 @@ define <2 x i64> @test_pmuludq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmuludq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmuludq %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmuludq (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmuludq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuludq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmuludq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmuludq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmuludq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmuludq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmuludq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6310,70 +6310,70 @@ declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x i64> @test_por(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_por:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: por (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_por:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: por (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: paddq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_por:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: por (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_por:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_por:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_por:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_por:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_por:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_por:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_por:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -6387,13 +6387,13 @@ define <2 x i64> @test_por(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
define <2 x i64> @test_psadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_psadbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psadbw %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: psadbw (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psadbw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psadbw %xmm1, %xmm0
; ATOM-NEXT: psadbw (%rdi), %xmm0
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -6407,49 +6407,49 @@ define <2 x i64> @test_psadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psadbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psadbw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psadbw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psadbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psadbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psadbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psadbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psadbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psadbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psadbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6463,14 +6463,14 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_pshufd(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pshufd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2] sched: [1:0.50]
; GENERIC-NEXT: pshufd {{.*#+}} xmm0 = mem[3,2,1,0] sched: [7:0.50]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pshufd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [1:1.00]
; ATOM-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:1.00]
; ATOM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -6478,7 +6478,7 @@ define <4 x i32> @test_pshufd(<4 x i32> %a0, <4 x i32> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pshufd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [4:1.00]
; SLM-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:1.00]
; SLM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -6486,49 +6486,49 @@ define <4 x i32> @test_pshufd(<4 x i32> %a0, <4 x i32> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pshufd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.50]
; SANDY-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:1.00]
; HASWELL-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [1:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:1.00]
; BROADWELL-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:1.00]
; SKYLAKE-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:1.00]
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pshufd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00]
; BTVER2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pshufd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
; ZNVER1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -6542,14 +6542,14 @@ define <4 x i32> @test_pshufd(<4 x i32> %a0, <4 x i32> *%a1) {
define <8 x i16> @test_pshufhw(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pshufhw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.50]
; GENERIC-NEXT: pshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,6,5,4] sched: [7:0.50]
; GENERIC-NEXT: paddw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pshufhw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [1:1.00]
; ATOM-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00]
; ATOM-NEXT: paddw %xmm0, %xmm1 # sched: [1:0.50]
@@ -6557,7 +6557,7 @@ define <8 x i16> @test_pshufhw(<8 x i16> %a0, <8 x i16> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pshufhw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [4:1.00]
; SLM-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00]
; SLM-NEXT: paddw %xmm0, %xmm1 # sched: [1:0.50]
@@ -6565,49 +6565,49 @@ define <8 x i16> @test_pshufhw(<8 x i16> %a0, <8 x i16> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pshufhw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.50]
; SANDY-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [7:0.50]
; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufhw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00]
; HASWELL-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [1:1.00]
; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufhw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00]
; BROADWELL-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [6:1.00]
; BROADWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufhw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00]
; SKYLAKE-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [7:1.00]
; SKYLAKE-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufhw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [7:1.00]
; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pshufhw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [6:1.00]
; BTVER2-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.50]
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pshufhw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [8:0.50]
; ZNVER1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -6621,14 +6621,14 @@ define <8 x i16> @test_pshufhw(<8 x i16> %a0, <8 x i16> *%a1) {
define <8 x i16> @test_pshuflw(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pshuflw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.50]
; GENERIC-NEXT: pshuflw {{.*#+}} xmm0 = mem[3,2,1,0,4,5,6,7] sched: [7:0.50]
; GENERIC-NEXT: paddw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pshuflw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [1:1.00]
; ATOM-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:1.00]
; ATOM-NEXT: paddw %xmm0, %xmm1 # sched: [1:0.50]
@@ -6636,7 +6636,7 @@ define <8 x i16> @test_pshuflw(<8 x i16> %a0, <8 x i16> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pshuflw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [4:1.00]
; SLM-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:1.00]
; SLM-NEXT: paddw %xmm0, %xmm1 # sched: [1:0.50]
@@ -6644,49 +6644,49 @@ define <8 x i16> @test_pshuflw(<8 x i16> %a0, <8 x i16> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pshuflw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.50]
; SANDY-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [7:0.50]
; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshuflw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:1.00]
; HASWELL-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [1:1.00]
; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshuflw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:1.00]
; BROADWELL-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [6:1.00]
; BROADWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshuflw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:1.00]
; SKYLAKE-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [7:1.00]
; SKYLAKE-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshuflw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [7:1.00]
; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pshuflw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [6:1.00]
; BTVER2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.50]
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pshuflw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [8:0.50]
; ZNVER1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -6700,70 +6700,70 @@ define <8 x i16> @test_pshuflw(<8 x i16> %a0, <8 x i16> *%a1) {
define <4 x i32> @test_pslld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pslld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pslld %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: pslld (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: pslld $2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pslld:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pslld %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: pslld (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: pslld $2, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pslld:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pslld %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: pslld (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: pslld $2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pslld:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pslld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pslld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pslld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKX-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pslld:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pslld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.25]
@@ -6779,12 +6779,12 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
define <4 x i32> @test_pslldq(<4 x i32> %a0) {
; GENERIC-LABEL: test_pslldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pslldq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -6795,42 +6795,42 @@ define <4 x i32> @test_pslldq(<4 x i32> %a0) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pslldq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pslldq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pslldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pslldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pslldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pslldq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pslldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
@@ -6839,70 +6839,70 @@ define <4 x i32> @test_pslldq(<4 x i32> %a0) {
define <2 x i64> @test_psllq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psllq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psllq %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: psllq (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: psllq $2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psllq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psllq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: psllq (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: psllq $2, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psllq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psllq %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: psllq (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: psllq $2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psllq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKX-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psllq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psllq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.25]
@@ -6918,70 +6918,70 @@ declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
define <8 x i16> @test_psllw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psllw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psllw %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: psllw (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: psllw $2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psllw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psllw %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: psllw (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: psllw $2, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psllw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psllw %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: psllw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: psllw $2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psllw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKX-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psllw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psllw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.25]
@@ -6997,70 +6997,70 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
define <4 x i32> @test_psrad(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrad:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrad %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: psrad (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: psrad $2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrad:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrad %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: psrad (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: psrad $2, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrad:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrad %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: psrad (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: psrad $2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrad:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrad:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrad:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrad:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrad:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKX-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrad:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrad:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.25]
@@ -7076,70 +7076,70 @@ declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone
define <8 x i16> @test_psraw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psraw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psraw %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: psraw (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: psraw $2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psraw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psraw %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: psraw (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: psraw $2, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psraw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psraw %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: psraw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: psraw $2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psraw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psraw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psraw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psraw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psraw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKX-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psraw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psraw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.25]
@@ -7155,70 +7155,70 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
define <4 x i32> @test_psrld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrld %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: psrld (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: psrld $2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrld:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrld %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: psrld (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: psrld $2, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrld:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrld %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: psrld (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: psrld $2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrld:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKX-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrld:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.25]
@@ -7234,12 +7234,12 @@ declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone
define <4 x i32> @test_psrldq(<4 x i32> %a0) {
; GENERIC-LABEL: test_psrldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrldq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -7250,42 +7250,42 @@ define <4 x i32> @test_psrldq(<4 x i32> %a0) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrldq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrldq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrldq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
@@ -7294,70 +7294,70 @@ define <4 x i32> @test_psrldq(<4 x i32> %a0) {
define <2 x i64> @test_psrlq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psrlq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrlq %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: psrlq (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: psrlq $2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrlq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrlq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: psrlq (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: psrlq $2, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrlq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrlq %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: psrlq (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: psrlq $2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrlq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKX-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrlq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrlq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.25]
@@ -7373,70 +7373,70 @@ declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone
define <8 x i16> @test_psrlw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psrlw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psrlw %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: psrlw (%rdi), %xmm0 # sched: [8:1.00]
; GENERIC-NEXT: psrlw $2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psrlw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psrlw %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: psrlw (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: psrlw $2, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psrlw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psrlw %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: psrlw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: psrlw $2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psrlw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKYLAKE-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SKX-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psrlw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psrlw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.25]
@@ -7452,13 +7452,13 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
define <16 x i8> @test_psubb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_psubb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psubb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubb %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psubb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -7468,49 +7468,49 @@ define <16 x i8> @test_psubb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psubb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7522,13 +7522,13 @@ define <16 x i8> @test_psubb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
define <4 x i32> @test_psubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psubd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psubd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubd %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psubd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -7538,49 +7538,49 @@ define <4 x i32> @test_psubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psubd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7592,61 +7592,61 @@ define <4 x i32> @test_psubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
define <2 x i64> @test_psubq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psubq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psubq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: psubq (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psubq (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7658,13 +7658,13 @@ define <2 x i64> @test_psubq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
define <16 x i8> @test_psubsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_psubsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubsb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psubsb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubsb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubsb %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psubsb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -7674,49 +7674,49 @@ define <16 x i8> @test_psubsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubsb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubsb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psubsb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubsb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubsb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7729,13 +7729,13 @@ declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_psubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubsw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psubsw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubsw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psubsw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -7745,49 +7745,49 @@ define <8 x i16> @test_psubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubsw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psubsw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7800,13 +7800,13 @@ declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_psubusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_psubusb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubusb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psubusb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubusb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubusb %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psubusb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -7816,49 +7816,49 @@ define <16 x i8> @test_psubusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubusb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubusb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psubusb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubusb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubusb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubusb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubusb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubusb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubusb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7871,13 +7871,13 @@ declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnon
define <8 x i16> @test_psubusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psubusw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubusw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psubusw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubusw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubusw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psubusw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -7887,49 +7887,49 @@ define <8 x i16> @test_psubusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubusw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubusw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psubusw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubusw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubusw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubusw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubusw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubusw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubusw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7942,13 +7942,13 @@ declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_psubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psubw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psubw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psubw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psubw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psubw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psubw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -7958,49 +7958,49 @@ define <8 x i16> @test_psubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psubw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psubw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psubw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psubw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psubw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psubw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -8012,13 +8012,13 @@ define <8 x i16> @test_psubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <16 x i8> @test_punpckhbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_punpckhbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.50]
; GENERIC-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckhbw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:1.00]
; ATOM-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -8028,49 +8028,49 @@ define <16 x i8> @test_punpckhbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckhbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:1.00]
; SLM-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckhbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.50]
; SANDY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:1.00]
; SKX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckhbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.50]
; BTVER2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckhbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -8082,14 +8082,14 @@ define <16 x i8> @test_punpckhbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
define <4 x i32> @test_punpckhdq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_punpckhdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; GENERIC-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [7:0.50]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckhdq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; ATOM-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [1:1.00]
; ATOM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
@@ -8098,56 +8098,56 @@ define <4 x i32> @test_punpckhdq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckhdq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SLM-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [4:1.00]
; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckhdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; SANDY-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [1:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [6:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [7:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SKX-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [7:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckhdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; BTVER2-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckhdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -8161,70 +8161,70 @@ define <4 x i32> @test_punpckhdq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
define <2 x i64> @test_punpckhqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_punpckhqdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
; GENERIC-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckhqdq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; ATOM-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [1:1.00]
; ATOM-NEXT: paddq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckhqdq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; SLM-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [4:1.00]
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckhqdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
; SANDY-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhqdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhqdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhqdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhqdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; SKX-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckhqdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
; BTVER2-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckhqdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -8238,13 +8238,13 @@ define <2 x i64> @test_punpckhqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2)
define <8 x i16> @test_punpckhwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_punpckhwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
; GENERIC-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckhwd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; ATOM-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -8254,49 +8254,49 @@ define <8 x i16> @test_punpckhwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckhwd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; SLM-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckhwd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
; SANDY-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; SKX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckhwd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
; BTVER2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckhwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -8308,13 +8308,13 @@ define <8 x i16> @test_punpckhwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <16 x i8> @test_punpcklbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_punpcklbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
; GENERIC-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpcklbw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; ATOM-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -8324,49 +8324,49 @@ define <16 x i8> @test_punpcklbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpcklbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; SLM-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpcklbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
; SANDY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:1.00]
; SKX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpcklbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
; BTVER2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpcklbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -8378,14 +8378,14 @@ define <16 x i8> @test_punpcklbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
define <4 x i32> @test_punpckldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_punpckldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [7:0.50]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpckldq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [1:1.00]
; ATOM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
@@ -8394,56 +8394,56 @@ define <4 x i32> @test_punpckldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpckldq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SLM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [4:1.00]
; SLM-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpckldq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
; SANDY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; HASWELL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [1:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [6:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [7:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; SKX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [7:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpckldq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
; BTVER2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpckldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -8457,70 +8457,70 @@ define <4 x i32> @test_punpckldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
define <2 x i64> @test_punpcklqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_punpcklqdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpcklqdq:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; ATOM-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
; ATOM-NEXT: paddq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpcklqdq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SLM-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00]
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpcklqdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; SANDY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklqdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklqdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklqdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklqdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SKX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpcklqdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; BTVER2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpcklqdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -8534,13 +8534,13 @@ define <2 x i64> @test_punpcklqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2)
define <8 x i16> @test_punpcklwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_punpcklwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; GENERIC-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_punpcklwd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; ATOM-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -8550,49 +8550,49 @@ define <8 x i16> @test_punpcklwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_punpcklwd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SLM-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_punpcklwd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; SANDY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; SKX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_punpcklwd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; BTVER2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_punpcklwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -8604,70 +8604,70 @@ define <8 x i16> @test_punpcklwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <2 x i64> @test_pxor(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_pxor:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pxor %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: pxor (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pxor:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pxor %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: pxor (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: paddq %xmm1, %xmm0 # sched: [2:1.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pxor:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pxor %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pxor (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pxor:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pxor:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pxor:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pxor:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pxor:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pxor:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pxor:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -8681,70 +8681,70 @@ define <2 x i64> @test_pxor(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
define <2 x double> @test_shufpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_shufpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
; GENERIC-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_shufpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
; ATOM-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [1:1.00]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_shufpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
; SLM-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [4:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_shufpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
; SANDY-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_shufpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
; HASWELL-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_shufpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
; BROADWELL-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_shufpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
; SKYLAKE-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [7:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_shufpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:1.00]
; SKX-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [7:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_shufpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:0.50]
; BTVER2-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shufpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:0.50]
; ZNVER1-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -8758,21 +8758,21 @@ define <2 x double> @test_shufpd(<2 x double> %a0, <2 x double> %a1, <2 x double
define <2 x double> @test_sqrtpd(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_sqrtpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sqrtpd %xmm0, %xmm1 # sched: [22:1.00]
; GENERIC-NEXT: sqrtpd (%rdi), %xmm0 # sched: [28:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_sqrtpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: sqrtpd %xmm0, %xmm1 # sched: [125:62.50]
; ATOM-NEXT: sqrtpd (%rdi), %xmm0 # sched: [125:62.50]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_sqrtpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: sqrtpd (%rdi), %xmm1 # sched: [18:1.00]
; SLM-NEXT: sqrtpd %xmm0, %xmm0 # sched: [15:1.00]
; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00]
@@ -8780,49 +8780,49 @@ define <2 x double> @test_sqrtpd(<2 x double> %a0, <2 x double> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_sqrtpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [22:1.00]
; SANDY-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [28:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [21:1.00]
; HASWELL-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [21:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sqrtpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [21:1.00]
; BROADWELL-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [26:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sqrtpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [18:1.00]
; SKYLAKE-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [24:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_sqrtpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [18:1.00]
; SKX-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [24:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_sqrtpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [26:21.00]
; BTVER2-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [21:21.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [20:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -8839,7 +8839,7 @@ declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_sqrtsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: sqrtsd %xmm0, %xmm0 # sched: [22:1.00]
; GENERIC-NEXT: movapd (%rdi), %xmm1 # sched: [6:0.50]
; GENERIC-NEXT: sqrtsd %xmm1, %xmm1 # sched: [22:1.00]
@@ -8847,7 +8847,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_sqrtsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movapd (%rdi), %xmm1 # sched: [1:1.00]
; ATOM-NEXT: sqrtsd %xmm0, %xmm0
; ATOM-NEXT: sqrtsd %xmm1, %xmm1
@@ -8855,7 +8855,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_sqrtsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movapd (%rdi), %xmm1 # sched: [3:1.00]
; SLM-NEXT: sqrtsd %xmm0, %xmm0 # sched: [18:1.00]
; SLM-NEXT: sqrtsd %xmm1, %xmm1 # sched: [18:1.00]
@@ -8863,7 +8863,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_sqrtsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [21:1.00]
; SANDY-NEXT: vmovapd (%rdi), %xmm1 # sched: [6:0.50]
; SANDY-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [21:1.00]
@@ -8871,7 +8871,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_sqrtsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [21:1.00]
; HASWELL-NEXT: vmovapd (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [21:1.00]
@@ -8879,7 +8879,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_sqrtsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [21:1.00]
; BROADWELL-NEXT: vmovapd (%rdi), %xmm1 # sched: [5:0.50]
; BROADWELL-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [21:1.00]
@@ -8887,7 +8887,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_sqrtsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [18:1.00]
; SKYLAKE-NEXT: vmovapd (%rdi), %xmm1 # sched: [6:0.50]
; SKYLAKE-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [18:1.00]
@@ -8895,7 +8895,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_sqrtsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [18:1.00]
; SKX-NEXT: vmovapd (%rdi), %xmm1 # sched: [6:0.50]
; SKX-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [18:1.00]
@@ -8903,7 +8903,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_sqrtsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovapd (%rdi), %xmm1 # sched: [5:1.00]
; BTVER2-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [26:21.00]
; BTVER2-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [26:21.00]
@@ -8911,7 +8911,7 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovapd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
@@ -8927,61 +8927,61 @@ declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
define <2 x double> @test_subpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_subpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: subpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: subpd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_subpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: subpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: subpd (%rdi), %xmm0 # sched: [7:3.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_subpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: subpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: subpd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_subpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_subpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_subpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_subpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_subpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -8993,61 +8993,61 @@ define <2 x double> @test_subpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
define double @test_subsd(double %a0, double %a1, double *%a2) {
; GENERIC-LABEL: test_subsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: subsd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: subsd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_subsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: subsd %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: subsd (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_subsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: subsd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: subsd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_subsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_subsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_subsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_subsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_subsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_subsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -9059,7 +9059,7 @@ define double @test_subsd(double %a0, double %a1, double *%a2) {
define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_ucomisd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: ucomisd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: setnp %al # sched: [1:0.50]
; GENERIC-NEXT: sete %cl # sched: [1:0.50]
@@ -9073,7 +9073,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_ucomisd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: ucomisd %xmm1, %xmm0 # sched: [9:4.50]
; ATOM-NEXT: setnp %al # sched: [1:0.50]
; ATOM-NEXT: sete %cl # sched: [1:0.50]
@@ -9087,7 +9087,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_ucomisd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: ucomisd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: setnp %al # sched: [1:0.50]
; SLM-NEXT: sete %cl # sched: [1:0.50]
@@ -9101,7 +9101,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ucomisd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: setnp %al # sched: [1:0.50]
; SANDY-NEXT: sete %cl # sched: [1:0.50]
@@ -9115,7 +9115,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ucomisd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: setnp %al # sched: [1:0.50]
; HASWELL-NEXT: sete %cl # sched: [1:0.50]
@@ -9129,7 +9129,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ucomisd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: setnp %al # sched: [1:0.50]
; BROADWELL-NEXT: sete %cl # sched: [1:0.50]
@@ -9143,7 +9143,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ucomisd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: setnp %al # sched: [1:0.50]
; SKYLAKE-NEXT: sete %cl # sched: [1:0.50]
@@ -9157,7 +9157,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_ucomisd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
; SKX-NEXT: setnp %al # sched: [1:0.50]
; SKX-NEXT: sete %cl # sched: [1:0.50]
@@ -9171,7 +9171,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ucomisd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setnp %al # sched: [1:0.50]
; BTVER2-NEXT: sete %cl # sched: [1:0.50]
@@ -9185,7 +9185,7 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ucomisd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
@@ -9207,70 +9207,70 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readn
define <2 x double> @test_unpckhpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_unpckhpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; GENERIC-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_unpckhpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; ATOM-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [1:1.00]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_unpckhpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; SLM-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [4:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_unpckhpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; SANDY-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpckhpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; HASWELL-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_unpckhpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; BROADWELL-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_unpckhpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; SKYLAKE-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_unpckhpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; SKX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [7:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_unpckhpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpckhpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -9284,7 +9284,7 @@ define <2 x double> @test_unpckhpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_unpcklpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; GENERIC-NEXT: movapd %xmm0, %xmm1 # sched: [1:1.00]
; GENERIC-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
@@ -9293,7 +9293,7 @@ define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_unpcklpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; ATOM-NEXT: movapd %xmm0, %xmm1 # sched: [1:0.50]
; ATOM-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
@@ -9302,7 +9302,7 @@ define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_unpcklpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SLM-NEXT: movapd %xmm0, %xmm1 # sched: [1:1.00]
; SLM-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00]
@@ -9311,49 +9311,49 @@ define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_unpcklpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SANDY-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_unpcklpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; HASWELL-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_unpcklpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; BROADWELL-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_unpcklpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SKYLAKE-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [7:1.00]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_unpcklpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; SKX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [7:1.00]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_unpcklpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpcklpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -9367,70 +9367,70 @@ define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
define <2 x double> @test_xorpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_xorpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: xorpd %xmm1, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: xorpd (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_xorpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: xorpd %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: xorpd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: addpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_xorpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: xorpd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: xorpd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_xorpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SANDY-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_xorpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_xorpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_xorpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_xorpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_xorpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_xorpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
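
(Notation sketch for the sse2-schedule.ll hunks above, not itself part of the
patch: the trailing "sched: [A:B]" comments that llc's schedule printing
appends to each instruction read as [latency : reciprocal throughput] under
the CPU model selected by the matching RUN line — one RUN line per CHECK
prefix, GENERIC through ZNVER1; the RUN lines fall outside these hunks. For
example, on the Znver1 model:

    ; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
    ;   reads as: result ready after 3 cycles, and one such vaddpd can
    ;   start per cycle (reciprocal throughput 1.00).

The "# BB#0:" to "# %bb.0:" label-comment change is orthogonal to these
numbers; the sched annotations pass through as unchanged context.)
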
diff --git a/test/CodeGen/X86/sse2-vector-shifts.ll b/test/CodeGen/X86/sse2-vector-shifts.ll
index c2bb239639a..82d4b7721d9 100644
--- a/test/CodeGen/X86/sse2-vector-shifts.ll
+++ b/test/CodeGen/X86/sse2-vector-shifts.ll
@@ -5,7 +5,7 @@
define <8 x i16> @test_sllw_1(<8 x i16> %InVec) {
; CHECK-LABEL: test_sllw_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retq
entry:
%shl = shl <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -14,7 +14,7 @@ entry:
define <8 x i16> @test_sllw_2(<8 x i16> %InVec) {
; CHECK-LABEL: test_sllw_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: paddw %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -24,7 +24,7 @@ entry:
define <8 x i16> @test_sllw_3(<8 x i16> %InVec) {
; CHECK-LABEL: test_sllw_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psllw $15, %xmm0
; CHECK-NEXT: retq
entry:
@@ -34,7 +34,7 @@ entry:
define <4 x i32> @test_slld_1(<4 x i32> %InVec) {
; CHECK-LABEL: test_slld_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retq
entry:
%shl = shl <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
@@ -43,7 +43,7 @@ entry:
define <4 x i32> @test_slld_2(<4 x i32> %InVec) {
; CHECK-LABEL: test_slld_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: paddd %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -53,7 +53,7 @@ entry:
define <4 x i32> @test_slld_3(<4 x i32> %InVec) {
; CHECK-LABEL: test_slld_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pslld $31, %xmm0
; CHECK-NEXT: retq
entry:
@@ -63,7 +63,7 @@ entry:
define <2 x i64> @test_sllq_1(<2 x i64> %InVec) {
; CHECK-LABEL: test_sllq_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retq
entry:
%shl = shl <2 x i64> %InVec, <i64 0, i64 0>
@@ -72,7 +72,7 @@ entry:
define <2 x i64> @test_sllq_2(<2 x i64> %InVec) {
; CHECK-LABEL: test_sllq_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: paddq %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -82,7 +82,7 @@ entry:
define <2 x i64> @test_sllq_3(<2 x i64> %InVec) {
; CHECK-LABEL: test_sllq_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psllq $63, %xmm0
; CHECK-NEXT: retq
entry:
@@ -94,7 +94,7 @@ entry:
define <8 x i16> @test_sraw_1(<8 x i16> %InVec) {
; CHECK-LABEL: test_sraw_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retq
entry:
%shl = ashr <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -103,7 +103,7 @@ entry:
define <8 x i16> @test_sraw_2(<8 x i16> %InVec) {
; CHECK-LABEL: test_sraw_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psraw $1, %xmm0
; CHECK-NEXT: retq
entry:
@@ -113,7 +113,7 @@ entry:
define <8 x i16> @test_sraw_3(<8 x i16> %InVec) {
; CHECK-LABEL: test_sraw_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psraw $15, %xmm0
; CHECK-NEXT: retq
entry:
@@ -123,7 +123,7 @@ entry:
define <4 x i32> @test_srad_1(<4 x i32> %InVec) {
; CHECK-LABEL: test_srad_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retq
entry:
%shl = ashr <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
@@ -132,7 +132,7 @@ entry:
define <4 x i32> @test_srad_2(<4 x i32> %InVec) {
; CHECK-LABEL: test_srad_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psrad $1, %xmm0
; CHECK-NEXT: retq
entry:
@@ -142,7 +142,7 @@ entry:
define <4 x i32> @test_srad_3(<4 x i32> %InVec) {
; CHECK-LABEL: test_srad_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psrad $31, %xmm0
; CHECK-NEXT: retq
entry:
@@ -154,7 +154,7 @@ entry:
define <8 x i16> @test_srlw_1(<8 x i16> %InVec) {
; CHECK-LABEL: test_srlw_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retq
entry:
%shl = lshr <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -163,7 +163,7 @@ entry:
define <8 x i16> @test_srlw_2(<8 x i16> %InVec) {
; CHECK-LABEL: test_srlw_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psrlw $1, %xmm0
; CHECK-NEXT: retq
entry:
@@ -173,7 +173,7 @@ entry:
define <8 x i16> @test_srlw_3(<8 x i16> %InVec) {
; CHECK-LABEL: test_srlw_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psrlw $15, %xmm0
; CHECK-NEXT: retq
entry:
@@ -183,7 +183,7 @@ entry:
define <4 x i32> @test_srld_1(<4 x i32> %InVec) {
; CHECK-LABEL: test_srld_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retq
entry:
%shl = lshr <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
@@ -192,7 +192,7 @@ entry:
define <4 x i32> @test_srld_2(<4 x i32> %InVec) {
; CHECK-LABEL: test_srld_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psrld $1, %xmm0
; CHECK-NEXT: retq
entry:
@@ -202,7 +202,7 @@ entry:
define <4 x i32> @test_srld_3(<4 x i32> %InVec) {
; CHECK-LABEL: test_srld_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psrld $31, %xmm0
; CHECK-NEXT: retq
entry:
@@ -212,7 +212,7 @@ entry:
define <2 x i64> @test_srlq_1(<2 x i64> %InVec) {
; CHECK-LABEL: test_srlq_1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: retq
entry:
%shl = lshr <2 x i64> %InVec, <i64 0, i64 0>
@@ -221,7 +221,7 @@ entry:
define <2 x i64> @test_srlq_2(<2 x i64> %InVec) {
; CHECK-LABEL: test_srlq_2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psrlq $1, %xmm0
; CHECK-NEXT: retq
entry:
@@ -231,7 +231,7 @@ entry:
define <2 x i64> @test_srlq_3(<2 x i64> %InVec) {
; CHECK-LABEL: test_srlq_3:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: psrlq $63, %xmm0
; CHECK-NEXT: retq
entry:
@@ -241,7 +241,7 @@ entry:
define <4 x i32> @sra_sra_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: sra_sra_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrad $6, %xmm0
; CHECK-NEXT: retq
%sra0 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -251,7 +251,7 @@ define <4 x i32> @sra_sra_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @srl_srl_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: srl_srl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrld $6, %xmm0
; CHECK-NEXT: retq
%srl0 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -261,7 +261,7 @@ define <4 x i32> @srl_srl_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @srl_shl_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: srl_shl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%srl0 = shl <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
@@ -271,7 +271,7 @@ define <4 x i32> @srl_shl_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @srl_sra_31_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: srl_sra_31_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrld $31, %xmm0
; CHECK-NEXT: retq
%sra = ashr <4 x i32> %x, %y
@@ -281,7 +281,7 @@ define <4 x i32> @srl_sra_31_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
define <4 x i32> @shl_shl_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: shl_shl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pslld $6, %xmm0
; CHECK-NEXT: retq
%shl0 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -291,7 +291,7 @@ define <4 x i32> @shl_shl_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @shl_sra_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: shl_sra_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%shl0 = ashr <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
@@ -301,7 +301,7 @@ define <4 x i32> @shl_sra_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @shl_srl_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: shl_srl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pslld $3, %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
@@ -312,7 +312,7 @@ define <4 x i32> @shl_srl_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @shl_zext_srl_v4i32(<4 x i16> %x) nounwind {
; CHECK-LABEL: shl_zext_srl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
@@ -324,7 +324,7 @@ define <4 x i32> @shl_zext_srl_v4i32(<4 x i16> %x) nounwind {
define <4 x i16> @sra_trunc_srl_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: sra_trunc_srl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrad $19, %xmm0
; CHECK-NEXT: retq
%srl = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
@@ -335,7 +335,7 @@ define <4 x i16> @sra_trunc_srl_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @shl_zext_shl_v4i32(<4 x i16> %x) nounwind {
; CHECK-LABEL: shl_zext_shl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pslld $19, %xmm0
; CHECK-NEXT: retq
%shl0 = shl <4 x i16> %x, <i16 2, i16 2, i16 2, i16 2>
@@ -346,7 +346,7 @@ define <4 x i32> @shl_zext_shl_v4i32(<4 x i16> %x) nounwind {
define <4 x i32> @sra_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: sra_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrad $3, %xmm0
; CHECK-NEXT: retq
%sra = ashr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
@@ -355,7 +355,7 @@ define <4 x i32> @sra_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @srl_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: srl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: psrld $3, %xmm0
; CHECK-NEXT: retq
%sra = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
@@ -364,7 +364,7 @@ define <4 x i32> @srl_v4i32(<4 x i32> %x) nounwind {
define <4 x i32> @shl_v4i32(<4 x i32> %x) nounwind {
; CHECK-LABEL: shl_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pslld $3, %xmm0
; CHECK-NEXT: retq
%sra = shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
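
(Sketch of the mechanical change these test hunks apply, using a hypothetical
function @f that does not appear in the patch: the comment llc prints for the
entry basic block changes spelling from "# BB#0:" to "# %bb.0:", so every
FileCheck line matching that comment is updated in lock-step while the checked
instructions stay the same. Modeled on shl_v4i32 above:

    define <4 x i32> @f(<4 x i32> %x) nounwind {
    ; CHECK-LABEL: f:
    ; CHECK:       # %bb.0:
    ; (before this patch the line above was written: CHECK: # BB#0:)
    ; CHECK-NEXT:  pslld $3, %xmm0
    ; CHECK-NEXT:  retq
      %r = shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
      ret <4 x i32> %r
    }
)
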
diff --git a/test/CodeGen/X86/sse2.ll b/test/CodeGen/X86/sse2.ll
index b7e780b512c..285fdb6e76d 100644
--- a/test/CodeGen/X86/sse2.ll
+++ b/test/CodeGen/X86/sse2.ll
@@ -6,7 +6,7 @@
define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; X86-LABEL: test1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movapd (%ecx), %xmm0
@@ -15,7 +15,7 @@ define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movapd (%rsi), %xmm1
; X64-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; X64-NEXT: movapd %xmm1, (%rdi)
@@ -29,7 +29,7 @@ define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; X86-LABEL: test2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movapd (%ecx), %xmm0
@@ -38,7 +38,7 @@ define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rsi), %xmm1
; X64-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT: movaps %xmm1, (%rdi)
@@ -53,7 +53,7 @@ define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
define void @test3(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) nounwind {
; X86-LABEL: test3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -63,7 +63,7 @@ define void @test3(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) nounwind
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rsi), %xmm0
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; X64-NEXT: movaps %xmm0, (%rdi)
@@ -84,14 +84,14 @@ define void @test3(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) nounwind
define void @test4(<4 x float> %X, <4 x float>* %res) nounwind {
; X86-LABEL: test4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1,3,3]
; X86-NEXT: movaps %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1,3,3]
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -102,7 +102,7 @@ define void @test4(<4 x float> %X, <4 x float>* %res) nounwind {
define <4 x i32> @test5(i8** %ptr) nounwind {
; X86-LABEL: test5:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl (%eax), %eax
; X86-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -112,7 +112,7 @@ define <4 x i32> @test5(i8** %ptr) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: pxor %xmm0, %xmm0
@@ -136,7 +136,7 @@ define <4 x i32> @test5(i8** %ptr) nounwind {
define void @test6(<4 x float>* %res, <4 x float>* %A) nounwind {
; X86-LABEL: test6:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movaps (%ecx), %xmm0
@@ -144,7 +144,7 @@ define void @test6(<4 x float>* %res, <4 x float>* %A) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rsi), %xmm0
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -156,13 +156,13 @@ define void @test6(<4 x float>* %res, <4 x float>* %A) nounwind {
define void @test7() nounwind {
; X86-LABEL: test7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: xorps %xmm0, %xmm0
; X86-NEXT: movaps %xmm0, 0
; X86-NEXT: retl
;
; X64-LABEL: test7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 0
; X64-NEXT: retq
@@ -176,12 +176,12 @@ define void @test7() nounwind {
define <2 x i64> @test8() nounwind {
; X86-LABEL: test8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movups x, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movups {{.*}}(%rip), %xmm0
; X64-NEXT: retq
%tmp = load i32, i32* getelementptr ([4 x i32], [4 x i32]* @x, i32 0, i32 0) ; <i32> [#uses=1]
@@ -198,12 +198,12 @@ define <2 x i64> @test8() nounwind {
define <4 x float> @test9(i32 %dummy, float %a, float %b, float %c, float %d) nounwind {
; X86-LABEL: test9:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movups {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -217,12 +217,12 @@ define <4 x float> @test9(i32 %dummy, float %a, float %b, float %c, float %d) no
define <4 x float> @test10(float %a, float %b, float %c, float %d) nounwind {
; X86-LABEL: test10:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movups {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test10:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -236,12 +236,12 @@ define <4 x float> @test10(float %a, float %b, float %c, float %d) nounwind {
define <2 x double> @test11(double %a, double %b) nounwind {
; X86-LABEL: test11:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movups {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test11:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: retq
%tmp = insertelement <2 x double> undef, double %a, i32 0 ; <<2 x double>> [#uses=1]
@@ -251,7 +251,7 @@ define <2 x double> @test11(double %a, double %b) nounwind {
define void @test12() nounwind {
; X86-LABEL: test12:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movapd 0, %xmm0
; X86-NEXT: movapd {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; X86-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
@@ -262,7 +262,7 @@ define void @test12() nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test12:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movapd 0, %xmm0
; X64-NEXT: movapd {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; X64-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
@@ -281,7 +281,7 @@ define void @test12() nounwind {
define void @test13(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
; X86-LABEL: test13:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -292,7 +292,7 @@ define void @test13(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x fl
; X86-NEXT: retl
;
; X64-LABEL: test13:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdx), %xmm0
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],mem[0,1]
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
@@ -307,7 +307,7 @@ define void @test13(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x fl
define <4 x float> @test14(<4 x float>* %x, <4 x float>* %y) nounwind {
; X86-LABEL: test14:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movaps (%ecx), %xmm1
@@ -319,7 +319,7 @@ define <4 x float> @test14(<4 x float>* %x, <4 x float>* %y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test14:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rsi), %xmm1
; X64-NEXT: movaps (%rdi), %xmm2
; X64-NEXT: movaps %xmm2, %xmm0
@@ -337,7 +337,7 @@ define <4 x float> @test14(<4 x float>* %x, <4 x float>* %y) nounwind {
define <4 x float> @test15(<4 x float>* %x, <4 x float>* %y) nounwind {
; X86-LABEL: test15:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movaps (%ecx), %xmm0
@@ -345,7 +345,7 @@ define <4 x float> @test15(<4 x float>* %x, <4 x float>* %y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test15:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
; X64-NEXT: retq
@@ -360,14 +360,14 @@ entry:
define <2 x double> @test16(<4 x double> * nocapture %srcA, <2 x double>* nocapture %dst) {
; X86-LABEL: test16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movaps 96(%eax), %xmm0
; X86-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X86-NEXT: retl
;
; X64-LABEL: test16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps 96(%rdi), %xmm0
; X64-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X64-NEXT: retq
@@ -380,13 +380,13 @@ define <2 x double> @test16(<4 x double> * nocapture %srcA, <2 x double>* nocap
; PR9009
define fastcc void @test17() nounwind {
; X86-LABEL: test17:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movaps {{.*#+}} xmm0 = <u,u,32768,32768>
; X86-NEXT: movaps %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: test17:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,u,32768,32768>
; X64-NEXT: movaps %xmm0, (%rax)
; X64-NEXT: retq
@@ -401,14 +401,14 @@ entry:
; PR9210
define <4 x float> @f(<4 x double>) nounwind {
; X86-LABEL: f:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: cvtpd2ps %xmm1, %xmm1
; X86-NEXT: cvtpd2ps %xmm0, %xmm0
; X86-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-NEXT: retl
;
; X64-LABEL: f:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: cvtpd2ps %xmm1, %xmm1
; X64-NEXT: cvtpd2ps %xmm0, %xmm0
; X64-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -420,12 +420,12 @@ entry:
define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
; X86-LABEL: test_insert_64_zext:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X86-NEXT: retl
;
; X64-LABEL: test_insert_64_zext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
@@ -434,12 +434,12 @@ define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
define <4 x i32> @PR19721(<4 x i32> %i) {
; X86-LABEL: PR19721:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: PR19721:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
; X64-NEXT: andq %rax, %rcx
@@ -454,7 +454,7 @@ define <4 x i32> @PR19721(<4 x i32> %i) {
define <4 x i32> @test_mul(<4 x i32> %x, <4 x i32> %y) {
; X86-LABEL: test_mul:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -465,7 +465,7 @@ define <4 x i32> @test_mul(<4 x i32> %x, <4 x i32> %y) {
; X86-NEXT: retl
;
; X64-LABEL: test_mul:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X64-NEXT: pmuludq %xmm1, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/test/CodeGen/X86/sse3-avx-addsub-2.ll b/test/CodeGen/X86/sse3-avx-addsub-2.ll
index f80ee38fa96..aba916241f3 100644
--- a/test/CodeGen/X86/sse3-avx-addsub-2.ll
+++ b/test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -7,12 +7,12 @@
define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 0
@@ -36,12 +36,12 @@ define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test2(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 2
@@ -57,12 +57,12 @@ define <4 x float> @test2(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 0
@@ -78,12 +78,12 @@ define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 2
@@ -99,12 +99,12 @@ define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 0
@@ -120,12 +120,12 @@ define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test6(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 0
@@ -149,13 +149,13 @@ define <4 x float> @test6(<4 x float> %A, <4 x float> %B) {
define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
; SSE-LABEL: test7:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd %xmm2, %xmm0
; SSE-NEXT: addsubpd %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%1 = extractelement <4 x double> %A, i32 0
@@ -179,12 +179,12 @@ define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
define <2 x double> @test8(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: test8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <2 x double> %A, i32 0
@@ -200,13 +200,13 @@ define <2 x double> @test8(<2 x double> %A, <2 x double> %B) {
define <8 x float> @test9(<8 x float> %A, <8 x float> %B) {
; SSE-LABEL: test9:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm2, %xmm0
; SSE-NEXT: addsubps %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test9:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%1 = extractelement <8 x float> %A, i32 0
@@ -249,12 +249,12 @@ define <8 x float> @test9(<8 x float> %A, <8 x float> %B) {
define <4 x float> @test10(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test10:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: subss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test10:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 0
@@ -266,7 +266,7 @@ define <4 x float> @test10(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test11(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test11:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: subss %xmm1, %xmm0
@@ -274,7 +274,7 @@ define <4 x float> @test11(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: test11:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
@@ -289,7 +289,7 @@ define <4 x float> @test11(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test12(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test12:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm1
@@ -297,7 +297,7 @@ define <4 x float> @test12(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: test12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -312,7 +312,7 @@ define <4 x float> @test12(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test13(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test13:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: addss %xmm0, %xmm1
@@ -321,7 +321,7 @@ define <4 x float> @test13(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: test13:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -336,7 +336,7 @@ define <4 x float> @test13(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test14(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test14:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm1, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -347,7 +347,7 @@ define <4 x float> @test14(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: test14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
@@ -367,7 +367,7 @@ define <4 x float> @test14(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test15(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test15:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE-NEXT: addss %xmm3, %xmm2
@@ -379,7 +379,7 @@ define <4 x float> @test15(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: test15:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
@@ -402,7 +402,7 @@ define <4 x float> @test15(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
@@ -422,7 +422,7 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: retq
;
; AVX-LABEL: test16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vsubss %xmm0, %xmm0, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
@@ -457,12 +457,12 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
define <2 x float> @test_v2f32(<2 x float> %v0, <2 x float> %v1) {
; SSE-LABEL: test_v2f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%v2 = extractelement <2 x float> %v0, i32 0
diff --git a/test/CodeGen/X86/sse3-avx-addsub.ll b/test/CodeGen/X86/sse3-avx-addsub.ll
index 0e0cf485256..7c87532ffea 100644
--- a/test/CodeGen/X86/sse3-avx-addsub.ll
+++ b/test/CodeGen/X86/sse3-avx-addsub.ll
@@ -38,12 +38,12 @@
define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%sub = fsub <4 x float> %A, %B
@@ -54,13 +54,13 @@ define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
define <8 x float> @test2(<8 x float> %A, <8 x float> %B) {
; SSE-LABEL: test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm2, %xmm0
; SSE-NEXT: addsubps %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%sub = fsub <8 x float> %A, %B
@@ -71,13 +71,13 @@ define <8 x float> @test2(<8 x float> %A, <8 x float> %B) {
define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
; SSE-LABEL: test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd %xmm2, %xmm0
; SSE-NEXT: addsubpd %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%sub = fsub <4 x double> %A, %B
@@ -88,12 +88,12 @@ define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
define <2 x double> @test4(<2 x double> %A, <2 x double> %B) #0 {
; SSE-LABEL: test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%add = fadd <2 x double> %A, %B
@@ -104,7 +104,7 @@ define <2 x double> @test4(<2 x double> %A, <2 x double> %B) #0 {
define <16 x float> @test5(<16 x float> %A, <16 x float> %B) {
; SSE-LABEL: test5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps %xmm4, %xmm0
; SSE-NEXT: addsubps %xmm5, %xmm1
; SSE-NEXT: addsubps %xmm6, %xmm2
@@ -112,13 +112,13 @@ define <16 x float> @test5(<16 x float> %A, <16 x float> %B) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test5:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vaddsubps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vaddsubps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: test5:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vsubps %zmm1, %zmm0, %zmm2
; AVX512-NEXT: movw $-21846, %ax # imm = 0xAAAA
; AVX512-NEXT: kmovw %eax, %k1
@@ -133,7 +133,7 @@ define <16 x float> @test5(<16 x float> %A, <16 x float> %B) {
define <8 x double> @test6(<8 x double> %A, <8 x double> %B) {
; SSE-LABEL: test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd %xmm4, %xmm0
; SSE-NEXT: addsubpd %xmm5, %xmm1
; SSE-NEXT: addsubpd %xmm6, %xmm2
@@ -141,13 +141,13 @@ define <8 x double> @test6(<8 x double> %A, <8 x double> %B) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test6:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vaddsubpd %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vaddsubpd %ymm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: test6:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vsubpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],zmm2[1],zmm0[2],zmm2[3],zmm0[4],zmm2[5],zmm0[6],zmm2[7]
@@ -160,12 +160,12 @@ define <8 x double> @test6(<8 x double> %A, <8 x double> %B) {
define <4 x float> @test1b(<4 x float> %A, <4 x float>* %B) {
; SSE-LABEL: test1b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test1b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = load <4 x float>, <4 x float>* %B
@@ -177,13 +177,13 @@ define <4 x float> @test1b(<4 x float> %A, <4 x float>* %B) {
define <8 x float> @test2b(<8 x float> %A, <8 x float>* %B) {
; SSE-LABEL: test2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps (%rdi), %xmm0
; SSE-NEXT: addsubps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test2b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
%1 = load <8 x float>, <8 x float>* %B
@@ -195,13 +195,13 @@ define <8 x float> @test2b(<8 x float> %A, <8 x float>* %B) {
define <4 x double> @test3b(<4 x double> %A, <4 x double>* %B) {
; SSE-LABEL: test3b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd (%rdi), %xmm0
; SSE-NEXT: addsubpd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test3b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
%1 = load <4 x double>, <4 x double>* %B
@@ -213,12 +213,12 @@ define <4 x double> @test3b(<4 x double> %A, <4 x double>* %B) {
define <2 x double> @test4b(<2 x double> %A, <2 x double>* %B) {
; SSE-LABEL: test4b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test4b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = load <2 x double>, <2 x double>* %B
@@ -230,12 +230,12 @@ define <2 x double> @test4b(<2 x double> %A, <2 x double>* %B) {
define <4 x float> @test1c(<4 x float> %A, <4 x float>* %B) {
; SSE-LABEL: test1c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test1c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = load <4 x float>, <4 x float>* %B
@@ -247,13 +247,13 @@ define <4 x float> @test1c(<4 x float> %A, <4 x float>* %B) {
define <8 x float> @test2c(<8 x float> %A, <8 x float>* %B) {
; SSE-LABEL: test2c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubps (%rdi), %xmm0
; SSE-NEXT: addsubps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test2c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
%1 = load <8 x float>, <8 x float>* %B
@@ -265,13 +265,13 @@ define <8 x float> @test2c(<8 x float> %A, <8 x float>* %B) {
define <4 x double> @test3c(<4 x double> %A, <4 x double>* %B) {
; SSE-LABEL: test3c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd (%rdi), %xmm0
; SSE-NEXT: addsubpd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test3c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
%1 = load <4 x double>, <4 x double>* %B
@@ -283,12 +283,12 @@ define <4 x double> @test3c(<4 x double> %A, <4 x double>* %B) {
define <2 x double> @test4c(<2 x double> %A, <2 x double>* %B) {
; SSE-LABEL: test4c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: addsubpd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test4c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = load <2 x double>, <2 x double>* %B
diff --git a/test/CodeGen/X86/sse3-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse3-intrinsics-fast-isel.ll
index 0111de2f521..5bf36a51c76 100644
--- a/test/CodeGen/X86/sse3-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse3-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define <2 x double> @test_mm_addsub_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_addsub_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addsubpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_addsub_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addsubpd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
@@ -21,12 +21,12 @@ declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwi
define <4 x float> @test_mm_addsub_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_addsub_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addsubps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_addsub_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addsubps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
@@ -36,12 +36,12 @@ declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind
define <2 x double> @test_mm_hadd_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_hadd_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: haddpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hadd_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: haddpd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
@@ -51,12 +51,12 @@ declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind
define <4 x float> @test_mm_hadd_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_hadd_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: haddps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hadd_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: haddps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
@@ -66,12 +66,12 @@ declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind re
define <2 x double> @test_mm_hsub_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_hsub_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: hsubpd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hsub_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: hsubpd %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
@@ -81,12 +81,12 @@ declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind
define <4 x float> @test_mm_hsub_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_hsub_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: hsubps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hsub_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: hsubps %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
@@ -96,13 +96,13 @@ declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind re
define <2 x i64> @test_mm_lddqu_si128(<2 x i64>* %a0) {
; X32-LABEL: test_mm_lddqu_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: lddqu (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_lddqu_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: lddqu (%rdi), %xmm0
; X64-NEXT: retq
%bc = bitcast <2 x i64>* %a0 to i8*
@@ -114,13 +114,13 @@ declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8*) nounwind readonly
define <2 x double> @test_mm_loaddup_pd(double* %a0) {
; X32-LABEL: test_mm_loaddup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_loaddup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
%ld = load double, double* %a0
@@ -131,12 +131,12 @@ define <2 x double> @test_mm_loaddup_pd(double* %a0) {
define <2 x double> @test_mm_movedup_pd(<2 x double> %a0) {
; X32-LABEL: test_mm_movedup_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movedup_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a0, <2 x i32> zeroinitializer
@@ -145,12 +145,12 @@ define <2 x double> @test_mm_movedup_pd(<2 x double> %a0) {
define <4 x float> @test_mm_movehdup_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_movehdup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_movehdup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -159,12 +159,12 @@ define <4 x float> @test_mm_movehdup_ps(<4 x float> %a0) {
define <4 x float> @test_mm_moveldup_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_moveldup_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_moveldup_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
diff --git a/test/CodeGen/X86/sse3-intrinsics-x86.ll b/test/CodeGen/X86/sse3-intrinsics-x86.ll
index fd7f59a0157..18bd2195cb9 100644
--- a/test/CodeGen/X86/sse3-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse3-intrinsics-x86.ll
@@ -5,12 +5,12 @@
define <2 x double> @test_x86_sse3_addsub_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse3_addsub_pd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: addsubpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd0,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse3_addsub_pd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd0,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -21,12 +21,12 @@ declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwi
define <4 x float> @test_x86_sse3_addsub_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse3_addsub_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: addsubps %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0xd0,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse3_addsub_ps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xd0,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -37,12 +37,12 @@ declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind
define <2 x double> @test_x86_sse3_hadd_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse3_hadd_pd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: haddpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x7c,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse3_hadd_pd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x7c,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -53,12 +53,12 @@ declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind
define <4 x float> @test_x86_sse3_hadd_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse3_hadd_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x7c,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse3_hadd_ps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vhaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x7c,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -69,12 +69,12 @@ declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind re
define <2 x double> @test_x86_sse3_hsub_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse3_hsub_pd:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: hsubpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x7d,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse3_hsub_pd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x7d,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -85,12 +85,12 @@ declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind
define <4 x float> @test_x86_sse3_hsub_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse3_hsub_ps:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: hsubps %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x7d,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse3_hsub_ps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vhsubps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x7d,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -101,13 +101,13 @@ declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind re
define <16 x i8> @test_x86_sse3_ldu_dq(i8* %a0) {
; SSE-LABEL: test_x86_sse3_ldu_dq:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: lddqu (%eax), %xmm0 ## encoding: [0xf2,0x0f,0xf0,0x00]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse3_ldu_dq:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; VCHECK-NEXT: vlddqu (%eax), %xmm0 ## encoding: [0xc5,0xfb,0xf0,0x00]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -120,7 +120,7 @@ declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8*) nounwind readonly
define void @monitor(i8* %P, i32 %E, i32 %H) nounwind {
; CHECK-LABEL: monitor:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx ## encoding: [0x8b,0x54,0x24,0x0c]
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
@@ -134,7 +134,7 @@ declare void @llvm.x86.sse3.monitor(i8*, i32, i32) nounwind
define void @mwait(i32 %E, i32 %H) nounwind {
; CHECK-LABEL: mwait:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; CHECK-NEXT: mwait ## encoding: [0x0f,0x01,0xc9]
diff --git a/test/CodeGen/X86/sse3-schedule.ll b/test/CodeGen/X86/sse3-schedule.ll
index 2a3dae1b64e..e84ffca3702 100644
--- a/test/CodeGen/X86/sse3-schedule.ll
+++ b/test/CodeGen/X86/sse3-schedule.ll
@@ -13,61 +13,61 @@
define <2 x double> @test_addsubpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_addsubpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: addsubpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: addsubpd (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_addsubpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: addsubpd %xmm1, %xmm0 # sched: [6:3.00]
; ATOM-NEXT: addsubpd (%rdi), %xmm0 # sched: [6:3.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_addsubpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: addsubpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: addsubpd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_addsubpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsubpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addsubpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addsubpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addsubpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addsubpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addsubpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -80,61 +80,61 @@ declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwi
define <4 x float> @test_addsubps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_addsubps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: addsubps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: addsubps (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_addsubps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: addsubps %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: addsubps (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_addsubps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: addsubps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: addsubps (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_addsubps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_addsubps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_addsubps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_addsubps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_addsubps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_addsubps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_addsubps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -147,61 +147,61 @@ declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind
define <2 x double> @test_haddpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_haddpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: haddpd %xmm1, %xmm0 # sched: [5:2.00]
; GENERIC-NEXT: haddpd (%rdi), %xmm0 # sched: [11:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_haddpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: haddpd %xmm1, %xmm0 # sched: [8:4.00]
; ATOM-NEXT: haddpd (%rdi), %xmm0 # sched: [9:4.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_haddpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: haddpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: haddpd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_haddpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; SANDY-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_haddpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_haddpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; BROADWELL-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [10:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_haddpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_haddpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [6:2.00]
; SKX-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_haddpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_haddpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -214,61 +214,61 @@ declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind
define <4 x float> @test_haddps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_haddps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: haddps %xmm1, %xmm0 # sched: [5:2.00]
; GENERIC-NEXT: haddps (%rdi), %xmm0 # sched: [11:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_haddps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: haddps %xmm1, %xmm0 # sched: [8:4.00]
; ATOM-NEXT: haddps (%rdi), %xmm0 # sched: [9:4.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_haddps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: haddps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: haddps (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_haddps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; SANDY-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_haddps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_haddps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; BROADWELL-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [10:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_haddps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_haddps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [6:2.00]
; SKX-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_haddps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_haddps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -281,61 +281,61 @@ declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind re
define <2 x double> @test_hsubpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_hsubpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: hsubpd %xmm1, %xmm0 # sched: [5:2.00]
; GENERIC-NEXT: hsubpd (%rdi), %xmm0 # sched: [11:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_hsubpd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: hsubpd %xmm1, %xmm0 # sched: [8:4.00]
; ATOM-NEXT: hsubpd (%rdi), %xmm0 # sched: [9:4.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_hsubpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: hsubpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: hsubpd (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_hsubpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; SANDY-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_hsubpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_hsubpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; BROADWELL-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [10:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_hsubpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_hsubpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [6:2.00]
; SKX-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_hsubpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_hsubpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -348,61 +348,61 @@ declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind
define <4 x float> @test_hsubps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_hsubps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: hsubps %xmm1, %xmm0 # sched: [5:2.00]
; GENERIC-NEXT: hsubps (%rdi), %xmm0 # sched: [11:2.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_hsubps:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: hsubps %xmm1, %xmm0 # sched: [8:4.00]
; ATOM-NEXT: hsubps (%rdi), %xmm0 # sched: [9:4.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_hsubps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: hsubps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: hsubps (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_hsubps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; SANDY-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_hsubps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [5:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_hsubps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [5:2.00]
; BROADWELL-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [10:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_hsubps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [6:2.00]
; SKYLAKE-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_hsubps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [6:2.00]
; SKX-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_hsubps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_hsubps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -415,54 +415,54 @@ declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind re
define <16 x i8> @test_lddqu(i8* %a0) {
; GENERIC-LABEL: test_lddqu:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: lddqu (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lddqu:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: lddqu (%rdi), %xmm0 # sched: [3:1.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_lddqu:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: lddqu (%rdi), %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lddqu:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vlddqu (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lddqu:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vlddqu (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lddqu:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vlddqu (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lddqu:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vlddqu (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_lddqu:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vlddqu (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lddqu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vlddqu (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lddqu:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vlddqu (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %a0)
@@ -472,70 +472,70 @@ declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8*) nounwind readonly
define void @test_monitor(i8* %a0, i32 %a1, i32 %a2) {
; GENERIC-LABEL: test_monitor:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: leaq (%rdi), %rax # sched: [1:0.50]
; GENERIC-NEXT: movl %esi, %ecx # sched: [1:0.33]
; GENERIC-NEXT: monitor # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_monitor:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: leaq (%rdi), %rax # sched: [1:1.00]
; ATOM-NEXT: movl %esi, %ecx # sched: [1:0.50]
; ATOM-NEXT: monitor # sched: [45:22.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_monitor:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: leaq (%rdi), %rax # sched: [1:1.00]
; SLM-NEXT: movl %esi, %ecx # sched: [1:0.50]
; SLM-NEXT: monitor # sched: [100:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_monitor:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: leaq (%rdi), %rax # sched: [1:0.50]
; SANDY-NEXT: movl %esi, %ecx # sched: [1:0.33]
; SANDY-NEXT: monitor # sched: [100:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_monitor:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: leaq (%rdi), %rax # sched: [1:0.50]
; HASWELL-NEXT: movl %esi, %ecx # sched: [1:0.25]
; HASWELL-NEXT: monitor # sched: [100:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_monitor:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: leaq (%rdi), %rax # sched: [1:0.50]
; BROADWELL-NEXT: movl %esi, %ecx # sched: [1:0.25]
; BROADWELL-NEXT: monitor # sched: [100:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_monitor:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: leaq (%rdi), %rax # sched: [1:0.50]
; SKYLAKE-NEXT: movl %esi, %ecx # sched: [1:0.25]
; SKYLAKE-NEXT: monitor # sched: [100:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_monitor:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: leaq (%rdi), %rax # sched: [1:0.50]
; SKX-NEXT: movl %esi, %ecx # sched: [1:0.25]
; SKX-NEXT: monitor # sched: [100:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_monitor:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: leaq (%rdi), %rax # sched: [1:0.50]
; BTVER2-NEXT: movl %esi, %ecx # sched: [1:0.17]
; BTVER2-NEXT: monitor # sched: [100:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_monitor:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: leaq (%rdi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: movl %esi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: monitor # sched: [100:?]
@@ -547,14 +547,14 @@ declare void @llvm.x86.sse3.monitor(i8*, i32, i32)
define <2 x double> @test_movddup(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_movddup:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movddup {{.*#+}} xmm1 = xmm0[0,0] sched: [1:1.00]
; GENERIC-NEXT: movddup {{.*#+}} xmm0 = mem[0,0] sched: [6:0.50]
; GENERIC-NEXT: subpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movddup:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movddup {{.*#+}} xmm1 = mem[0,0] sched: [1:1.00]
; ATOM-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
; ATOM-NEXT: subpd %xmm0, %xmm1 # sched: [6:3.00]
@@ -562,56 +562,56 @@ define <2 x double> @test_movddup(<2 x double> %a0, <2 x double> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movddup:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movddup {{.*#+}} xmm1 = xmm0[0,0] sched: [1:1.00]
; SLM-NEXT: movddup {{.*#+}} xmm0 = mem[0,0] sched: [3:1.00]
; SLM-NEXT: subpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movddup:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
; SANDY-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [6:0.50]
; SANDY-NEXT: vsubpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movddup:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
; HASWELL-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [1:0.50]
; HASWELL-NEXT: vsubpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movddup:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
; BROADWELL-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [5:0.50]
; BROADWELL-NEXT: vsubpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movddup:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
; SKYLAKE-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [5:0.50]
; SKYLAKE-NEXT: vsubpd %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movddup:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00]
; SKX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [5:0.50]
; SKX-NEXT: vsubpd %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movddup:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [5:1.00]
; BTVER2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:0.50]
; BTVER2-NEXT: vsubpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movddup:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [8:0.50]
; ZNVER1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:0.50]
; ZNVER1-NEXT: vsubpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -625,14 +625,14 @@ define <2 x double> @test_movddup(<2 x double> %a0, <2 x double> *%a1) {
define <4 x float> @test_movshdup(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_movshdup:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] sched: [1:1.00]
; GENERIC-NEXT: movshdup {{.*#+}} xmm0 = mem[1,1,3,3] sched: [6:0.50]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movshdup:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [1:1.00]
; ATOM-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:1.00]
; ATOM-NEXT: addps %xmm0, %xmm1 # sched: [5:5.00]
@@ -640,56 +640,56 @@ define <4 x float> @test_movshdup(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movshdup:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] sched: [1:1.00]
; SLM-NEXT: movshdup {{.*#+}} xmm0 = mem[1,1,3,3] sched: [3:1.00]
; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movshdup:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:1.00]
; SANDY-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [6:0.50]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movshdup:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:1.00]
; HASWELL-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [1:0.50]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movshdup:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:1.00]
; BROADWELL-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [5:0.50]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movshdup:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:1.00]
; SKYLAKE-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [6:0.50]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movshdup:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:1.00]
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [6:0.50]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movshdup:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [5:1.00]
; BTVER2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movshdup:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [8:0.50]
; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -703,14 +703,14 @@ define <4 x float> @test_movshdup(<4 x float> %a0, <4 x float> *%a1) {
define <4 x float> @test_movsldup(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_movsldup:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movsldup {{.*#+}} xmm1 = xmm0[0,0,2,2] sched: [1:1.00]
; GENERIC-NEXT: movsldup {{.*#+}} xmm0 = mem[0,0,2,2] sched: [6:0.50]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_movsldup:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [1:1.00]
; ATOM-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:1.00]
; ATOM-NEXT: addps %xmm0, %xmm1 # sched: [5:5.00]
@@ -718,56 +718,56 @@ define <4 x float> @test_movsldup(<4 x float> %a0, <4 x float> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_movsldup:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movsldup {{.*#+}} xmm1 = xmm0[0,0,2,2] sched: [1:1.00]
; SLM-NEXT: movsldup {{.*#+}} xmm0 = mem[0,0,2,2] sched: [3:1.00]
; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movsldup:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:1.00]
; SANDY-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [6:0.50]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movsldup:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:1.00]
; HASWELL-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [1:0.50]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movsldup:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:1.00]
; BROADWELL-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [5:0.50]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movsldup:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:1.00]
; SKYLAKE-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [6:0.50]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movsldup:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:1.00]
; SKX-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [6:0.50]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movsldup:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [5:1.00]
; BTVER2-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movsldup:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [8:0.50]
; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -781,70 +781,70 @@ define <4 x float> @test_movsldup(<4 x float> %a0, <4 x float> *%a1) {
define void @test_mwait(i32 %a0, i32 %a1) {
; GENERIC-LABEL: test_mwait:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl %edi, %ecx # sched: [1:0.33]
; GENERIC-NEXT: movl %esi, %eax # sched: [1:0.33]
; GENERIC-NEXT: mwait # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_mwait:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl %edi, %ecx # sched: [1:0.50]
; ATOM-NEXT: movl %esi, %eax # sched: [1:0.50]
; ATOM-NEXT: mwait # sched: [46:23.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_mwait:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl %edi, %ecx # sched: [1:0.50]
; SLM-NEXT: movl %esi, %eax # sched: [1:0.50]
; SLM-NEXT: mwait # sched: [100:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_mwait:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl %edi, %ecx # sched: [1:0.33]
; SANDY-NEXT: movl %esi, %eax # sched: [1:0.33]
; SANDY-NEXT: mwait # sched: [100:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mwait:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl %edi, %ecx # sched: [1:0.25]
; HASWELL-NEXT: movl %esi, %eax # sched: [1:0.25]
; HASWELL-NEXT: mwait # sched: [20:2.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mwait:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl %edi, %ecx # sched: [1:0.25]
; BROADWELL-NEXT: movl %esi, %eax # sched: [1:0.25]
; BROADWELL-NEXT: mwait # sched: [100:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mwait:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl %edi, %ecx # sched: [1:0.25]
; SKYLAKE-NEXT: movl %esi, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: mwait # sched: [20:2.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mwait:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl %edi, %ecx # sched: [1:0.25]
; SKX-NEXT: movl %esi, %eax # sched: [1:0.25]
; SKX-NEXT: mwait # sched: [20:2.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mwait:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl %edi, %ecx # sched: [1:0.17]
; BTVER2-NEXT: movl %esi, %eax # sched: [1:0.17]
; BTVER2-NEXT: mwait # sched: [100:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mwait:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: movl %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: mwait # sched: [100:?]
diff --git a/test/CodeGen/X86/sse3.ll b/test/CodeGen/X86/sse3.ll
index 3e9b06a57b9..09914e09faa 100644
--- a/test/CodeGen/X86/sse3.ll
+++ b/test/CodeGen/X86/sse3.ll
@@ -9,7 +9,7 @@
define void @t0(<8 x i16>* %dest, <8 x i16>* %old) nounwind {
; X86-LABEL: t0:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $1, %edx
@@ -19,7 +19,7 @@ define void @t0(<8 x i16>* %dest, <8 x i16>* %old) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t0:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl $1, %eax
; X64-NEXT: movd %eax, %xmm0
; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
@@ -36,7 +36,7 @@ entry:
define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; X86-LABEL: t1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movaps {{.*#+}} xmm0 = [0,65535,65535,65535,65535,65535,65535,65535]
@@ -47,7 +47,7 @@ define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,65535,65535,65535,65535,65535,65535,65535]
; X64-NEXT: movaps %xmm0, %xmm1
; X64-NEXT: andnps (%rsi), %xmm1
@@ -63,7 +63,7 @@ define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <8 x i16> @t2(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-LABEL: t2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
; X86-NEXT: pand %xmm2, %xmm0
; X86-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,1,4,5,6,7]
@@ -72,7 +72,7 @@ define <8 x i16> @t2(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
; X64-NEXT: pand %xmm2, %xmm0
; X64-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,1,4,5,6,7]
@@ -85,7 +85,7 @@ define <8 x i16> @t2(<8 x i16> %A, <8 x i16> %B) nounwind {
define <8 x i16> @t3(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-LABEL: t3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
@@ -94,7 +94,7 @@ define <8 x i16> @t3(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
@@ -107,7 +107,7 @@ define <8 x i16> @t3(<8 x i16> %A, <8 x i16> %B) nounwind {
define <8 x i16> @t4(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-LABEL: t4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
@@ -115,7 +115,7 @@ define <8 x i16> @t4(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
@@ -127,13 +127,13 @@ define <8 x i16> @t4(<8 x i16> %A, <8 x i16> %B) nounwind {
define <8 x i16> @t5(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-LABEL: t5:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-NEXT: movaps %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: t5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -143,12 +143,12 @@ define <8 x i16> @t5(<8 x i16> %A, <8 x i16> %B) nounwind {
define <8 x i16> @t6(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-LABEL: t6:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X86-NEXT: retl
;
; X64-LABEL: t6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
@@ -157,13 +157,13 @@ define <8 x i16> @t6(<8 x i16> %A, <8 x i16> %B) nounwind {
define <8 x i16> @t7(<8 x i16> %A, <8 x i16> %B) nounwind {
; X86-LABEL: t7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,4,7]
; X86-NEXT: retl
;
; X64-LABEL: t7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,4,7]
; X64-NEXT: retq
@@ -173,7 +173,7 @@ define <8 x i16> @t7(<8 x i16> %A, <8 x i16> %B) nounwind {
define void @t8(<2 x i64>* %res, <2 x i64>* %A) nounwind {
; X86-LABEL: t8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: pshuflw {{.*#+}} xmm0 = mem[2,1,0,3,4,5,6,7]
@@ -182,7 +182,7 @@ define void @t8(<2 x i64>* %res, <2 x i64>* %A) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[2,1,0,3,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -212,7 +212,7 @@ define void @t8(<2 x i64>* %res, <2 x i64>* %A) nounwind {
define void @t9(<4 x float>* %r, <2 x i32>* %A) nounwind {
; X86-LABEL: t9:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movapd (%ecx), %xmm0
@@ -221,7 +221,7 @@ define void @t9(<4 x float>* %r, <2 x i32>* %A) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movapd (%rdi), %xmm0
; X64-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X64-NEXT: movapd %xmm0, (%rdi)
@@ -254,7 +254,7 @@ define void @t9(<4 x float>* %r, <2 x i32>* %A) nounwind {
define void @t10() nounwind {
; X86-LABEL: t10:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -262,7 +262,7 @@ define void @t10() nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t10:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -281,13 +281,13 @@ define void @t10() nounwind {
; Pack various elements via shuffles.
define <8 x i16> @t11(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; X86-LABEL: t11:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: psrld $16, %xmm0
; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-NEXT: retl
;
; X64-LABEL: t11:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrld $16, %xmm0
; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X64-NEXT: retq
@@ -299,14 +299,14 @@ entry:
define <8 x i16> @t12(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; X86-LABEL: t12:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
; X86-NEXT: retl
;
; X64-LABEL: t12:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
@@ -319,14 +319,14 @@ entry:
define <8 x i16> @t13(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; X86-LABEL: t13:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
; X86-NEXT: retl
;
; X64-LABEL: t13:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
@@ -338,14 +338,14 @@ entry:
define <8 x i16> @t14(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; X86-LABEL: t14:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: psrlq $16, %xmm0
; X86-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: t14:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrlq $16, %xmm0
; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT: movdqa %xmm1, %xmm0
@@ -358,14 +358,14 @@ entry:
; FIXME: t15 is worse off since the scheduler 2-address hack was disabled.
define <8 x i16> @t15(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; X86-LABEL: t15:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
; X86-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-NEXT: retl
;
; X64-LABEL: t15:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -378,14 +378,14 @@ entry:
; Test Yonah, where we convert a shuffle to pextrw and pinsrw
define <16 x i8> @t16(<16 x i8> %T0) nounwind readnone {
; X86-LABEL: t16:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movdqa {{.*#+}} xmm1 = [0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0]
; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: t16:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa {{.*#+}} xmm1 = [0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0]
; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-NEXT: movdqa %xmm1, %xmm0
@@ -399,7 +399,7 @@ entry:
; rdar://8520311
define <4 x i32> @t17() nounwind {
; X86-LABEL: t17:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movaps (%eax), %xmm0
; X86-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X86-NEXT: xorps %xmm1, %xmm1
@@ -407,7 +407,7 @@ define <4 x i32> @t17() nounwind {
; X86-NEXT: retl
;
; X64-LABEL: t17:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps (%rax), %xmm0
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X64-NEXT: xorps %xmm1, %xmm1
diff --git a/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
index b35c9766c16..fcb6bbbdd11 100644
--- a/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define <2 x i64> @test_mm_blend_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_blend_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_blend_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -23,12 +23,12 @@ define <2 x i64> @test_mm_blend_epi16(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x double> @test_mm_blend_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_blend_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_blend_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 3>
@@ -37,12 +37,12 @@ define <2 x double> @test_mm_blend_pd(<2 x double> %a0, <2 x double> %a1) {
define <4 x float> @test_mm_blend_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_blend_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_blend_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -51,7 +51,7 @@ define <4 x float> @test_mm_blend_ps(<4 x float> %a0, <4 x float> %a1) {
define <2 x i64> @test_mm_blendv_epi8(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_blendv_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movdqa %xmm0, %xmm3
; X32-NEXT: movaps %xmm2, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm1, %xmm3
@@ -59,7 +59,7 @@ define <2 x i64> @test_mm_blendv_epi8(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a
; X32-NEXT: retl
;
; X64-LABEL: test_mm_blendv_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm3
; X64-NEXT: movaps %xmm2, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm1, %xmm3
@@ -76,7 +76,7 @@ declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) noun
define <2 x double> @test_mm_blendv_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; X32-LABEL: test_mm_blendv_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movapd %xmm0, %xmm3
; X32-NEXT: movaps %xmm2, %xmm0
; X32-NEXT: blendvpd %xmm0, %xmm1, %xmm3
@@ -84,7 +84,7 @@ define <2 x double> @test_mm_blendv_pd(<2 x double> %a0, <2 x double> %a1, <2 x
; X32-NEXT: retl
;
; X64-LABEL: test_mm_blendv_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movapd %xmm0, %xmm3
; X64-NEXT: movaps %xmm2, %xmm0
; X64-NEXT: blendvpd %xmm0, %xmm1, %xmm3
@@ -97,7 +97,7 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_mm_blendv_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; X32-LABEL: test_mm_blendv_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps %xmm0, %xmm3
; X32-NEXT: movaps %xmm2, %xmm0
; X32-NEXT: blendvps %xmm0, %xmm1, %xmm3
@@ -105,7 +105,7 @@ define <4 x float> @test_mm_blendv_ps(<4 x float> %a0, <4 x float> %a1, <4 x flo
; X32-NEXT: retl
;
; X64-LABEL: test_mm_blendv_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, %xmm3
; X64-NEXT: movaps %xmm2, %xmm0
; X64-NEXT: blendvps %xmm0, %xmm1, %xmm3
@@ -118,12 +118,12 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_mm_ceil_pd(<2 x double> %a0) {
; X32-LABEL: test_mm_ceil_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundpd $2, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ceil_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundpd $2, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 2)
@@ -133,12 +133,12 @@ declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readno
define <4 x float> @test_mm_ceil_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_ceil_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundps $2, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ceil_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundps $2, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 2)
@@ -148,12 +148,12 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
define <2 x double> @test_mm_ceil_sd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_ceil_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundsd $2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ceil_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundsd $2, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 2)
@@ -163,12 +163,12 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n
define <4 x float> @test_mm_ceil_ss(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_ceil_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundss $2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_ceil_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundss $2, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 2)
@@ -178,12 +178,12 @@ declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) noun
define <2 x i64> @test_mm_cmpeq_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpeq_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpeqq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpeq_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqq %xmm1, %xmm0
; X64-NEXT: retq
%cmp = icmp eq <2 x i64> %a0, %a1
@@ -193,12 +193,12 @@ define <2 x i64> @test_mm_cmpeq_epi64(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_cvtepi8_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepi8_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovsxbw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepi8_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -210,12 +210,12 @@ define <2 x i64> @test_mm_cvtepi8_epi16(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepi8_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepi8_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovsxbd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepi8_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbd %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -227,12 +227,12 @@ define <2 x i64> @test_mm_cvtepi8_epi32(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepi8_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepi8_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovsxbq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepi8_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -243,12 +243,12 @@ define <2 x i64> @test_mm_cvtepi8_epi64(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepi16_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepi16_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovsxwd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepi16_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxwd %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -260,12 +260,12 @@ define <2 x i64> @test_mm_cvtepi16_epi32(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepi16_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepi16_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovsxwq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepi16_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxwq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -276,12 +276,12 @@ define <2 x i64> @test_mm_cvtepi16_epi64(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepi32_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepi32_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovsxdq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepi32_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxdq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -292,12 +292,12 @@ define <2 x i64> @test_mm_cvtepi32_epi64(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepu8_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepu8_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepu8_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -309,12 +309,12 @@ define <2 x i64> @test_mm_cvtepu8_epi16(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepu8_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepu8_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepu8_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -326,12 +326,12 @@ define <2 x i64> @test_mm_cvtepu8_epi32(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepu8_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepu8_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepu8_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -342,12 +342,12 @@ define <2 x i64> @test_mm_cvtepu8_epi64(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepu16_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepu16_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepu16_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -359,12 +359,12 @@ define <2 x i64> @test_mm_cvtepu16_epi32(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepu16_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepu16_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepu16_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -375,12 +375,12 @@ define <2 x i64> @test_mm_cvtepu16_epi64(<2 x i64> %a0) {
define <2 x i64> @test_mm_cvtepu32_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_cvtepu32_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cvtepu32_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -391,12 +391,12 @@ define <2 x i64> @test_mm_cvtepu32_epi64(<2 x i64> %a0) {
define <2 x double> @test_mm_dp_pd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_dp_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: dppd $7, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_dp_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: dppd $7, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
@@ -406,12 +406,12 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi
define <4 x float> @test_mm_dp_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_dp_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: dpps $7, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_dp_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: dpps $7, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
@@ -421,13 +421,13 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind
define i32 @test_mm_extract_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_extract_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pextrb $1, %xmm0, %eax
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_extract_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pextrb $1, %xmm0, %eax
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: retq
@@ -439,12 +439,12 @@ define i32 @test_mm_extract_epi8(<2 x i64> %a0) {
define i32 @test_mm_extract_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_extract_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: extractps $1, %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_extract_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: extractps $1, %xmm0, %eax
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -454,13 +454,13 @@ define i32 @test_mm_extract_epi32(<2 x i64> %a0) {
define i64 @test_mm_extract_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_extract_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: extractps $2, %xmm0, %eax
; X32-NEXT: extractps $3, %xmm0, %edx
; X32-NEXT: retl
;
; X64-LABEL: test_mm_extract_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pextrq $1, %xmm0, %rax
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -470,13 +470,13 @@ define i64 @test_mm_extract_epi64(<2 x i64> %a0) {
define i32 @test_mm_extract_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_extract_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X32-NEXT: movd %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_extract_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: movd %xmm0, %eax
; X64-NEXT: retq
@@ -487,12 +487,12 @@ define i32 @test_mm_extract_ps(<4 x float> %a0) {
define <2 x double> @test_mm_floor_pd(<2 x double> %a0) {
; X32-LABEL: test_mm_floor_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundpd $1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_floor_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundpd $1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 1)
@@ -501,12 +501,12 @@ define <2 x double> @test_mm_floor_pd(<2 x double> %a0) {
define <4 x float> @test_mm_floor_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_floor_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundps $1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_floor_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundps $1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 1)
@@ -515,12 +515,12 @@ define <4 x float> @test_mm_floor_ps(<4 x float> %a0) {
define <2 x double> @test_mm_floor_sd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_floor_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundsd $1, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_floor_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundsd $1, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 1)
@@ -529,12 +529,12 @@ define <2 x double> @test_mm_floor_sd(<2 x double> %a0, <2 x double> %a1) {
define <4 x float> @test_mm_floor_ss(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_floor_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundss $1, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_floor_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundss $1, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 1)
@@ -543,13 +543,13 @@ define <4 x float> @test_mm_floor_ss(<4 x float> %a0, <4 x float> %a1) {
define <2 x i64> @test_mm_insert_epi8(<2 x i64> %a0, i8 %a1) {
; X32-LABEL: test_mm_insert_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pinsrb $1, %eax, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_insert_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: pinsrb $1, %eax, %xmm0
; X64-NEXT: retq
@@ -561,12 +561,12 @@ define <2 x i64> @test_mm_insert_epi8(<2 x i64> %a0, i8 %a1) {
define <2 x i64> @test_mm_insert_epi32(<2 x i64> %a0, i32 %a1) {
; X32-LABEL: test_mm_insert_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_insert_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pinsrd $1, %edi, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -577,13 +577,13 @@ define <2 x i64> @test_mm_insert_epi32(<2 x i64> %a0, i32 %a1) {
define <2 x i64> @test_mm_insert_epi64(<2 x i64> %a0, i64 %a1) {
; X32-LABEL: test_mm_insert_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pinsrd $2, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_insert_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pinsrq $1, %rdi, %xmm0
; X64-NEXT: retq
  %res = insertelement <2 x i64> %a0, i64 %a1, i32 1
@@ -592,12 +592,12 @@ define <2 x i64> @test_mm_insert_epi64(<2 x i64> %a0, i64 %a1) {
define <4 x float> @test_mm_insert_ps(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_insert_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1],zero,xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_insert_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1],zero,xmm0[3]
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 4)
@@ -607,12 +607,12 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw
define <2 x i64> @test_mm_max_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_max_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmaxsb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmaxsb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -625,12 +625,12 @@ define <2 x i64> @test_mm_max_epi8(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_max_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_max_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmaxsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmaxsd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -643,12 +643,12 @@ define <2 x i64> @test_mm_max_epi32(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_max_epu16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_max_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmaxuw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmaxuw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -661,12 +661,12 @@ define <2 x i64> @test_mm_max_epu16(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_max_epu32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_max_epu32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmaxud %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_max_epu32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmaxud %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -679,12 +679,12 @@ define <2 x i64> @test_mm_max_epu32(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_min_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_min_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pminsb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pminsb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -697,12 +697,12 @@ define <2 x i64> @test_mm_min_epi8(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_min_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_min_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pminsd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pminsd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -715,12 +715,12 @@ define <2 x i64> @test_mm_min_epi32(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_min_epu16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_min_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pminuw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pminuw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -733,12 +733,12 @@ define <2 x i64> @test_mm_min_epu16(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_min_epu32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_min_epu32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pminud %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_min_epu32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pminud %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -751,12 +751,12 @@ define <2 x i64> @test_mm_min_epu32(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_minpos_epu16(<2 x i64> %a0) {
; X32-LABEL: test_mm_minpos_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: phminposuw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_minpos_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: phminposuw %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -768,12 +768,12 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
define <2 x i64> @test_mm_mpsadbw_epu8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_mpsadbw_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: mpsadbw $1, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mpsadbw_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: mpsadbw $1, %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -786,12 +786,12 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind rea
define <2 x i64> @test_mm_mul_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_mul_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmuldq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mul_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmuldq %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -803,12 +803,12 @@ declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_mm_mullo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_mullo_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmulld %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mullo_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -820,12 +820,12 @@ define <2 x i64> @test_mm_mullo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_packus_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_packus_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: packusdw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_packus_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: packusdw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -838,12 +838,12 @@ declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readno
define <2 x double> @test_mm_round_pd(<2 x double> %a0) {
; X32-LABEL: test_mm_round_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundpd $4, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_round_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundpd $4, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 4)
@@ -852,12 +852,12 @@ define <2 x double> @test_mm_round_pd(<2 x double> %a0) {
define <4 x float> @test_mm_round_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_round_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundps $4, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_round_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundps $4, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 4)
@@ -866,12 +866,12 @@ define <4 x float> @test_mm_round_ps(<4 x float> %a0) {
define <2 x double> @test_mm_round_sd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_round_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundsd $4, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_round_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundsd $4, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 4)
@@ -880,12 +880,12 @@ define <2 x double> @test_mm_round_sd(<2 x double> %a0, <2 x double> %a1) {
define <4 x float> @test_mm_round_ss(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_round_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: roundss $4, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_round_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: roundss $4, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 4)
@@ -894,13 +894,13 @@ define <4 x float> @test_mm_round_ss(<4 x float> %a0, <4 x float> %a1) {
define <2 x i64> @test_mm_stream_load_si128(<2 x i64>* %a0) {
; X32-LABEL: test_mm_stream_load_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movntdqa (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_stream_load_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntdqa (%rdi), %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64>* %a0 to i8*
@@ -911,7 +911,7 @@ declare <2 x i64> @llvm.x86.sse41.movntdqa(i8*) nounwind readnone
define i32 @test_mm_test_all_ones(<2 x i64> %a0) {
; X32-LABEL: test_mm_test_all_ones:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpeqd %xmm1, %xmm1
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
@@ -919,7 +919,7 @@ define i32 @test_mm_test_all_ones(<2 x i64> %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_test_all_ones:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqd %xmm1, %xmm1
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
@@ -932,14 +932,14 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_mm_test_all_zeros(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_test_all_zeros:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
; X32-NEXT: sete %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_test_all_zeros:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
; X64-NEXT: sete %al
@@ -951,14 +951,14 @@ declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_mm_test_mix_ones_zeros(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_test_mix_ones_zeros:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_test_mix_ones_zeros:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -970,14 +970,14 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_mm_testc_si128(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_testc_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testc_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
; X64-NEXT: setb %al
@@ -988,14 +988,14 @@ define i32 @test_mm_testc_si128(<2 x i64> %a0, <2 x i64> %a1) {
define i32 @test_mm_testnzc_si128(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_testnzc_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testnzc_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -1006,14 +1006,14 @@ define i32 @test_mm_testnzc_si128(<2 x i64> %a0, <2 x i64> %a1) {
define i32 @test_mm_testz_si128(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_testz_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
; X32-NEXT: sete %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testz_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
; X64-NEXT: sete %al
diff --git a/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
index 9bda90a2302..d942d4776c1 100644
--- a/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
@@ -6,7 +6,7 @@
define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_sse41_blendpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; CHECK-NEXT: retl
%res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 6) ; <<2 x double>> [#uses=1]
@@ -17,7 +17,7 @@ declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i32) no
define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse41_blendps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; CHECK-NEXT: retl
%res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
@@ -28,7 +28,7 @@ declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i32) nounw
define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_sse41_dppd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: dppd $7, %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
@@ -39,7 +39,7 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i32) nounw
define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse41_dpps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: dpps $7, %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
@@ -50,7 +50,7 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i32) nounwind
define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse41_insertps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: insertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3]
; CHECK-NEXT: retl
%res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i32 17) ; <<4 x float>> [#uses=1]
@@ -61,7 +61,7 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) noun
define <2 x i64> @test_x86_sse41_movntdqa(<2 x i64>* %a0) {
; CHECK-LABEL: test_x86_sse41_movntdqa:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movntdqa (%eax), %xmm0
; CHECK-NEXT: retl
@@ -74,7 +74,7 @@ declare <2 x i64> @llvm.x86.sse41.movntdqa(i8*) nounwind readnone
define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_x86_sse41_mpsadbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: mpsadbw $7, %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i32 7) ; <<8 x i16>> [#uses=1]
@@ -85,7 +85,7 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i32) nounwind re
define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_x86_sse41_pblendw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 7) ; <<8 x i16>> [#uses=1]
@@ -96,7 +96,7 @@ declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i32) nounwind re
define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxbd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovsxbd %xmm0, %xmm0
; CHECK-NEXT: retl
%res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
@@ -107,7 +107,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxbq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovsxbq %xmm0, %xmm0
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
@@ -118,7 +118,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovsxbw %xmm0, %xmm0
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
@@ -129,7 +129,7 @@ declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxdq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovsxdq %xmm0, %xmm0
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
@@ -140,7 +140,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>) nounwind readnone
define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxwd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovsxwd %xmm0, %xmm0
; CHECK-NEXT: retl
%res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
@@ -151,7 +151,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovsxwq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovsxwq %xmm0, %xmm0
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
@@ -162,7 +162,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxbd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-NEXT: retl
%res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
@@ -173,7 +173,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxbq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
@@ -184,7 +184,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
@@ -195,7 +195,7 @@ declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxdq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
@@ -206,7 +206,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone
define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxwd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: retl
%res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
@@ -217,7 +217,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_sse41_pmovzxwq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NEXT: retl
%res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
@@ -227,7 +227,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
define <16 x i8> @max_epi8(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: max_epi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmaxsb %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
@@ -237,7 +237,7 @@ declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
define <16 x i8> @min_epi8(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: min_epi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pminsb %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
@@ -247,7 +247,7 @@ declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @max_epu16(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: max_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmaxuw %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
@@ -257,7 +257,7 @@ declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @min_epu16(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: min_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pminuw %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
@@ -267,7 +267,7 @@ declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @max_epi32(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: max_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmaxsd %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
@@ -277,7 +277,7 @@ declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @min_epi32(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: min_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pminsd %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
@@ -287,7 +287,7 @@ declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @max_epu32(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: max_epu32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pmaxud %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
@@ -297,7 +297,7 @@ declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @min_epu32(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: min_epu32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pminud %xmm1, %xmm0
; CHECK-NEXT: retl
%res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
diff --git a/test/CodeGen/X86/sse41-intrinsics-x86.ll b/test/CodeGen/X86/sse41-intrinsics-x86.ll
index eec4ef991de..2c38904e4c7 100644
--- a/test/CodeGen/X86/sse41-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse41-intrinsics-x86.ll
@@ -5,7 +5,7 @@
define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; SSE41-LABEL: test_x86_sse41_blendvpd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movapd %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x28,0xd8]
; SSE41-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
@@ -13,7 +13,7 @@ define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1,
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_blendvpd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4b,0xc1,0x20]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) ; <<2 x double>> [#uses=1]
@@ -24,7 +24,7 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; SSE41-LABEL: test_x86_sse41_blendvps:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps %xmm0, %xmm3 ## encoding: [0x0f,0x28,0xd8]
; SSE41-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
@@ -32,7 +32,7 @@ define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_blendvps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4a,0xc1,0x20]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) ; <<4 x float>> [#uses=1]
@@ -43,12 +43,12 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
; SSE41-LABEL: test_x86_sse41_dppd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_dppd:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
@@ -59,12 +59,12 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi
define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
; SSE41-LABEL: test_x86_sse41_dpps:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_dpps:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -75,19 +75,19 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind
define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
; SSE41-LABEL: test_x86_sse41_insertps:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
; SSE41-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_insertps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
; AVX2-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_insertps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
; SKX-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
; SKX-NEXT: retl ## encoding: [0xc3]
@@ -100,12 +100,12 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw
define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
; SSE41-LABEL: test_x86_sse41_mpsadbw:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_mpsadbw:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
@@ -116,17 +116,17 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind rea
define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
; SSE41-LABEL: test_x86_sse41_packusdw:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: packusdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x2b,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_packusdw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_packusdw:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
@@ -137,21 +137,21 @@ declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readno
define <8 x i16> @test_x86_sse41_packusdw_fold() {
; SSE41-LABEL: test_x86_sse41_packusdw_fold:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; SSE41-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
; SSE41-NEXT: ## fixup A - offset: 3, value: LCPI7_0, kind: FK_Data_4
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_packusdw_fold:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; AVX2-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_packusdw_fold:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmovaps LCPI7_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,65535,65535,0,0]
; SKX-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; SKX-NEXT: ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
@@ -163,7 +163,7 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() {
define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; SSE41-LABEL: test_x86_sse41_pblendvb:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd8]
; SSE41-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x10,0xd9]
@@ -171,7 +171,7 @@ define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_pblendvb:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4c,0xc1,0x20]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) ; <<16 x i8>> [#uses=1]
@@ -182,12 +182,12 @@ declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) noun
define <8 x i16> @test_x86_sse41_phminposuw(<8 x i16> %a0) {
; SSE41-LABEL: test_x86_sse41_phminposuw:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: phminposuw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x41,0xc0]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_phminposuw:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vphminposuw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x41,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
@@ -198,17 +198,17 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
define <16 x i8> @test_x86_sse41_pmaxsb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE41-LABEL: test_x86_sse41_pmaxsb:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmaxsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3c,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pmaxsb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pmaxsb:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -219,17 +219,17 @@ declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_x86_sse41_pmaxsd(<4 x i32> %a0, <4 x i32> %a1) {
; SSE41-LABEL: test_x86_sse41_pmaxsd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmaxsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3d,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pmaxsd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pmaxsd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -240,17 +240,17 @@ declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @test_x86_sse41_pmaxud(<4 x i32> %a0, <4 x i32> %a1) {
; SSE41-LABEL: test_x86_sse41_pmaxud:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmaxud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3f,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pmaxud:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pmaxud:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -261,17 +261,17 @@ declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i16> @test_x86_sse41_pmaxuw(<8 x i16> %a0, <8 x i16> %a1) {
; SSE41-LABEL: test_x86_sse41_pmaxuw:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmaxuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3e,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pmaxuw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pmaxuw:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -282,17 +282,17 @@ declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_x86_sse41_pminsb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE41-LABEL: test_x86_sse41_pminsb:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pminsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x38,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pminsb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pminsb:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpminsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x38,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -303,17 +303,17 @@ declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_x86_sse41_pminsd(<4 x i32> %a0, <4 x i32> %a1) {
; SSE41-LABEL: test_x86_sse41_pminsd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pminsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x39,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pminsd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pminsd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x39,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -324,17 +324,17 @@ declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @test_x86_sse41_pminud(<4 x i32> %a0, <4 x i32> %a1) {
; SSE41-LABEL: test_x86_sse41_pminud:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pminud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3b,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pminud:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pminud:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpminud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -345,17 +345,17 @@ declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i16> @test_x86_sse41_pminuw(<8 x i16> %a0, <8 x i16> %a1) {
; SSE41-LABEL: test_x86_sse41_pminuw:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pminuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3a,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pminuw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pminuw:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -366,17 +366,17 @@ declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_x86_sse41_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE41-LABEL: test_x86_sse41_pmuldq:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmuldq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x28,0xc1]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_pmuldq:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x28,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_pmuldq:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x28,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
@@ -387,14 +387,14 @@ declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) {
; SSE41-LABEL: test_x86_sse41_ptestc:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE41-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE41-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_ptestc:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; VCHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
@@ -407,14 +407,14 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) {
; SSE41-LABEL: test_x86_sse41_ptestnzc:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE41-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE41-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_ptestnzc:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; VCHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -427,14 +427,14 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) {
; SSE41-LABEL: test_x86_sse41_ptestz:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE41-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE41-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_ptestz:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; VCHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
@@ -447,17 +447,17 @@ declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x double> @test_x86_sse41_round_pd(<2 x double> %a0) {
; SSE41-LABEL: test_x86_sse41_round_pd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x09,0xc0,0x07]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_round_pd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_round_pd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vrndscalepd $7, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x09,0xc0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7) ; <<2 x double>> [#uses=1]
@@ -468,17 +468,17 @@ declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readno
define <4 x float> @test_x86_sse41_round_ps(<4 x float> %a0) {
; SSE41-LABEL: test_x86_sse41_round_ps:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x08,0xc0,0x07]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_round_ps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_round_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vrndscaleps $7, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x08,0xc0,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
@@ -489,17 +489,17 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE41-LABEL: test_x86_sse41_round_sd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundsd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0xc1,0x07]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_round_sd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_round_sd:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vrndscalesd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x0b,0xc1,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
@@ -510,19 +510,19 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n
define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, <2 x double>* %a1) {
; SSE41-LABEL: test_x86_sse41_round_sd_load:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE41-NEXT: roundsd $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x00,0x07]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_round_sd_load:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; AVX2-NEXT: vroundsd $7, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_round_sd_load:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SKX-NEXT: vrndscalesd $7, (%eax), %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x0b,0x00,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
@@ -534,17 +534,17 @@ define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, <2 x double>
define <4 x float> @test_x86_sse41_round_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE41-LABEL: test_x86_sse41_round_ss:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundss $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0a,0xc1,0x07]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse41_round_ss:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vroundss $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse41_round_ss:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vrndscaless $7, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x0a,0xc1,0x07]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
diff --git a/test/CodeGen/X86/sse41-pmovxrm.ll b/test/CodeGen/X86/sse41-pmovxrm.ll
index d62053c96b7..2e65a470435 100644
--- a/test/CodeGen/X86/sse41-pmovxrm.ll
+++ b/test/CodeGen/X86/sse41-pmovxrm.ll
@@ -5,12 +5,12 @@
define <8 x i16> @test_llvm_x86_sse41_pmovsxbw(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxbw:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxbw:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -21,12 +21,12 @@ define <8 x i16> @test_llvm_x86_sse41_pmovsxbw(<16 x i8>* %a) {
define <4 x i32> @test_llvm_x86_sse41_pmovsxbd(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxbd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxbd:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -37,12 +37,12 @@ define <4 x i32> @test_llvm_x86_sse41_pmovsxbd(<16 x i8>* %a) {
define <2 x i64> @test_llvm_x86_sse41_pmovsxbq(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxbq:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxbq:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovsxbq (%rdi), %xmm0
; AVX-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -53,12 +53,12 @@ define <2 x i64> @test_llvm_x86_sse41_pmovsxbq(<16 x i8>* %a) {
define <4 x i32> @test_llvm_x86_sse41_pmovsxwd(<8 x i16>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxwd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxwd:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -69,12 +69,12 @@ define <4 x i32> @test_llvm_x86_sse41_pmovsxwd(<8 x i16>* %a) {
define <2 x i64> @test_llvm_x86_sse41_pmovsxwq(<8 x i16>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxwq:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxwq:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovsxwq (%rdi), %xmm0
; AVX-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -85,12 +85,12 @@ define <2 x i64> @test_llvm_x86_sse41_pmovsxwq(<8 x i16>* %a) {
define <2 x i64> @test_llvm_x86_sse41_pmovsxdq(<4 x i32>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovsxdq:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovsxdq:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovsxdq (%rdi), %xmm0
; AVX-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a, align 1
@@ -101,12 +101,12 @@ define <2 x i64> @test_llvm_x86_sse41_pmovsxdq(<4 x i32>* %a) {
define <8 x i16> @test_llvm_x86_sse41_pmovzxbw(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxbw:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxbw:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -117,12 +117,12 @@ define <8 x i16> @test_llvm_x86_sse41_pmovzxbw(<16 x i8>* %a) {
define <4 x i32> @test_llvm_x86_sse41_pmovzxbd(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxbd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxbd:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -133,12 +133,12 @@ define <4 x i32> @test_llvm_x86_sse41_pmovzxbd(<16 x i8>* %a) {
define <2 x i64> @test_llvm_x86_sse41_pmovzxbq(<16 x i8>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxbq:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxbq:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -149,12 +149,12 @@ define <2 x i64> @test_llvm_x86_sse41_pmovzxbq(<16 x i8>* %a) {
define <4 x i32> @test_llvm_x86_sse41_pmovzxwd(<8 x i16>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxwd:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxwd:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -165,12 +165,12 @@ define <4 x i32> @test_llvm_x86_sse41_pmovzxwd(<8 x i16>* %a) {
define <2 x i64> @test_llvm_x86_sse41_pmovzxwq(<8 x i16>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxwq:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxwq:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -181,12 +181,12 @@ define <2 x i64> @test_llvm_x86_sse41_pmovzxwq(<8 x i16>* %a) {
define <2 x i64> @test_llvm_x86_sse41_pmovzxdq(<4 x i32>* %a) {
; SSE41-LABEL: test_llvm_x86_sse41_pmovzxdq:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: test_llvm_x86_sse41_pmovzxdq:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; AVX-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a, align 1
diff --git a/test/CodeGen/X86/sse41-schedule.ll b/test/CodeGen/X86/sse41-schedule.ll
index 0eb3728d362..16fe86b7868 100644
--- a/test/CodeGen/X86/sse41-schedule.ll
+++ b/test/CodeGen/X86/sse41-schedule.ll
@@ -12,49 +12,49 @@
define <2 x double> @test_blendpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_blendpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_blendpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:1.00]
; SLM-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_blendpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.33]
; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blendpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.33]
; BROADWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blendpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.33]
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_blendpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:1.00]
; SKX-NEXT: vmovapd (%rdi), %xmm2 # sched: [6:0.50]
; SKX-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
@@ -62,14 +62,14 @@ define <2 x double> @test_blendpd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blendpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [8:0.50]
@@ -83,55 +83,55 @@ define <2 x double> @test_blendpd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
define <4 x float> @test_blendps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_blendps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
; GENERIC-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_blendps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:1.00]
; SLM-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_blendps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
; SANDY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.33]
; HASWELL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blendps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.33]
; BROADWELL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blendps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.33]
; SKYLAKE-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_blendps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.33]
; SKX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blendps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
; BTVER2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -143,7 +143,7 @@ define <4 x float> @test_blendps(<4 x float> %a0, <4 x float> %a1, <4 x float> *
define <2 x double> @test_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
; GENERIC-LABEL: test_blendvpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movapd %xmm0, %xmm3 # sched: [1:1.00]
; GENERIC-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: blendvpd %xmm0, %xmm1, %xmm3 # sched: [2:1.00]
@@ -152,7 +152,7 @@ define <2 x double> @test_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_blendvpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movapd %xmm0, %xmm3 # sched: [1:1.00]
; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: blendvpd %xmm0, %xmm1, %xmm3 # sched: [1:1.00]
@@ -161,43 +161,43 @@ define <2 x double> @test_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_blendvpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendvpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blendvpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; BROADWELL-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blendvpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:0.67]
; SKYLAKE-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_blendvpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:0.67]
; SKX-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blendvpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; BTVER2-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendvpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -210,7 +210,7 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
; GENERIC-LABEL: test_blendvps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movaps %xmm0, %xmm3 # sched: [1:1.00]
; GENERIC-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: blendvps %xmm0, %xmm1, %xmm3 # sched: [2:1.00]
@@ -219,7 +219,7 @@ define <4 x float> @test_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_blendvps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movaps %xmm0, %xmm3 # sched: [1:1.00]
; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: blendvps %xmm0, %xmm1, %xmm3 # sched: [1:1.00]
@@ -228,43 +228,43 @@ define <4 x float> @test_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_blendvps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_blendvps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_blendvps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; BROADWELL-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_blendvps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:0.67]
; SKYLAKE-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_blendvps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:0.67]
; SKX-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_blendvps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; BTVER2-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_blendvps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -277,55 +277,55 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_dppd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_dppd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: dppd $7, %xmm1, %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: dppd $7, (%rdi), %xmm0 # sched: [15:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_dppd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: dppd $7, %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: dppd $7, (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_dppd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [15:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_dppd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [9:1.00]
; HASWELL-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_dppd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [9:1.00]
; BROADWELL-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [14:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_dppd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [9:1.00]
; SKYLAKE-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [15:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_dppd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [9:1.00]
; SKX-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [15:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_dppd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [9:3.00]
; BTVER2-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [14:3.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_dppd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -338,55 +338,55 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi
define <4 x float> @test_dpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_dpps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: dpps $7, %xmm1, %xmm0 # sched: [12:2.00]
; GENERIC-NEXT: dpps $7, (%rdi), %xmm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_dpps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: dpps $7, %xmm1, %xmm0 # sched: [3:1.00]
; SLM-NEXT: dpps $7, (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_dpps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [12:2.00]
; SANDY-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_dpps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [14:2.00]
; HASWELL-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [14:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_dpps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [14:2.00]
; BROADWELL-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [19:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_dpps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [13:1.33]
; SKYLAKE-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [19:1.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_dpps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [13:1.33]
; SKX-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [19:1.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_dpps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [11:3.00]
; BTVER2-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [16:3.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_dpps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -399,55 +399,55 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind
define i32 @test_extractps(<4 x float> %a0, i32 *%a1) {
; GENERIC-LABEL: test_extractps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: extractps $3, %xmm0, %eax # sched: [3:1.00]
; GENERIC-NEXT: extractps $1, %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_extractps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: extractps $3, %xmm0, %eax # sched: [1:1.00]
; SLM-NEXT: extractps $1, %xmm0, (%rdi) # sched: [4:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_extractps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vextractps $3, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: vextractps $1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_extractps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vextractps $3, %xmm0, %eax # sched: [2:1.00]
; HASWELL-NEXT: vextractps $1, %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_extractps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vextractps $3, %xmm0, %eax # sched: [2:1.00]
; BROADWELL-NEXT: vextractps $1, %xmm0, (%rdi) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_extractps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vextractps $3, %xmm0, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: vextractps $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_extractps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vextractps $3, %xmm0, %eax # sched: [3:1.00]
; SKX-NEXT: vextractps $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_extractps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vextractps $3, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: vextractps $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_extractps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vextractps $3, %xmm0, %eax # sched: [2:2.00]
; ZNVER1-NEXT: vextractps $1, %xmm0, (%rdi) # sched: [5:2.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -461,55 +461,55 @@ define i32 @test_extractps(<4 x float> %a0, i32 *%a1) {
define <4 x float> @test_insertps(<4 x float> %a0, <4 x float> %a1, float *%a2) {
; GENERIC-LABEL: test_insertps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: insertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:1.00]
; GENERIC-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_insertps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: insertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:1.00]
; SLM-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_insertps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:1.00]
; SANDY-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [7:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_insertps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:1.00]
; HASWELL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_insertps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:1.00]
; BROADWELL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_insertps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:1.00]
; SKYLAKE-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_insertps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:1.00]
; SKX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_insertps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:0.50]
; BTVER2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_insertps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:0.50]
; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -522,47 +522,47 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw
define <2 x i64> @test_movntdqa(i8* %a0) {
; GENERIC-LABEL: test_movntdqa:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movntdqa (%rdi), %xmm0 # sched: [6:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_movntdqa:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movntdqa (%rdi), %xmm0 # sched: [3:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_movntdqa:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [6:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntdqa:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntdqa:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [5:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntdqa:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [6:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntdqa:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [6:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_movntdqa:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntdqa:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %a0)
@@ -572,55 +572,55 @@ declare <2 x i64> @llvm.x86.sse41.movntdqa(i8*) nounwind readnone
define <8 x i16> @test_mpsadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_mpsadbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: mpsadbw $7, %xmm1, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: mpsadbw $7, (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_mpsadbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: mpsadbw $7, %xmm1, %xmm0 # sched: [7:1.00]
; SLM-NEXT: mpsadbw $7, (%rdi), %xmm0 # sched: [10:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_mpsadbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mpsadbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [7:2.00]
; HASWELL-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [7:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mpsadbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [7:2.00]
; BROADWELL-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [12:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mpsadbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [4:2.00]
; SKYLAKE-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mpsadbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [4:2.00]
; SKX-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_mpsadbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BTVER2-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_mpsadbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -634,55 +634,55 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind rea
define <8 x i16> @test_packusdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_packusdw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: packusdw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: packusdw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_packusdw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: packusdw %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: packusdw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_packusdw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packusdw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packusdw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packusdw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packusdw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_packusdw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_packusdw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -696,7 +696,7 @@ declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readno
define <16 x i8> @test_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2, <16 x i8> *%a3) {
; GENERIC-LABEL: test_pblendvb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movdqa %xmm0, %xmm3 # sched: [1:0.33]
; GENERIC-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: pblendvb %xmm0, %xmm1, %xmm3 # sched: [8:1.00]
@@ -705,7 +705,7 @@ define <16 x i8> @test_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2, <16
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pblendvb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movdqa %xmm0, %xmm3 # sched: [1:0.50]
; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00]
; SLM-NEXT: pblendvb %xmm0, %xmm1, %xmm3 # sched: [1:1.00]
@@ -714,43 +714,43 @@ define <16 x i8> @test_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2, <16
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pblendvb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendvb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendvb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; BROADWELL-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendvb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:0.67]
; SKYLAKE-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendvb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:0.67]
; SKX-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pblendvb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:2.00]
; BTVER2-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pblendvb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -763,55 +763,55 @@ declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) noun
define <8 x i16> @test_pblendw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pblendw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
; GENERIC-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pblendw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:1.00]
; SLM-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pblendw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
; SANDY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:1.00]
; HASWELL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:1.00]
; BROADWELL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:1.00]
; SKYLAKE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:1.00]
; SKX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pblendw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
; BTVER2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pblendw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.33]
; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -823,43 +823,43 @@ define <8 x i16> @test_pblendw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <2 x i64> @test_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_pcmpeqq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpeqq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pcmpeqq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pcmpeqq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpeqq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pcmpeqq (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpeqq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0 # sched: [9:1.00]
@@ -867,13 +867,13 @@ define <2 x i64> @test_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpeqq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpeqq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -887,55 +887,55 @@ define <2 x i64> @test_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
define i32 @test_pextrb(<16 x i8> %a0, i8 *%a1) {
; GENERIC-LABEL: test_pextrb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pextrb $3, %xmm0, %eax # sched: [3:1.00]
; GENERIC-NEXT: pextrb $1, %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pextrb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pextrb $3, %xmm0, %eax # sched: [1:1.00]
; SLM-NEXT: pextrb $1, %xmm0, (%rdi) # sched: [4:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpextrb $3, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpextrb $3, %xmm0, %eax # sched: [2:1.00]
; HASWELL-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pextrb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpextrb $3, %xmm0, %eax # sched: [2:1.00]
; BROADWELL-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpextrb $3, %xmm0, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpextrb $3, %xmm0, %eax # sched: [3:1.00]
; SKX-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpextrb $3, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpextrb $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -948,63 +948,63 @@ define i32 @test_pextrb(<16 x i8> %a0, i8 *%a1) {
define i32 @test_pextrd(<4 x i32> %a0, i32 *%a1) {
; GENERIC-LABEL: test_pextrd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: paddd %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pextrd $3, %xmm0, %eax # sched: [3:1.00]
; GENERIC-NEXT: pextrd $1, %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pextrd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: paddd %xmm0, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pextrd $3, %xmm0, %eax # sched: [1:1.00]
; SLM-NEXT: pextrd $1, %xmm0, (%rdi) # sched: [4:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpaddd %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpextrd $3, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddd %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpextrd $3, %xmm0, %eax # sched: [2:1.00]
; HASWELL-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pextrd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddd %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpextrd $3, %xmm0, %eax # sched: [2:1.00]
; BROADWELL-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddd %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpextrd $3, %xmm0, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: vpextrd $3, %xmm0, %eax # sched: [3:1.00]
; SKX-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpextrd $3, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpextrd $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [8:1.00]
@@ -1018,55 +1018,55 @@ define i32 @test_pextrd(<4 x i32> %a0, i32 *%a1) {
define i64 @test_pextrq(<2 x i64> %a0, <2 x i64> %a1, i64 *%a2) {
; GENERIC-LABEL: test_pextrq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pextrq $1, %xmm0, %rax # sched: [3:1.00]
; GENERIC-NEXT: pextrq $1, %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pextrq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pextrq $1, %xmm0, %rax # sched: [1:1.00]
; SLM-NEXT: pextrq $1, %xmm0, (%rdi) # sched: [4:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpextrq $1, %xmm0, %rax # sched: [3:1.00]
; SANDY-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpextrq $1, %xmm0, %rax # sched: [2:1.00]
; HASWELL-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pextrq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpextrq $1, %xmm0, %rax # sched: [2:1.00]
; BROADWELL-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpextrq $1, %xmm0, %rax # sched: [3:1.00]
; SKYLAKE-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpextrq $1, %xmm0, %rax # sched: [3:1.00]
; SKX-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpextrq $1, %xmm0, %rax # sched: [1:0.50]
; BTVER2-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpextrq $1, %xmm0, %rax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1078,55 +1078,55 @@ define i64 @test_pextrq(<2 x i64> %a0, <2 x i64> %a1, i64 *%a2) {
define i32 @test_pextrw(<8 x i16> %a0, i16 *%a1) {
; GENERIC-LABEL: test_pextrw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pextrw $3, %xmm0, %eax # sched: [3:1.00]
; GENERIC-NEXT: pextrw $1, %xmm0, (%rdi) # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pextrw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pextrw $3, %xmm0, %eax # sched: [4:1.00]
; SLM-NEXT: pextrw $1, %xmm0, (%rdi) # sched: [4:2.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpextrw $3, %xmm0, %eax # sched: [3:1.00]
; SANDY-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpextrw $3, %xmm0, %eax # sched: [2:1.00]
; HASWELL-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pextrw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpextrw $3, %xmm0, %eax # sched: [2:1.00]
; BROADWELL-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [2:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpextrw $3, %xmm0, %eax # sched: [3:1.00]
; SKYLAKE-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpextrw $3, %xmm0, %eax # sched: [3:1.00]
; SKX-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpextrw $3, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpextrw $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1139,55 +1139,55 @@ define i32 @test_pextrw(<8 x i16> %a0, i16 *%a1) {
define <8 x i16> @test_phminposuw(<8 x i16> *%a0) {
; GENERIC-LABEL: test_phminposuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phminposuw (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: phminposuw %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_phminposuw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phminposuw (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: phminposuw %xmm0, %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phminposuw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vphminposuw (%rdi), %xmm0 # sched: [11:1.00]
; SANDY-NEXT: vphminposuw %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phminposuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphminposuw (%rdi), %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vphminposuw %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phminposuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphminposuw (%rdi), %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: vphminposuw %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phminposuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphminposuw (%rdi), %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: vphminposuw %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phminposuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphminposuw (%rdi), %xmm0 # sched: [10:0.50]
; SKX-NEXT: vphminposuw %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phminposuw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vphminposuw (%rdi), %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: vphminposuw %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phminposuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphminposuw (%rdi), %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vphminposuw %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1200,55 +1200,55 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
define <16 x i8> @test_pinsrb(<16 x i8> %a0, i8 %a1, i8 *%a2) {
; GENERIC-LABEL: test_pinsrb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pinsrb $1, %edi, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: pinsrb $3, (%rsi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pinsrb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pinsrb $1, %edi, %xmm0 # sched: [1:1.00]
; SLM-NEXT: pinsrb $3, (%rsi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pinsrb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pinsrb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; BROADWELL-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pinsrb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; SKYLAKE-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pinsrb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; SKX-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pinsrb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pinsrb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1260,55 +1260,55 @@ define <16 x i8> @test_pinsrb(<16 x i8> %a0, i8 %a1, i8 *%a2) {
define <4 x i32> @test_pinsrd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: test_pinsrd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pinsrd $1, %edi, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: pinsrd $3, (%rsi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pinsrd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pinsrd $1, %edi, %xmm0 # sched: [1:1.00]
; SLM-NEXT: pinsrd $3, (%rsi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pinsrd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pinsrd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; BROADWELL-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pinsrd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; SKYLAKE-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pinsrd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [2:2.00]
; SKX-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pinsrd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pinsrd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1320,63 +1320,63 @@ define <4 x i32> @test_pinsrd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
define <2 x i64> @test_pinsrq(<2 x i64> %a0, <2 x i64> %a1, i64 %a2, i64 *%a3) {
; GENERIC-LABEL: test_pinsrq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pinsrq $1, %rdi, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: pinsrq $1, (%rsi), %xmm1 # sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pinsrq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pinsrq $1, (%rsi), %xmm1 # sched: [4:1.00]
; SLM-NEXT: pinsrq $1, %rdi, %xmm0 # sched: [1:1.00]
; SLM-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pinsrq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pinsrq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [2:2.00]
; HASWELL-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pinsrq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [2:2.00]
; BROADWELL-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pinsrq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [2:2.00]
; SKYLAKE-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [6:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pinsrq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [2:2.00]
; SKX-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [6:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pinsrq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pinsrq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1390,55 +1390,55 @@ define <2 x i64> @test_pinsrq(<2 x i64> %a0, <2 x i64> %a1, i64 %a2, i64 *%a3) {
define <16 x i8> @test_pmaxsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pmaxsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaxsb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pmaxsb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmaxsb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaxsb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pmaxsb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaxsb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaxsb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaxsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1451,55 +1451,55 @@ declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_pmaxsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pmaxsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaxsd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pmaxsd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmaxsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaxsd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pmaxsd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaxsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaxsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaxsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1512,55 +1512,55 @@ declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @test_pmaxud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pmaxud:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaxud %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pmaxud (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmaxud:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaxud %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pmaxud (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaxud:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxud:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxud:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxud:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxud:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaxud:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaxud:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1573,55 +1573,55 @@ declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i16> @test_pmaxuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pmaxuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaxuw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pmaxuw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmaxuw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaxuw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pmaxuw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaxuw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaxuw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaxuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1634,55 +1634,55 @@ declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_pminsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pminsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pminsb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pminsb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pminsb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pminsb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pminsb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pminsb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pminsb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pminsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1695,55 +1695,55 @@ declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_pminsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pminsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pminsd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pminsd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pminsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pminsd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pminsd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pminsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pminsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pminsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1756,55 +1756,55 @@ declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @test_pminud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pminud:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pminud %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pminud (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pminud:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pminud %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pminud (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pminud:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminud:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminud:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminud:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminud:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pminud:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pminud:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1817,55 +1817,55 @@ declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i16> @test_pminuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pminuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pminuw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pminuw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pminuw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pminuw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pminuw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pminuw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pminuw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pminuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1878,14 +1878,14 @@ declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_pmovsxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovsxbw %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pmovsxbw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovsxbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovsxbw (%rdi), %xmm1 # sched: [4:1.00]
; SLM-NEXT: pmovsxbw %xmm0, %xmm0 # sched: [1:1.00]
; SLM-NEXT: paddw %xmm0, %xmm1 # sched: [1:0.50]
@@ -1893,49 +1893,49 @@ define <8 x i16> @test_pmovsxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovsxbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [6:1.00]
; SKYLAKE-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [6:1.00]
; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovsxbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovsxbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1950,14 +1950,14 @@ define <8 x i16> @test_pmovsxbw(<16 x i8> %a0, <8 x i8> *%a1) {
define <4 x i32> @test_pmovsxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovsxbd %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pmovsxbd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovsxbd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovsxbd (%rdi), %xmm1 # sched: [4:1.00]
; SLM-NEXT: pmovsxbd %xmm0, %xmm0 # sched: [1:1.00]
; SLM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -1965,49 +1965,49 @@ define <4 x i32> @test_pmovsxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovsxbd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [6:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [6:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovsxbd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovsxbd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2022,14 +2022,14 @@ define <4 x i32> @test_pmovsxbd(<16 x i8> %a0, <4 x i8> *%a1) {
define <2 x i64> @test_pmovsxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovsxbq %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pmovsxbq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovsxbq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovsxbq (%rdi), %xmm1 # sched: [4:1.00]
; SLM-NEXT: pmovsxbq %xmm0, %xmm0 # sched: [1:1.00]
; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50]
@@ -2037,49 +2037,49 @@ define <2 x i64> @test_pmovsxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovsxbq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [6:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [6:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovsxbq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovsxbq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2094,14 +2094,14 @@ define <2 x i64> @test_pmovsxbq(<16 x i8> %a0, <2 x i8> *%a1) {
define <2 x i64> @test_pmovsxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; GENERIC-LABEL: test_pmovsxdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovsxdq %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pmovsxdq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovsxdq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovsxdq (%rdi), %xmm1 # sched: [4:1.00]
; SLM-NEXT: pmovsxdq %xmm0, %xmm0 # sched: [1:1.00]
; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50]
@@ -2109,49 +2109,49 @@ define <2 x i64> @test_pmovsxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovsxdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [6:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [6:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovsxdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovsxdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2166,14 +2166,14 @@ define <2 x i64> @test_pmovsxdq(<4 x i32> %a0, <2 x i32> *%a1) {
define <4 x i32> @test_pmovsxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; GENERIC-LABEL: test_pmovsxwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovsxwd %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pmovsxwd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovsxwd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovsxwd (%rdi), %xmm1 # sched: [4:1.00]
; SLM-NEXT: pmovsxwd %xmm0, %xmm0 # sched: [1:1.00]
; SLM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -2181,49 +2181,49 @@ define <4 x i32> @test_pmovsxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovsxwd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [6:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [6:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovsxwd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovsxwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2238,14 +2238,14 @@ define <4 x i32> @test_pmovsxwd(<8 x i16> %a0, <4 x i16> *%a1) {
define <2 x i64> @test_pmovsxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; GENERIC-LABEL: test_pmovsxwq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovsxwq %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pmovsxwq (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovsxwq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovsxwq (%rdi), %xmm1 # sched: [4:1.00]
; SLM-NEXT: pmovsxwq %xmm0, %xmm0 # sched: [1:1.00]
; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50]
@@ -2253,49 +2253,49 @@ define <2 x i64> @test_pmovsxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovsxwq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxwq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxwq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxwq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [6:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxwq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [6:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovsxwq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovsxwq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2310,14 +2310,14 @@ define <2 x i64> @test_pmovsxwq(<8 x i16> %a0, <2 x i16> *%a1) {
define <8 x i16> @test_pmovzxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.50]
; GENERIC-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [7:0.50]
; GENERIC-NEXT: paddw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovzxbw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [4:1.00]
; SLM-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; SLM-NEXT: paddw %xmm0, %xmm1 # sched: [1:0.50]
@@ -2325,49 +2325,49 @@ define <8 x i16> @test_pmovzxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovzxbw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.50]
; SANDY-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [7:0.50]
; SANDY-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; HASWELL-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [1:1.00]
; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; BROADWELL-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [6:1.00]
; BROADWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [6:1.00]
; SKYLAKE-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [6:1.00]
; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovzxbw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [6:1.00]
; BTVER2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.50]
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovzxbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2382,14 +2382,14 @@ define <8 x i16> @test_pmovzxbw(<16 x i8> %a0, <8 x i8> *%a1) {
define <4 x i32> @test_pmovzxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.50]
; GENERIC-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [7:0.50]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovzxbd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [4:1.00]
; SLM-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
; SLM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -2397,49 +2397,49 @@ define <4 x i32> @test_pmovzxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovzxbd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.50]
; SANDY-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
; BROADWELL-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [6:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [6:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
; SKX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [6:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovzxbd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [6:1.00]
; BTVER2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovzxbd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2454,14 +2454,14 @@ define <4 x i32> @test_pmovzxbd(<16 x i8> %a0, <4 x i8> *%a1) {
define <2 x i64> @test_pmovzxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50]
; GENERIC-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovzxbq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [4:1.00]
; SLM-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50]
@@ -2469,49 +2469,49 @@ define <2 x i64> @test_pmovzxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovzxbq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50]
; SANDY-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; BROADWELL-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [6:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; SKX-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [6:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovzxbq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [6:1.00]
; BTVER2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovzxbq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2526,14 +2526,14 @@ define <2 x i64> @test_pmovzxbq(<16 x i8> %a0, <2 x i8> *%a1) {
define <2 x i64> @test_pmovzxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; GENERIC-LABEL: test_pmovzxdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero sched: [1:0.50]
; GENERIC-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovzxdq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [4:1.00]
; SLM-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:1.00]
; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50]
@@ -2541,49 +2541,49 @@ define <2 x i64> @test_pmovzxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovzxdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.50]
; SANDY-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:1.00]
; HASWELL-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:1.00]
; BROADWELL-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:1.00]
; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [6:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:1.00]
; SKX-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [6:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovzxdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [6:1.00]
; BTVER2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovzxdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2598,14 +2598,14 @@ define <2 x i64> @test_pmovzxdq(<4 x i32> %a0, <2 x i32> *%a1) {
define <4 x i32> @test_pmovzxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; GENERIC-LABEL: test_pmovzxwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50]
; GENERIC-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [7:0.50]
; GENERIC-NEXT: paddd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovzxwd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [4:1.00]
; SLM-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
; SLM-NEXT: paddd %xmm0, %xmm1 # sched: [1:0.50]
@@ -2613,49 +2613,49 @@ define <4 x i32> @test_pmovzxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovzxwd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50]
; SANDY-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [7:0.50]
; SANDY-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
; HASWELL-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [1:1.00]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
; BROADWELL-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [6:1.00]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [6:1.00]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
; SKX-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [6:1.00]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovzxwd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [6:1.00]
; BTVER2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovzxwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2670,14 +2670,14 @@ define <4 x i32> @test_pmovzxwd(<8 x i16> %a0, <4 x i16> *%a1) {
define <2 x i64> @test_pmovzxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; GENERIC-LABEL: test_pmovzxwq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.50]
; GENERIC-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [7:0.50]
; GENERIC-NEXT: paddq %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmovzxwq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [4:1.00]
; SLM-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:1.00]
; SLM-NEXT: paddq %xmm0, %xmm1 # sched: [1:0.50]
@@ -2685,49 +2685,49 @@ define <2 x i64> @test_pmovzxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmovzxwq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.50]
; SANDY-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [7:0.50]
; SANDY-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxwq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxwq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:1.00]
; BROADWELL-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [6:1.00]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxwq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:1.00]
; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [6:1.00]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxwq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:1.00]
; SKX-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [6:1.00]
; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmovzxwq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [6:1.00]
; BTVER2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmovzxwq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2742,55 +2742,55 @@ define <2 x i64> @test_pmovzxwq(<8 x i16> %a0, <2 x i16> *%a1) {
define <2 x i64> @test_pmuldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pmuldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmuldq %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmuldq (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmuldq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmuldq %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmuldq (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmuldq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmuldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmuldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmuldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmuldq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmuldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2804,55 +2804,55 @@ declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @test_pmulld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pmulld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmulld %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmulld (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pmulld:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmulld %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmulld (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmulld:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [10:2.00]
; HASWELL-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [10:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [10:2.00]
; BROADWELL-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [15:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [8:0.67]
; SKYLAKE-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [14:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [8:0.67]
; SKX-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [14:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmulld:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmulld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2864,7 +2864,7 @@ define <4 x i32> @test_pmulld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_ptest:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: ptest %xmm1, %xmm0 # sched: [2:1.00]
; GENERIC-NEXT: setb %al # sched: [1:0.50]
; GENERIC-NEXT: ptest (%rdi), %xmm0 # sched: [8:1.00]
@@ -2874,7 +2874,7 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_ptest:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: ptest %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: setb %al # sched: [1:0.50]
; SLM-NEXT: ptest (%rdi), %xmm0 # sched: [4:1.00]
@@ -2884,7 +2884,7 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ptest:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vptest %xmm1, %xmm0 # sched: [2:1.00]
; SANDY-NEXT: setb %al # sched: [1:0.50]
; SANDY-NEXT: vptest (%rdi), %xmm0 # sched: [8:1.00]
@@ -2894,7 +2894,7 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ptest:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vptest %xmm1, %xmm0 # sched: [2:1.00]
; HASWELL-NEXT: setb %al # sched: [1:0.50]
; HASWELL-NEXT: vptest (%rdi), %xmm0 # sched: [2:1.00]
@@ -2904,7 +2904,7 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ptest:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vptest %xmm1, %xmm0 # sched: [2:1.00]
; BROADWELL-NEXT: setb %al # sched: [1:0.50]
; BROADWELL-NEXT: vptest (%rdi), %xmm0 # sched: [7:1.00]
@@ -2914,7 +2914,7 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ptest:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vptest %xmm1, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: setb %al # sched: [1:0.50]
; SKYLAKE-NEXT: vptest (%rdi), %xmm0 # sched: [9:1.00]
@@ -2924,7 +2924,7 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_ptest:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vptest %xmm1, %xmm0 # sched: [3:1.00]
; SKX-NEXT: setb %al # sched: [1:0.50]
; SKX-NEXT: vptest (%rdi), %xmm0 # sched: [9:1.00]
@@ -2934,7 +2934,7 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ptest:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vptest %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
; BTVER2-NEXT: vptest (%rdi), %xmm0 # sched: [8:1.00]
@@ -2944,7 +2944,7 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ptest:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vptest %xmm1, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
; ZNVER1-NEXT: vptest (%rdi), %xmm0 # sched: [8:1.00]
@@ -2962,14 +2962,14 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x double> @test_roundpd(<2 x double> %a0, <2 x double> *%a1) {
; GENERIC-LABEL: test_roundpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: roundpd $7, %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: roundpd $7, (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: addpd %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_roundpd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: roundpd $7, (%rdi), %xmm1 # sched: [6:1.00]
; SLM-NEXT: roundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00]
@@ -2977,49 +2977,49 @@ define <2 x double> @test_roundpd(<2 x double> %a0, <2 x double> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_roundpd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [5:1.25]
; HASWELL-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [6:2.00]
; HASWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_roundpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [11:2.00]
; BROADWELL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_roundpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [8:0.67]
; SKYLAKE-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [14:0.67]
; SKYLAKE-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_roundpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrndscalepd $7, %xmm0, %xmm0 # sched: [8:0.67]
; SKX-NEXT: vrndscalepd $7, (%rdi), %xmm1 # sched: [14:0.67]
; SKX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_roundpd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [11:1.00]
; ZNVER1-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -3034,14 +3034,14 @@ declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readno
define <4 x float> @test_roundps(<4 x float> %a0, <4 x float> *%a1) {
; GENERIC-LABEL: test_roundps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: roundps $7, %xmm0, %xmm1 # sched: [3:1.00]
; GENERIC-NEXT: roundps $7, (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_roundps:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: roundps $7, (%rdi), %xmm1 # sched: [6:1.00]
; SLM-NEXT: roundps $7, %xmm0, %xmm0 # sched: [3:1.00]
; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00]
@@ -3049,49 +3049,49 @@ define <4 x float> @test_roundps(<4 x float> %a0, <4 x float> *%a1) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_roundps:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [9:1.00]
; SANDY-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [5:1.25]
; HASWELL-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [6:2.00]
; HASWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_roundps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [11:2.00]
; BROADWELL-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_roundps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [8:0.67]
; SKYLAKE-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [14:0.67]
; SKYLAKE-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_roundps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrndscaleps $7, %xmm0, %xmm0 # sched: [8:0.67]
; SKX-NEXT: vrndscaleps $7, (%rdi), %xmm1 # sched: [14:0.67]
; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_roundps:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [11:1.00]
; ZNVER1-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -3106,7 +3106,7 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
define <2 x double> @test_roundsd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_roundsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movapd %xmm0, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: roundsd $7, %xmm1, %xmm2 # sched: [3:1.00]
; GENERIC-NEXT: roundsd $7, (%rdi), %xmm0 # sched: [9:1.00]
@@ -3114,7 +3114,7 @@ define <2 x double> @test_roundsd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_roundsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movapd %xmm0, %xmm2 # sched: [1:1.00]
; SLM-NEXT: roundsd $7, (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: roundsd $7, %xmm1, %xmm2 # sched: [3:1.00]
@@ -3122,49 +3122,49 @@ define <2 x double> @test_roundsd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_roundsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; SANDY-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [5:1.25]
; HASWELL-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [6:2.00]
; HASWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_roundsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [6:0.50]
; BROADWELL-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
; BROADWELL-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_roundsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [8:0.67]
; SKYLAKE-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [14:0.67]
; SKYLAKE-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_roundsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrndscalesd $7, %xmm1, %xmm0, %xmm1 # sched: [8:0.67]
; SKX-NEXT: vrndscalesd $7, (%rdi), %xmm0, %xmm0 # sched: [14:0.67]
; SKX-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_roundsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; BTVER2-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [4:1.00]
; ZNVER1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -3179,7 +3179,7 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n
define <4 x float> @test_roundss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_roundss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movaps %xmm0, %xmm2 # sched: [1:1.00]
; GENERIC-NEXT: roundss $7, %xmm1, %xmm2 # sched: [3:1.00]
; GENERIC-NEXT: roundss $7, (%rdi), %xmm0 # sched: [9:1.00]
@@ -3187,7 +3187,7 @@ define <4 x float> @test_roundss(<4 x float> %a0, <4 x float> %a1, <4 x float> *
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_roundss:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movaps %xmm0, %xmm2 # sched: [1:1.00]
; SLM-NEXT: roundss $7, (%rdi), %xmm0 # sched: [6:1.00]
; SLM-NEXT: roundss $7, %xmm1, %xmm2 # sched: [3:1.00]
@@ -3195,49 +3195,49 @@ define <4 x float> @test_roundss(<4 x float> %a0, <4 x float> %a1, <4 x float> *
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_roundss:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; SANDY-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_roundss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [5:1.25]
; HASWELL-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [6:2.00]
; HASWELL-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_roundss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [6:0.50]
; BROADWELL-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
; BROADWELL-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_roundss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [8:0.67]
; SKYLAKE-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [14:0.67]
; SKYLAKE-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_roundss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vrndscaless $7, %xmm1, %xmm0, %xmm1 # sched: [8:0.67]
; SKX-NEXT: vrndscaless $7, (%rdi), %xmm0, %xmm0 # sched: [14:0.67]
; SKX-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_roundss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; BTVER2-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [4:1.00]
; ZNVER1-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
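A minimal sketch of the spelling change these hunks apply, as it would look in a standalone FileCheck codegen test; the function name, RUN line, and target triple below are illustrative assumptions, not part of this commit:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s
define <2 x i64> @label_example(<4 x i32> %a0) {
; CHECK-LABEL: label_example:
; CHECK:       # %bb.0:
; CHECK-NEXT:  pmovsxdq %xmm0, %xmm0
; CHECK-NEXT:  retq
; Before this change, the block-label check above was written "# BB#0:".
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
  %2 = sext <2 x i32> %1 to <2 x i64>
  ret <2 x i64> %2
}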
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index 98ddd6d7f13..f4a5b057a48 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -6,12 +6,12 @@
define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
; X32-LABEL: pinsrd_1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: pinsrd_1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pinsrd $1, %edi, %xmm0
; X64-NEXT: retq
%tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
@@ -20,12 +20,12 @@ define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
define <16 x i8> @pinsrb_1(i8 %s, <16 x i8> %tmp) nounwind {
; X32-LABEL: pinsrb_1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: pinsrb_1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pinsrb $1, %edi, %xmm0
; X64-NEXT: retq
%tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
@@ -34,13 +34,13 @@ define <16 x i8> @pinsrb_1(i8 %s, <16 x i8> %tmp) nounwind {
define <2 x i64> @pmovzxbq_1() nounwind {
; X32-LABEL: pmovzxbq_1:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl L_g16$non_lazy_ptr, %eax
; X32-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: pmovzxbq_1:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movq _g16@{{.*}}(%rip), %rax
; X64-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
@@ -56,12 +56,12 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
define i32 @extractps_1(<4 x float> %v) nounwind {
; X32-LABEL: extractps_1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: extractps $3, %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: extractps_1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: extractps $3, %xmm0, %eax
; X64-NEXT: retq
%s = extractelement <4 x float> %v, i32 3
@@ -70,12 +70,12 @@ define i32 @extractps_1(<4 x float> %v) nounwind {
}
define i32 @extractps_2(<4 x float> %v) nounwind {
; X32-LABEL: extractps_2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: extractps $3, %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: extractps_2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: extractps $3, %xmm0, %eax
; X64-NEXT: retq
%t = bitcast <4 x float> %v to <4 x i32>
@@ -90,7 +90,7 @@ define i32 @extractps_2(<4 x float> %v) nounwind {
define float @ext_1(<4 x float> %v) nounwind {
; X32-LABEL: ext_1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X32-NEXT: addss LCPI5_0, %xmm0
@@ -100,7 +100,7 @@ define float @ext_1(<4 x float> %v) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ext_1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X64-NEXT: addss {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -111,7 +111,7 @@ define float @ext_1(<4 x float> %v) nounwind {
define float @ext_2(<4 x float> %v) nounwind {
; X32-LABEL: ext_2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X32-NEXT: movss %xmm0, (%esp)
@@ -120,7 +120,7 @@ define float @ext_2(<4 x float> %v) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ext_2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; X64-NEXT: retq
%s = extractelement <4 x float> %v, i32 3
@@ -129,12 +129,12 @@ define float @ext_2(<4 x float> %v) nounwind {
define i32 @ext_3(<4 x i32> %v) nounwind {
; X32-LABEL: ext_3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: extractps $3, %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: ext_3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: extractps $3, %xmm0, %eax
; X64-NEXT: retq
%i = extractelement <4 x i32> %v, i32 3
@@ -143,12 +143,12 @@ define i32 @ext_3(<4 x i32> %v) nounwind {
define <4 x float> @insertps_1(<4 x float> %t1, <4 x float> %t2) nounwind {
; X32-LABEL: insertps_1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm1[0],zero,xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm1[0],zero,xmm0[3]
; X64-NEXT: retq
%tmp1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %t1, <4 x float> %t2, i32 21) nounwind readnone
@@ -161,13 +161,13 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) noun
; generate a separate movss to load the scalar operand.
define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind {
; X32-LABEL: blendps_not_insertps_1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: blendps_not_insertps_1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
%tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
@@ -179,13 +179,13 @@ define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind
; generate an insertps for X32 but not for X64!
define <4 x float> @insertps_or_blendps(<4 x float> %t1, float %t2) minsize nounwind {
; X32-LABEL: insertps_or_blendps:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_or_blendps:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
%tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
@@ -196,12 +196,12 @@ define <4 x float> @insertps_or_blendps(<4 x float> %t1, float %t2) minsize noun
; is always just a blendps because blendps is never more expensive than insertps.
define <4 x float> @blendps_not_insertps_2(<4 x float> %t1, <4 x float> %t2) nounwind {
; X32-LABEL: blendps_not_insertps_2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: blendps_not_insertps_2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
%tmp2 = extractelement <4 x float> %t2, i32 0
@@ -211,14 +211,14 @@ define <4 x float> @blendps_not_insertps_2(<4 x float> %t1, <4 x float> %t2) nou
define i32 @ptestz_1(<2 x i64> %t1, <2 x i64> %t2) nounwind {
; X32-LABEL: ptestz_1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
; X32-NEXT: sete %al
; X32-NEXT: retl
;
; X64-LABEL: ptestz_1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
; X64-NEXT: sete %al
@@ -229,14 +229,14 @@ define i32 @ptestz_1(<2 x i64> %t1, <2 x i64> %t2) nounwind {
define i32 @ptestz_2(<2 x i64> %t1, <2 x i64> %t2) nounwind {
; X32-LABEL: ptestz_2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: ptestz_2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
; X64-NEXT: setb %al
@@ -247,14 +247,14 @@ define i32 @ptestz_2(<2 x i64> %t1, <2 x i64> %t2) nounwind {
define i32 @ptestz_3(<2 x i64> %t1, <2 x i64> %t2) nounwind {
; X32-LABEL: ptestz_3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: ptestz_3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -271,7 +271,7 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
; pointless.
define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
; X32-LABEL: buildvector:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X32-NEXT: addss %xmm2, %xmm3
@@ -280,7 +280,7 @@ define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: buildvector:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X64-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X64-NEXT: addss %xmm2, %xmm3
@@ -301,13 +301,13 @@ entry:
define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
; X32-LABEL: insertps_from_shufflevector_1:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_shufflevector_1:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
entry:
@@ -318,12 +318,12 @@ entry:
define <4 x float> @insertps_from_shufflevector_2(<4 x float> %a, <4 x float> %b) {
; X32-LABEL: insertps_from_shufflevector_2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_shufflevector_2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
; X64-NEXT: retq
entry:
@@ -335,14 +335,14 @@ entry:
; instead of insertps
define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocapture readonly %pb) {
; X32-LABEL: pinsrd_from_shufflevector_i32:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufd {{.*#+}} xmm1 = mem[0,1,2,0]
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X32-NEXT: retl
;
; X64-LABEL: pinsrd_from_shufflevector_i32:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: pshufd {{.*#+}} xmm1 = mem[0,1,2,0]
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X64-NEXT: retq
@@ -354,13 +354,13 @@ entry:
define <4 x i32> @insertps_from_shufflevector_i32_2(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: insertps_from_shufflevector_i32_2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_shufflevector_i32_2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; X64-NEXT: retq
@@ -371,13 +371,13 @@ entry:
define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b) {
; X32-LABEL: insertps_from_load_ins_elt_undef:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_load_ins_elt_undef:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X64-NEXT: retq
%1 = load float, float* %b, align 4
@@ -389,7 +389,7 @@ define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b)
; TODO: Like on pinsrd_from_shufflevector_i32, remove this mov instr
define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
; X32-LABEL: insertps_from_load_ins_elt_undef_i32:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
@@ -397,7 +397,7 @@ define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_load_ins_elt_undef_i32:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
@@ -411,13 +411,13 @@ define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
;;;;;; Shuffles optimizable with a single insertps or blend instruction
define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_XYZ0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; X32-NEXT: retl
;
; X64-LABEL: shuf_XYZ0:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; X64-NEXT: retq
@@ -433,12 +433,12 @@ define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_XY00:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X32-NEXT: retl
;
; X64-LABEL: shuf_XY00:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
@@ -452,12 +452,12 @@ define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_XYY0(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_XYY0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
; X32-NEXT: retl
;
; X64-LABEL: shuf_XYY0:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
@@ -471,12 +471,12 @@ define <4 x float> @shuf_XYY0(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_XYW0(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_XYW0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
; X32-NEXT: retl
;
; X64-LABEL: shuf_XYW0:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
@@ -491,12 +491,12 @@ define <4 x float> @shuf_XYW0(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_W00W(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_W00W:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: shuf_W00W:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 3
@@ -509,12 +509,12 @@ define <4 x float> @shuf_W00W(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X00A:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: shuf_X00A:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
@@ -527,12 +527,12 @@ define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X00X:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
; X32-NEXT: retl
;
; X64-LABEL: shuf_X00X:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
@@ -545,14 +545,14 @@ define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X0YC:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorps %xmm2, %xmm2
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
; X32-NEXT: retl
;
; X64-LABEL: shuf_X0YC:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorps %xmm2, %xmm2
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
@@ -567,13 +567,13 @@ define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_XYZ0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pxor %xmm1, %xmm1
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_XYZ0:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X64-NEXT: retq
@@ -589,12 +589,12 @@ define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_XY00:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_XY00:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
@@ -608,14 +608,14 @@ define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_XYY0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
; X32-NEXT: pxor %xmm0, %xmm0
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_XYY0:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
@@ -631,14 +631,14 @@ define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_XYW0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,3,3]
; X32-NEXT: pxor %xmm0, %xmm0
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_XYW0:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,3,3]
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
@@ -655,14 +655,14 @@ define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_W00W:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; X32-NEXT: pxor %xmm0, %xmm0
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_W00W:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
@@ -677,7 +677,7 @@ define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X00A:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pxor %xmm2, %xmm2
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
@@ -685,7 +685,7 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_X00A:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pxor %xmm2, %xmm2
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
@@ -701,14 +701,14 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X00X:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pxor %xmm1, %xmm1
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_X00X:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
@@ -723,14 +723,14 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X0YC:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_X0YC:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
@@ -746,14 +746,14 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
;; Test for a bug in the first implementation of LowerBuildVectorv4x32
define < 4 x float> @test_insertps_no_undef(<4 x float> %x) {
; X32-LABEL: test_insertps_no_undef:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: blendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
; X32-NEXT: maxps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_insertps_no_undef:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: blendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
; X64-NEXT: maxps %xmm1, %xmm0
@@ -772,7 +772,7 @@ define < 4 x float> @test_insertps_no_undef(<4 x float> %x) {
define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
; X32-LABEL: blendvb_fallback:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: psllw $15, %xmm0
; X32-NEXT: psraw $15, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm1, %xmm2
@@ -780,7 +780,7 @@ define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
; X32-NEXT: retl
;
; X64-LABEL: blendvb_fallback:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: psllw $15, %xmm0
; X64-NEXT: psraw $15, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm1, %xmm2
@@ -793,13 +793,13 @@ define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
; On X32, account for the argument's move to registers
define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
; X32-LABEL: insertps_from_vector_load:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float>* %pb, align 16
@@ -811,13 +811,13 @@ define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocap
;; Try to match a bit more of the instr, since we need the load's offset.
define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
; X32-LABEL: insertps_from_vector_load_offset:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load_offset:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float>* %pb, align 16
@@ -828,7 +828,7 @@ define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>
;; Try to match a bit more of the instr, since we need the load's offset.
define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
; X32-LABEL: insertps_from_vector_load_offset_2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: shll $4, %ecx
@@ -836,7 +836,7 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load_offset_2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: shlq $4, %rsi
; X64-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; X64-NEXT: retq
@@ -848,14 +848,14 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
; X32-LABEL: insertps_from_broadcast_loadf32:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_broadcast_loadf32:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
%1 = getelementptr inbounds float, float* %fb, i64 %index
@@ -870,13 +870,13 @@ define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocap
define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
; X32-LABEL: insertps_from_broadcast_loadv4f32:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_broadcast_loadv4f32:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float>* %b, align 4
@@ -891,7 +891,7 @@ define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float
define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
; X32-LABEL: insertps_from_broadcast_multiple_use:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
@@ -905,7 +905,7 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_broadcast_multiple_use:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
@@ -933,7 +933,7 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
; X32-LABEL: insertps_with_undefs:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -941,7 +941,7 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
; X32-NEXT: retl
;
; X64-LABEL: insertps_with_undefs:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT: movaps %xmm1, %xmm0
@@ -956,13 +956,13 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
; the destination index to change the load, instead of the source index.
define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
; X32-LABEL: pr20087:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: pr20087:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[0]
; X64-NEXT: retq
%load = load <4 x float> , <4 x float> *%ptr
@@ -973,7 +973,7 @@ define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
; Edge case for insertps where we end up with a shuffle with mask=<0, 7, -1, -1>
define void @insertps_pr20411(<4 x i32> %shuffle109, <4 x i32> %shuffle116, i32* noalias nocapture %RET) #1 {
; X32-LABEL: insertps_pr20411:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
@@ -981,7 +981,7 @@ define void @insertps_pr20411(<4 x i32> %shuffle109, <4 x i32> %shuffle116, i32*
; X32-NEXT: retl
;
; X64-LABEL: insertps_pr20411:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; X64-NEXT: movdqu %xmm1, (%rdi)
@@ -994,12 +994,12 @@ define void @insertps_pr20411(<4 x i32> %shuffle109, <4 x i32> %shuffle116, i32*
define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 0
@@ -1013,12 +1013,12 @@ define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_5:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_5:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 0
@@ -1032,12 +1032,12 @@ define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_6:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_6:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 1
@@ -1050,12 +1050,12 @@ define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_7:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_7:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 0
@@ -1069,12 +1069,12 @@ define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_8:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_8:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 0
@@ -1088,13 +1088,13 @@ define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_9:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: insertps_9:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -1108,12 +1108,12 @@ define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
define <4 x float> @insertps_10(<4 x float> %A) {
; X32-LABEL: insertps_10:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_10:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 0
@@ -1124,13 +1124,13 @@ define <4 x float> @insertps_10(<4 x float> %A) {
define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
; X32-LABEL: build_vector_to_shuffle_1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: build_vector_to_shuffle_1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X64-NEXT: retq
@@ -1143,13 +1143,13 @@ define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) {
; X32-LABEL: build_vector_to_shuffle_2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: build_vector_to_shuffle_2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll b/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll
index 0a69d263212..cac396f8b77 100644
--- a/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll
+++ b/test/CodeGen/X86/sse42-intrinsics-fast-isel-x86_64.ll
@@ -5,7 +5,7 @@
define i64 @test_mm_crc64_u8(i64 %a0, i8 %a1) nounwind{
; X64-LABEL: test_mm_crc64_u8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: crc32b %sil, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -16,7 +16,7 @@ declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind readnone
define i64 @test_mm_crc64_u64(i64 %a0, i64 %a1) nounwind{
; X64-LABEL: test_mm_crc64_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: crc32q %rsi, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
index 383ab21bd40..f8d7f61d206 100644
--- a/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
@@ -6,7 +6,7 @@
define i32 @test_mm_cmpestra(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X32-LABEL: test_mm_cmpestra:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -18,7 +18,7 @@ define i32 @test_mm_cmpestra(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpestra:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %edx
@@ -35,7 +35,7 @@ declare i32 @llvm.x86.sse42.pcmpestria128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define i32 @test_mm_cmpestrc(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X32-LABEL: test_mm_cmpestrc:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -47,7 +47,7 @@ define i32 @test_mm_cmpestrc(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpestrc:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %edx
@@ -64,7 +64,7 @@ declare i32 @llvm.x86.sse42.pcmpestric128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define i32 @test_mm_cmpestri(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
; X32-LABEL: test_mm_cmpestri:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pcmpestri $7, %xmm1, %xmm0
@@ -72,7 +72,7 @@ define i32 @test_mm_cmpestri(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpestri:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %edx
; X64-NEXT: pcmpestri $7, %xmm1, %xmm0
@@ -87,14 +87,14 @@ declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nou
define <2 x i64> @test_mm_cmpestrm(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
; X32-LABEL: test_mm_cmpestrm:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pcmpestrm $7, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpestrm:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %edx
; X64-NEXT: pcmpestrm $7, %xmm1, %xmm0
@@ -109,7 +109,7 @@ declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i
define i32 @test_mm_cmpestro(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X32-LABEL: test_mm_cmpestro:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -121,7 +121,7 @@ define i32 @test_mm_cmpestro(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpestro:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %edx
@@ -138,7 +138,7 @@ declare i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define i32 @test_mm_cmpestrs(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X32-LABEL: test_mm_cmpestrs:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -150,7 +150,7 @@ define i32 @test_mm_cmpestrs(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpestrs:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %edx
@@ -167,7 +167,7 @@ declare i32 @llvm.x86.sse42.pcmpestris128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define i32 @test_mm_cmpestrz(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X32-LABEL: test_mm_cmpestrz:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -179,7 +179,7 @@ define i32 @test_mm_cmpestrz(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpestrz:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %edx
@@ -196,12 +196,12 @@ declare i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define <2 x i64> @test_mm_cmpgt_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpgt_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpgtq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpgt_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpgtq %xmm1, %xmm0
; X64-NEXT: retq
%cmp = icmp sgt <2 x i64> %a0, %a1
@@ -211,14 +211,14 @@ define <2 x i64> @test_mm_cmpgt_epi64(<2 x i64> %a0, <2 x i64> %a1) {
define i32 @test_mm_cmpistra(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpistra:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: pcmpistri $7, %xmm1, %xmm0
; X32-NEXT: seta %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpistra:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: pcmpistri $7, %xmm1, %xmm0
; X64-NEXT: seta %al
@@ -232,14 +232,14 @@ declare i32 @llvm.x86.sse42.pcmpistria128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_mm_cmpistrc(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpistrc:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: pcmpistri $7, %xmm1, %xmm0
; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpistrc:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: pcmpistri $7, %xmm1, %xmm0
; X64-NEXT: setb %al
@@ -253,13 +253,13 @@ declare i32 @llvm.x86.sse42.pcmpistric128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_mm_cmpistri(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpistri:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpistri $7, %xmm1, %xmm0
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpistri:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpistri $7, %xmm1, %xmm0
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: retq
@@ -272,12 +272,12 @@ declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind read
define <2 x i64> @test_mm_cmpistrm(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpistrm:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpistrm $7, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpistrm:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpistrm $7, %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -290,14 +290,14 @@ declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwin
define i32 @test_mm_cmpistro(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpistro:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: pcmpistri $7, %xmm1, %xmm0
; X32-NEXT: seto %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpistro:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: pcmpistri $7, %xmm1, %xmm0
; X64-NEXT: seto %al
@@ -311,14 +311,14 @@ declare i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_mm_cmpistrs(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpistrs:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: pcmpistri $7, %xmm1, %xmm0
; X32-NEXT: sets %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpistrs:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: pcmpistri $7, %xmm1, %xmm0
; X64-NEXT: sets %al
@@ -332,14 +332,14 @@ declare i32 @llvm.x86.sse42.pcmpistris128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_mm_cmpistrz(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpistrz:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: pcmpistri $7, %xmm1, %xmm0
; X32-NEXT: sete %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpistrz:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: pcmpistri $7, %xmm1, %xmm0
; X64-NEXT: sete %al
@@ -353,14 +353,14 @@ declare i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_mm_crc32_u8(i32 %a0, i8 %a1) {
; X32-LABEL: test_mm_crc32_u8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: crc32b %cl, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_crc32_u8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: crc32b %sil, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -371,14 +371,14 @@ declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind readnone
define i32 @test_mm_crc32_u16(i32 %a0, i16 %a1) {
; X32-LABEL: test_mm_crc32_u16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: crc32w %cx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_crc32_u16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: crc32w %si, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
@@ -389,13 +389,13 @@ declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind readnone
define i32 @test_mm_crc32_u32(i32 %a0, i32 %a1) {
; X32-LABEL: test_mm_crc32_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: crc32l {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_mm_crc32_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: crc32l %esi, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/sse42-intrinsics-x86.ll b/test/CodeGen/X86/sse42-intrinsics-x86.ll
index a8bbfebbc99..400a78f85bc 100644
--- a/test/CodeGen/X86/sse42-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse42-intrinsics-x86.ll
@@ -5,7 +5,7 @@
define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) {
; SSE42-LABEL: test_x86_sse42_pcmpestri128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; SSE42-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
@@ -13,7 +13,7 @@ define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) {
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestri128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
@@ -27,7 +27,7 @@ declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nou
define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
; SSE42-LABEL: test_x86_sse42_pcmpestri128_load:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE42-NEXT: movdqa (%eax), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x00]
@@ -38,7 +38,7 @@ define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse42_pcmpestri128_load:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; AVX2-NEXT: vmovdqa (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x00]
@@ -49,7 +49,7 @@ define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse42_pcmpestri128_load:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SKX-NEXT: vmovdqa (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x00]
@@ -67,7 +67,7 @@ define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
define i32 @test_x86_sse42_pcmpestria128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
; SSE42-LABEL: test_x86_sse42_pcmpestria128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -79,7 +79,7 @@ define i32 @test_x86_sse42_pcmpestria128(<16 x i8> %a0, <16 x i8> %a2) nounwind
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestria128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -97,7 +97,7 @@ declare i32 @llvm.x86.sse42.pcmpestria128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define i32 @test_x86_sse42_pcmpestric128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
; SSE42-LABEL: test_x86_sse42_pcmpestric128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -109,7 +109,7 @@ define i32 @test_x86_sse42_pcmpestric128(<16 x i8> %a0, <16 x i8> %a2) nounwind
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestric128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -127,7 +127,7 @@ declare i32 @llvm.x86.sse42.pcmpestric128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define i32 @test_x86_sse42_pcmpestrio128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
; SSE42-LABEL: test_x86_sse42_pcmpestrio128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -139,7 +139,7 @@ define i32 @test_x86_sse42_pcmpestrio128(<16 x i8> %a0, <16 x i8> %a2) nounwind
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestrio128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -157,7 +157,7 @@ declare i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define i32 @test_x86_sse42_pcmpestris128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
; SSE42-LABEL: test_x86_sse42_pcmpestris128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -169,7 +169,7 @@ define i32 @test_x86_sse42_pcmpestris128(<16 x i8> %a0, <16 x i8> %a2) nounwind
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestris128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -187,7 +187,7 @@ declare i32 @llvm.x86.sse42.pcmpestris128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define i32 @test_x86_sse42_pcmpestriz128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
; SSE42-LABEL: test_x86_sse42_pcmpestriz128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -199,7 +199,7 @@ define i32 @test_x86_sse42_pcmpestriz128(<16 x i8> %a0, <16 x i8> %a2) nounwind
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestriz128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -217,14 +217,14 @@ declare i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8>, i32, <16 x i8>, i32, i8) no
define <16 x i8> @test_x86_sse42_pcmpestrm128(<16 x i8> %a0, <16 x i8> %a2) {
; SSE42-LABEL: test_x86_sse42_pcmpestrm128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; SSE42-NEXT: pcmpestrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0xc1,0x07]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestrm128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: vpcmpestrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0xc1,0x07]
@@ -237,7 +237,7 @@ declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i
define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2) {
; SSE42-LABEL: test_x86_sse42_pcmpestrm128_load:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -245,7 +245,7 @@ define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestrm128_load:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
@@ -259,13 +259,13 @@ define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2
define i32 @test_x86_sse42_pcmpistri128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistri128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
; SSE42-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistri128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
; VCHECK-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -277,7 +277,7 @@ declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind read
define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistri128_load:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; SSE42-NEXT: movdqa (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x01]
@@ -286,7 +286,7 @@ define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse42_pcmpistri128_load:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; AVX2-NEXT: vmovdqa (%ecx), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x01]
@@ -295,7 +295,7 @@ define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse42_pcmpistri128_load:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; SKX-NEXT: vmovdqa (%ecx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x01]
@@ -311,14 +311,14 @@ define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
define i32 @test_x86_sse42_pcmpistria128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistria128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
; SSE42-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistria128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
; VCHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -331,14 +331,14 @@ declare i32 @llvm.x86.sse42.pcmpistria128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_x86_sse42_pcmpistric128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistric128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
; SSE42-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistric128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
; VCHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
@@ -351,14 +351,14 @@ declare i32 @llvm.x86.sse42.pcmpistric128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_x86_sse42_pcmpistrio128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistrio128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
; SSE42-NEXT: seto %al ## encoding: [0x0f,0x90,0xc0]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistrio128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
; VCHECK-NEXT: seto %al ## encoding: [0x0f,0x90,0xc0]
@@ -371,14 +371,14 @@ declare i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_x86_sse42_pcmpistris128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistris128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
; SSE42-NEXT: sets %al ## encoding: [0x0f,0x98,0xc0]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistris128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
; VCHECK-NEXT: sets %al ## encoding: [0x0f,0x98,0xc0]
@@ -391,14 +391,14 @@ declare i32 @llvm.x86.sse42.pcmpistris128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_x86_sse42_pcmpistriz128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistriz128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
; SSE42-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistriz128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
; VCHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
@@ -411,12 +411,12 @@ declare i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8>, <16 x i8>, i8) nounwind rea
define <16 x i8> @test_x86_sse42_pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistrm128:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pcmpistrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0xc1,0x07]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistrm128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vpcmpistrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0xc1,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<16 x i8>> [#uses=1]
@@ -427,13 +427,13 @@ declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwin
define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistrm128_load:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE42-NEXT: pcmpistrm $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0x00,0x07]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistrm128_load:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; VCHECK-NEXT: vpcmpistrm $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0x00,0x07]
; VCHECK-NEXT: retl ## encoding: [0xc3]
@@ -444,7 +444,7 @@ define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1
define i32 @crc32_32_8(i32 %a, i8 %b) nounwind {
; CHECK-LABEL: crc32_32_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; CHECK-NEXT: crc32b {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf0,0x44,0x24,0x08]
; CHECK-NEXT: retl ## encoding: [0xc3]
@@ -455,7 +455,7 @@ declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
define i32 @crc32_32_16(i32 %a, i16 %b) nounwind {
; CHECK-LABEL: crc32_32_16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; CHECK-NEXT: crc32w {{[0-9]+}}(%esp), %eax ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
; CHECK-NEXT: retl ## encoding: [0xc3]
@@ -466,7 +466,7 @@ declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind
define i32 @crc32_32_32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: crc32_32_32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; CHECK-NEXT: crc32l {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
; CHECK-NEXT: retl ## encoding: [0xc3]
diff --git a/test/CodeGen/X86/sse42-intrinsics-x86_64.ll b/test/CodeGen/X86/sse42-intrinsics-x86_64.ll
index e90aa455cfd..bde37879fe1 100644
--- a/test/CodeGen/X86/sse42-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/sse42-intrinsics-x86_64.ll
@@ -8,7 +8,7 @@ declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind
define i64 @crc32_64_8(i64 %a, i8 %b) nounwind {
; CHECK-LABEL: crc32_64_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: crc32b %sil, %edi ## encoding: [0xf2,0x40,0x0f,0x38,0xf0,0xfe]
; CHECK-NEXT: movq %rdi, %rax ## encoding: [0x48,0x89,0xf8]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -18,7 +18,7 @@ define i64 @crc32_64_8(i64 %a, i8 %b) nounwind {
define i64 @crc32_64_64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: crc32_64_64:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: crc32q %rsi, %rdi ## encoding: [0xf2,0x48,0x0f,0x38,0xf1,0xfe]
; CHECK-NEXT: movq %rdi, %rax ## encoding: [0x48,0x89,0xf8]
; CHECK-NEXT: retq ## encoding: [0xc3]
diff --git a/test/CodeGen/X86/sse42-schedule.ll b/test/CodeGen/X86/sse42-schedule.ll
index 4af89595cad..4fcabaae29b 100644
--- a/test/CodeGen/X86/sse42-schedule.ll
+++ b/test/CodeGen/X86/sse42-schedule.ll
@@ -12,63 +12,63 @@
define i32 @crc32_32_8(i32 %a0, i8 %a1, i8 *%a2) {
; GENERIC-LABEL: crc32_32_8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; GENERIC-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; GENERIC-NEXT: movl %edi, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: crc32_32_8:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; SLM-NEXT: crc32b (%rdx), %edi # sched: [6:1.00]
; SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: crc32_32_8:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; SANDY-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; SANDY-NEXT: movl %edi, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_32_8:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; HASWELL-NEXT: crc32b (%rdx), %edi # sched: [7:1.00]
; HASWELL-NEXT: movl %edi, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: crc32_32_8:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; BROADWELL-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; BROADWELL-NEXT: movl %edi, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: crc32_32_8:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; SKYLAKE-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; SKYLAKE-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: crc32_32_8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; SKX-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; SKX-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: crc32_32_8:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; BTVER2-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; BTVER2-NEXT: movl %edi, %eax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: crc32_32_8:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00]
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
@@ -82,63 +82,63 @@ declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
define i32 @crc32_32_16(i32 %a0, i16 %a1, i16 *%a2) {
; GENERIC-LABEL: crc32_32_16:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: crc32w %si, %edi # sched: [3:1.00]
; GENERIC-NEXT: crc32w (%rdx), %edi # sched: [7:1.00]
; GENERIC-NEXT: movl %edi, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: crc32_32_16:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: crc32w %si, %edi # sched: [3:1.00]
; SLM-NEXT: crc32w (%rdx), %edi # sched: [6:1.00]
; SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: crc32_32_16:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: crc32w %si, %edi # sched: [3:1.00]
; SANDY-NEXT: crc32w (%rdx), %edi # sched: [7:1.00]
; SANDY-NEXT: movl %edi, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_32_16:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: crc32w %si, %edi # sched: [3:1.00]
; HASWELL-NEXT: crc32w (%rdx), %edi # sched: [7:1.00]
; HASWELL-NEXT: movl %edi, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: crc32_32_16:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: crc32w %si, %edi # sched: [3:1.00]
; BROADWELL-NEXT: crc32w (%rdx), %edi # sched: [8:1.00]
; BROADWELL-NEXT: movl %edi, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: crc32_32_16:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: crc32w %si, %edi # sched: [3:1.00]
; SKYLAKE-NEXT: crc32w (%rdx), %edi # sched: [8:1.00]
; SKYLAKE-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: crc32_32_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: crc32w %si, %edi # sched: [3:1.00]
; SKX-NEXT: crc32w (%rdx), %edi # sched: [8:1.00]
; SKX-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: crc32_32_16:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: crc32w %si, %edi # sched: [3:1.00]
; BTVER2-NEXT: crc32w (%rdx), %edi # sched: [8:1.00]
; BTVER2-NEXT: movl %edi, %eax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: crc32_32_16:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: crc32w %si, %edi # sched: [3:1.00]
; ZNVER1-NEXT: crc32w (%rdx), %edi # sched: [10:1.00]
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
@@ -152,63 +152,63 @@ declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind
define i32 @crc32_32_32(i32 %a0, i32 %a1, i32 *%a2) {
; GENERIC-LABEL: crc32_32_32:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; GENERIC-NEXT: crc32l (%rdx), %edi # sched: [7:1.00]
; GENERIC-NEXT: movl %edi, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: crc32_32_32:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; SLM-NEXT: crc32l (%rdx), %edi # sched: [6:1.00]
; SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: crc32_32_32:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; SANDY-NEXT: crc32l (%rdx), %edi # sched: [7:1.00]
; SANDY-NEXT: movl %edi, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_32_32:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; HASWELL-NEXT: crc32l (%rdx), %edi # sched: [7:1.00]
; HASWELL-NEXT: movl %edi, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: crc32_32_32:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; BROADWELL-NEXT: crc32l (%rdx), %edi # sched: [8:1.00]
; BROADWELL-NEXT: movl %edi, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: crc32_32_32:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; SKYLAKE-NEXT: crc32l (%rdx), %edi # sched: [8:1.00]
; SKYLAKE-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: crc32_32_32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; SKX-NEXT: crc32l (%rdx), %edi # sched: [8:1.00]
; SKX-NEXT: movl %edi, %eax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: crc32_32_32:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; BTVER2-NEXT: crc32l (%rdx), %edi # sched: [8:1.00]
; BTVER2-NEXT: movl %edi, %eax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: crc32_32_32:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; ZNVER1-NEXT: crc32l (%rdx), %edi # sched: [10:1.00]
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
@@ -222,63 +222,63 @@ declare i32 @llvm.x86.sse42.crc32.32.32(i32, i32) nounwind
define i64 @crc32_64_8(i64 %a0, i8 %a1, i8 *%a2) nounwind {
; GENERIC-LABEL: crc32_64_8:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; GENERIC-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; GENERIC-NEXT: movq %rdi, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: crc32_64_8:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; SLM-NEXT: crc32b (%rdx), %edi # sched: [6:1.00]
; SLM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: crc32_64_8:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; SANDY-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; SANDY-NEXT: movq %rdi, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_64_8:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; HASWELL-NEXT: crc32b (%rdx), %edi # sched: [7:1.00]
; HASWELL-NEXT: movq %rdi, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: crc32_64_8:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; BROADWELL-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; BROADWELL-NEXT: movq %rdi, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: crc32_64_8:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; SKYLAKE-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; SKYLAKE-NEXT: movq %rdi, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: crc32_64_8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; SKX-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; SKX-NEXT: movq %rdi, %rax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: crc32_64_8:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; BTVER2-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; BTVER2-NEXT: movq %rdi, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: crc32_64_8:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00]
; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
@@ -292,63 +292,63 @@ declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind
define i64 @crc32_64_64(i64 %a0, i64 %a1, i64 *%a2) {
; GENERIC-LABEL: crc32_64_64:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; GENERIC-NEXT: crc32q (%rdx), %rdi # sched: [8:1.00]
; GENERIC-NEXT: movq %rdi, %rax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: crc32_64_64:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; SLM-NEXT: crc32q (%rdx), %rdi # sched: [6:1.00]
; SLM-NEXT: movq %rdi, %rax # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: crc32_64_64:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; SANDY-NEXT: crc32q (%rdx), %rdi # sched: [8:1.00]
; SANDY-NEXT: movq %rdi, %rax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: crc32_64_64:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; HASWELL-NEXT: crc32q (%rdx), %rdi # sched: [7:1.00]
; HASWELL-NEXT: movq %rdi, %rax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: crc32_64_64:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; BROADWELL-NEXT: crc32q (%rdx), %rdi # sched: [8:1.00]
; BROADWELL-NEXT: movq %rdi, %rax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: crc32_64_64:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; SKYLAKE-NEXT: crc32q (%rdx), %rdi # sched: [8:1.00]
; SKYLAKE-NEXT: movq %rdi, %rax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: crc32_64_64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; SKX-NEXT: crc32q (%rdx), %rdi # sched: [8:1.00]
; SKX-NEXT: movq %rdi, %rax # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: crc32_64_64:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; BTVER2-NEXT: crc32q (%rdx), %rdi # sched: [8:1.00]
; BTVER2-NEXT: movq %rdi, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: crc32_64_64:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; ZNVER1-NEXT: crc32q (%rdx), %rdi # sched: [10:1.00]
; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
@@ -362,7 +362,7 @@ declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind
define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpestri:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl $7, %eax # sched: [1:0.33]
; GENERIC-NEXT: movl $7, %edx # sched: [1:0.33]
; GENERIC-NEXT: pcmpestri $7, %xmm1, %xmm0 # sched: [4:2.67]
@@ -375,7 +375,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pcmpestri:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl $7, %eax # sched: [1:0.50]
; SLM-NEXT: movl $7, %edx # sched: [1:0.50]
; SLM-NEXT: pcmpestri $7, %xmm1, %xmm0 # sched: [21:21.00]
@@ -388,7 +388,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpestri:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl $7, %eax # sched: [1:0.33]
; SANDY-NEXT: movl $7, %edx # sched: [1:0.33]
; SANDY-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [4:2.67]
@@ -401,7 +401,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpestri:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; HASWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; HASWELL-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [18:4.00]
@@ -414,7 +414,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpestri:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; BROADWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [18:4.00]
@@ -427,7 +427,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpestri:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl $7, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: movl $7, %edx # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [18:4.00]
@@ -440,7 +440,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpestri:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl $7, %eax # sched: [1:0.25]
; SKX-NEXT: movl $7, %edx # sched: [1:0.25]
; SKX-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [18:4.00]
@@ -453,7 +453,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpestri:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl $7, %eax # sched: [1:0.17]
; BTVER2-NEXT: movl $7, %edx # sched: [1:0.17]
; BTVER2-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [14:10.00]
@@ -466,7 +466,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpestri:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25]
; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [100:?]
@@ -487,7 +487,7 @@ declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nou
define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpestrm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl $7, %eax # sched: [1:0.33]
; GENERIC-NEXT: movl $7, %edx # sched: [1:0.33]
; GENERIC-NEXT: pcmpestrm $7, %xmm1, %xmm0 # sched: [11:2.67]
@@ -497,7 +497,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pcmpestrm:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl $7, %eax # sched: [1:0.50]
; SLM-NEXT: movl $7, %edx # sched: [1:0.50]
; SLM-NEXT: pcmpestrm $7, %xmm1, %xmm0 # sched: [17:17.00]
@@ -507,7 +507,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpestrm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl $7, %eax # sched: [1:0.33]
; SANDY-NEXT: movl $7, %edx # sched: [1:0.33]
; SANDY-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [11:2.67]
@@ -517,7 +517,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpestrm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; HASWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; HASWELL-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [19:4.00]
@@ -527,7 +527,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpestrm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; BROADWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [19:4.00]
@@ -537,7 +537,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpestrm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl $7, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: movl $7, %edx # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [19:4.00]
@@ -547,7 +547,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpestrm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl $7, %eax # sched: [1:0.25]
; SKX-NEXT: movl $7, %edx # sched: [1:0.25]
; SKX-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [19:4.00]
@@ -557,7 +557,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpestrm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl $7, %eax # sched: [1:0.17]
; BTVER2-NEXT: movl $7, %edx # sched: [1:0.17]
; BTVER2-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [14:10.00]
@@ -567,7 +567,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpestrm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25]
; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [100:?]
@@ -584,7 +584,7 @@ declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i
define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpistri:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; GENERIC-NEXT: movl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
@@ -593,7 +593,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pcmpistri:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [17:17.00]
; SLM-NEXT: movl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:17.00]
@@ -602,7 +602,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpistri:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; SANDY-NEXT: movl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
@@ -611,7 +611,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpistri:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; HASWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [11:3.00]
@@ -620,7 +620,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpistri:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; BROADWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
@@ -629,7 +629,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpistri:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKYLAKE-NEXT: movl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
@@ -638,7 +638,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpistri:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKX-NEXT: movl %ecx, %eax # sched: [1:0.25]
; SKX-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
@@ -647,7 +647,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpistri:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [7:2.00]
; BTVER2-NEXT: movl %ecx, %eax # sched: [1:0.17]
; BTVER2-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [12:2.00]
@@ -656,7 +656,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpistri:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: movl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
@@ -673,55 +673,55 @@ declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind read
define <16 x i8> @test_pcmpistrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpistrm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpistrm $7, %xmm1, %xmm0 # sched: [11:3.00]
; GENERIC-NEXT: pcmpistrm $7, (%rdi), %xmm0 # sched: [17:3.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pcmpistrm:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpistrm $7, %xmm1, %xmm0 # sched: [13:13.00]
; SLM-NEXT: pcmpistrm $7, (%rdi), %xmm0 # sched: [13:13.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpistrm:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [11:3.00]
; SANDY-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [17:3.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpistrm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [11:3.00]
; HASWELL-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [11:3.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpistrm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [11:3.00]
; BROADWELL-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [16:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpistrm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKYLAKE-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [16:3.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpistrm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKX-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [16:3.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpistrm:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [8:2.00]
; BTVER2-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [13:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpistrm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -734,43 +734,43 @@ declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwin
define <2 x i64> @test_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_pcmpgtq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pcmpgtq %xmm1, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: pcmpgtq (%rdi), %xmm0 # sched: [11:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pcmpgtq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pcmpgtq %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: pcmpgtq (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pcmpgtq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; SANDY-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0 # sched: [9:1.00]
@@ -778,13 +778,13 @@ define <2 x i64> @test_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pcmpgtq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pcmpgtq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -798,55 +798,55 @@ define <2 x i64> @test_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
define <2 x i64> @test_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_pclmulqdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pclmulqdq $0, %xmm1, %xmm0 # sched: [14:6.00]
; GENERIC-NEXT: pclmulqdq $0, (%rdi), %xmm0 # sched: [14:5.67]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_pclmulqdq:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pclmulqdq $0, %xmm1, %xmm0 # sched: [10:10.00]
; SLM-NEXT: pclmulqdq $0, (%rdi), %xmm0 # sched: [10:10.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pclmulqdq:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [14:6.00]
; SANDY-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [14:5.67]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pclmulqdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [11:2.00]
; HASWELL-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [11:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pclmulqdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pclmulqdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [6:1.00]
; SKYLAKE-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pclmulqdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [6:1.00]
; SKX-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pclmulqdq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pclmulqdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
diff --git a/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll
index f45abf1d85d..51d056f2049 100644
--- a/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse4a-intrinsics-fast-isel.ll
@@ -8,12 +8,12 @@
define <2 x i64> @test_mm_extracti_si64(<2 x i64> %x) {
; X32-LABEL: test_mm_extracti_si64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: extrq $2, $3, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_extracti_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: extrq $2, $3, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
@@ -23,12 +23,12 @@ declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind readnone
define <2 x i64> @test_mm_extract_si64(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: test_mm_extract_si64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: extrq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_extract_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: extrq %xmm1, %xmm0
; X64-NEXT: retq
%bc = bitcast <2 x i64> %y to <16 x i8>
@@ -39,12 +39,12 @@ declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind readnone
define <2 x i64> @test_mm_inserti_si64(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: test_mm_inserti_si64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: insertq $6, $5, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_inserti_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertq $6, $5, %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
@@ -54,12 +54,12 @@ declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwin
define <2 x i64> @test_mm_insert_si64(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: test_mm_insert_si64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: insertq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_insert_si64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertq %xmm1, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y)
@@ -69,13 +69,13 @@ declare <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64>, <2 x i64>) nounwind readnon
define void @test_stream_sd(double* %p, <2 x double> %a) {
; X32-LABEL: test_stream_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movntsd %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_stream_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntsd %xmm0, (%rdi)
; X64-NEXT: retq
%1 = extractelement <2 x double> %a, i64 0
@@ -85,13 +85,13 @@ define void @test_stream_sd(double* %p, <2 x double> %a) {
define void @test_mm_stream_ss(float* %p, <4 x float> %a) {
; X32-LABEL: test_mm_stream_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movntss %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_stream_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntss %xmm0, (%rdi)
; X64-NEXT: retq
%1 = extractelement <4 x float> %a, i64 0
diff --git a/test/CodeGen/X86/sse4a-schedule.ll b/test/CodeGen/X86/sse4a-schedule.ll
index 78dcf4875e3..24ce51fd2e7 100644
--- a/test/CodeGen/X86/sse4a-schedule.ll
+++ b/test/CodeGen/X86/sse4a-schedule.ll
@@ -5,17 +5,17 @@
define <2 x i64> @test_extrq(<2 x i64> %a0, <16 x i8> %a1) {
; GENERIC-LABEL: test_extrq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: extrq %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; BTVER2-LABEL: test_extrq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: extrq %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_extrq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: extrq %xmm1, %xmm0 # sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %a0, <16 x i8> %a1)
@@ -25,17 +25,17 @@ declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>)
define <2 x i64> @test_extrqi(<2 x i64> %a0) {
; GENERIC-LABEL: test_extrqi:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: extrq $2, $3, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; BTVER2-LABEL: test_extrqi:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: extrq $2, $3, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_extrqi:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: extrq $2, $3, %xmm0 # sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a0, i8 3, i8 2)
@@ -45,17 +45,17 @@ declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8)
define <2 x i64> @test_insertq(<2 x i64> %a0, <2 x i64> %a1) {
; GENERIC-LABEL: test_insertq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: insertq %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; BTVER2-LABEL: test_insertq:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: insertq %xmm1, %xmm0 # sched: [2:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_insertq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: insertq %xmm1, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %a0, <2 x i64> %a1)
@@ -65,17 +65,17 @@ declare <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64>, <2 x i64>)
define <2 x i64> @test_insertqi(<2 x i64> %a0, <2 x i64> %a1) {
; GENERIC-LABEL: test_insertqi:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: insertq $6, $5, %xmm1, %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; BTVER2-LABEL: test_insertqi:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: insertq $6, $5, %xmm1, %xmm0 # sched: [2:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_insertqi:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: insertq $6, $5, %xmm1, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 6)
@@ -85,17 +85,17 @@ declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8)
define void @test_movntsd(i8* %p, <2 x double> %a) {
; GENERIC-LABEL: test_movntsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movntsd %xmm0, (%rdi) # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; BTVER2-LABEL: test_movntsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movntsd %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movntsd %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a)
@@ -105,17 +105,17 @@ declare void @llvm.x86.sse4a.movnt.sd(i8*, <2 x double>)
define void @test_movntss(i8* %p, <4 x float> %a) {
; GENERIC-LABEL: test_movntss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movntss %xmm0, (%rdi) # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; BTVER2-LABEL: test_movntss:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movntss %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_movntss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movntss %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a)
diff --git a/test/CodeGen/X86/sse4a-upgrade.ll b/test/CodeGen/X86/sse4a-upgrade.ll
index a129c658f4b..04cb11758ca 100644
--- a/test/CodeGen/X86/sse4a-upgrade.ll
+++ b/test/CodeGen/X86/sse4a-upgrade.ll
@@ -6,13 +6,13 @@
define void @test_movntss(i8* %p, <4 x float> %a) nounwind optsize ssp {
; X32-LABEL: test_movntss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movntss %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_movntss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntss %xmm0, (%rdi)
; X64-NEXT: retq
tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a) nounwind
@@ -23,13 +23,13 @@ declare void @llvm.x86.sse4a.movnt.ss(i8*, <4 x float>)
define void @test_movntsd(i8* %p, <2 x double> %a) nounwind optsize ssp {
; X32-LABEL: test_movntsd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movntsd %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_movntsd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movntsd %xmm0, (%rdi)
; X64-NEXT: retq
tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a) nounwind
diff --git a/test/CodeGen/X86/sse4a.ll b/test/CodeGen/X86/sse4a.ll
index ad04e257dc9..612e3b7de9c 100644
--- a/test/CodeGen/X86/sse4a.ll
+++ b/test/CodeGen/X86/sse4a.ll
@@ -6,12 +6,12 @@
define <2 x i64> @test_extrqi(<2 x i64> %x) nounwind uwtable ssp {
; X32-LABEL: test_extrqi:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: extrq $2, $3, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_extrqi:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: extrq $2, $3, %xmm0
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
@@ -20,27 +20,27 @@ define <2 x i64> @test_extrqi(<2 x i64> %x) nounwind uwtable ssp {
define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
; X32-SSE-LABEL: test_extrqi_domain:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movdqa (%eax), %xmm0
; X32-SSE-NEXT: extrq $2, $3, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: test_extrqi_domain:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovdqa (%eax), %xmm0
; X32-AVX-NEXT: extrq $2, $3, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_extrqi_domain:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movdqa (%rdi), %xmm0
; X64-SSE-NEXT: extrq $2, $3, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_extrqi_domain:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa (%rdi), %xmm0
; X64-AVX-NEXT: extrq $2, $3, %xmm0
; X64-AVX-NEXT: retq
@@ -53,12 +53,12 @@ declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind
define <2 x i64> @test_extrq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
; X32-LABEL: test_extrq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: extrq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_extrq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: extrq %xmm1, %xmm0
; X64-NEXT: retq
%1 = bitcast <2 x i64> %y to <16 x i8>
@@ -68,7 +68,7 @@ define <2 x i64> @test_extrq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
; X32-SSE-LABEL: test_extrq_domain:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movdqa (%eax), %xmm1
; X32-SSE-NEXT: extrq %xmm0, %xmm1
@@ -76,7 +76,7 @@ define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtabl
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: test_extrq_domain:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
; X32-AVX-NEXT: extrq %xmm0, %xmm1
@@ -84,14 +84,14 @@ define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtabl
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_extrq_domain:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movdqa (%rdi), %xmm1
; X64-SSE-NEXT: extrq %xmm0, %xmm1
; X64-SSE-NEXT: movdqa %xmm1, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_extrq_domain:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
; X64-AVX-NEXT: extrq %xmm0, %xmm1
; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
@@ -106,12 +106,12 @@ declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind
define <2 x i64> @test_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
; X32-LABEL: test_insertqi:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: insertq $6, $5, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_insertqi:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertq $6, $5, %xmm1, %xmm0
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
@@ -120,7 +120,7 @@ define <2 x i64> @test_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp
define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
; X32-SSE-LABEL: test_insertqi_domain:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movdqa (%eax), %xmm1
; X32-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1
@@ -128,7 +128,7 @@ define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwt
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: test_insertqi_domain:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
; X32-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1
@@ -136,14 +136,14 @@ define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwt
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_insertqi_domain:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movdqa (%rdi), %xmm1
; X64-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1
; X64-SSE-NEXT: movdqa %xmm1, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_insertqi_domain:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
; X64-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1
; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
@@ -157,12 +157,12 @@ declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwin
define <2 x i64> @test_insertq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
; X32-LABEL: test_insertq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: insertq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_insertq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertq %xmm1, %xmm0
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) nounwind
@@ -171,7 +171,7 @@ define <2 x i64> @test_insertq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp
define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
; X32-SSE-LABEL: test_insertq_domain:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movdqa (%eax), %xmm1
; X32-SSE-NEXT: insertq %xmm0, %xmm1
@@ -179,7 +179,7 @@ define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwta
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: test_insertq_domain:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
; X32-AVX-NEXT: insertq %xmm0, %xmm1
@@ -187,14 +187,14 @@ define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwta
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_insertq_domain:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movdqa (%rdi), %xmm1
; X64-SSE-NEXT: insertq %xmm0, %xmm1
; X64-SSE-NEXT: movdqa %xmm1, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_insertq_domain:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
; X64-AVX-NEXT: insertq %xmm0, %xmm1
; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
diff --git a/test/CodeGen/X86/sse_partial_update.ll b/test/CodeGen/X86/sse_partial_update.ll
index 8dfb8ee7007..f1007cc9951 100644
--- a/test/CodeGen/X86/sse_partial_update.ll
+++ b/test/CodeGen/X86/sse_partial_update.ll
@@ -10,7 +10,7 @@
define void @rsqrtss(<4 x float> %a) nounwind uwtable ssp {
; CHECK-LABEL: rsqrtss:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: rsqrtss %xmm0, %xmm0
; CHECK-NEXT: cvtss2sd %xmm0, %xmm2
; CHECK-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -32,7 +32,7 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
define void @rcpss(<4 x float> %a) nounwind uwtable ssp {
; CHECK-LABEL: rcpss:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: rcpss %xmm0, %xmm0
; CHECK-NEXT: cvtss2sd %xmm0, %xmm2
; CHECK-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -53,7 +53,7 @@ declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
define void @sqrtss(<4 x float> %a) nounwind uwtable ssp {
; CHECK-LABEL: sqrtss:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: sqrtss %xmm0, %xmm0
; CHECK-NEXT: cvtss2sd %xmm0, %xmm2
; CHECK-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -74,7 +74,7 @@ declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
define void @sqrtsd(<2 x double> %a) nounwind uwtable ssp {
; CHECK-LABEL: sqrtsd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: sqrtsd %xmm0, %xmm0
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm2
; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -97,7 +97,7 @@ declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
define <2 x double> @load_fold_cvtss2sd_int(<4 x float> *%a) {
; CHECK-LABEL: load_fold_cvtss2sd_int:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: cvtss2sd (%rdi), %xmm0
; CHECK-NEXT: retq
@@ -108,7 +108,7 @@ define <2 x double> @load_fold_cvtss2sd_int(<4 x float> *%a) {
define <2 x double> @load_fold_cvtss2sd_int_optsize(<4 x float> *%a) optsize {
; CHECK-LABEL: load_fold_cvtss2sd_int_optsize:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: cvtss2sd (%rdi), %xmm0
; CHECK-NEXT: retq
@@ -119,7 +119,7 @@ define <2 x double> @load_fold_cvtss2sd_int_optsize(<4 x float> *%a) optsize {
define <2 x double> @load_fold_cvtss2sd_int_minsize(<4 x float> *%a) minsize {
; CHECK-LABEL: load_fold_cvtss2sd_int_minsize:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: cvtss2sd (%rdi), %xmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/ssse3-intrinsics-fast-isel.ll b/test/CodeGen/X86/ssse3-intrinsics-fast-isel.ll
index f994dd924ed..74c5924b600 100644
--- a/test/CodeGen/X86/ssse3-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/ssse3-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define <2 x i64> @test_mm_abs_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_abs_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pabsb %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_abs_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pabsb %xmm0, %xmm0
; X64-NEXT: retq
%arg = bitcast <2 x i64> %a0 to <16 x i8>
@@ -25,12 +25,12 @@ declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
define <2 x i64> @test_mm_abs_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_abs_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pabsw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_abs_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pabsw %xmm0, %xmm0
; X64-NEXT: retq
%arg = bitcast <2 x i64> %a0 to <8 x i16>
@@ -44,12 +44,12 @@ declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
define <2 x i64> @test_mm_abs_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_abs_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pabsd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_abs_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pabsd %xmm0, %xmm0
; X64-NEXT: retq
%arg = bitcast <2 x i64> %a0 to <4 x i32>
@@ -63,13 +63,13 @@ declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
define <2 x i64> @test_mm_alignr_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_alignr_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: palignr {{.*#+}} xmm1 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1]
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_alignr_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: palignr {{.*#+}} xmm1 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1]
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -82,13 +82,13 @@ define <2 x i64> @test_mm_alignr_epi8(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test2_mm_alignr_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test2_mm_alignr_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: palignr {{.*#+}} xmm1 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test2_mm_alignr_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: palignr {{.*#+}} xmm1 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -101,12 +101,12 @@ define <2 x i64> @test2_mm_alignr_epi8(<2 x i64> %a0, <2 x i64> %a1) {
define <2 x i64> @test_mm_hadd_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_hadd_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: phaddw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hadd_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: phaddw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -119,12 +119,12 @@ declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>) nounwind rea
define <2 x i64> @test_mm_hadd_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_hadd_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: phaddd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hadd_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: phaddd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -137,12 +137,12 @@ declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>) nounwind rea
define <2 x i64> @test_mm_hadds_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_hadds_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: phaddsw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hadds_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: phaddsw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -155,12 +155,12 @@ declare <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16>, <8 x i16>) nounwind re
define <2 x i64> @test_mm_hsub_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_hsub_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: phsubw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hsub_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: phsubw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -173,12 +173,12 @@ declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>) nounwind rea
define <2 x i64> @test_mm_hsub_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_hsub_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: phsubd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hsub_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: phsubd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -191,12 +191,12 @@ declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>) nounwind rea
define <2 x i64> @test_mm_hsubs_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_hsubs_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: phsubsw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hsubs_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: phsubsw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -209,12 +209,12 @@ declare <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16>, <8 x i16>) nounwind re
define <2 x i64> @test_mm_maddubs_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maddubs_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmaddubsw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maddubs_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmaddubsw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -227,12 +227,12 @@ declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind
define <2 x i64> @test_mm_mulhrs_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_mulhrs_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pmulhrsw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mulhrs_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulhrsw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -245,12 +245,12 @@ declare <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16>, <8 x i16>) nounwind
define <2 x i64> @test_mm_shuffle_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_shuffle_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pshufb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shuffle_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pshufb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -263,12 +263,12 @@ declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind rea
define <2 x i64> @test_mm_sign_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sign_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psignb %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sign_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psignb %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -281,12 +281,12 @@ declare <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8>, <16 x i8>) nounwind rea
define <2 x i64> @test_mm_sign_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sign_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psignw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sign_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psignw %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -299,12 +299,12 @@ declare <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16>, <8 x i16>) nounwind rea
define <2 x i64> @test_mm_sign_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sign_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psignd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sign_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psignd %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
diff --git a/test/CodeGen/X86/ssse3-intrinsics-x86.ll b/test/CodeGen/X86/ssse3-intrinsics-x86.ll
index d9a6cc9725e..66265d63a97 100644
--- a/test/CodeGen/X86/ssse3-intrinsics-x86.ll
+++ b/test/CodeGen/X86/ssse3-intrinsics-x86.ll
@@ -5,17 +5,17 @@
define <16 x i8> @test_x86_ssse3_pabs_b_128(<16 x i8> %a0) {
; SSE-LABEL: test_x86_ssse3_pabs_b_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pabsb %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x1c,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_ssse3_pabs_b_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpabsb %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_ssse3_pabs_b_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpabsb %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0) ; <<16 x i8>> [#uses=1]
@@ -26,17 +26,17 @@ declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
define <4 x i32> @test_x86_ssse3_pabs_d_128(<4 x i32> %a0) {
; SSE-LABEL: test_x86_ssse3_pabs_d_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pabsd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x1e,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_ssse3_pabs_d_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpabsd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_ssse3_pabs_d_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpabsd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0) ; <<4 x i32>> [#uses=1]
@@ -47,17 +47,17 @@ declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
define <8 x i16> @test_x86_ssse3_pabs_w_128(<8 x i16> %a0) {
; SSE-LABEL: test_x86_ssse3_pabs_w_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pabsw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x1d,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_ssse3_pabs_w_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpabsw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_ssse3_pabs_w_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpabsw %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
@@ -68,12 +68,12 @@ declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
define <4 x i32> @test_x86_ssse3_phadd_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_ssse3_phadd_d_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: phaddd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x02,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_phadd_d_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vphaddd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x02,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -84,12 +84,12 @@ declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>) nounwind rea
define <8 x i16> @test_x86_ssse3_phadd_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_phadd_sw_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: phaddsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x03,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_phadd_sw_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x03,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -100,12 +100,12 @@ declare <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16>, <8 x i16>) nounwind re
define <8 x i16> @test_x86_ssse3_phadd_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_phadd_w_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: phaddw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x01,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_phadd_w_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vphaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x01,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -116,12 +116,12 @@ declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>) nounwind rea
define <4 x i32> @test_x86_ssse3_phsub_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_ssse3_phsub_d_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: phsubd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x06,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_phsub_d_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vphsubd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x06,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -132,12 +132,12 @@ declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>) nounwind rea
define <8 x i16> @test_x86_ssse3_phsub_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_phsub_sw_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: phsubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x07,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_phsub_sw_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x07,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -148,12 +148,12 @@ declare <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16>, <8 x i16>) nounwind re
define <8 x i16> @test_x86_ssse3_phsub_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_phsub_w_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: phsubw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x05,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_phsub_w_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vphsubw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x05,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -164,17 +164,17 @@ declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>) nounwind rea
define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_ssse3_pmadd_ub_sw_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmaddubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x04,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_ssse3_pmadd_ub_sw_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x04,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_ssse3_pmadd_ub_sw_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x04,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1) ; <<8 x i16>> [#uses=1]
@@ -186,7 +186,7 @@ declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind
; Make sure we don't commute this operation.
define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128_load_op0(<16 x i8>* %ptr, <16 x i8> %a1) {
; SSE-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SSE-NEXT: movdqa (%eax), %xmm1 ## encoding: [0x66,0x0f,0x6f,0x08]
; SSE-NEXT: pmaddubsw %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x38,0x04,0xc8]
@@ -194,14 +194,14 @@ define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128_load_op0(<16 x i8>* %ptr, <16 x
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; AVX2-NEXT: vmovdqa (%eax), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x08]
; AVX2-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0x04,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; SKX-NEXT: vmovdqa (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x08]
; SKX-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x04,0xc0]
@@ -214,17 +214,17 @@ define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128_load_op0(<16 x i8>* %ptr, <16 x
define <8 x i16> @test_x86_ssse3_pmul_hr_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_pmul_hr_sw_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pmulhrsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x0b,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_ssse3_pmul_hr_sw_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0b,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_ssse3_pmul_hr_sw_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0b,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -235,17 +235,17 @@ declare <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16>, <8 x i16>) nounwind
define <16 x i8> @test_x86_ssse3_pshuf_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_ssse3_pshuf_b_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: pshufb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x00,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_ssse3_pshuf_b_128:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x00,0xc1]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_ssse3_pshuf_b_128:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x00,0xc1]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -256,12 +256,12 @@ declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind rea
define <16 x i8> @test_x86_ssse3_psign_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_ssse3_psign_b_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psignb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x08,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_psign_b_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vpsignb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x08,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -272,12 +272,12 @@ declare <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8>, <16 x i8>) nounwind rea
define <4 x i32> @test_x86_ssse3_psign_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_ssse3_psign_d_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psignd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x0a,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_psign_d_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vpsignd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0a,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -288,12 +288,12 @@ declare <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32>, <4 x i32>) nounwind rea
define <8 x i16> @test_x86_ssse3_psign_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_psign_w_128:
-; SSE: ## BB#0:
+; SSE: ## %bb.0:
; SSE-NEXT: psignw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x09,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_ssse3_psign_w_128:
-; VCHECK: ## BB#0:
+; VCHECK: ## %bb.0:
; VCHECK-NEXT: vpsignw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x09,0xc1]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
diff --git a/test/CodeGen/X86/ssse3-schedule.ll b/test/CodeGen/X86/ssse3-schedule.ll
index 4ac10745e87..c39e2979663 100644
--- a/test/CodeGen/X86/ssse3-schedule.ll
+++ b/test/CodeGen/X86/ssse3-schedule.ll
@@ -13,14 +13,14 @@
define <16 x i8> @test_pabsb(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pabsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pabsb %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pabsb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pabsb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pabsb (%rdi), %xmm1 # sched: [1:1.00]
; ATOM-NEXT: pabsb %xmm0, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: por %xmm0, %xmm1 # sched: [1:0.50]
@@ -28,56 +28,56 @@ define <16 x i8> @test_pabsb(<16 x i8> %a0, <16 x i8> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pabsb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pabsb %xmm0, %xmm1 # sched: [1:0.50]
; SLM-NEXT: pabsb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pabsb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpabsb (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsb (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsb (%rdi), %xmm1 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsb (%rdi), %xmm1 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpabsb (%rdi), %xmm1 # sched: [7:0.50]
; SKX-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pabsb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpabsb (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pabsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsb (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -92,14 +92,14 @@ declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
define <4 x i32> @test_pabsd(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pabsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pabsd %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pabsd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pabsd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pabsd (%rdi), %xmm1 # sched: [1:1.00]
; ATOM-NEXT: pabsd %xmm0, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: por %xmm0, %xmm1 # sched: [1:0.50]
@@ -107,56 +107,56 @@ define <4 x i32> @test_pabsd(<4 x i32> %a0, <4 x i32> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pabsd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pabsd %xmm0, %xmm1 # sched: [1:0.50]
; SLM-NEXT: pabsd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pabsd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpabsd (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsd (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsd (%rdi), %xmm1 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsd (%rdi), %xmm1 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpabsd (%rdi), %xmm1 # sched: [7:0.50]
; SKX-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pabsd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpabsd (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pabsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -171,14 +171,14 @@ declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
define <8 x i16> @test_pabsw(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pabsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pabsw %xmm0, %xmm1 # sched: [1:0.50]
; GENERIC-NEXT: pabsw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: por %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pabsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pabsw (%rdi), %xmm1 # sched: [1:1.00]
; ATOM-NEXT: pabsw %xmm0, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: por %xmm0, %xmm1 # sched: [1:0.50]
@@ -186,56 +186,56 @@ define <8 x i16> @test_pabsw(<8 x i16> %a0, <8 x i16> *%a1) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pabsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pabsw %xmm0, %xmm1 # sched: [1:0.50]
; SLM-NEXT: pabsw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: por %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pabsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpabsw (%rdi), %xmm1 # sched: [7:0.50]
; SANDY-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsw (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsw (%rdi), %xmm1 # sched: [6:0.50]
; BROADWELL-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsw (%rdi), %xmm1 # sched: [7:0.50]
; SKYLAKE-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpabsw (%rdi), %xmm1 # sched: [7:0.50]
; SKX-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pabsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpabsw (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pabsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsw (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -250,14 +250,14 @@ declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
define <8 x i16> @test_palignr(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_palignr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.50]
; GENERIC-NEXT: palignr {{.*#+}} xmm1 = mem[14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [7:0.50]
; GENERIC-NEXT: movdqa %xmm1, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_palignr:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:1.00]
; ATOM-NEXT: palignr {{.*#+}} xmm1 = mem[14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [1:1.00]
; ATOM-NEXT: movdqa %xmm1, %xmm0 # sched: [1:0.50]
@@ -266,50 +266,50 @@ define <8 x i16> @test_palignr(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_palignr:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:1.00]
; SLM-NEXT: palignr {{.*#+}} xmm1 = mem[14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [4:1.00]
; SLM-NEXT: movdqa %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_palignr:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.50]
; SANDY-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_palignr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:1.00]
; HASWELL-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_palignr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:1.00]
; BROADWELL-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_palignr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:1.00]
; SKYLAKE-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_palignr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:1.00]
; SKX-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_palignr:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.50]
; BTVER2-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_palignr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.25]
; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -321,61 +321,61 @@ define <8 x i16> @test_palignr(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
define <4 x i32> @test_phaddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_phaddd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phaddd %xmm1, %xmm0 # sched: [3:1.50]
; GENERIC-NEXT: phaddd (%rdi), %xmm0 # sched: [9:1.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phaddd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phaddd %xmm1, %xmm0 # sched: [3:1.50]
; ATOM-NEXT: phaddd (%rdi), %xmm0 # sched: [4:2.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phaddd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phaddd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: phaddd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phaddd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
; SANDY-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKX-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phaddd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phaddd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -388,61 +388,61 @@ declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>) nounwind rea
define <8 x i16> @test_phaddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_phaddsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phaddsw %xmm1, %xmm0 # sched: [3:1.50]
; GENERIC-NEXT: phaddsw (%rdi), %xmm0 # sched: [9:1.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phaddsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phaddsw %xmm1, %xmm0 # sched: [7:3.50]
; ATOM-NEXT: phaddsw (%rdi), %xmm0 # sched: [8:4.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phaddsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phaddsw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: phaddsw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phaddsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
; SANDY-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKX-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phaddsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phaddsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -455,61 +455,61 @@ declare <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16>, <8 x i16>) nounwind re
define <8 x i16> @test_phaddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_phaddw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phaddw %xmm1, %xmm0 # sched: [3:1.50]
; GENERIC-NEXT: phaddw (%rdi), %xmm0 # sched: [9:1.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phaddw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phaddw %xmm1, %xmm0 # sched: [7:3.50]
; ATOM-NEXT: phaddw (%rdi), %xmm0 # sched: [8:4.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phaddw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phaddw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: phaddw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phaddw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
; SANDY-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKX-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phaddw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phaddw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -522,61 +522,61 @@ declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>) nounwind rea
define <4 x i32> @test_phsubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_phsubd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phsubd %xmm1, %xmm0 # sched: [3:1.50]
; GENERIC-NEXT: phsubd (%rdi), %xmm0 # sched: [9:1.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phsubd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phsubd %xmm1, %xmm0 # sched: [3:1.50]
; ATOM-NEXT: phsubd (%rdi), %xmm0 # sched: [4:2.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phsubd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phsubd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: phsubd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phsubd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
; SANDY-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKX-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phsubd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phsubd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -589,61 +589,61 @@ declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>) nounwind rea
define <8 x i16> @test_phsubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_phsubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phsubsw %xmm1, %xmm0 # sched: [3:1.50]
; GENERIC-NEXT: phsubsw (%rdi), %xmm0 # sched: [9:1.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phsubsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phsubsw %xmm1, %xmm0 # sched: [7:3.50]
; ATOM-NEXT: phsubsw (%rdi), %xmm0 # sched: [8:4.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phsubsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phsubsw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: phsubsw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phsubsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
; SANDY-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKX-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phsubsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phsubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -656,61 +656,61 @@ declare <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16>, <8 x i16>) nounwind re
define <8 x i16> @test_phsubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_phsubw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: phsubw %xmm1, %xmm0 # sched: [3:1.50]
; GENERIC-NEXT: phsubw (%rdi), %xmm0 # sched: [9:1.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_phsubw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: phsubw %xmm1, %xmm0 # sched: [7:3.50]
; ATOM-NEXT: phsubw (%rdi), %xmm0 # sched: [8:4.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_phsubw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: phsubw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: phsubw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_phsubw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [3:1.50]
; SANDY-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [9:1.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKX-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [9:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_phsubw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_phsubw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -723,61 +723,61 @@ declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>) nounwind rea
define <8 x i16> @test_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pmaddubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmaddubsw %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmaddubsw (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmaddubsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmaddubsw %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: pmaddubsw (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmaddubsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmaddubsw %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmaddubsw (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmaddubsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaddubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaddubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaddubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmaddubsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmaddubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -791,61 +791,61 @@ declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind
define <8 x i16> @test_pmulhrsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhrsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pmulhrsw %xmm1, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: pmulhrsw (%rdi), %xmm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pmulhrsw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pmulhrsw %xmm1, %xmm0 # sched: [5:5.00]
; ATOM-NEXT: pmulhrsw (%rdi), %xmm0 # sched: [5:5.00]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pmulhrsw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pmulhrsw %xmm1, %xmm0 # sched: [4:1.00]
; SLM-NEXT: pmulhrsw (%rdi), %xmm0 # sched: [7:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pmulhrsw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; SANDY-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhrsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhrsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhrsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhrsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pmulhrsw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pmulhrsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -858,61 +858,61 @@ declare <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16>, <8 x i16>) nounwind
define <16 x i8> @test_pshufb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_pshufb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: pshufb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: pshufb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pshufb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: pshufb %xmm1, %xmm0 # sched: [4:2.00]
; ATOM-NEXT: pshufb (%rdi), %xmm0 # sched: [5:2.50]
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pshufb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: pshufb %xmm1, %xmm0 # sched: [1:1.00]
; SLM-NEXT: pshufb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pshufb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pshufb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pshufb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -925,13 +925,13 @@ declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind rea
define <16 x i8> @test_psignb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-LABEL: test_psignb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psignb %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psignb (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psignb:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psignb %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psignb (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -941,49 +941,49 @@ define <16 x i8> @test_psignb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psignb:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psignb %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psignb (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psignb:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psignb:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psignb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -996,13 +996,13 @@ declare <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8>, <16 x i8>) nounwind rea
define <4 x i32> @test_psignd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psignd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psignd %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psignd (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psignd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psignd %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psignd (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -1012,49 +1012,49 @@ define <4 x i32> @test_psignd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psignd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psignd %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psignd (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psignd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psignd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psignd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1067,13 +1067,13 @@ declare <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32>, <4 x i32>) nounwind rea
define <8 x i16> @test_psignw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psignw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: psignw %xmm1, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: psignw (%rdi), %xmm0 # sched: [7:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_psignw:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: psignw %xmm1, %xmm0 # sched: [1:0.50]
; ATOM-NEXT: psignw (%rdi), %xmm0 # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -1083,49 +1083,49 @@ define <8 x i16> @test_psignw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_psignw:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: psignw %xmm1, %xmm0 # sched: [1:0.50]
; SLM-NEXT: psignw (%rdi), %xmm0 # sched: [4:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_psignw:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SANDY-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_psignw:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_psignw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
diff --git a/test/CodeGen/X86/stack-folding-bmi.ll b/test/CodeGen/X86/stack-folding-bmi.ll
index cabc88432be..0bc6ef8f9ba 100644
--- a/test/CodeGen/X86/stack-folding-bmi.ll
+++ b/test/CodeGen/X86/stack-folding-bmi.ll
@@ -28,7 +28,7 @@ define i64 @stack_fold_andn_u64(i64 %a0, i64 %a1) {
define i32 @stack_fold_bextr_u32(i32 %a0, i32 %a1) {
;CHECK-LABEL: stack_fold_bextr_u32
- ;CHECK: # BB#0:
+ ;CHECK: # %bb.0:
;CHECK: bextrl %eax, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
%1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
%2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a0, i32 %a1)
@@ -38,7 +38,7 @@ declare i32 @llvm.x86.bmi.bextr.32(i32, i32)
define i64 @stack_fold_bextr_u64(i64 %a0, i64 %a1) {
;CHECK-LABEL: stack_fold_bextr_u64
- ;CHECK: # BB#0:
+ ;CHECK: # %bb.0:
;CHECK: bextrq %rax, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
%1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
%2 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a0, i64 %a1)
diff --git a/test/CodeGen/X86/stack-folding-lwp.ll b/test/CodeGen/X86/stack-folding-lwp.ll
index edf2798ff84..30b93323883 100644
--- a/test/CodeGen/X86/stack-folding-lwp.ll
+++ b/test/CodeGen/X86/stack-folding-lwp.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-unknown"
define i8 @stack_fold_lwpins_u32(i32 %a0, i32 %a1) {
; CHECK-LABEL: stack_fold_lwpins_u32
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: lwpins $2814, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
%1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
%2 = tail call i8 @llvm.x86.lwpins32(i32 %a0, i32 %a1, i32 2814)
@@ -20,7 +20,7 @@ declare i8 @llvm.x86.lwpins32(i32, i32, i32)
define i8 @stack_fold_lwpins_u64(i64 %a0, i32 %a1) {
; CHECK-LABEL: stack_fold_lwpins_u64
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: lwpins $2814, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 4-byte Folded Reload
%1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
%2 = tail call i8 @llvm.x86.lwpins64(i64 %a0, i32 %a1, i32 2814)
@@ -30,7 +30,7 @@ declare i8 @llvm.x86.lwpins64(i64, i32, i32)
define void @stack_fold_lwpval_u32(i32 %a0, i32 %a1) {
; CHECK-LABEL: stack_fold_lwpval_u32
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: lwpval $2814, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
%1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
tail call void @llvm.x86.lwpval32(i32 %a0, i32 %a1, i32 2814)
@@ -40,7 +40,7 @@ declare void @llvm.x86.lwpval32(i32, i32, i32)
define void @stack_fold_lwpval_u64(i64 %a0, i32 %a1) {
; CHECK-LABEL: stack_fold_lwpval_u64
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK: lwpval $2814, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 4-byte Folded Reload
%1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
tail call void @llvm.x86.lwpval64(i64 %a0, i32 %a1, i32 2814)
diff --git a/test/CodeGen/X86/stack-folding-tbm.ll b/test/CodeGen/X86/stack-folding-tbm.ll
index fe3c828a69b..ac7d97c826e 100644
--- a/test/CodeGen/X86/stack-folding-tbm.ll
+++ b/test/CodeGen/X86/stack-folding-tbm.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-unknown"
define i32 @stack_fold_bextri_u32(i32 %a0) {
;CHECK-LABEL: stack_fold_bextri_u32
- ;CHECK: # BB#0:
+ ;CHECK: # %bb.0:
;CHECK: bextr $2814, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
%1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
%2 = tail call i32 @llvm.x86.tbm.bextri.u32(i32 %a0, i32 2814)
@@ -20,7 +20,7 @@ declare i32 @llvm.x86.tbm.bextri.u32(i32, i32)
define i64 @stack_fold_bextri_u64(i64 %a0) {
;CHECK-LABEL: stack_fold_bextri_u64
- ;CHECK: # BB#0:
+ ;CHECK: # %bb.0:
;CHECK: bextr $2814, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
%1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
%2 = tail call i64 @llvm.x86.tbm.bextri.u64(i64 %a0, i64 2814)
diff --git a/test/CodeGen/X86/stack-protector-weight.ll b/test/CodeGen/X86/stack-protector-weight.ll
index c547012d79c..3708d216f8d 100644
--- a/test/CodeGen/X86/stack-protector-weight.ll
+++ b/test/CodeGen/X86/stack-protector-weight.ll
@@ -4,15 +4,15 @@
; RUN: llc -mtriple=i386-pc-windows-msvc -print-machineinstrs=expand-isel-pseudos -enable-selectiondag-sp=false %s -o /dev/null 2>&1 | FileCheck %s -check-prefix=MSVC-IR
; DARWIN-SELDAG: # Machine code for function test_branch_weights:
-; DARWIN-SELDAG: Successors according to CFG: BB#[[SUCCESS:[0-9]+]]({{[0-9a-fx/= ]+}}100.00%) BB#[[FAILURE:[0-9]+]]
-; DARWIN-SELDAG: BB#[[FAILURE]]:
+; DARWIN-SELDAG: Successors according to CFG: %bb.[[SUCCESS:[0-9]+]]({{[0-9a-fx/= ]+}}100.00%) %bb.[[FAILURE:[0-9]+]]
+; DARWIN-SELDAG: %bb.[[FAILURE]]:
; DARWIN-SELDAG: CALL64pcrel32 <es:__stack_chk_fail>
-; DARWIN-SELDAG: BB#[[SUCCESS]]:
+; DARWIN-SELDAG: %bb.[[SUCCESS]]:
; DARWIN-IR: # Machine code for function test_branch_weights:
-; DARWIN-IR: Successors according to CFG: BB#[[SUCCESS:[0-9]+]]({{[0-9a-fx/= ]+}}100.00%) BB#[[FAILURE:[0-9]+]]
-; DARWIN-IR: BB#[[SUCCESS]]:
-; DARWIN-IR: BB#[[FAILURE]]:
+; DARWIN-IR: Successors according to CFG: %bb.[[SUCCESS:[0-9]+]]({{[0-9a-fx/= ]+}}100.00%) %bb.[[FAILURE:[0-9]+]]
+; DARWIN-IR: %bb.[[SUCCESS]]:
+; DARWIN-IR: %bb.[[FAILURE]]:
; DARWIN-IR: CALL64pcrel32 <ga:@__stack_chk_fail>
; MSVC-SELDAG: # Machine code for function test_branch_weights:
diff --git a/test/CodeGen/X86/statepoint-live-in.ll b/test/CodeGen/X86/statepoint-live-in.ll
index 9342c93d300..2c9b95916d8 100644
--- a/test/CodeGen/X86/statepoint-live-in.ll
+++ b/test/CodeGen/X86/statepoint-live-in.ll
@@ -8,7 +8,7 @@ declare void @baz()
define void @test1(i32 %a) gc "statepoint-example" {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: callq _bar
@@ -23,7 +23,7 @@ entry:
define void @test2(i32 %a, i32 %b) gc "statepoint-example" {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: pushq %rbx
@@ -52,7 +52,7 @@ entry:
define void @test3(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i) gc "statepoint-example" {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: callq _bar
@@ -71,7 +71,7 @@ entry:
; stack slots into the statepoint.
define void @test4(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n, i32 %o, i32 %p, i32 %q, i32 %r, i32 %s, i32 %t, i32 %u, i32 %v, i32 %w, i32 %x, i32 %y, i32 %z) gc "statepoint-example" {
; CHECK-LABEL: test4:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: callq _bar
@@ -89,7 +89,7 @@ entry:
; as to put less stress on the register allocator for no benefit.
define i32 addrspace(1)* @test5(i32 %a, i32 addrspace(1)* %p) gc "statepoint-example" {
; CHECK-LABEL: test5:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movq %rsi, (%rsp)
@@ -107,7 +107,7 @@ entry:
; Show the interaction of live-through spilling followed by live-in.
define void @test6(i32 %a) gc "statepoint-example" {
; CHECK-LABEL: test6:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: subq $16, %rsp
diff --git a/test/CodeGen/X86/stores-merging.ll b/test/CodeGen/X86/stores-merging.ll
index 60cc7aca73b..5ccb5825934 100644
--- a/test/CodeGen/X86/stores-merging.ll
+++ b/test/CodeGen/X86/stores-merging.ll
@@ -12,7 +12,7 @@
define void @redundant_stores_merging() {
; CHECK-LABEL: redundant_stores_merging:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movabsq $528280977409, %rax # imm = 0x7B00000001
; CHECK-NEXT: movq %rax, e+{{.*}}(%rip)
; CHECK-NEXT: movl $456, e+{{.*}}(%rip) # imm = 0x1C8
@@ -26,7 +26,7 @@ define void @redundant_stores_merging() {
;; This variant tests PR25154.
define void @redundant_stores_merging_reverse() {
; CHECK-LABEL: redundant_stores_merging_reverse:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movabsq $528280977409, %rax # imm = 0x7B00000001
; CHECK-NEXT: movq %rax, e+{{.*}}(%rip)
; CHECK-NEXT: movl $456, e+{{.*}}(%rip) # imm = 0x1C8
@@ -46,7 +46,7 @@ define void @redundant_stores_merging_reverse() {
define void @overlapping_stores_merging() {
; CHECK-LABEL: overlapping_stores_merging:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl $1, {{.*}}(%rip)
; CHECK-NEXT: movw $2, b+{{.*}}(%rip)
; CHECK-NEXT: retq
@@ -58,7 +58,7 @@ define void @overlapping_stores_merging() {
define void @extract_vector_store_16_consecutive_bytes(<2 x i64> %v, i8* %ptr) #0 {
; CHECK-LABEL: extract_vector_store_16_consecutive_bytes:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %xmm0, (%rdi)
; CHECK-NEXT: retq
%bc = bitcast <2 x i64> %v to <16 x i8>
@@ -117,7 +117,7 @@ define void @extract_vector_store_16_consecutive_bytes(<2 x i64> %v, i8* %ptr) #
define void @extract_vector_store_32_consecutive_bytes(<4 x i64> %v, i8* %ptr) #0 {
; CHECK-LABEL: extract_vector_store_32_consecutive_bytes:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/subcarry.ll b/test/CodeGen/X86/subcarry.ll
index df676328f68..862d489e138 100644
--- a/test/CodeGen/X86/subcarry.ll
+++ b/test/CodeGen/X86/subcarry.ll
@@ -5,7 +5,7 @@
define %S @negate(%S* nocapture readonly %this) {
; CHECK-LABEL: negate:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq (%rsi), %rax
; CHECK-NEXT: movq 8(%rsi), %rcx
; CHECK-NEXT: notq %rax
@@ -62,7 +62,7 @@ entry:
define %S @sub(%S* nocapture readonly %this, %S %arg.b) local_unnamed_addr {
; CHECK-LABEL: sub:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: notq %rdx
; CHECK-NEXT: xorl %r10d, %r10d
; CHECK-NEXT: addq (%rsi), %rdx
diff --git a/test/CodeGen/X86/subvector-broadcast.ll b/test/CodeGen/X86/subvector-broadcast.ll
index 3f6d25bd2fc..e3c91ffaaa0 100644
--- a/test/CodeGen/X86/subvector-broadcast.ll
+++ b/test/CodeGen/X86/subvector-broadcast.ll
@@ -16,13 +16,13 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-LABEL: test_broadcast_2f64_4f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -32,26 +32,26 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_2f64_8f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_2f64_8f64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_2f64_8f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_2f64_8f64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -61,26 +61,26 @@ define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
define <8 x double> @test_broadcast_4f64_8f64(<4 x double> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_4f64_8f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps (%eax), %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_4f64_8f64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4f64_8f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_4f64_8f64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <4 x double>, <4 x double> *%p
@@ -90,24 +90,24 @@ define <8 x double> @test_broadcast_4f64_8f64(<4 x double> *%p) nounwind {
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_2i64_4i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_2i64_4i64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_2i64_4i64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_2i64_4i64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -117,39 +117,39 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
; X32-AVX1-LABEL: test_broadcast_2i64_8i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX1-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: test_broadcast_2i64_8i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_2i64_8i64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX1-LABEL: test_broadcast_2i64_8i64:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX1-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_broadcast_2i64_8i64:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_2i64_8i64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -159,26 +159,26 @@ define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
define <8 x i64> @test_broadcast_4i64_8i64(<4 x i64> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_4i64_8i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps (%eax), %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_4i64_8i64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4i64_8i64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_4i64_8i64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <4 x i64>, <4 x i64> *%p
@@ -188,13 +188,13 @@ define <8 x i64> @test_broadcast_4i64_8i64(<4 x i64> *%p) nounwind {
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32-LABEL: test_broadcast_4f32_8f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -204,26 +204,26 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_4f32_16f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_4f32_16f32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4f32_16f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_4f32_16f32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -233,26 +233,26 @@ define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
define <16 x float> @test_broadcast_8f32_16f32(<8 x float> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_8f32_16f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps (%eax), %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_8f32_16f32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_8f32_16f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_8f32_16f32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <8 x float>, <8 x float> *%p
@@ -262,24 +262,24 @@ define <16 x float> @test_broadcast_8f32_16f32(<8 x float> *%p) nounwind {
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_4i32_8i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_4i32_8i32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4i32_8i32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -289,39 +289,39 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
; X32-AVX1-LABEL: test_broadcast_4i32_16i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX1-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: test_broadcast_4i32_16i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_4i32_16i32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX1-LABEL: test_broadcast_4i32_16i32:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX1-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_broadcast_4i32_16i32:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_4i32_16i32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -331,26 +331,26 @@ define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
define <16 x i32> @test_broadcast_8i32_16i32(<8 x i32> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_8i32_16i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps (%eax), %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_8i32_16i32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_8i32_16i32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_8i32_16i32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <8 x i32>, <8 x i32> *%p
@@ -360,24 +360,24 @@ define <16 x i32> @test_broadcast_8i32_16i32(<8 x i32> *%p) nounwind {
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_8i16_16i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_8i16_16i16:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_8i16_16i16:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
@@ -387,64 +387,64 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X32-AVX1-LABEL: test_broadcast_8i16_32i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX1-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: test_broadcast_8i16_32i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512F-LABEL: test_broadcast_8i16_32i16:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_8i16_32i16:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_8i16_32i16:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX1-LABEL: test_broadcast_8i16_32i16:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX1-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_broadcast_8i16_32i16:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512F-LABEL: test_broadcast_8i16_32i16:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_8i16_32i16:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_8i16_32i16:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
@@ -455,51 +455,51 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
define <32 x i16> @test_broadcast_16i16_32i16(<16 x i16> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_16i16_32i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps (%eax), %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: test_broadcast_16i16_32i16:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovaps (%eax), %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_16i16_32i16:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_16i16_32i16:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: vmovaps (%eax), %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_16i16_32i16:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: test_broadcast_16i16_32i16:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_16i16_32i16:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_16i16_32i16:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
@@ -510,24 +510,24 @@ define <32 x i16> @test_broadcast_16i16_32i16(<16 x i16> *%p) nounwind {
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_16i8_32i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: test_broadcast_16i8_32i8:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_16i8_32i8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
@@ -537,64 +537,64 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X32-AVX1-LABEL: test_broadcast_16i8_64i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX1-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: test_broadcast_16i8_64i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX2-NEXT: retl
;
; X32-AVX512F-LABEL: test_broadcast_16i8_64i8:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_16i8_64i8:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_16i8_64i8:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX1-LABEL: test_broadcast_16i8_64i8:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX1-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_broadcast_16i8_64i8:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX2-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX2-NEXT: retq
;
; X64-AVX512F-LABEL: test_broadcast_16i8_64i8:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_16i8_64i8:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_16i8_64i8:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
@@ -605,51 +605,51 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
define <64 x i8> @test_broadcast_32i8_64i8(<32 x i8> *%p) nounwind {
; X32-AVX-LABEL: test_broadcast_32i8_64i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps (%eax), %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: test_broadcast_32i8_64i8:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovaps (%eax), %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_32i8_64i8:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_32i8_64i8:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: vmovaps (%eax), %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_32i8_64i8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: test_broadcast_32i8_64i8:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_32i8_64i8:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_32i8_64i8:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
@@ -664,7 +664,7 @@ define <64 x i8> @test_broadcast_32i8_64i8(<32 x i8> *%p) nounwind {
define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x double>* %p1) {
; X32-LABEL: test_broadcast_2f64_4f64_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -673,7 +673,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vmovaps %xmm0, (%rsi)
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -686,7 +686,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub
define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1) {
; X32-LABEL: test_broadcast_2i64_4i64_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -695,7 +695,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vmovaps %xmm0, (%rsi)
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -708,7 +708,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1)
define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>* %p1) {
; X32-LABEL: test_broadcast_4f32_8f32_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -717,7 +717,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vmovaps %xmm0, (%rsi)
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -730,7 +730,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>
define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1) {
; X32-LABEL: test_broadcast_4i32_8i32_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -739,7 +739,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vmovaps %xmm0, (%rsi)
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -752,7 +752,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1)
define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p1) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -761,7 +761,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vmovaps %xmm0, (%rsi)
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -774,7 +774,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p
define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -783,7 +783,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1)
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vmovaps %xmm0, (%rsi)
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -800,7 +800,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1)
define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p1) {
; X32-AVX-LABEL: test_broadcast_4i32_8i32_chain:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vmovaps (%ecx), %xmm0
@@ -810,7 +810,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: test_broadcast_4i32_8i32_chain:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX512F-NEXT: vmovaps (%ecx), %xmm0
@@ -820,7 +820,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_4i32_8i32_chain:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX512BW-NEXT: vmovaps (%ecx), %xmm0
@@ -830,7 +830,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_4i32_8i32_chain:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX512DQ-NEXT: vmovaps (%ecx), %xmm0
@@ -840,7 +840,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4i32_8i32_chain:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vmovaps %xmm1, (%rsi)
@@ -848,7 +848,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: test_broadcast_4i32_8i32_chain:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX512F-NEXT: vmovdqa %xmm1, (%rsi)
@@ -856,7 +856,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_4i32_8i32_chain:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX512BW-NEXT: vmovdqa %xmm1, (%rsi)
@@ -864,7 +864,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_4i32_8i32_chain:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512DQ-NEXT: vmovaps %xmm1, (%rsi)
@@ -878,7 +878,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p
define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>* %p1) {
; X32-AVX-LABEL: test_broadcast_4i32_16i32_chain:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vmovaps (%ecx), %xmm0
@@ -889,7 +889,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: test_broadcast_4i32_16i32_chain:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX512F-NEXT: vmovdqa (%ecx), %xmm0
@@ -899,7 +899,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_4i32_16i32_chain:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX512BW-NEXT: vmovdqa (%ecx), %xmm0
@@ -909,7 +909,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_4i32_16i32_chain:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX512DQ-NEXT: vmovdqa (%ecx), %xmm0
@@ -919,7 +919,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4i32_16i32_chain:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rdi), %xmm0
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vmovaps %xmm1, (%rsi)
@@ -928,7 +928,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: test_broadcast_4i32_16i32_chain:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; X64-AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX512F-NEXT: vmovdqa %xmm1, (%rsi)
@@ -936,7 +936,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_4i32_16i32_chain:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; X64-AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX512BW-NEXT: vmovdqa %xmm1, (%rsi)
@@ -944,7 +944,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_4i32_16i32_chain:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
; X64-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512DQ-NEXT: vmovaps %xmm1, (%rsi)
@@ -966,7 +966,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X32-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
-; X32-AVX1: # BB#0: # %entry
+; X32-AVX1: # %bb.0: # %entry
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X32-AVX1-NEXT: vmovdqa {{.*#+}} ymm4 = [1,0,2,0,3,0,4,0]
; X32-AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
@@ -990,7 +990,7 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: fallback_broadcast_v4i64_to_v8i64:
-; X32-AVX2: # BB#0: # %entry
+; X32-AVX2: # %bb.0: # %entry
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [1,0,2,0,3,0,4,0]
; X32-AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
; X32-AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
@@ -1004,7 +1004,7 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: fallback_broadcast_v4i64_to_v8i64:
-; X32-AVX512: # BB#0: # %entry
+; X32-AVX512: # %bb.0: # %entry
; X32-AVX512-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,0,2,0,3,0,4,0,1,0,2,0,3,0,4,0]
; X32-AVX512-NEXT: vpaddq %zmm2, %zmm1, %zmm1
@@ -1015,7 +1015,7 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X32-AVX512-NEXT: retl
;
; X64-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
-; X64-AVX1: # BB#0: # %entry
+; X64-AVX1: # %bb.0: # %entry
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [3,4]
; X64-AVX1-NEXT: vpaddq %xmm4, %xmm3, %xmm3
@@ -1040,7 +1040,7 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: fallback_broadcast_v4i64_to_v8i64:
-; X64-AVX2: # BB#0: # %entry
+; X64-AVX2: # %bb.0: # %entry
; X64-AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [1,2,3,4]
; X64-AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
; X64-AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
@@ -1054,7 +1054,7 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: fallback_broadcast_v4i64_to_v8i64:
-; X64-AVX512: # BB#0: # %entry
+; X64-AVX512: # %bb.0: # %entry
; X64-AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,3,4]
; X64-AVX512-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; X64-AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
@@ -1079,7 +1079,7 @@ entry:
define void @fallback_broadcast_v4f64_to_v8f64(<4 x double> %a, <8 x double> %b) {
; X32-AVX-LABEL: fallback_broadcast_v4f64_to_v8f64:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: vmovapd {{.*#+}} ymm3 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00]
; X32-AVX-NEXT: vaddpd %ymm3, %ymm0, %ymm0
; X32-AVX-NEXT: vaddpd %ymm3, %ymm2, %ymm2
@@ -1093,7 +1093,7 @@ define void @fallback_broadcast_v4f64_to_v8f64(<4 x double> %a, <8 x double> %b)
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: fallback_broadcast_v4f64_to_v8f64:
-; X32-AVX512: # BB#0: # %entry
+; X32-AVX512: # %bb.0: # %entry
; X32-AVX512-NEXT: vmovapd {{.*#+}} ymm2 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00]
; X32-AVX512-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm2, %zmm2
@@ -1105,7 +1105,7 @@ define void @fallback_broadcast_v4f64_to_v8f64(<4 x double> %a, <8 x double> %b)
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: fallback_broadcast_v4f64_to_v8f64:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vmovapd {{.*#+}} ymm3 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00]
; X64-AVX-NEXT: vaddpd %ymm3, %ymm0, %ymm0
; X64-AVX-NEXT: vaddpd %ymm3, %ymm2, %ymm2
@@ -1119,7 +1119,7 @@ define void @fallback_broadcast_v4f64_to_v8f64(<4 x double> %a, <8 x double> %b)
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: fallback_broadcast_v4f64_to_v8f64:
-; X64-AVX512: # BB#0: # %entry
+; X64-AVX512: # %bb.0: # %entry
; X64-AVX512-NEXT: vmovapd {{.*#+}} ymm2 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00]
; X64-AVX512-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm2, %zmm2
@@ -1144,13 +1144,13 @@ entry:
define <4 x double> @reg_broadcast_2f64_4f64(<2 x double> %a0) nounwind {
; X32-LABEL: reg_broadcast_2f64_4f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_2f64_4f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
@@ -1160,28 +1160,28 @@ define <4 x double> @reg_broadcast_2f64_4f64(<2 x double> %a0) nounwind {
define <8 x double> @reg_broadcast_2f64_8f64(<2 x double> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_2f64_8f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_2f64_8f64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_2f64_8f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_2f64_8f64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -1192,23 +1192,23 @@ define <8 x double> @reg_broadcast_2f64_8f64(<2 x double> %a0) nounwind {
define <8 x double> @reg_broadcast_4f64_8f64(<4 x double> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4f64_8f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4f64_8f64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4f64_8f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4f64_8f64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1218,13 +1218,13 @@ define <8 x double> @reg_broadcast_4f64_8f64(<4 x double> %a0) nounwind {
define <4 x i64> @reg_broadcast_2i64_4i64(<2 x i64> %a0) nounwind {
; X32-LABEL: reg_broadcast_2i64_4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_2i64_4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
@@ -1234,28 +1234,28 @@ define <4 x i64> @reg_broadcast_2i64_4i64(<2 x i64> %a0) nounwind {
define <8 x i64> @reg_broadcast_2i64_8i64(<2 x i64> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_2i64_8i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_2i64_8i64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_2i64_8i64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_2i64_8i64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -1266,23 +1266,23 @@ define <8 x i64> @reg_broadcast_2i64_8i64(<2 x i64> %a0) nounwind {
define <8 x i64> @reg_broadcast_4i64_8i64(<4 x i64> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4i64_8i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4i64_8i64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4i64_8i64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4i64_8i64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1292,13 +1292,13 @@ define <8 x i64> @reg_broadcast_4i64_8i64(<4 x i64> %a0) nounwind {
define <8 x float> @reg_broadcast_4f32_8f32(<4 x float> %a0) nounwind {
; X32-LABEL: reg_broadcast_4f32_8f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_4f32_8f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
@@ -1308,28 +1308,28 @@ define <8 x float> @reg_broadcast_4f32_8f32(<4 x float> %a0) nounwind {
define <16 x float> @reg_broadcast_4f32_16f32(<4 x float> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4f32_16f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4f32_16f32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4f32_16f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4f32_16f32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -1340,23 +1340,23 @@ define <16 x float> @reg_broadcast_4f32_16f32(<4 x float> %a0) nounwind {
define <16 x float> @reg_broadcast_8f32_16f32(<8 x float> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_8f32_16f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_8f32_16f32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_8f32_16f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_8f32_16f32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1366,13 +1366,13 @@ define <16 x float> @reg_broadcast_8f32_16f32(<8 x float> %a0) nounwind {
define <8 x i32> @reg_broadcast_4i32_8i32(<4 x i32> %a0) nounwind {
; X32-LABEL: reg_broadcast_4i32_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_4i32_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
@@ -1382,28 +1382,28 @@ define <8 x i32> @reg_broadcast_4i32_8i32(<4 x i32> %a0) nounwind {
define <16 x i32> @reg_broadcast_4i32_16i32(<4 x i32> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4i32_16i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4i32_16i32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4i32_16i32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4i32_16i32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -1414,23 +1414,23 @@ define <16 x i32> @reg_broadcast_4i32_16i32(<4 x i32> %a0) nounwind {
define <16 x i32> @reg_broadcast_8i32_16i32(<8 x i32> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_8i32_16i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_8i32_16i32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_8i32_16i32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_8i32_16i32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1440,13 +1440,13 @@ define <16 x i32> @reg_broadcast_8i32_16i32(<8 x i32> %a0) nounwind {
define <16 x i16> @reg_broadcast_8i16_16i16(<8 x i16> %a0) nounwind {
; X32-LABEL: reg_broadcast_8i16_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_8i16_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
@@ -1456,56 +1456,56 @@ define <16 x i16> @reg_broadcast_8i16_16i16(<8 x i16> %a0) nounwind {
define <32 x i16> @reg_broadcast_8i16_32i16(<8 x i16> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_8i16_32i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_8i16_32i16:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_8i16_32i16:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_8i16_32i16:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
@@ -1516,44 +1516,44 @@ define <32 x i16> @reg_broadcast_8i16_32i16(<8 x i16> %a0) nounwind {
define <32 x i16> @reg_broadcast_16i16_32i16(<16 x i16> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_16i16_32i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_16i16_32i16:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_16i16_32i16:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_16i16_32i16:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_16i16_32i16:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_16i16_32i16:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -1562,13 +1562,13 @@ define <32 x i16> @reg_broadcast_16i16_32i16(<16 x i16> %a0) nounwind {
define <32 x i8> @reg_broadcast_16i8_32i8(<16 x i8> %a0) nounwind {
; X32-LABEL: reg_broadcast_16i8_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_16i8_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
@@ -1578,56 +1578,56 @@ define <32 x i8> @reg_broadcast_16i8_32i8(<16 x i8> %a0) nounwind {
define <64 x i8> @reg_broadcast_16i8_64i8(<16 x i8> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_16i8_64i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_16i8_64i8:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_16i8_64i8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_16i8_64i8:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
@@ -1638,44 +1638,44 @@ define <64 x i8> @reg_broadcast_16i8_64i8(<16 x i8> %a0) nounwind {
define <64 x i8> @reg_broadcast_32i8_64i8(<32 x i8> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_32i8_64i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_32i8_64i8:
-; X32-AVX512F: # BB#0:
+; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
-; X32-AVX512BW: # BB#0:
+; X32-AVX512BW: # %bb.0:
; X32-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_32i8_64i8:
-; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ: # %bb.0:
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_32i8_64i8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_32i8_64i8:
-; X64-AVX512F: # BB#0:
+; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
-; X64-AVX512BW: # BB#0:
+; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_32i8_64i8:
-; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
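; A note on the broadcasts above: the *_64i8 and *_32i16 results are 512 bits
; wide.  With AVX512BW the value is assembled in zmm0 (vinsertf128, then
; vinsertf64x4); targets without 512-bit byte/word operations (AVX, AVX512F,
; AVX512DQ) keep v64i8/v32i16 split across ymm0 and ymm1, so the broadcast
; reduces to duplicating the register with vmovaps %ymm0, %ymm1.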
diff --git a/test/CodeGen/X86/switch-edge-weight.ll b/test/CodeGen/X86/switch-edge-weight.ll
index 861bd1289eb..516c254223f 100644
--- a/test/CodeGen/X86/switch-edge-weight.ll
+++ b/test/CodeGen/X86/switch-edge-weight.ll
@@ -31,25 +31,25 @@ sw.epilog:
; Check if weights are correctly assigned to edges generated from switch
; statement.
;
-; CHECK: BB#0:
-; BB#0 to BB#4: [0, 1133] (65 = 60 + 5)
-; BB#0 to BB#5: [1134, UINT32_MAX] (25 = 20 + 5)
-; CHECK: Successors according to CFG: BB#4({{[0-9a-fx/= ]+}}72.22%) BB#5({{[0-9a-fx/= ]+}}27.78%)
+; CHECK: %bb.0:
+; %bb.0 to %bb.4: [0, 1133] (65 = 60 + 5)
+; %bb.0 to %bb.5: [1134, UINT32_MAX] (25 = 20 + 5)
+; CHECK: Successors according to CFG: %bb.4({{[0-9a-fx/= ]+}}72.22%) %bb.5({{[0-9a-fx/= ]+}}27.78%)
;
-; CHECK: BB#4:
-; BB#4 to BB#1: [155, 159] (50)
-; BB#4 to BB#5: [0, 1133] - [155, 159] (15 = 10 + 5)
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}76.92%) BB#7({{[0-9a-fx/= ]+}}23.08%)
+; CHECK: %bb.4:
+; %bb.4 to %bb.1: [155, 159] (50)
+; %bb.4 to %bb.5: [0, 1133] - [155, 159] (15 = 10 + 5)
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}76.92%) %bb.7({{[0-9a-fx/= ]+}}23.08%)
;
-; CHECK: BB#5:
-; BB#5 to BB#1: {1140} (10)
-; BB#5 to BB#6: [1134, UINT32_MAX] - {1140} (15 = 10 + 5)
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}40.00%) BB#6({{[0-9a-fx/= ]+}}60.00%)
+; CHECK: %bb.5:
+; %bb.5 to %bb.1: {1140} (10)
+; %bb.5 to %bb.6: [1134, UINT32_MAX] - {1140} (15 = 10 + 5)
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}40.00%) %bb.6({{[0-9a-fx/= ]+}}60.00%)
;
-; CHECK: BB#6:
-; BB#6 to BB#1: {1134} (10)
-; BB#6 to BB#2: [1134, UINT32_MAX] - {1134, 1140} (5)
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}66.67%) BB#2({{[0-9a-fx/= ]+}}33.33%)
+; CHECK: %bb.6:
+; %bb.6 to %bb.1: {1134} (10)
+; %bb.6 to %bb.2: [1134, UINT32_MAX] - {1134, 1140} (5)
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}66.67%) %bb.2({{[0-9a-fx/= ]+}}33.33%)
}
; CHECK-LABEL: test2
@@ -99,19 +99,19 @@ sw.epilog:
; Check if weights are correctly assigned to edges generated from switch
; statement.
;
-; CHECK: BB#0:
-; BB#0 to BB#6: {0} + [15, UINT32_MAX] (5)
-; BB#0 to BB#8: [1, 14] (jump table) (65 = 60 + 5)
-; CHECK: Successors according to CFG: BB#6({{[0-9a-fx/= ]+}}7.14%) BB#8({{[0-9a-fx/= ]+}}92.86%
+; CHECK: %bb.0:
+; %bb.0 to %bb.6: {0} + [15, UINT32_MAX] (5)
+; %bb.0 to %bb.8: [1, 14] (jump table) (65 = 60 + 5)
+; CHECK: Successors according to CFG: %bb.6({{[0-9a-fx/= ]+}}7.14%) %bb.8({{[0-9a-fx/= ]+}}92.86%
;
-; CHECK: BB#8:
-; BB#8 to BB#1: {1} (10)
-; BB#8 to BB#6: [2, 9] (5)
-; BB#8 to BB#2: {10} (10)
-; BB#8 to BB#3: {11} (10)
-; BB#8 to BB#4: {12} (10)
-; BB#8 to BB#5: {13, 14} (20)
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}15.38%) BB#6({{[0-9a-fx/= ]+}}7.69%) BB#2({{[0-9a-fx/= ]+}}15.38%) BB#3({{[0-9a-fx/= ]+}}15.38%) BB#4({{[0-9a-fx/= ]+}}15.38%) BB#5({{[0-9a-fx/= ]+}}30.77%)
+; CHECK: %bb.8:
+; %bb.8 to %bb.1: {1} (10)
+; %bb.8 to %bb.6: [2, 9] (5)
+; %bb.8 to %bb.2: {10} (10)
+; %bb.8 to %bb.3: {11} (10)
+; %bb.8 to %bb.4: {12} (10)
+; %bb.8 to %bb.5: {13, 14} (20)
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}15.38%) %bb.6({{[0-9a-fx/= ]+}}7.69%) %bb.2({{[0-9a-fx/= ]+}}15.38%) %bb.3({{[0-9a-fx/= ]+}}15.38%) %bb.4({{[0-9a-fx/= ]+}}15.38%) %bb.5({{[0-9a-fx/= ]+}}30.77%)
}
; CHECK-LABEL: test3
@@ -160,18 +160,18 @@ sw.epilog:
; Check if weights are correctly assigned to edges generated from switch
; statement.
;
-; CHECK: BB#0:
-; BB#0 to BB#6: [0, 9] + [15, UINT32_MAX] (10)
-; BB#0 to BB#8: [10, 14] (jump table) (50)
-; CHECK: Successors according to CFG: BB#6({{[0-9a-fx/= ]+}}16.67%) BB#8({{[0-9a-fx/= ]+}}83.33%)
+; CHECK: %bb.0:
+; %bb.0 to %bb.6: [0, 9] + [15, UINT32_MAX] (10)
+; %bb.0 to %bb.8: [10, 14] (jump table) (50)
+; CHECK: Successors according to CFG: %bb.6({{[0-9a-fx/= ]+}}16.67%) %bb.8({{[0-9a-fx/= ]+}}83.33%)
;
-; CHECK: BB#8:
-; BB#8 to BB#1: {10} (10)
-; BB#8 to BB#2: {11} (10)
-; BB#8 to BB#3: {12} (10)
-; BB#8 to BB#4: {13} (10)
-; BB#8 to BB#5: {14} (10)
-; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}20.00%) BB#2({{[0-9a-fx/= ]+}}20.00%) BB#3({{[0-9a-fx/= ]+}}20.00%) BB#4({{[0-9a-fx/= ]+}}20.00%) BB#5({{[0-9a-fx/= ]+}}20.00%)
+; CHECK: %bb.8:
+; %bb.8 to %bb.1: {10} (10)
+; %bb.8 to %bb.2: {11} (10)
+; %bb.8 to %bb.3: {12} (10)
+; %bb.8 to %bb.4: {13} (10)
+; %bb.8 to %bb.5: {14} (10)
+; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}20.00%) %bb.2({{[0-9a-fx/= ]+}}20.00%) %bb.3({{[0-9a-fx/= ]+}}20.00%) %bb.4({{[0-9a-fx/= ]+}}20.00%) %bb.5({{[0-9a-fx/= ]+}}20.00%)
}
; CHECK-LABEL: test4
@@ -213,15 +213,15 @@ sw.epilog:
; Check if weights are correctly assigned to edges generated from switch
; statement.
;
-; CHECK: BB#0:
-; BB#0 to BB#6: [0, 110] + [116, UINT32_MAX] (20)
-; BB#0 to BB#7: [111, 115] (bit test) (50)
-; CHECK: Successors according to CFG: BB#6({{[0-9a-fx/= ]+}}28.57%) BB#7({{[0-9a-fx/= ]+}}71.43%)
+; CHECK: %bb.0:
+; %bb.0 to %bb.6: [0, 110] + [116, UINT32_MAX] (20)
+; %bb.0 to %bb.7: [111, 115] (bit test) (50)
+; CHECK: Successors according to CFG: %bb.6({{[0-9a-fx/= ]+}}28.57%) %bb.7({{[0-9a-fx/= ]+}}71.43%)
;
-; CHECK: BB#7:
-; BB#7 to BB#2: {111, 114, 115} (30)
-; BB#7 to BB#3: {112, 113} (20)
-; CHECK: Successors according to CFG: BB#2({{[0-9a-fx/= ]+}}60.00%) BB#3({{[0-9a-fx/= ]+}}40.00%)
+; CHECK: %bb.7:
+; %bb.7 to %bb.2: {111, 114, 115} (30)
+; %bb.7 to %bb.3: {112, 113} (20)
+; CHECK: Successors according to CFG: %bb.2({{[0-9a-fx/= ]+}}60.00%) %bb.3({{[0-9a-fx/= ]+}}40.00%)
}
; CHECK-LABEL: test5
@@ -270,10 +270,10 @@ sw.epilog:
; Check if weights are correctly assigned to edges generated from switch
; statement.
;
-; CHECK: BB#0:
-; BB#0 to BB#6: [10, UINT32_MAX] (15)
-; BB#0 to BB#8: [4, 20, 28, 36] (jump table) (45)
-; CHECK: Successors according to CFG: BB#8({{[0-9a-fx/= ]+}}25.00%) BB#9({{[0-9a-fx/= ]+}}75.00%)
+; CHECK: %bb.0:
+; %bb.0 to %bb.6: [10, UINT32_MAX] (15)
+; %bb.0 to %bb.8: [4, 20, 28, 36] (jump table) (45)
+; CHECK: Successors according to CFG: %bb.8({{[0-9a-fx/= ]+}}25.00%) %bb.9({{[0-9a-fx/= ]+}}75.00%)
}
!1 = !{!"branch_weights", i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10}
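; The percentages in the CHECK lines above are simply the listed edge weights
; normalized over each block's total outgoing weight.  For test1's entry
; block, %bb.0 -> %bb.4 carries weight 65 and %bb.0 -> %bb.5 carries 25, so
; the printed probabilities are 65/90 = 72.22% and 25/90 = 27.78%; the same
; arithmetic reproduces every percentage checked in this file.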
diff --git a/test/CodeGen/X86/switch-jump-table.ll b/test/CodeGen/X86/switch-jump-table.ll
index 6393c688e28..a4564dc2ac7 100644
--- a/test/CodeGen/X86/switch-jump-table.ll
+++ b/test/CodeGen/X86/switch-jump-table.ll
@@ -9,7 +9,7 @@ define void @foo(i32 %x, i32* %to) {
; CHECK: movl 4(%esp), [[REG:%e[a-z]{2}]]
; CHECK: cmpl $3, [[REG]]
; CHECK: ja .LBB0_6
-; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: jmpl *.LJTI0_0(,[[REG]],4)
; CHECK: movl $4
; CHECK: retl
@@ -55,8 +55,8 @@ default:
define void @bar(i32 %x, i32* %to) {
; CHECK-JT-PROB-LABEL: bar:
-; CHECK-JT-PROB: Successors according to CFG: BB#6({{[0-9a-fx/= ]+}}14.29%) BB#8({{[0-9a-fx/= ]+}}85.71%)
-; CHECK-JT-PROB: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}16.67%) BB#2({{[0-9a-fx/= ]+}}16.67%) BB#3({{[0-9a-fx/= ]+}}16.67%) BB#4({{[0-9a-fx/= ]+}}16.67%) BB#5({{[0-9a-fx/= ]+}}33.33%)
+; CHECK-JT-PROB: Successors according to CFG: %bb.6({{[0-9a-fx/= ]+}}14.29%) %bb.8({{[0-9a-fx/= ]+}}85.71%)
+; CHECK-JT-PROB: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}16.67%) %bb.2({{[0-9a-fx/= ]+}}16.67%) %bb.3({{[0-9a-fx/= ]+}}16.67%) %bb.4({{[0-9a-fx/= ]+}}16.67%) %bb.5({{[0-9a-fx/= ]+}}33.33%)
entry:
switch i32 %x, label %default [
diff --git a/test/CodeGen/X86/switch-lower-peel-top-case.ll b/test/CodeGen/X86/switch-lower-peel-top-case.ll
index a43e73c2b77..8a169c41836 100644
--- a/test/CodeGen/X86/switch-lower-peel-top-case.ll
+++ b/test/CodeGen/X86/switch-lower-peel-top-case.ll
@@ -15,12 +15,12 @@ entry:
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18568, implicit-def %eflags
; CHECK: JE_1 %[[PEELED_CASE_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[PEELED_SWITCH_LABEL]]
-; CHECK: [[PEELED_SWITCH_LABEL]]:
+; CHECK: [[PEELED_SWITCH_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[BB1_LABEL:.*]](0x0206d3a0), %[[BB2_LABEL:.*]](0x7df92c60)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18311, implicit-def %eflags
; CHECK: JG_1 %[[BB2_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[BB1_LABEL]]
-; CHECK: [[BB1_LABEL]]:
+; CHECK: [[BB1_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE2_LABEL:.*]](0x35e50d5b), %[[BB3_LABEL:.*]](0x4a1af2a5)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], -8826, implicit-def %eflags
; CHECK: JE_1 %[[CASE2_LABEL]], implicit %eflags
@@ -30,12 +30,12 @@ entry:
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 129, implicit-def %eflags
; CHECK: JE_1 %[[CASE5_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[BB4_LABEL]]
-; CHECK: [[BB4_LABEL:.*]]:
+; CHECK: [[BB4_LABEL:.*]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE1_LABEL:.*]](0x66666666), %[[DEFAULT_BB_LABEL:.*]](0x1999999a)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 8, implicit-def %eflags
; CHECK: JE_1 %[[CASE1_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
-; CHECK: [[BB2_LABEL]]:
+; CHECK: [[BB2_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE3_LABEL:.*]](0x7fe44107), %[[DEFAULT_BB_LABEL]](0x001bbef9)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18312, implicit-def %eflags
; CHECK: JE_1 %[[CASE3_LABEL]], implicit %eflags
@@ -78,32 +78,32 @@ entry:
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %{{[0-9]+}}, 2, implicit-def %eflags
; CHECK: JB_1 %[[PEELED_CASE_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[PEELED_SWITCH_LABEL]]
-; CHECK: [[PEELED_SWITCH_LABEL]]:
+; CHECK: [[PEELED_SWITCH_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[BB1_LABEL:.*]](0x0088888a), %[[BB2_LABEL:.*]](0x7f777776)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 4, implicit-def %eflags
; CHECK: JG_1 %[[BB2_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[BB1_LABEL]]
-; CHECK: [[BB1_LABEL]]:
+; CHECK: [[BB1_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE4_LABEL:.*]](0x7f775a4f), %[[BB3_LABEL:.*]](0x0088a5b1)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 1, implicit-def %eflags
; CHECK: JE_1 %[[CASE4_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[BB3_LABEL]]
-; CHECK: [[BB3_LABEL]]:
+; CHECK: [[BB3_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE1_LABEL:.*]](0x66666666), %[[DEFAULT_BB_LABEL:.*]](0x1999999a)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], -40, implicit-def %eflags
; CHECK: JE_1 %[[CASE1_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
-; CHECK: [[BB2_LABEL]]:
+; CHECK: [[BB2_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE5_LABEL:.*]](0x00000000), %[[BB4_LABEL:.*]](0x80000000)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 5, implicit-def %eflags
; CHECK: JE_1 %[[CASE5_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[BB4_LABEL]]
-; CHECK: [[BB4_LABEL]]:
+; CHECK: [[BB4_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE6_LABEL:.*]](0x00000000), %[[BB5_LABEL:.*]](0x80000000)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 7, implicit-def %eflags
; CHECK: JE_1 %[[CASE6_LABEL]], implicit %eflags
; CHECK: JMP_1 %[[BB5_LABEL]]
-; CHECK: [[BB5_LABEL]]:
+; CHECK: [[BB5_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE7_LABEL:.*]](0x00000000), %[[DEFAULT_BB_LABEL]](0x80000000)
; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 49, implicit-def %eflags
; CHECK: JE_1 %[[CASE7_LABEL]], implicit %eflags
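; The label checks above gained a .{{[a-zA-Z0-9.]+}} suffix because successor
; lists now print the short block reference (%bb.1), so each captured variable
; holds only "bb.1", while the block label line still carries the originating
; IR block name, e.g. "bb.1.entry:".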
diff --git a/test/CodeGen/X86/switch.ll b/test/CodeGen/X86/switch.ll
index 0cf70efaec8..95b2ed0e618 100644
--- a/test/CodeGen/X86/switch.ll
+++ b/test/CodeGen/X86/switch.ll
@@ -432,9 +432,9 @@ sw:
; Branch directly to the default.
; (In optimized builds the switch is removed earlier.)
; NOOPT-LABEL: default_only
-; NOOPT: .[[L:[A-Z0-9_]+]]:
+; NOOPT: .LBB[[L:[A-Z0-9_]+]]:
; NOOPT-NEXT: retq
-; NOOPT: jmp .[[L]]
+; NOOPT: jmp .LBB[[L]]
}
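; The capture is pinned to the .LBB local-label prefix because the looser
; ".[[L:[A-Z0-9_]+]]:" pattern could now also match the tail of the new
; "# %bb.0:" block comments (".0:" satisfies it) and capture the wrong label.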
diff --git a/test/CodeGen/X86/swizzle-2.ll b/test/CodeGen/X86/swizzle-2.ll
index fd81573edec..dad6a4d7d4f 100644
--- a/test/CodeGen/X86/swizzle-2.ll
+++ b/test/CodeGen/X86/swizzle-2.ll
@@ -11,7 +11,7 @@
define <4 x i32> @swizzle_1(<4 x i32> %v) {
; CHECK-LABEL: swizzle_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,3,2]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
@@ -21,7 +21,7 @@ define <4 x i32> @swizzle_1(<4 x i32> %v) {
define <4 x i32> @swizzle_2(<4 x i32> %v) {
; CHECK-LABEL: swizzle_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
@@ -31,7 +31,7 @@ define <4 x i32> @swizzle_2(<4 x i32> %v) {
define <4 x i32> @swizzle_3(<4 x i32> %v) {
; CHECK-LABEL: swizzle_3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,3,2]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
@@ -41,7 +41,7 @@ define <4 x i32> @swizzle_3(<4 x i32> %v) {
define <4 x i32> @swizzle_4(<4 x i32> %v) {
; CHECK-LABEL: swizzle_4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,2]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
@@ -51,7 +51,7 @@ define <4 x i32> @swizzle_4(<4 x i32> %v) {
define <4 x i32> @swizzle_5(<4 x i32> %v) {
; CHECK-LABEL: swizzle_5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
@@ -61,7 +61,7 @@ define <4 x i32> @swizzle_5(<4 x i32> %v) {
define <4 x i32> @swizzle_6(<4 x i32> %v) {
; CHECK-LABEL: swizzle_6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,1,3]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
@@ -71,7 +71,7 @@ define <4 x i32> @swizzle_6(<4 x i32> %v) {
define <4 x i32> @swizzle_7(<4 x i32> %v) {
; CHECK-LABEL: swizzle_7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,3,1]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
@@ -81,7 +81,7 @@ define <4 x i32> @swizzle_7(<4 x i32> %v) {
define <4 x i32> @swizzle_8(<4 x i32> %v) {
; CHECK-LABEL: swizzle_8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
@@ -91,7 +91,7 @@ define <4 x i32> @swizzle_8(<4 x i32> %v) {
define <4 x i32> @swizzle_9(<4 x i32> %v) {
; CHECK-LABEL: swizzle_9:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
@@ -101,7 +101,7 @@ define <4 x i32> @swizzle_9(<4 x i32> %v) {
define <4 x i32> @swizzle_10(<4 x i32> %v) {
; CHECK-LABEL: swizzle_10:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,0,3]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
@@ -111,7 +111,7 @@ define <4 x i32> @swizzle_10(<4 x i32> %v) {
define <4 x i32> @swizzle_11(<4 x i32> %v) {
; CHECK-LABEL: swizzle_11:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
@@ -121,7 +121,7 @@ define <4 x i32> @swizzle_11(<4 x i32> %v) {
define <4 x i32> @swizzle_12(<4 x i32> %v) {
; CHECK-LABEL: swizzle_12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,2]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
@@ -131,7 +131,7 @@ define <4 x i32> @swizzle_12(<4 x i32> %v) {
define <4 x i32> @swizzle_13(<4 x i32> %v) {
; CHECK-LABEL: swizzle_13:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
@@ -141,7 +141,7 @@ define <4 x i32> @swizzle_13(<4 x i32> %v) {
define <4 x i32> @swizzle_14(<4 x i32> %v) {
; CHECK-LABEL: swizzle_14:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,2,1]
; CHECK-NEXT: retq
%1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
@@ -151,7 +151,7 @@ define <4 x i32> @swizzle_14(<4 x i32> %v) {
define <4 x float> @swizzle_15(<4 x float> %v) {
; CHECK-LABEL: swizzle_15:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0,3,2]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
@@ -161,7 +161,7 @@ define <4 x float> @swizzle_15(<4 x float> %v) {
define <4 x float> @swizzle_16(<4 x float> %v) {
; CHECK-LABEL: swizzle_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1,3,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
@@ -171,7 +171,7 @@ define <4 x float> @swizzle_16(<4 x float> %v) {
define <4 x float> @swizzle_17(<4 x float> %v) {
; CHECK-LABEL: swizzle_17:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0,3,2]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
@@ -181,7 +181,7 @@ define <4 x float> @swizzle_17(<4 x float> %v) {
define <4 x float> @swizzle_18(<4 x float> %v) {
; CHECK-LABEL: swizzle_18:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,0,2]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
@@ -191,7 +191,7 @@ define <4 x float> @swizzle_18(<4 x float> %v) {
define <4 x float> @swizzle_19(<4 x float> %v) {
; CHECK-LABEL: swizzle_19:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
@@ -201,7 +201,7 @@ define <4 x float> @swizzle_19(<4 x float> %v) {
define <4 x float> @swizzle_20(<4 x float> %v) {
; CHECK-LABEL: swizzle_20:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
@@ -211,7 +211,7 @@ define <4 x float> @swizzle_20(<4 x float> %v) {
define <4 x float> @swizzle_21(<4 x float> %v) {
; CHECK-LABEL: swizzle_21:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
@@ -221,7 +221,7 @@ define <4 x float> @swizzle_21(<4 x float> %v) {
define <4 x float> @swizzle_22(<4 x float> %v) {
; CHECK-LABEL: swizzle_22:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3,2,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
@@ -231,7 +231,7 @@ define <4 x float> @swizzle_22(<4 x float> %v) {
define <4 x float> @swizzle_23(<4 x float> %v) {
; CHECK-LABEL: swizzle_23:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
@@ -241,7 +241,7 @@ define <4 x float> @swizzle_23(<4 x float> %v) {
define <4 x float> @swizzle_24(<4 x float> %v) {
; CHECK-LABEL: swizzle_24:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2,0,3]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
@@ -251,7 +251,7 @@ define <4 x float> @swizzle_24(<4 x float> %v) {
define <4 x float> @swizzle_25(<4 x float> %v) {
; CHECK-LABEL: swizzle_25:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
@@ -261,7 +261,7 @@ define <4 x float> @swizzle_25(<4 x float> %v) {
define <4 x float> @swizzle_26(<4 x float> %v) {
; CHECK-LABEL: swizzle_26:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3,1,2]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
@@ -271,7 +271,7 @@ define <4 x float> @swizzle_26(<4 x float> %v) {
define <4 x float> @swizzle_27(<4 x float> %v) {
; CHECK-LABEL: swizzle_27:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
@@ -281,7 +281,7 @@ define <4 x float> @swizzle_27(<4 x float> %v) {
define <4 x float> @swizzle_28(<4 x float> %v) {
; CHECK-LABEL: swizzle_28:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,2,1]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
@@ -291,7 +291,7 @@ define <4 x float> @swizzle_28(<4 x float> %v) {
define <4 x float> @swizzle_29(<4 x float> %v) {
; CHECK-LABEL: swizzle_29:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3,2,0]
; CHECK-NEXT: retq
%1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
@@ -304,7 +304,7 @@ define <4 x float> @swizzle_29(<4 x float> %v) {
define <8 x i16> @swizzle_30(<8 x i16> %v) {
; CHECK-LABEL: swizzle_30:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,0,4,5,6,7]
; CHECK-NEXT: retq
%1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
@@ -314,7 +314,7 @@ define <8 x i16> @swizzle_30(<8 x i16> %v) {
define <8 x i16> @swizzle_31(<8 x i16> %v) {
; CHECK-LABEL: swizzle_31:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,0,4,5,6,7]
; CHECK-NEXT: retq
%1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 3, i32 0, i32 2, i32 1, i32 7, i32 5, i32 6, i32 4>
@@ -324,7 +324,7 @@ define <8 x i16> @swizzle_31(<8 x i16> %v) {
define <8 x i16> @swizzle_32(<8 x i16> %v) {
; CHECK-LABEL: swizzle_32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; CHECK-NEXT: retq
%1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 7, i32 5, i32 6, i32 4>
@@ -334,7 +334,7 @@ define <8 x i16> @swizzle_32(<8 x i16> %v) {
define <8 x i16> @swizzle_33(<8 x i16> %v) {
; CHECK-LABEL: swizzle_33:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,0,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,4]
; CHECK-NEXT: retq
@@ -345,7 +345,7 @@ define <8 x i16> @swizzle_33(<8 x i16> %v) {
define <8 x i16> @swizzle_34(<8 x i16> %v) {
; CHECK-LABEL: swizzle_34:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,0,2,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,4,5]
; CHECK-NEXT: retq
@@ -356,7 +356,7 @@ define <8 x i16> @swizzle_34(<8 x i16> %v) {
define <8 x i16> @swizzle_35(<8 x i16> %v) {
; CHECK-LABEL: swizzle_35:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,6]
; CHECK-NEXT: retq
@@ -367,7 +367,7 @@ define <8 x i16> @swizzle_35(<8 x i16> %v) {
define <8 x i16> @swizzle_36(<8 x i16> %v) {
; CHECK-LABEL: swizzle_36:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; CHECK-NEXT: retq
@@ -378,7 +378,7 @@ define <8 x i16> @swizzle_36(<8 x i16> %v) {
define <8 x i16> @swizzle_37(<8 x i16> %v) {
; CHECK-LABEL: swizzle_37:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
; CHECK-NEXT: retq
%1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 7, i32 5, i32 6, i32 4>
@@ -388,7 +388,7 @@ define <8 x i16> @swizzle_37(<8 x i16> %v) {
define <8 x i16> @swizzle_38(<8 x i16> %v) {
; CHECK-LABEL: swizzle_38:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7]
; CHECK-NEXT: retq
@@ -399,7 +399,7 @@ define <8 x i16> @swizzle_38(<8 x i16> %v) {
define <8 x i16> @swizzle_39(<8 x i16> %v) {
; CHECK-LABEL: swizzle_39:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,3,1,0,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,4,5]
; CHECK-NEXT: retq
@@ -410,7 +410,7 @@ define <8 x i16> @swizzle_39(<8 x i16> %v) {
define <8 x i16> @swizzle_40(<8 x i16> %v) {
; CHECK-LABEL: swizzle_40:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; CHECK-NEXT: retq
@@ -421,7 +421,7 @@ define <8 x i16> @swizzle_40(<8 x i16> %v) {
define <8 x i16> @swizzle_41(<8 x i16> %v) {
; CHECK-LABEL: swizzle_41:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
; CHECK-NEXT: retq
@@ -432,7 +432,7 @@ define <8 x i16> @swizzle_41(<8 x i16> %v) {
define <8 x i16> @swizzle_42(<8 x i16> %v) {
; CHECK-LABEL: swizzle_42:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; CHECK-NEXT: retq
%1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 7, i32 6, i32 4, i32 5>
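; Each swizzle test applies its mask twice: a second shufflevector, elided by
; the diff context, reshuffles %1 with the same mask, so the checked shuffle
; is the mask composed with itself (lane i reads v[m[m[i]]]).  For swizzle_42
; just above, m = <0,1,3,2,7,6,4,5> composed with itself yields
; <0,1,2,3,5,4,7,6>, exactly the pshufhw pattern being checked.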
diff --git a/test/CodeGen/X86/swizzle-avx2.ll b/test/CodeGen/X86/swizzle-avx2.ll
index dadaff4eaa1..14244c3f8c7 100644
--- a/test/CodeGen/X86/swizzle-avx2.ll
+++ b/test/CodeGen/X86/swizzle-avx2.ll
@@ -13,7 +13,7 @@
define <8 x i32> @swizzle_1(<8 x i32> %v) {
; CHECK-LABEL: swizzle_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [1,3,2,0,4,5,6,7]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -24,7 +24,7 @@ define <8 x i32> @swizzle_1(<8 x i32> %v) {
define <8 x i32> @swizzle_2(<8 x i32> %v) {
; CHECK-LABEL: swizzle_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
@@ -34,7 +34,7 @@ define <8 x i32> @swizzle_2(<8 x i32> %v) {
define <8 x i32> @swizzle_3(<8 x i32> %v) {
; CHECK-LABEL: swizzle_3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; CHECK-NEXT: retq
%1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
@@ -44,7 +44,7 @@ define <8 x i32> @swizzle_3(<8 x i32> %v) {
define <8 x i32> @swizzle_4(<8 x i32> %v) {
; CHECK-LABEL: swizzle_4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3,1,2,0,6,5,4,7]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -55,7 +55,7 @@ define <8 x i32> @swizzle_4(<8 x i32> %v) {
define <8 x i32> @swizzle_5(<8 x i32> %v) {
; CHECK-LABEL: swizzle_5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3,0,1,2,7,6,4,5]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -66,7 +66,7 @@ define <8 x i32> @swizzle_5(<8 x i32> %v) {
define <8 x i32> @swizzle_6(<8 x i32> %v) {
; CHECK-LABEL: swizzle_6:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3,1,0,2,4,5,6,7]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -77,7 +77,7 @@ define <8 x i32> @swizzle_6(<8 x i32> %v) {
define <8 x i32> @swizzle_7(<8 x i32> %v) {
; CHECK-LABEL: swizzle_7:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,3,1,4,5,6,7]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll b/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll
index f6c49cab71b..72ff630b967 100644
--- a/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll
+++ b/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll
@@ -5,7 +5,7 @@
define i64 @test__bextri_u64(i64 %a0) {
; X64-LABEL: test__bextri_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bextr $1, %rdi, %rax
; X64-NEXT: retq
%1 = call i64 @llvm.x86.tbm.bextri.u64(i64 %a0, i64 1)
@@ -14,7 +14,7 @@ define i64 @test__bextri_u64(i64 %a0) {
define i64 @test__blcfill_u64(i64 %a0) {
; X64-LABEL: test__blcfill_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq 1(%rdi), %rax
; X64-NEXT: andq %rdi, %rax
; X64-NEXT: retq
@@ -25,7 +25,7 @@ define i64 @test__blcfill_u64(i64 %a0) {
define i64 @test__blci_u64(i64 %a0) {
; X64-LABEL: test__blci_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq 1(%rdi), %rax
; X64-NEXT: xorq $-1, %rax
; X64-NEXT: orq %rdi, %rax
@@ -38,7 +38,7 @@ define i64 @test__blci_u64(i64 %a0) {
define i64 @test__blcic_u64(i64 %a0) {
; X64-LABEL: test__blcic_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorq $-1, %rax
; X64-NEXT: addq $1, %rdi
@@ -53,7 +53,7 @@ define i64 @test__blcic_u64(i64 %a0) {
define i64 @test__blcmsk_u64(i64 %a0) {
; X64-LABEL: test__blcmsk_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq 1(%rdi), %rax
; X64-NEXT: xorq %rdi, %rax
; X64-NEXT: retq
@@ -64,7 +64,7 @@ define i64 @test__blcmsk_u64(i64 %a0) {
define i64 @test__blcs_u64(i64 %a0) {
; X64-LABEL: test__blcs_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: leaq 1(%rdi), %rax
; X64-NEXT: orq %rdi, %rax
; X64-NEXT: retq
@@ -75,7 +75,7 @@ define i64 @test__blcs_u64(i64 %a0) {
define i64 @test__blsfill_u64(i64 %a0) {
; X64-LABEL: test__blsfill_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: subq $1, %rax
; X64-NEXT: orq %rdi, %rax
@@ -87,7 +87,7 @@ define i64 @test__blsfill_u64(i64 %a0) {
define i64 @test__blsic_u64(i64 %a0) {
; X64-LABEL: test__blsic_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorq $-1, %rax
; X64-NEXT: subq $1, %rdi
@@ -102,7 +102,7 @@ define i64 @test__blsic_u64(i64 %a0) {
define i64 @test__t1mskc_u64(i64 %a0) {
; X64-LABEL: test__t1mskc_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorq $-1, %rax
; X64-NEXT: addq $1, %rdi
@@ -117,7 +117,7 @@ define i64 @test__t1mskc_u64(i64 %a0) {
define i64 @test__tzmsk_u64(i64 %a0) {
; X64-LABEL: test__tzmsk_u64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorq $-1, %rax
; X64-NEXT: subq $1, %rdi
diff --git a/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll b/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
index 7a90ba3f5ec..862c421f63f 100644
--- a/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define i32 @test__bextri_u32(i32 %a0) {
; X32-LABEL: test__bextri_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: bextr $1, {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test__bextri_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: bextr $1, %edi, %eax
; X64-NEXT: retq
%1 = call i32 @llvm.x86.tbm.bextri.u32(i32 %a0, i32 1)
@@ -20,14 +20,14 @@ define i32 @test__bextri_u32(i32 %a0) {
define i32 @test__blcfill_u32(i32 %a0) {
; X32-LABEL: test__blcfill_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: leal 1(%ecx), %eax
; X32-NEXT: andl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test__blcfill_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: andl %edi, %eax
@@ -39,7 +39,7 @@ define i32 @test__blcfill_u32(i32 %a0) {
define i32 @test__blci_u32(i32 %a0) {
; X32-LABEL: test__blci_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: leal 1(%ecx), %eax
; X32-NEXT: xorl $-1, %eax
@@ -47,7 +47,7 @@ define i32 @test__blci_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__blci_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: xorl $-1, %eax
@@ -61,7 +61,7 @@ define i32 @test__blci_u32(i32 %a0) {
define i32 @test__blcic_u32(i32 %a0) {
; X32-LABEL: test__blcic_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: xorl $-1, %ecx
@@ -70,7 +70,7 @@ define i32 @test__blcic_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__blcic_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorl $-1, %eax
; X64-NEXT: addl $1, %edi
@@ -85,14 +85,14 @@ define i32 @test__blcic_u32(i32 %a0) {
define i32 @test__blcmsk_u32(i32 %a0) {
; X32-LABEL: test__blcmsk_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: leal 1(%ecx), %eax
; X32-NEXT: xorl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test__blcmsk_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: xorl %edi, %eax
@@ -104,14 +104,14 @@ define i32 @test__blcmsk_u32(i32 %a0) {
define i32 @test__blcs_u32(i32 %a0) {
; X32-LABEL: test__blcs_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: leal 1(%ecx), %eax
; X32-NEXT: orl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test__blcs_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: orl %edi, %eax
@@ -123,7 +123,7 @@ define i32 @test__blcs_u32(i32 %a0) {
define i32 @test__blsfill_u32(i32 %a0) {
; X32-LABEL: test__blsfill_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: subl $1, %eax
@@ -131,7 +131,7 @@ define i32 @test__blsfill_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__blsfill_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: subl $1, %eax
; X64-NEXT: orl %edi, %eax
@@ -143,7 +143,7 @@ define i32 @test__blsfill_u32(i32 %a0) {
define i32 @test__blsic_u32(i32 %a0) {
; X32-LABEL: test__blsic_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: xorl $-1, %ecx
@@ -152,7 +152,7 @@ define i32 @test__blsic_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__blsic_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorl $-1, %eax
; X64-NEXT: subl $1, %edi
@@ -167,7 +167,7 @@ define i32 @test__blsic_u32(i32 %a0) {
define i32 @test__t1mskc_u32(i32 %a0) {
; X32-LABEL: test__t1mskc_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: xorl $-1, %ecx
@@ -176,7 +176,7 @@ define i32 @test__t1mskc_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__t1mskc_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorl $-1, %eax
; X64-NEXT: addl $1, %edi
@@ -191,7 +191,7 @@ define i32 @test__t1mskc_u32(i32 %a0) {
define i32 @test__tzmsk_u32(i32 %a0) {
; X32-LABEL: test__tzmsk_u32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: xorl $-1, %ecx
@@ -200,7 +200,7 @@ define i32 @test__tzmsk_u32(i32 %a0) {
; X32-NEXT: retl
;
; X64-LABEL: test__tzmsk_u32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorl $-1, %eax
; X64-NEXT: subl $1, %edi
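; The expansions above spell out the bit-twiddling identities the TBM
; instructions implement, read directly off the lea/xor/add/sub/and/or lines:
;   blcfill x = x & (x + 1)    blci x    = x | ~(x + 1)
;   blcic x   = ~x & (x + 1)   blcmsk x  = x ^ (x + 1)
;   blcs x    = x | (x + 1)    blsfill x = x | (x - 1)
;   blsic x   = ~x | (x - 1)   t1mskc x  = ~x | (x + 1)
;   tzmsk x   = ~x & (x - 1)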
diff --git a/test/CodeGen/X86/tbm-intrinsics-x86_64.ll b/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
index d92e5f0a359..3c2e62276e5 100644
--- a/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
@@ -3,7 +3,7 @@
define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind readnone {
; CHECK-LABEL: test_x86_tbm_bextri_u32:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $2814, %edi, %eax # imm = 0xAFE
; CHECK-NEXT: retq
entry:
@@ -15,7 +15,7 @@ declare i32 @llvm.x86.tbm.bextri.u32(i32, i32) nounwind readnone
define i32 @test_x86_tbm_bextri_u32_m(i32* nocapture %a) nounwind readonly {
; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $2814, (%rdi), %eax # imm = 0xAFE
; CHECK-NEXT: retq
entry:
@@ -26,7 +26,7 @@ entry:
define i32 @test_x86_tbm_bextri_u32_z(i32 %a, i32 %b) nounwind readonly {
; CHECK-LABEL: test_x86_tbm_bextri_u32_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $2814, %edi, %eax # imm = 0xAFE
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -39,7 +39,7 @@ entry:
define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind readnone {
; CHECK-LABEL: test_x86_tbm_bextri_u64:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $2814, %rdi, %rax # imm = 0xAFE
; CHECK-NEXT: retq
entry:
@@ -51,7 +51,7 @@ declare i64 @llvm.x86.tbm.bextri.u64(i64, i64) nounwind readnone
define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind readonly {
; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $2814, (%rdi), %rax # imm = 0xAFE
; CHECK-NEXT: retq
entry:
@@ -62,7 +62,7 @@ entry:
define i64 @test_x86_tbm_bextri_u64_z(i64 %a, i64 %b) nounwind readnone {
; CHECK-LABEL: test_x86_tbm_bextri_u64_z:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $2814, %rdi, %rax # imm = 0xAFE
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/tbm_patterns.ll b/test/CodeGen/X86/tbm_patterns.ll
index b78f19fa101..b629d2e7f4d 100644
--- a/test/CodeGen/X86/tbm_patterns.ll
+++ b/test/CodeGen/X86/tbm_patterns.ll
@@ -5,7 +5,7 @@
define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
; CHECK-NEXT: retq
%t0 = lshr i32 %a, 4
@@ -16,7 +16,7 @@ define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind {
; Make sure we still use AH subreg trick for extracting bits 15:8
define i32 @test_x86_tbm_bextri_u32_subreg(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32_subreg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: retq
@@ -27,7 +27,7 @@ define i32 @test_x86_tbm_bextri_u32_subreg(i32 %a) nounwind {
define i32 @test_x86_tbm_bextri_u32_m(i32* nocapture %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, (%rdi), %eax # imm = 0xC04
; CHECK-NEXT: retq
%t0 = load i32, i32* %a
@@ -38,7 +38,7 @@ define i32 @test_x86_tbm_bextri_u32_m(i32* nocapture %a) nounwind {
define i32 @test_x86_tbm_bextri_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -51,7 +51,7 @@ define i32 @test_x86_tbm_bextri_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_bextri_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shrl $4, %edi
; CHECK-NEXT: testl $4095, %edi # imm = 0xFFF
; CHECK-NEXT: cmovnel %edx, %esi
@@ -66,7 +66,7 @@ define i32 @test_x86_tbm_bextri_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
; CHECK-NEXT: retq
%t0 = lshr i64 %a, 4
@@ -77,7 +77,7 @@ define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind {
; Make sure we still use AH subreg trick for extracting bits 15:8
define i64 @test_x86_tbm_bextri_u64_subreg(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64_subreg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: retq
@@ -88,7 +88,7 @@ define i64 @test_x86_tbm_bextri_u64_subreg(i64 %a) nounwind {
define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, (%rdi), %eax # imm = 0xC04
; CHECK-NEXT: retq
%t0 = load i64, i64* %a
@@ -99,7 +99,7 @@ define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind {
define i64 @test_x86_tbm_bextri_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -112,7 +112,7 @@ define i64 @test_x86_tbm_bextri_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_bextri_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shrl $4, %edi
; CHECK-NEXT: testl $4095, %edi # imm = 0xFFF
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -127,7 +127,7 @@ define i64 @test_x86_tbm_bextri_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcfill %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 %a, 1
@@ -137,7 +137,7 @@ define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_blcfill_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcfill %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -150,7 +150,7 @@ define i32 @test_x86_tbm_blcfill_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blcfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: testl %edi, %eax
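; In the _z2 variants only the flags of the computed value are consumed (they
; pick between %b and %c), so no TBM instruction is formed: lea plus test
; produces the same EFLAGS without tying the result to a register.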
@@ -166,7 +166,7 @@ define i32 @test_x86_tbm_blcfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcfill %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 %a, 1
@@ -176,7 +176,7 @@ define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_blcfill_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcfill %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -189,7 +189,7 @@ define i64 @test_x86_tbm_blcfill_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_blcfill_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: testq %rdi, %rax
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -204,7 +204,7 @@ define i64 @test_x86_tbm_blcfill_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 1, %a
@@ -215,7 +215,7 @@ define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_blci_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -229,7 +229,7 @@ define i32 @test_x86_tbm_blci_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blci_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: notl %eax
@@ -247,7 +247,7 @@ define i32 @test_x86_tbm_blci_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 1, %a
@@ -258,7 +258,7 @@ define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_blci_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -272,7 +272,7 @@ define i64 @test_x86_tbm_blci_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_blci_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: orq %rdi, %rax
@@ -289,7 +289,7 @@ define i64 @test_x86_tbm_blci_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_b:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %edi, %eax
; CHECK-NEXT: retq
%t0 = sub i32 -2, %a
@@ -299,7 +299,7 @@ define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind {
define i64 @test_x86_tbm_blci_u64_b(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u64_b:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %rdi, %rax
; CHECK-NEXT: retq
%t0 = sub i64 -2, %a
@@ -309,7 +309,7 @@ define i64 @test_x86_tbm_blci_u64_b(i64 %a) nounwind {
define i32 @test_x86_tbm_blcic_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcic %edi, %eax
; CHECK-NEXT: retq
%t0 = xor i32 %a, -1
@@ -320,7 +320,7 @@ define i32 @test_x86_tbm_blcic_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_blcic_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcic %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -334,7 +334,7 @@ define i32 @test_x86_tbm_blcic_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blcic_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: incl %edi
@@ -352,7 +352,7 @@ define i32 @test_x86_tbm_blcic_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcic %rdi, %rax
; CHECK-NEXT: retq
%t0 = xor i64 %a, -1
@@ -363,7 +363,7 @@ define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_blcic_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcic %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -377,7 +377,7 @@ define i64 @test_x86_tbm_blcic_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_blcic_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: incq %rdi
@@ -395,7 +395,7 @@ define i64 @test_x86_tbm_blcic_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcmsk %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 %a, 1
@@ -405,7 +405,7 @@ define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_blcmsk_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcmsk %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -418,7 +418,7 @@ define i32 @test_x86_tbm_blcmsk_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blcmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: xorl %edi, %eax
@@ -434,7 +434,7 @@ define i32 @test_x86_tbm_blcmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcmsk %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 %a, 1
@@ -444,7 +444,7 @@ define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_blcmsk_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcmsk %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -457,7 +457,7 @@ define i64 @test_x86_tbm_blcmsk_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_blcmsk_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: xorq %rdi, %rax
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -472,7 +472,7 @@ define i64 @test_x86_tbm_blcmsk_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcs %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 %a, 1
@@ -482,7 +482,7 @@ define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_blcs_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcs %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -495,7 +495,7 @@ define i32 @test_x86_tbm_blcs_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blcs_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
@@ -511,7 +511,7 @@ define i32 @test_x86_tbm_blcs_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcs %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 %a, 1
@@ -521,7 +521,7 @@ define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_blcs_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcs %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -534,7 +534,7 @@ define i64 @test_x86_tbm_blcs_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_blcs_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: orq %rdi, %rax
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -549,7 +549,7 @@ define i64 @test_x86_tbm_blcs_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsfill %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 %a, -1
@@ -559,7 +559,7 @@ define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_blsfill_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsfill %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -572,7 +572,7 @@ define i32 @test_x86_tbm_blsfill_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blsfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
@@ -588,7 +588,7 @@ define i32 @test_x86_tbm_blsfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsfill %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 %a, -1
@@ -598,7 +598,7 @@ define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_blsfill_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsfill %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -611,7 +611,7 @@ define i64 @test_x86_tbm_blsfill_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_blsfill_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq -1(%rdi), %rax
; CHECK-NEXT: orq %rdi, %rax
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -626,7 +626,7 @@ define i64 @test_x86_tbm_blsfill_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsic %edi, %eax
; CHECK-NEXT: retq
%t0 = xor i32 %a, -1
@@ -637,7 +637,7 @@ define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_blsic_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsic %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -651,7 +651,7 @@ define i32 @test_x86_tbm_blsic_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blsic_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: decl %edi
@@ -669,7 +669,7 @@ define i32 @test_x86_tbm_blsic_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsic %rdi, %rax
; CHECK-NEXT: retq
%t0 = xor i64 %a, -1
@@ -680,7 +680,7 @@ define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_blsic_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsic %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -694,7 +694,7 @@ define i64 @test_x86_tbm_blsic_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_blsic_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: decq %rdi
@@ -712,7 +712,7 @@ define i64 @test_x86_tbm_blsic_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: t1mskc %edi, %eax
; CHECK-NEXT: retq
%t0 = xor i32 %a, -1
@@ -723,7 +723,7 @@ define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_t1mskc_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: t1mskc %edi, %eax
; CHECK-NEXT: testl %eax, %eax
; CHECK-NEXT: cmovel %esi, %eax
@@ -738,7 +738,7 @@ define i32 @test_x86_tbm_t1mskc_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_t1mskc_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: incl %edi
@@ -756,7 +756,7 @@ define i32 @test_x86_tbm_t1mskc_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_t1mskc_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: t1mskc %rdi, %rax
; CHECK-NEXT: retq
%t0 = xor i64 %a, -1
@@ -767,7 +767,7 @@ define i64 @test_x86_tbm_t1mskc_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_t1mskc_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: t1mskc %rdi, %rax
; CHECK-NEXT: testq %rax, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
@@ -782,7 +782,7 @@ define i64 @test_x86_tbm_t1mskc_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_t1mskc_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: incq %rdi
@@ -800,7 +800,7 @@ define i64 @test_x86_tbm_t1mskc_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzmsk %edi, %eax
; CHECK-NEXT: retq
%t0 = xor i32 %a, -1
@@ -811,7 +811,7 @@ define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind {
define i32 @test_x86_tbm_tzmsk_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzmsk %edi, %eax
; CHECK-NEXT: testl %eax, %eax
; CHECK-NEXT: cmovel %esi, %eax
@@ -826,7 +826,7 @@ define i32 @test_x86_tbm_tzmsk_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_tzmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: decl %edi
@@ -844,7 +844,7 @@ define i32 @test_x86_tbm_tzmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzmsk %rdi, %rax
; CHECK-NEXT: retq
%t0 = xor i64 %a, -1
@@ -855,7 +855,7 @@ define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind {
define i64 @test_x86_tbm_tzmsk_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzmsk %rdi, %rax
; CHECK-NEXT: testq %rax, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
@@ -870,7 +870,7 @@ define i64 @test_x86_tbm_tzmsk_u64_z(i64 %a, i64 %b) nounwind {
define i64 @test_x86_tbm_tzmsk_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: decq %rdi
@@ -888,7 +888,7 @@ define i64 @test_x86_tbm_tzmsk_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
define i64 @test_and_large_constant_mask(i64 %x) {
; CHECK-LABEL: test_and_large_constant_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $15872, %rdi, %rax # imm = 0x3E00
; CHECK-NEXT: retq
entry:
@@ -898,7 +898,7 @@ entry:
define i64 @test_and_large_constant_mask_load(i64* %x) {
; CHECK-LABEL: test_and_large_constant_mask_load:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $15872, (%rdi), %rax # imm = 0x3E00
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/tls-pie.ll b/test/CodeGen/X86/tls-pie.ll
index d9bccfcf506..4f5c4f8fed5 100644
--- a/test/CodeGen/X86/tls-pie.ll
+++ b/test/CodeGen/X86/tls-pie.ll
@@ -8,17 +8,17 @@
define i32 @f1() {
; X86-LABEL: f1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl %gs:i@NTPOFF, %eax
; X86-NEXT: retl
;
; X32-LABEL: f1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl %fs:i@TPOFF, %eax
; X32-NEXT: retq
;
; X64-LABEL: f1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl %fs:i@TPOFF, %eax
; X64-NEXT: retq
entry:
@@ -28,19 +28,19 @@ entry:
define i32* @f2() {
; X86-LABEL: f2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl %gs:0, %eax
; X86-NEXT: leal i@NTPOFF(%eax), %eax
; X86-NEXT: retl
;
; X32-LABEL: f2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl %fs:0, %eax
; X32-NEXT: leal i@TPOFF(%rax), %eax
; X32-NEXT: retq
;
; X64-LABEL: f2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %fs:0, %rax
; X64-NEXT: leaq i@TPOFF(%rax), %rax
; X64-NEXT: retq
@@ -50,7 +50,7 @@ entry:
define i32 @f3() {
; X86-LABEL: f3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: calll .L2$pb
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: .L2$pb:
@@ -63,13 +63,13 @@ define i32 @f3() {
; X86-NEXT: retl
;
; X32-LABEL: f3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl i2@{{.*}}(%rip), %eax
; X32-NEXT: movl %fs:(%eax), %eax
; X32-NEXT: retq
;
; X64-LABEL: f3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq i2@{{.*}}(%rip), %rax
; X64-NEXT: movl %fs:(%rax), %eax
; X64-NEXT: retq
@@ -80,7 +80,7 @@ entry:
define i32* @f4() {
; X86-LABEL: f4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: calll .L3$pb
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: .L3$pb:
@@ -93,13 +93,13 @@ define i32* @f4() {
; X86-NEXT: retl
;
; X32-LABEL: f4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl %fs:0, %eax
; X32-NEXT: addl i2@{{.*}}(%rip), %eax
; X32-NEXT: retq
;
; X64-LABEL: f4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %fs:0, %rax
; X64-NEXT: addq i2@{{.*}}(%rip), %rax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/tls-shrink-wrapping.ll b/test/CodeGen/X86/tls-shrink-wrapping.ll
index abd7023113c..216bb95f721 100644
--- a/test/CodeGen/X86/tls-shrink-wrapping.ll
+++ b/test/CodeGen/X86/tls-shrink-wrapping.ll
@@ -37,7 +37,7 @@ if.end: ; preds = %if.then, %entry
; CHECK: g: # @g
; CHECK-NEXT: .cfi_startproc
-; CHECK-NEXT: # BB#0: # %entry
+; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
diff --git a/test/CodeGen/X86/trunc-ext-ld-st.ll b/test/CodeGen/X86/trunc-ext-ld-st.ll
index 889d7185459..f926cfa9111 100644
--- a/test/CodeGen/X86/trunc-ext-ld-st.ll
+++ b/test/CodeGen/X86/trunc-ext-ld-st.ll
@@ -5,7 +5,7 @@
; A single 16-bit load + a single 16-bit store
define void @load_2_i8(<2 x i8>* %A) {
; SSE2-LABEL: load_2_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -21,7 +21,7 @@ define void @load_2_i8(<2 x i8>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_2_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: paddq {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -36,7 +36,7 @@ define void @load_2_i8(<2 x i8>* %A) {
; Read 32-bits
define void @load_2_i16(<2 x i16>* %A) {
; SSE2-LABEL: load_2_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
@@ -47,7 +47,7 @@ define void @load_2_i16(<2 x i16>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_2_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: paddq {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -62,7 +62,7 @@ define void @load_2_i16(<2 x i16>* %A) {
define void @load_2_i32(<2 x i32>* %A) {
; SSE2-LABEL: load_2_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
@@ -71,7 +71,7 @@ define void @load_2_i32(<2 x i32>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_2_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -85,7 +85,7 @@ define void @load_2_i32(<2 x i32>* %A) {
define void @load_4_i8(<4 x i8>* %A) {
; SSE2-LABEL: load_4_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -97,7 +97,7 @@ define void @load_4_i8(<4 x i8>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_4_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -111,7 +111,7 @@ define void @load_4_i8(<4 x i8>* %A) {
define void @load_4_i16(<4 x i16>* %A) {
; SSE2-LABEL: load_4_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: paddw {{.*}}(%rip), %xmm0
@@ -122,7 +122,7 @@ define void @load_4_i16(<4 x i16>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_4_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: paddw {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -136,7 +136,7 @@ define void @load_4_i16(<4 x i16>* %A) {
define void @load_8_i8(<8 x i8>* %A) {
; SSE2-LABEL: load_8_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: paddb %xmm0, %xmm0
@@ -146,7 +146,7 @@ define void @load_8_i8(<8 x i8>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_8_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm0
diff --git a/test/CodeGen/X86/trunc-store.ll b/test/CodeGen/X86/trunc-store.ll
index a241876ff41..3da9240fa63 100644
--- a/test/CodeGen/X86/trunc-store.ll
+++ b/test/CodeGen/X86/trunc-store.ll
@@ -28,14 +28,14 @@
define void @fn1() {
; CHECK-LABEL: fn1:
-; CHECK: # BB#0: # %for.cond
+; CHECK: # %bb.0: # %for.cond
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movb $0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: cmpq $8, %rax
; CHECK-NEXT: jne .LBB0_1
-; CHECK-NEXT: # BB#2: # %middle.block
+; CHECK-NEXT: # %bb.2: # %middle.block
; CHECK-NEXT: retq
for.cond:
br label %vector.body
diff --git a/test/CodeGen/X86/trunc-to-bool.ll b/test/CodeGen/X86/trunc-to-bool.ll
index 8e253f11e93..d4f2e585283 100644
--- a/test/CodeGen/X86/trunc-to-bool.ll
+++ b/test/CodeGen/X86/trunc-to-bool.ll
@@ -6,7 +6,7 @@
define zeroext i1 @test1(i32 %X) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al
; CHECK-NEXT: andb $1, %al
; CHECK-NEXT: retl
@@ -16,12 +16,12 @@ define zeroext i1 @test1(i32 %X) nounwind {
define i1 @test2(i32 %val, i32 %mask) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: btl %ecx, %eax
; CHECK-NEXT: jae .LBB1_2
-; CHECK-NEXT: # BB#1: # %ret_true
+; CHECK-NEXT: # %bb.1: # %ret_true
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB1_2: # %ret_false
@@ -40,11 +40,11 @@ ret_false:
define i32 @test3(i8* %ptr) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: testb $1, (%eax)
; CHECK-NEXT: je .LBB2_2
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movl $21, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB2_2: # %cond_false
@@ -61,10 +61,10 @@ cond_false:
define i32 @test4(i8* %ptr) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB3_2
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movl $21, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB3_2: # %cond_false
@@ -80,7 +80,7 @@ cond_false:
define i32 @test5(double %d) nounwind {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fnstcw (%esp)
@@ -92,7 +92,7 @@ define i32 @test5(double %d) nounwind {
; CHECK-NEXT: fldcw (%esp)
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB4_2
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movl $21, %eax
; CHECK-NEXT: popl %ecx
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/uint64-to-float.ll b/test/CodeGen/X86/uint64-to-float.ll
index 60f9487b466..ac7371fdf1b 100644
--- a/test/CodeGen/X86/uint64-to-float.ll
+++ b/test/CodeGen/X86/uint64-to-float.ll
@@ -8,7 +8,7 @@
define float @test(i64 %a) nounwind {
; X86-LABEL: test:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -29,10 +29,10 @@ define float @test(i64 %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: js .LBB0_1
-; X64-NEXT: # BB#2: # %entry
+; X64-NEXT: # %bb.2: # %entry
; X64-NEXT: cvtsi2ssq %rdi, %xmm0
; X64-NEXT: retq
; X64-NEXT: .LBB0_1:
diff --git a/test/CodeGen/X86/uint_to_fp-2.ll b/test/CodeGen/X86/uint_to_fp-2.ll
index b06b6c9109f..f925488632f 100644
--- a/test/CodeGen/X86/uint_to_fp-2.ll
+++ b/test/CodeGen/X86/uint_to_fp-2.ll
@@ -4,7 +4,7 @@
; rdar://6504833
define float @test1(i32 %x) nounwind readnone {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -24,7 +24,7 @@ entry:
; PR10802
define float @test2(<4 x i32> %x) nounwind readnone ssp {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
diff --git a/test/CodeGen/X86/uint_to_fp-3.ll b/test/CodeGen/X86/uint_to_fp-3.ll
index 47f8abfe041..9efd9a5bef5 100644
--- a/test/CodeGen/X86/uint_to_fp-3.ll
+++ b/test/CodeGen/X86/uint_to_fp-3.ll
@@ -8,25 +8,25 @@
define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
; X32-SSE-LABEL: mask_ucvt_4i32_4f32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_ucvt_4i32_4f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_ucvt_4i32_4f32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: mask_ucvt_4i32_4f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT: retq
@@ -37,7 +37,7 @@ define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; X32-SSE-LABEL: mask_ucvt_4i32_4f64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm2
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -46,13 +46,13 @@ define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_ucvt_4i32_4f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_ucvt_4i32_4f64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: cvtdq2pd %xmm0, %xmm2
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -61,7 +61,7 @@ define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: mask_ucvt_4i32_4f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; X64-AVX-NEXT: retq
diff --git a/test/CodeGen/X86/uint_to_fp.ll b/test/CodeGen/X86/uint_to_fp.ll
index a2784fdcbbd..afc5464fb70 100644
--- a/test/CodeGen/X86/uint_to_fp.ll
+++ b/test/CodeGen/X86/uint_to_fp.ll
@@ -5,7 +5,7 @@
define void @test(i32 %x, float* %y) nounwind {
; X32-LABEL: test:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: shrl $23, %ecx
@@ -14,7 +14,7 @@ define void @test(i32 %x, float* %y) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: shrl $23, %edi
; X64-NEXT: cvtsi2ssl %edi, %xmm0
; X64-NEXT: movss %xmm0, (%rsi)
diff --git a/test/CodeGen/X86/umul-with-overflow.ll b/test/CodeGen/X86/umul-with-overflow.ll
index 2e877a0b6e0..22e1057b803 100644
--- a/test/CodeGen/X86/umul-with-overflow.ll
+++ b/test/CodeGen/X86/umul-with-overflow.ll
@@ -6,7 +6,7 @@ declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
define zeroext i1 @a(i32 %x) nounwind {
; X86-LABEL: a:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $3, %ecx
; X86-NEXT: mull %ecx
@@ -14,7 +14,7 @@ define zeroext i1 @a(i32 %x) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: a:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $3, %ecx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: mull %ecx
@@ -27,14 +27,14 @@ define zeroext i1 @a(i32 %x) nounwind {
define i32 @test2(i32 %a, i32 %b) nounwind readnone {
; X86-LABEL: test2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %esi, %edi
; X64-NEXT: leal (%rdi,%rdi), %eax
@@ -48,7 +48,7 @@ entry:
define i32 @test3(i32 %a, i32 %b) nounwind readnone {
; X86-LABEL: test3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $4, %ecx
@@ -56,7 +56,7 @@ define i32 @test3(i32 %a, i32 %b) nounwind readnone {
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rsi), %eax
diff --git a/test/CodeGen/X86/unaligned-32-byte-memops.ll b/test/CodeGen/X86/unaligned-32-byte-memops.ll
index 391f7a38a37..c7825400910 100644
--- a/test/CodeGen/X86/unaligned-32-byte-memops.ll
+++ b/test/CodeGen/X86/unaligned-32-byte-memops.ll
@@ -7,18 +7,18 @@
define <8 x float> @load32bytes(<8 x float>* %Ap) {
; AVXSLOW-LABEL: load32bytes:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovaps (%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: load32bytes:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups (%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: load32bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups (%rdi), %ymm0
; AVX2-NEXT: retq
%A = load <8 x float>, <8 x float>* %Ap, align 16
@@ -29,20 +29,20 @@ define <8 x float> @load32bytes(<8 x float>* %Ap) {
define void @store32bytes(<8 x float> %A, <8 x float>* %P) {
; AVXSLOW-LABEL: store32bytes:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, 16(%rdi)
; AVXSLOW-NEXT: vmovaps %xmm0, (%rdi)
; AVXSLOW-NEXT: vzeroupper
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: store32bytes:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups %ymm0, (%rdi)
; AVXFAST-NEXT: vzeroupper
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: store32bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -54,18 +54,18 @@ define void @store32bytes(<8 x float> %A, <8 x float>* %P) {
define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 48(%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups 48(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups 48(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
@@ -80,17 +80,17 @@ define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) {
define <8 x float> @combine_16_byte_loads_aligned(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_aligned:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovaps 48(%rdi), %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_aligned:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovaps 48(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_aligned:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps 48(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
@@ -105,18 +105,18 @@ define <8 x float> @combine_16_byte_loads_aligned(<4 x float>* %ptr) {
define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 64(%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 80(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups 64(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups 64(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4
@@ -133,7 +133,7 @@ define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) {
define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i64:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddq 96(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddq 80(%rdi), %xmm0, %xmm0
@@ -141,7 +141,7 @@ define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i64:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddq 96(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddq 80(%rdi), %xmm0, %xmm0
@@ -149,7 +149,7 @@ define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq 80(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 5
@@ -163,7 +163,7 @@ define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i32:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddd 112(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddd 96(%rdi), %xmm0, %xmm0
@@ -171,7 +171,7 @@ define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i32:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddd 112(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddd 96(%rdi), %xmm0, %xmm0
@@ -179,7 +179,7 @@ define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd 96(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 6
@@ -193,7 +193,7 @@ define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i16:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddw 128(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddw 112(%rdi), %xmm0, %xmm0
@@ -201,7 +201,7 @@ define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i16:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddw 128(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddw 112(%rdi), %xmm0, %xmm0
@@ -209,7 +209,7 @@ define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddw 112(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 7
@@ -223,7 +223,7 @@ define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i8:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddb 144(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddb 128(%rdi), %xmm0, %xmm0
@@ -231,7 +231,7 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i8:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddb 144(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddb 128(%rdi), %xmm0, %xmm0
@@ -239,7 +239,7 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddb 128(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 8
@@ -253,19 +253,19 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
define <4 x double> @combine_16_byte_loads_double(<2 x double>* %ptr, <4 x double> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_double:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 144(%rdi), %xmm1
; AVXSLOW-NEXT: vinsertf128 $1, 160(%rdi), %ymm1, %ymm1
; AVXSLOW-NEXT: vaddpd %ymm0, %ymm1, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_double:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vaddpd 144(%rdi), %ymm0, %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_double:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd 144(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 9
diff --git a/test/CodeGen/X86/urem-i8-constant.ll b/test/CodeGen/X86/urem-i8-constant.ll
index a9cb99c0d35..7405a48de78 100644
--- a/test/CodeGen/X86/urem-i8-constant.ll
+++ b/test/CodeGen/X86/urem-i8-constant.ll
@@ -5,7 +5,7 @@
define i8 @foo(i8 %tmp325) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: imull $111, %ecx, %eax
; CHECK-NEXT: shrl $12, %eax
diff --git a/test/CodeGen/X86/urem-power-of-two.ll b/test/CodeGen/X86/urem-power-of-two.ll
index 9509cfe1231..8dc5e5338aa 100644
--- a/test/CodeGen/X86/urem-power-of-two.ll
+++ b/test/CodeGen/X86/urem-power-of-two.ll
@@ -6,14 +6,14 @@
define i64 @const_pow_2(i64 %x) {
; X86-LABEL: const_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: andl $31, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: const_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $31, %edi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
@@ -25,7 +25,7 @@ define i64 @const_pow_2(i64 %x) {
define i25 @shift_left_pow_2(i25 %x, i25 %y) {
; X86-LABEL: shift_left_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl $1, %eax
; X86-NEXT: shll %cl, %eax
@@ -34,7 +34,7 @@ define i25 @shift_left_pow_2(i25 %x, i25 %y) {
; X86-NEXT: retl
;
; X64-LABEL: shift_left_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $1, %eax
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shll %cl, %eax
@@ -50,7 +50,7 @@ define i25 @shift_left_pow_2(i25 %x, i25 %y) {
define i16 @shift_right_pow_2(i16 %x, i16 %y) {
; X86-LABEL: shift_right_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl $32768, %eax # imm = 0x8000
; X86-NEXT: shrl %cl, %eax
@@ -60,7 +60,7 @@ define i16 @shift_right_pow_2(i16 %x, i16 %y) {
; X86-NEXT: retl
;
; X64-LABEL: shift_right_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $32768, %eax # imm = 0x8000
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shrl %cl, %eax
@@ -77,7 +77,7 @@ define i16 @shift_right_pow_2(i16 %x, i16 %y) {
define i8 @and_pow_2(i8 %x, i8 %y) {
; X86-LABEL: and_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: andb $4, %cl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
@@ -88,7 +88,7 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
; X86-NEXT: retl
;
; X64-LABEL: and_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $4, %sil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
@@ -105,12 +105,12 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_uniform_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_uniform_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
%urem = urem <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
@@ -119,12 +119,12 @@ define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) {
define <4 x i32> @vec_const_nonuniform_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_nonuniform_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_nonuniform_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
%urem = urem <4 x i32> %x, <i32 2, i32 4, i32 8, i32 16>
diff --git a/test/CodeGen/X86/use-add-flags.ll b/test/CodeGen/X86/use-add-flags.ll
index aadb1b8b6fd..37baef9fb2c 100644
--- a/test/CodeGen/X86/use-add-flags.ll
+++ b/test/CodeGen/X86/use-add-flags.ll
@@ -9,14 +9,14 @@
define i32 @test1(i32* %x, i32 %y, i32 %a, i32 %b) nounwind {
; LNX-LABEL: test1:
-; LNX: # BB#0:
+; LNX: # %bb.0:
; LNX-NEXT: addl (%rdi), %esi
; LNX-NEXT: cmovnsl %ecx, %edx
; LNX-NEXT: movl %edx, %eax
; LNX-NEXT: retq
;
; WIN-LABEL: test1:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: addl (%rcx), %edx
; WIN-NEXT: cmovnsl %r9d, %r8d
; WIN-NEXT: movl %r8d, %eax
@@ -35,10 +35,10 @@ declare void @foo(i32)
define void @test2(i32 %x) nounwind {
; LNX-LABEL: test2:
-; LNX: # BB#0:
+; LNX: # %bb.0:
; LNX-NEXT: testb $16, %dil
; LNX-NEXT: jne .LBB1_2
-; LNX-NEXT: # BB#1: # %true
+; LNX-NEXT: # %bb.1: # %true
; LNX-NEXT: pushq %rax
; LNX-NEXT: callq foo
; LNX-NEXT: popq %rax
@@ -46,11 +46,11 @@ define void @test2(i32 %x) nounwind {
; LNX-NEXT: retq
;
; WIN-LABEL: test2:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: subq $40, %rsp
; WIN-NEXT: testb $16, %cl
; WIN-NEXT: jne .LBB1_2
-; WIN-NEXT: # BB#1: # %true
+; WIN-NEXT: # %bb.1: # %true
; WIN-NEXT: callq foo
; WIN-NEXT: .LBB1_2: # %false
; WIN-NEXT: addq $40, %rsp
@@ -69,10 +69,10 @@ false:
define void @test3(i32 %x) nounwind {
; LNX-LABEL: test3:
-; LNX: # BB#0:
+; LNX: # %bb.0:
; LNX-NEXT: andl $16, %edi
; LNX-NEXT: jne .LBB2_2
-; LNX-NEXT: # BB#1: # %true
+; LNX-NEXT: # %bb.1: # %true
; LNX-NEXT: pushq %rax
; LNX-NEXT: callq foo
; LNX-NEXT: popq %rax
@@ -80,11 +80,11 @@ define void @test3(i32 %x) nounwind {
; LNX-NEXT: retq
;
; WIN-LABEL: test3:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: subq $40, %rsp
; WIN-NEXT: andl $16, %ecx
; WIN-NEXT: jne .LBB2_2
-; WIN-NEXT: # BB#1: # %true
+; WIN-NEXT: # %bb.1: # %true
; WIN-NEXT: callq foo
; WIN-NEXT: .LBB2_2: # %false
; WIN-NEXT: addq $40, %rsp
diff --git a/test/CodeGen/X86/v2f32.ll b/test/CodeGen/X86/v2f32.ll
index 2fb46edc5c8..cabefa46c50 100644
--- a/test/CodeGen/X86/v2f32.ll
+++ b/test/CodeGen/X86/v2f32.ll
@@ -5,14 +5,14 @@
; PR7518
define void @test1(<2 x float> %Q, float *%P2) nounwind {
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X64-NEXT: addss %xmm0, %xmm1
; X64-NEXT: movss %xmm1, (%rdi)
; X64-NEXT: retq
;
; X32-LABEL: test1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X32-NEXT: addss %xmm0, %xmm1
@@ -27,12 +27,12 @@ define void @test1(<2 x float> %Q, float *%P2) nounwind {
define <2 x float> @test2(<2 x float> %Q, <2 x float> %R, <2 x float> *%P) nounwind {
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: retq
;
; X32-LABEL: test2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm1, %xmm0
; X32-NEXT: retl
%Z = fadd <2 x float> %Q, %R
@@ -41,12 +41,12 @@ define <2 x float> @test2(<2 x float> %Q, <2 x float> %R, <2 x float> *%P) nounw
define <2 x float> @test3(<4 x float> %A) nounwind {
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: retq
;
; X32-LABEL: test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: retl
%B = shufflevector <4 x float> %A, <4 x float> undef, <2 x i32> <i32 0, i32 1>
@@ -56,12 +56,12 @@ define <2 x float> @test3(<4 x float> %A) nounwind {
define <2 x float> @test4(<2 x float> %A) nounwind {
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: retq
;
; X32-LABEL: test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: retl
%C = fadd <2 x float> %A, %A
@@ -70,13 +70,13 @@ define <2 x float> @test4(<2 x float> %A) nounwind {
define <4 x float> @test5(<4 x float> %A) nounwind {
; X64-LABEL: test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: retq
;
; X32-LABEL: test5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: retl
diff --git a/test/CodeGen/X86/v4f32-immediate.ll b/test/CodeGen/X86/v4f32-immediate.ll
index 7945b1093f8..cc73cd5a63a 100644
--- a/test/CodeGen/X86/v4f32-immediate.ll
+++ b/test/CodeGen/X86/v4f32-immediate.ll
@@ -4,12 +4,12 @@
define <4 x float> @foo() {
; X32-LABEL: foo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [3.223542e+00,2.300000e+00,1.200000e+00,1.000000e-01]
; X32-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [3.223542e+00,2.300000e+00,1.200000e+00,1.000000e-01]
; X64-NEXT: retq
ret <4 x float> <float 0x4009C9D0A0000000, float 0x4002666660000000, float 0x3FF3333340000000, float 0x3FB99999A0000000>
diff --git a/test/CodeGen/X86/v8i1-masks.ll b/test/CodeGen/X86/v8i1-masks.ll
index e378cf33dea..5175850c734 100644
--- a/test/CodeGen/X86/v8i1-masks.ll
+++ b/test/CodeGen/X86/v8i1-masks.ll
@@ -4,7 +4,7 @@
define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: and_masks:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -20,7 +20,7 @@ define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
; X32-NEXT: retl
;
; X64-LABEL: and_masks:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovups (%rdi), %ymm0
; X64-NEXT: vmovups (%rsi), %ymm1
; X64-NEXT: vcmpltps %ymm0, %ymm1, %ymm1
@@ -44,7 +44,7 @@ define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: neg_masks:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups (%ecx), %ymm0
@@ -55,7 +55,7 @@ define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
; X32-NEXT: retl
;
; X64-LABEL: neg_masks:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovups (%rsi), %ymm0
; X64-NEXT: vcmpnltps (%rdi), %ymm0, %ymm0
; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
diff --git a/test/CodeGen/X86/vaargs.ll b/test/CodeGen/X86/vaargs.ll
index 3767f41c2aa..7d27684c51c 100644
--- a/test/CodeGen/X86/vaargs.ll
+++ b/test/CodeGen/X86/vaargs.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-apple-macosx10.9.0"
define i32 @sum(i32 %count, ...) nounwind optsize ssp uwtable {
; CHECK: testb %al, %al
; CHECK-NEXT: je
-; CHECK-NEXT: ## BB#{{[0-9]+}}:
+; CHECK-NEXT: ## %bb.{{[0-9]+}}:
; CHECK-NEXT: vmovaps %xmm0, 48(%rsp)
; CHECK-NEXT: vmovaps %xmm1, 64(%rsp)
; CHECK-NEXT: vmovaps %xmm2, 80(%rsp)
diff --git a/test/CodeGen/X86/vaes-intrinsics-avx-x86.ll b/test/CodeGen/X86/vaes-intrinsics-avx-x86.ll
index cc5915df4f0..06acb27218e 100644
--- a/test/CodeGen/X86/vaes-intrinsics-avx-x86.ll
+++ b/test/CodeGen/X86/vaes-intrinsics-avx-x86.ll
@@ -4,7 +4,7 @@
; {vaes, avx}
define <4 x i64> @test_x86_aesni_aesenc_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX-LABEL: test_x86_aesni_aesenc_256:
-; VAES_AVX: # BB#0:
+; VAES_AVX: # %bb.0:
; VAES_AVX-NEXT: vaesenc %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0xdc,0xc1]
; VAES_AVX-NEXT: retl # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesenc.256(<4 x i64> %a0, <4 x i64> %a1)
diff --git a/test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll b/test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll
index dd451b80430..b36400df16d 100644
--- a/test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll
+++ b/test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll
@@ -2,7 +2,7 @@
define <8 x i64> @test_x86_aesni_aesenc_512(<8 x i64> %a0, <8 x i64> %a1) {
; VAES_AVX512-LABEL: test_x86_aesni_aesenc_512:
-; VAES_AVX512: # BB#0:
+; VAES_AVX512: # %bb.0:
; VAES_AVX512-NEXT: vaesenc %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0xdc,0xc1]
; VAES_AVX512-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.aesni.aesenc.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -12,7 +12,7 @@ declare <8 x i64> @llvm.x86.aesni.aesenc.512(<8 x i64>, <8 x i64>) nounwind read
define <8 x i64> @test_x86_aesni_aesenclast_512(<8 x i64> %a0, <8 x i64> %a1) {
; VAES_AVX512-LABEL: test_x86_aesni_aesenclast_512:
-; VAES_AVX512: # BB#0:
+; VAES_AVX512: # %bb.0:
; VAES_AVX512-NEXT: vaesenclast %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0xdd,0xc1]
; VAES_AVX512-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.aesni.aesenclast.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -22,7 +22,7 @@ declare <8 x i64> @llvm.x86.aesni.aesenclast.512(<8 x i64>, <8 x i64>) nounwind
define <8 x i64> @test_x86_aesni_aesdec_512(<8 x i64> %a0, <8 x i64> %a1) {
; VAES_AVX512-LABEL: test_x86_aesni_aesdec_512:
-; VAES_AVX512: # BB#0:
+; VAES_AVX512: # %bb.0:
; VAES_AVX512-NEXT: vaesdec %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0xde,0xc1]
; VAES_AVX512-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.aesni.aesdec.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -32,7 +32,7 @@ declare <8 x i64> @llvm.x86.aesni.aesdec.512(<8 x i64>, <8 x i64>) nounwind read
define <8 x i64> @test_x86_aesni_aesdeclast_512(<8 x i64> %a0, <8 x i64> %a1) {
; VAES_AVX512-LABEL: test_x86_aesni_aesdeclast_512:
-; VAES_AVX512: # BB#0:
+; VAES_AVX512: # %bb.0:
; VAES_AVX512-NEXT: vaesdeclast %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0xdf,0xc1]
; VAES_AVX512-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.aesni.aesdeclast.512(<8 x i64> %a0, <8 x i64> %a1)
diff --git a/test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll b/test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll
index 29285bfbeaf..79b3b7bfba5 100644
--- a/test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll
+++ b/test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll
@@ -2,7 +2,7 @@
define <2 x i64> @test_x86_aesni_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesenc:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xdc,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1)
@@ -12,7 +12,7 @@ declare <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_aesni_aesenc_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesenc_256:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesenc %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xdc,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesenc.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -22,7 +22,7 @@ declare <4 x i64> @llvm.x86.aesni.aesenc.256(<4 x i64>, <4 x i64>) nounwind read
define <2 x i64> @test_x86_aesni_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesenclast:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xdd,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1)
@@ -32,7 +32,7 @@ declare <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64>, <2 x i64>) nounwind read
define <4 x i64> @test_x86_aesni_aesenclast_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesenclast_256:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesenclast %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xdd,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesenclast.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -42,7 +42,7 @@ declare <4 x i64> @llvm.x86.aesni.aesenclast.256(<4 x i64>, <4 x i64>) nounwind
define <2 x i64> @test_x86_aesni_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesdec:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xde,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1)
@@ -52,7 +52,7 @@ declare <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_aesni_aesdec_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesdec_256:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesdec %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xde,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesdec.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -62,7 +62,7 @@ declare <4 x i64> @llvm.x86.aesni.aesdec.256(<4 x i64>, <4 x i64>) nounwind read
define <2 x i64> @test_x86_aesni_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesdeclast:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xdf,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1)
@@ -72,7 +72,7 @@ declare <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64>, <2 x i64>) nounwind read
define <4 x i64> @test_x86_aesni_aesdeclast_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesdeclast_256:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesdeclast %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xdf,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesdeclast.256(<4 x i64> %a0, <4 x i64> %a1)
diff --git a/test/CodeGen/X86/var-permute-128.ll b/test/CodeGen/X86/var-permute-128.ll
index 208fab88b58..fb5f02e8d5d 100644
--- a/test/CodeGen/X86/var-permute-128.ll
+++ b/test/CodeGen/X86/var-permute-128.ll
@@ -9,7 +9,7 @@
define <2 x i64> @var_shuffle_v2i64(<2 x i64> %v, <2 x i64> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %xmm1, %rax
; SSSE3-NEXT: andl $1, %eax
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -22,7 +22,7 @@ define <2 x i64> @var_shuffle_v2i64(<2 x i64> %v, <2 x i64> %indices) nounwind {
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq %xmm1, %rax
; AVX-NEXT: andl $1, %eax
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
@@ -43,7 +43,7 @@ define <2 x i64> @var_shuffle_v2i64(<2 x i64> %v, <2 x i64> %indices) nounwind {
define <4 x i32> @var_shuffle_v4i32(<4 x i32> %v, <4 x i32> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSSE3-NEXT: movq %xmm2, %rax
; SSSE3-NEXT: movq %rax, %rcx
@@ -66,7 +66,7 @@ define <4 x i32> @var_shuffle_v4i32(<4 x i32> %v, <4 x i32> %indices) nounwind {
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm1, %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: sarq $32, %rcx
@@ -100,7 +100,7 @@ define <4 x i32> @var_shuffle_v4i32(<4 x i32> %v, <4 x i32> %indices) nounwind {
define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd %xmm1, %r8d
; SSSE3-NEXT: pextrw $1, %xmm1, %r9d
; SSSE3-NEXT: pextrw $2, %xmm1, %r10d
@@ -144,7 +144,7 @@ define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
; SSSE3-NEXT: retq
;
; AVXNOVLBW-LABEL: var_shuffle_v8i16:
-; AVXNOVLBW: # BB#0:
+; AVXNOVLBW: # %bb.0:
; AVXNOVLBW-NEXT: vmovd %xmm1, %eax
; AVXNOVLBW-NEXT: vpextrw $1, %xmm1, %r10d
; AVXNOVLBW-NEXT: vpextrw $2, %xmm1, %ecx
@@ -174,7 +174,7 @@ define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
; AVXNOVLBW-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v8i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <8 x i16> %indices, i32 0
@@ -206,13 +206,13 @@ define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
define <16 x i8> @var_shuffle_v16i8(<16 x i8> %v, <16 x i8> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb %xmm0, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%index0 = extractelement <16 x i8> %indices, i32 0
@@ -268,7 +268,7 @@ define <16 x i8> @var_shuffle_v16i8(<16 x i8> %v, <16 x i8> %indices) nounwind {
define <2 x double> @var_shuffle_v2f64(<2 x double> %v, <2 x i64> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v2f64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %xmm1, %rax
; SSSE3-NEXT: andl $1, %eax
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -280,7 +280,7 @@ define <2 x double> @var_shuffle_v2f64(<2 x double> %v, <2 x i64> %indices) noun
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq %xmm1, %rax
; AVX-NEXT: andl $1, %eax
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
@@ -300,7 +300,7 @@ define <2 x double> @var_shuffle_v2f64(<2 x double> %v, <2 x i64> %indices) noun
define <4 x float> @var_shuffle_v4f32(<4 x float> %v, <4 x i32> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v4f32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSSE3-NEXT: movq %xmm2, %rax
; SSSE3-NEXT: movq %rax, %rcx
@@ -323,7 +323,7 @@ define <4 x float> @var_shuffle_v4f32(<4 x float> %v, <4 x i32> %indices) nounwi
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm1, %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: sarq $32, %rcx
diff --git a/test/CodeGen/X86/var-permute-256.ll b/test/CodeGen/X86/var-permute-256.ll
index beef4643c13..82a790298f2 100644
--- a/test/CodeGen/X86/var-permute-256.ll
+++ b/test/CodeGen/X86/var-permute-256.ll
@@ -8,7 +8,7 @@
define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -35,7 +35,7 @@ define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -62,7 +62,7 @@ define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -89,12 +89,12 @@ define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v4i64:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <4 x i64> %indices, i32 0
@@ -114,7 +114,7 @@ define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -156,7 +156,7 @@ define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v8i32:
-; INT256: # BB#0:
+; INT256: # %bb.0:
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
%index0 = extractelement <8 x i32> %indices, i32 0
@@ -188,7 +188,7 @@ define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -251,7 +251,7 @@ define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwi
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -314,7 +314,7 @@ define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwi
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -377,7 +377,7 @@ define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwi
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: movq %rsp, %rbp
; AVX512VL-NEXT: andq $-32, %rsp
@@ -440,7 +440,7 @@ define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwi
; AVX512VL-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v16i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <16 x i16> %indices, i32 0
@@ -496,7 +496,7 @@ define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwi
define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -624,7 +624,7 @@ define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -752,7 +752,7 @@ define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -880,7 +880,7 @@ define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: movq %rsp, %rbp
; AVX512VL-NEXT: andq $-32, %rsp
@@ -1008,7 +1008,7 @@ define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
; AVX512VL-NEXT: retq
;
; VBMI-LABEL: var_shuffle_v32i8:
-; VBMI: # BB#0:
+; VBMI: # %bb.0:
; VBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
; VBMI-NEXT: retq
%index0 = extractelement <32 x i8> %indices, i32 0
@@ -1112,7 +1112,7 @@ define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -1137,7 +1137,7 @@ define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) noun
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -1162,7 +1162,7 @@ define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) noun
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -1187,12 +1187,12 @@ define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) noun
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v4f64:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <4 x i64> %indices, i32 0
@@ -1212,7 +1212,7 @@ define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) noun
define <8 x float> @var_shuffle_v8f32(<8 x float> %v, <8 x i32> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -1254,7 +1254,7 @@ define <8 x float> @var_shuffle_v8f32(<8 x float> %v, <8 x i32> %indices) nounwi
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v8f32:
-; INT256: # BB#0:
+; INT256: # %bb.0:
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
%index0 = extractelement <8 x i32> %indices, i32 0
diff --git a/test/CodeGen/X86/var-permute-512.ll b/test/CodeGen/X86/var-permute-512.ll
index 15c7a1c8b8b..a5aa73cdf1a 100644
--- a/test/CodeGen/X86/var-permute-512.ll
+++ b/test/CodeGen/X86/var-permute-512.ll
@@ -5,7 +5,7 @@
define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <8 x i64> %indices, i32 0
@@ -37,7 +37,7 @@ define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind {
define <16 x i32> @var_shuffle_v16i32(<16 x i32> %v, <16 x i32> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <16 x i32> %indices, i32 0
@@ -93,7 +93,7 @@ define <16 x i32> @var_shuffle_v16i32(<16 x i32> %v, <16 x i32> %indices) nounwi
define <32 x i16> @var_shuffle_v32i16(<32 x i16> %v, <32 x i16> %indices) nounwind {
; NOBW-LABEL: var_shuffle_v32i16:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: pushq %rbp
; NOBW-NEXT: movq %rsp, %rbp
; NOBW-NEXT: andq $-64, %rsp
@@ -271,7 +271,7 @@ define <32 x i16> @var_shuffle_v32i16(<32 x i16> %v, <32 x i16> %indices) nounwi
; NOBW-NEXT: retq
;
; AVX512BW-LABEL: var_shuffle_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%index0 = extractelement <32 x i16> %indices, i32 0
@@ -375,7 +375,7 @@ define <32 x i16> @var_shuffle_v32i16(<32 x i16> %v, <32 x i16> %indices) nounwi
define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind {
; NOBW-LABEL: var_shuffle_v64i8:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: pushq %rbp
; NOBW-NEXT: movq %rsp, %rbp
; NOBW-NEXT: andq $-64, %rsp
@@ -777,7 +777,7 @@ define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind {
; NOBW-NEXT: retq
;
; VBMI-LABEL: var_shuffle_v64i8:
-; VBMI: # BB#0:
+; VBMI: # %bb.0:
; VBMI-NEXT: vpermb %zmm0, %zmm1, %zmm0
; VBMI-NEXT: retq
%index0 = extractelement <64 x i8> %indices, i32 0
@@ -977,7 +977,7 @@ define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind {
define <8 x double> @var_shuffle_v8f64(<8 x double> %v, <8 x i64> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <8 x i64> %indices, i32 0
@@ -1009,7 +1009,7 @@ define <8 x double> @var_shuffle_v8f64(<8 x double> %v, <8 x i64> %indices) noun
define <16 x float> @var_shuffle_v16f32(<16 x float> %v, <16 x i32> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v16f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <16 x i32> %indices, i32 0
diff --git a/test/CodeGen/X86/vec-copysign-avx512.ll b/test/CodeGen/X86/vec-copysign-avx512.ll
index 535065d39aa..9aa9b529290 100644
--- a/test/CodeGen/X86/vec-copysign-avx512.ll
+++ b/test/CodeGen/X86/vec-copysign-avx512.ll
@@ -4,14 +4,14 @@
define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
; AVX512VL-LABEL: v4f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VL-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v4f32:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VLDQ-NEXT: vorps %xmm1, %xmm0, %xmm0
@@ -22,14 +22,14 @@ define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
; AVX512VL-LABEL: v8f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm1, %ymm1
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v8f32:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to8}, %ymm1, %ymm1
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vorps %ymm1, %ymm0, %ymm0
@@ -40,14 +40,14 @@ define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
define <16 x float> @v16f32(<16 x float> %a, <16 x float> %b) nounwind {
; AVX512VL-LABEL: v16f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm1, %zmm1
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v16f32:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm1, %zmm1
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VLDQ-NEXT: vorps %zmm1, %zmm0, %zmm0
@@ -58,14 +58,14 @@ define <16 x float> @v16f32(<16 x float> %a, <16 x float> %b) nounwind {
define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
; AVX512VL-LABEL: v2f64:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v2f64:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX512VLDQ-NEXT: vorps %xmm1, %xmm0, %xmm0
@@ -76,14 +76,14 @@ define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
; AVX512VL-LABEL: v4f64:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to4}, %ymm1, %ymm1
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to4}, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v4f64:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to4}, %ymm1, %ymm1
; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vorpd %ymm1, %ymm0, %ymm0
@@ -94,14 +94,14 @@ define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
define <8 x double> @v8f64(<8 x double> %a, <8 x double> %b) nounwind {
; AVX512VL-LABEL: v8f64:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm1, %zmm1
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v8f64:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm1, %zmm1
; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VLDQ-NEXT: vorpd %zmm1, %zmm0, %zmm0
diff --git a/test/CodeGen/X86/vec-copysign.ll b/test/CodeGen/X86/vec-copysign.ll
index 1ebd7ceafce..852ebcd3e45 100644
--- a/test/CodeGen/X86/vec-copysign.ll
+++ b/test/CodeGen/X86/vec-copysign.ll
@@ -18,14 +18,14 @@
define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
; SSE2-LABEL: v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps [[SIGNMASK1]](%rip), %xmm1
; SSE2-NEXT: andps [[MAGMASK1]](%rip), %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps [[SIGNMASK1]](%rip), %xmm1, %xmm1
; AVX-NEXT: vandps [[MAGMASK1]](%rip), %xmm0, %xmm0
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
@@ -69,7 +69,7 @@ define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
; SSE2-LABEL: v8f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps [[SIGNMASK2]](%rip), %xmm4
; SSE2-NEXT: andps %xmm4, %xmm2
; SSE2-NEXT: movaps [[MAGMASK2]](%rip), %xmm5
@@ -81,7 +81,7 @@ define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
; SSE2-NEXT: retq
;
; AVX-LABEL: v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps [[SIGNMASK2]](%rip), %ymm1, %ymm1
; AVX-NEXT: vandps [[MAGMASK2]](%rip), %ymm0, %ymm0
; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
@@ -101,14 +101,14 @@ define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
; SSE2-LABEL: v2f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps [[SIGNMASK3]](%rip), %xmm1
; SSE2-NEXT: andps [[MAGMASK3]](%rip), %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps [[SIGNMASK3]](%rip), %xmm1, %xmm1
; AVX-NEXT: vandps [[MAGMASK3]](%rip), %xmm0, %xmm0
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
@@ -140,7 +140,7 @@ define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
; SSE2-LABEL: v4f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps [[SIGNMASK4]](%rip), %xmm4
; SSE2-NEXT: andps %xmm4, %xmm2
; SSE2-NEXT: movaps [[MAGMASK4]](%rip), %xmm5
@@ -152,7 +152,7 @@ define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
; SSE2-NEXT: retq
;
; AVX-LABEL: v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps [[SIGNMASK4]](%rip), %ymm1, %ymm1
; AVX-NEXT: vandps [[MAGMASK4]](%rip), %ymm0, %ymm0
; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/vec-trunc-store.ll b/test/CodeGen/X86/vec-trunc-store.ll
index e2d23242d5e..23af5f4d48a 100644
--- a/test/CodeGen/X86/vec-trunc-store.ll
+++ b/test/CodeGen/X86/vec-trunc-store.ll
@@ -3,7 +3,7 @@
define void @foo(<8 x i32>* %p) nounwind {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movdqa (%rdi), %xmm0
; CHECK-NEXT: movdqa 16(%rdi), %xmm1
; CHECK-NEXT: pslld $16, %xmm1
@@ -21,7 +21,7 @@ define void @foo(<8 x i32>* %p) nounwind {
define void @bar(<4 x i32>* %p) nounwind {
; CHECK-LABEL: bar:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/test/CodeGen/X86/vec3.ll b/test/CodeGen/X86/vec3.ll
index e9c47ffd21c..6d3f71f4750 100644
--- a/test/CodeGen/X86/vec3.ll
+++ b/test/CodeGen/X86/vec3.ll
@@ -3,7 +3,7 @@
define <3 x float> @fadd(<3 x float> %v, float %d) {
; CHECK-LABEL: fadd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,3]
; CHECK-NEXT: addps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -17,7 +17,7 @@ define <3 x float> @fadd(<3 x float> %v, float %d) {
define <3 x float> @fdiv(<3 x float> %v, float %d) {
; CHECK-LABEL: fdiv:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,3]
; CHECK-NEXT: divps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vec_cast2.ll b/test/CodeGen/X86/vec_cast2.ll
index 723b0ca53fc..262c29fb629 100644
--- a/test/CodeGen/X86/vec_cast2.ll
+++ b/test/CodeGen/X86/vec_cast2.ll
@@ -4,7 +4,7 @@
define <8 x float> @foo1_8(<8 x i8> %src) {
; CHECK-LABEL: foo1_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
@@ -16,7 +16,7 @@ define <8 x float> @foo1_8(<8 x i8> %src) {
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo1_8:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovsxbd %xmm0, %xmm1
; CHECK-WIDE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-WIDE-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -29,14 +29,14 @@ define <8 x float> @foo1_8(<8 x i8> %src) {
define <4 x float> @foo1_4(<4 x i8> %src) {
; CHECK-LABEL: foo1_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
; CHECK-NEXT: vpsrad $24, %xmm0, %xmm0
; CHECK-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo1_4:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovsxbd %xmm0, %xmm0
; CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-WIDE-NEXT: retl
@@ -46,7 +46,7 @@ define <4 x float> @foo1_4(<4 x i8> %src) {
define <8 x float> @foo2_8(<8 x i8> %src) {
; CHECK-LABEL: foo2_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpand LCPI2_0, %xmm0, %xmm0
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -56,7 +56,7 @@ define <8 x float> @foo2_8(<8 x i8> %src) {
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo2_8:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-WIDE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -69,13 +69,13 @@ define <8 x float> @foo2_8(<8 x i8> %src) {
define <4 x float> @foo2_4(<4 x i8> %src) {
; CHECK-LABEL: foo2_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps LCPI3_0, %xmm0, %xmm0
; CHECK-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo2_4:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-WIDE-NEXT: retl
@@ -85,7 +85,7 @@ define <4 x float> @foo2_4(<4 x i8> %src) {
define <8 x i8> @foo3_8(<8 x float> %src) {
; CHECK-LABEL: foo3_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -93,7 +93,7 @@ define <8 x i8> @foo3_8(<8 x float> %src) {
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo3_8:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %eax
; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %ecx
@@ -125,12 +125,12 @@ define <8 x i8> @foo3_8(<8 x float> %src) {
define <4 x i8> @foo3_4(<4 x float> %src) {
; CHECK-LABEL: foo3_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo3_4:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %eax
; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %ecx
diff --git a/test/CodeGen/X86/vec_cmp_sint-128.ll b/test/CodeGen/X86/vec_cmp_sint-128.ll
index 1407f71de71..a1b60f8fb0e 100644
--- a/test/CodeGen/X86/vec_cmp_sint-128.ll
+++ b/test/CodeGen/X86/vec_cmp_sint-128.ll
@@ -15,29 +15,29 @@
define <2 x i64> @eq_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: eq_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: eq_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: eq_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: eq_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <2 x i64> %a, %b
@@ -47,17 +47,17 @@ define <2 x i64> @eq_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @eq_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: eq_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <4 x i32> %a, %b
@@ -67,17 +67,17 @@ define <4 x i32> @eq_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @eq_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: eq_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <8 x i16> %a, %b
@@ -87,17 +87,17 @@ define <8 x i16> @eq_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @eq_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: eq_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <16 x i8> %a, %b
@@ -111,7 +111,7 @@ define <16 x i8> @eq_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ne_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
@@ -120,28 +120,28 @@ define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: ne_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: ne_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ne_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <2 x i64> %a, %b
@@ -151,21 +151,21 @@ define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @ne_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: ne_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <4 x i32> %a, %b
@@ -175,21 +175,21 @@ define <4 x i32> @ne_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @ne_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: ne_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <8 x i16> %a, %b
@@ -199,21 +199,21 @@ define <8 x i16> @ne_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @ne_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ne_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <16 x i8> %a, %b
@@ -227,7 +227,7 @@ define <16 x i8> @ne_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ge_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
@@ -244,7 +244,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: ge_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm1
@@ -261,21 +261,21 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; SSE42-LABEL: ge_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm0, %xmm1
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ge_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sge <2 x i64> %a, %b
@@ -285,21 +285,21 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @ge_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: ge_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomged %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sge <4 x i32> %a, %b
@@ -309,21 +309,21 @@ define <4 x i32> @ge_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @ge_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: ge_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm0, %xmm1
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgew %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sge <8 x i16> %a, %b
@@ -333,21 +333,21 @@ define <8 x i16> @ge_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @ge_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ge_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm0, %xmm1
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sge <16 x i8> %a, %b
@@ -361,7 +361,7 @@ define <16 x i8> @ge_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: gt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -376,7 +376,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: gt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -391,17 +391,17 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; SSE42-LABEL: gt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: gt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sgt <2 x i64> %a, %b
@@ -411,17 +411,17 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: gt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sgt <4 x i32> %a, %b
@@ -431,17 +431,17 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: gt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sgt <8 x i16> %a, %b
@@ -451,17 +451,17 @@ define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: gt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sgt <16 x i8> %a, %b
@@ -475,7 +475,7 @@ define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: le_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -492,7 +492,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: le_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -509,21 +509,21 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; SSE42-LABEL: le_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: le_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sle <2 x i64> %a, %b
@@ -533,21 +533,21 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @le_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: le_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: le_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomled %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sle <4 x i32> %a, %b
@@ -557,21 +557,21 @@ define <4 x i32> @le_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @le_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: le_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: le_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomlew %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sle <8 x i16> %a, %b
@@ -581,21 +581,21 @@ define <8 x i16> @le_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @le_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: le_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: le_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sle <16 x i8> %a, %b
@@ -609,7 +609,7 @@ define <16 x i8> @le_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: lt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
@@ -624,7 +624,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: lt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm1
@@ -639,18 +639,18 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; SSE42-LABEL: lt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm0, %xmm1
; SSE42-NEXT: movdqa %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: lt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp slt <2 x i64> %a, %b
@@ -660,18 +660,18 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: lt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp slt <4 x i32> %a, %b
@@ -681,18 +681,18 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: lt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp slt <8 x i16> %a, %b
@@ -702,18 +702,18 @@ define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @lt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: lt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp slt <16 x i8> %a, %b
diff --git a/test/CodeGen/X86/vec_cmp_uint-128.ll b/test/CodeGen/X86/vec_cmp_uint-128.ll
index 57e3849a73c..70f6a1ff677 100644
--- a/test/CodeGen/X86/vec_cmp_uint-128.ll
+++ b/test/CodeGen/X86/vec_cmp_uint-128.ll
@@ -15,29 +15,29 @@
define <2 x i64> @eq_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: eq_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: eq_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: eq_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: eq_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <2 x i64> %a, %b
@@ -47,17 +47,17 @@ define <2 x i64> @eq_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @eq_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: eq_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <4 x i32> %a, %b
@@ -67,17 +67,17 @@ define <4 x i32> @eq_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @eq_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: eq_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <8 x i16> %a, %b
@@ -87,17 +87,17 @@ define <8 x i16> @eq_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @eq_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: eq_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <16 x i8> %a, %b
@@ -111,7 +111,7 @@ define <16 x i8> @eq_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ne_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
@@ -120,28 +120,28 @@ define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: ne_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: ne_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ne_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <2 x i64> %a, %b
@@ -151,21 +151,21 @@ define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @ne_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: ne_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <4 x i32> %a, %b
@@ -175,21 +175,21 @@ define <4 x i32> @ne_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @ne_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: ne_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <8 x i16> %a, %b
@@ -199,21 +199,21 @@ define <8 x i16> @ne_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @ne_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ne_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <16 x i8> %a, %b
@@ -227,7 +227,7 @@ define <16 x i8> @ne_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ge_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
@@ -244,7 +244,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: ge_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm1
@@ -261,7 +261,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; SSE42-LABEL: ge_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm2, %xmm0
; SSE42-NEXT: pxor %xmm1, %xmm2
@@ -271,7 +271,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: ge_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -281,7 +281,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: ge_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -291,12 +291,12 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: ge_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeuq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: ge_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
@@ -310,7 +310,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @ge_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: ge_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm2
@@ -320,25 +320,25 @@ define <4 x i32> @ge_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: ge_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: ge_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm0, %xmm1
; SSE42-NEXT: pcmpeqd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeud %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp uge <4 x i32> %a, %b
@@ -348,32 +348,32 @@ define <4 x i32> @ge_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @ge_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: ge_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psubusw %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ge_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: ge_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm0, %xmm1
; SSE42-NEXT: pcmpeqw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeuw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp uge <8 x i16> %a, %b
@@ -383,19 +383,19 @@ define <8 x i16> @ge_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @ge_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ge_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeub %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp uge <16 x i8> %a, %b
@@ -409,7 +409,7 @@ define <16 x i8> @ge_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: gt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -424,7 +424,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: gt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -439,7 +439,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; SSE42-LABEL: gt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm2, %xmm1
; SSE42-NEXT: pxor %xmm2, %xmm0
@@ -447,7 +447,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42-NEXT: retq
;
; AVX-LABEL: gt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -455,7 +455,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ugt <2 x i64> %a, %b
@@ -465,7 +465,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: gt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm0
@@ -473,7 +473,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: gt_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -481,7 +481,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: gt_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -489,12 +489,12 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: gt_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtud %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: gt_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -507,7 +507,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: gt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm0
@@ -515,7 +515,7 @@ define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -523,7 +523,7 @@ define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ugt <8 x i16> %a, %b
@@ -533,7 +533,7 @@ define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: gt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm0
@@ -541,7 +541,7 @@ define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -549,7 +549,7 @@ define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtub %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ugt <16 x i8> %a, %b
@@ -563,7 +563,7 @@ define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: le_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -580,7 +580,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: le_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -597,7 +597,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; SSE42-LABEL: le_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm2, %xmm1
; SSE42-NEXT: pxor %xmm2, %xmm0
@@ -607,7 +607,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: le_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -617,7 +617,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: le_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -627,12 +627,12 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: le_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleuq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: le_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm1
@@ -646,7 +646,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @le_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: le_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -656,25 +656,25 @@ define <4 x i32> @le_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: le_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: le_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm0, %xmm1
; SSE42-NEXT: pcmpeqd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: le_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleud %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ule <4 x i32> %a, %b
@@ -684,32 +684,32 @@ define <4 x i32> @le_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @le_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: le_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psubusw %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: le_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: le_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm0, %xmm1
; SSE42-NEXT: pcmpeqw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: le_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleuw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ule <8 x i16> %a, %b
@@ -719,19 +719,19 @@ define <8 x i16> @le_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @le_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: le_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: le_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleub %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ule <16 x i8> %a, %b
@@ -745,7 +745,7 @@ define <16 x i8> @le_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: lt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
@@ -760,7 +760,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: lt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm1
@@ -775,7 +775,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; SSE42-LABEL: lt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm2, %xmm0
; SSE42-NEXT: pxor %xmm1, %xmm2
@@ -784,7 +784,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42-NEXT: retq
;
; AVX-LABEL: lt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -792,7 +792,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltuq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ult <2 x i64> %a, %b
@@ -802,7 +802,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: lt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm2
@@ -811,7 +811,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: lt_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -819,7 +819,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: lt_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -827,12 +827,12 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: lt_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltud %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: lt_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -845,7 +845,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: lt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm2
@@ -854,7 +854,7 @@ define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -862,7 +862,7 @@ define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltuw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ult <8 x i16> %a, %b
@@ -872,7 +872,7 @@ define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @lt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: lt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm2
@@ -881,7 +881,7 @@ define <16 x i8> @lt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -889,7 +889,7 @@ define <16 x i8> @lt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltub %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ult <16 x i8> %a, %b
diff --git a/test/CodeGen/X86/vec_compare-sse4.ll b/test/CodeGen/X86/vec_compare-sse4.ll
index 71470189791..bd9ac20e8ee 100644
--- a/test/CodeGen/X86/vec_compare-sse4.ll
+++ b/test/CodeGen/X86/vec_compare-sse4.ll
@@ -5,7 +5,7 @@
define <2 x i64> @test1(<2 x i64> %A, <2 x i64> %B) nounwind {
; SSE2-LABEL: test1:
-; SSE2: ## BB#0:
+; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -20,7 +20,7 @@ define <2 x i64> @test1(<2 x i64> %A, <2 x i64> %B) nounwind {
; SSE2-NEXT: retl
;
; SSE41-LABEL: test1:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -35,7 +35,7 @@ define <2 x i64> @test1(<2 x i64> %A, <2 x i64> %B) nounwind {
; SSE41-NEXT: retl
;
; SSE42-LABEL: test1:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retl
%C = icmp sgt <2 x i64> %A, %B
@@ -45,19 +45,19 @@ define <2 x i64> @test1(<2 x i64> %A, <2 x i64> %B) nounwind {
define <2 x i64> @test2(<2 x i64> %A, <2 x i64> %B) nounwind {
; SSE2-LABEL: test2:
-; SSE2: ## BB#0:
+; SSE2: ## %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retl
;
; SSE41-LABEL: test2:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: retl
;
; SSE42-LABEL: test2:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: retl
%C = icmp eq <2 x i64> %A, %B
diff --git a/test/CodeGen/X86/vec_ctbits.ll b/test/CodeGen/X86/vec_ctbits.ll
index 65279f7c849..781c61b5789 100644
--- a/test/CodeGen/X86/vec_ctbits.ll
+++ b/test/CodeGen/X86/vec_ctbits.ll
@@ -7,7 +7,7 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
define <2 x i64> @footz(<2 x i64> %a) nounwind {
; CHECK-LABEL: footz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: psubq %xmm0, %xmm2
@@ -36,7 +36,7 @@ define <2 x i64> @footz(<2 x i64> %a) nounwind {
}
define <2 x i64> @foolz(<2 x i64> %a) nounwind {
; CHECK-LABEL: foolz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrlq $1, %xmm1
; CHECK-NEXT: por %xmm0, %xmm1
@@ -81,7 +81,7 @@ define <2 x i64> @foolz(<2 x i64> %a) nounwind {
define <2 x i64> @foopop(<2 x i64> %a) nounwind {
; CHECK-LABEL: foopop:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrlq $1, %xmm1
; CHECK-NEXT: pand {{.*}}(%rip), %xmm1
@@ -110,7 +110,7 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
define <2 x i32> @promtz(<2 x i32> %a) nounwind {
; CHECK-LABEL: promtz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: por {{.*}}(%rip), %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm2, %xmm2
@@ -140,7 +140,7 @@ define <2 x i32> @promtz(<2 x i32> %a) nounwind {
}
define <2 x i32> @promlz(<2 x i32> %a) nounwind {
; CHECK-LABEL: promlz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: movdqa %xmm0, %xmm2
@@ -187,7 +187,7 @@ define <2 x i32> @promlz(<2 x i32> %a) nounwind {
define <2 x i32> @prompop(<2 x i32> %a) nounwind {
; CHECK-LABEL: prompop:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: movdqa %xmm0, %xmm1
diff --git a/test/CodeGen/X86/vec_ext_inreg.ll b/test/CodeGen/X86/vec_ext_inreg.ll
index a4daeecbde4..157f2cad6fa 100644
--- a/test/CodeGen/X86/vec_ext_inreg.ll
+++ b/test/CodeGen/X86/vec_ext_inreg.ll
@@ -5,7 +5,7 @@
define <8 x i32> @a(<8 x i32> %a) nounwind {
; SSE-LABEL: a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: pslld $16, %xmm1
@@ -13,7 +13,7 @@ define <8 x i32> @a(<8 x i32> %a) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $16, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -23,7 +23,7 @@ define <8 x i32> @a(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $16, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -34,13 +34,13 @@ define <8 x i32> @a(<8 x i32> %a) nounwind {
define <3 x i32> @b(<3 x i32> %a) nounwind {
; SSE-LABEL: b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $16, %xmm0, %xmm0
; AVX-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -51,7 +51,7 @@ define <3 x i32> @b(<3 x i32> %a) nounwind {
define <1 x i32> @c(<1 x i32> %a) nounwind {
; ALL-LABEL: c:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl %di, %eax
; ALL-NEXT: retq
%b = trunc <1 x i32> %a to <1 x i16>
@@ -61,19 +61,19 @@ define <1 x i32> @c(<1 x i32> %a) nounwind {
define <8 x i32> @d(<8 x i32> %a) nounwind {
; SSE-LABEL: d:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: d:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: d:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
@@ -84,12 +84,12 @@ define <8 x i32> @d(<8 x i32> %a) nounwind {
define <3 x i32> @e(<3 x i32> %a) nounwind {
; SSE-LABEL: e:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: e:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
; AVX-NEXT: retq
@@ -100,7 +100,7 @@ define <3 x i32> @e(<3 x i32> %a) nounwind {
define <1 x i32> @f(<1 x i32> %a) nounwind {
; ALL-LABEL: f:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movzwl %di, %eax
; ALL-NEXT: retq
%b = trunc <1 x i32> %a to <1 x i16>
diff --git a/test/CodeGen/X86/vec_extract-avx.ll b/test/CodeGen/X86/vec_extract-avx.ll
index e53b2563036..3a9d8348ad5 100644
--- a/test/CodeGen/X86/vec_extract-avx.ll
+++ b/test/CodeGen/X86/vec_extract-avx.ll
@@ -10,14 +10,14 @@
; Extracting the low elements only requires using the right kind of store.
define void @low_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; X32-LABEL: low_v8f32_to_v4f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps %xmm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: low_v8f32_to_v4f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -36,14 +36,14 @@ define void @low_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; Extracting the high elements requires just one AVX instruction.
define void @high_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; X32-LABEL: high_v8f32_to_v4f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: high_v8f32_to_v4f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -64,14 +64,14 @@ define void @high_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; have AVX2, we should generate vextracti128 (the int version).
define void @high_v8i32_to_v4i32(<8 x i32> %v, <4 x i32>* %ptr) {
; X32-LABEL: high_v8i32_to_v4i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: high_v8i32_to_v4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -90,14 +90,14 @@ define void @high_v8i32_to_v4i32(<8 x i32> %v, <4 x i32>* %ptr) {
; Make sure that element size doesn't alter the codegen.
define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
; X32-LABEL: high_v4f64_to_v2f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: high_v4f64_to_v2f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -114,7 +114,7 @@ define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
; X32-LABEL: legal_vzmovl_2i32_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -125,7 +125,7 @@ define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
; X32-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2i32_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
@@ -141,7 +141,7 @@ define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
define void @legal_vzmovl_2i64_4i64(<2 x i64>* %in, <4 x i64>* %out) {
; X32-LABEL: legal_vzmovl_2i64_4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovupd (%ecx), %xmm0
@@ -152,7 +152,7 @@ define void @legal_vzmovl_2i64_4i64(<2 x i64>* %in, <4 x i64>* %out) {
; X32-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2i64_4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovupd (%rdi), %xmm0
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
@@ -168,7 +168,7 @@ define void @legal_vzmovl_2i64_4i64(<2 x i64>* %in, <4 x i64>* %out) {
define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
; X32-LABEL: legal_vzmovl_2f32_8f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -177,7 +177,7 @@ define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
; X32-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2f32_8f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
@@ -193,7 +193,7 @@ define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
define void @legal_vzmovl_2f64_4f64(<2 x double>* %in, <4 x double>* %out) {
; X32-LABEL: legal_vzmovl_2f64_4f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovupd (%ecx), %xmm0
@@ -204,7 +204,7 @@ define void @legal_vzmovl_2f64_4f64(<2 x double>* %in, <4 x double>* %out) {
; X32-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2f64_4f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovupd (%rdi), %xmm0
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
diff --git a/test/CodeGen/X86/vec_extract-mmx.ll b/test/CodeGen/X86/vec_extract-mmx.ll
index a137d052d29..d8502d831fd 100644
--- a/test/CodeGen/X86/vec_extract-mmx.ll
+++ b/test/CodeGen/X86/vec_extract-mmx.ll
@@ -4,7 +4,7 @@
define i32 @test0(<1 x i64>* %v4) nounwind {
; X32-LABEL: test0:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
@@ -22,7 +22,7 @@ define i32 @test0(<1 x i64>* %v4) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test0:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufw $238, (%rdi), %mm0 # mm0 = mem[2,3,2,3]
; X64-NEXT: movd %mm0, %eax
; X64-NEXT: addl $32, %eax
@@ -43,7 +43,7 @@ entry:
define i32 @test1(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd (%eax), %mm0
; X32-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
@@ -52,7 +52,7 @@ define i32 @test1(i32* nocapture readonly %ptr) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd (%rdi), %mm0
; X64-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
; X64-NEXT: movd %mm0, %eax
@@ -78,7 +78,7 @@ entry:
define i32 @test2(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
; X32-NEXT: movd %mm0, %eax
@@ -86,7 +86,7 @@ define i32 @test2(i32* nocapture readonly %ptr) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufw $232, (%rdi), %mm0 # mm0 = mem[0,2,2,3]
; X64-NEXT: movd %mm0, %eax
; X64-NEXT: emms
@@ -106,12 +106,12 @@ entry:
define i32 @test3(x86_mmx %a) nounwind {
; X32-LABEL: test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movd %mm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %mm0, %eax
; X64-NEXT: retq
%tmp0 = bitcast x86_mmx %a to <2 x i32>
@@ -122,7 +122,7 @@ define i32 @test3(x86_mmx %a) nounwind {
; Verify we don't muck with extractelts from the upper lane.
define i32 @test4(x86_mmx %a) nounwind {
; X32-LABEL: test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
@@ -136,7 +136,7 @@ define i32 @test4(x86_mmx %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,0,1]
diff --git a/test/CodeGen/X86/vec_extract-sse4.ll b/test/CodeGen/X86/vec_extract-sse4.ll
index f073f1538d2..2d9eb7c5daa 100644
--- a/test/CodeGen/X86/vec_extract-sse4.ll
+++ b/test/CodeGen/X86/vec_extract-sse4.ll
@@ -4,7 +4,7 @@
define void @t1(float* %R, <4 x float>* %P1) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -12,7 +12,7 @@ define void @t1(float* %R, <4 x float>* %P1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movss %xmm0, (%rdi)
; X64-NEXT: retq
@@ -24,7 +24,7 @@ define void @t1(float* %R, <4 x float>* %P1) nounwind {
define float @t2(<4 x float>* %P1) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
@@ -34,7 +34,7 @@ define float @t2(<4 x float>* %P1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
%X = load <4 x float>, <4 x float>* %P1
@@ -44,7 +44,7 @@ define float @t2(<4 x float>* %P1) nounwind {
define void @t3(i32* %R, <4 x i32>* %P1) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl 12(%ecx), %ecx
@@ -52,7 +52,7 @@ define void @t3(i32* %R, <4 x i32>* %P1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl 12(%rsi), %eax
; X64-NEXT: movl %eax, (%rdi)
; X64-NEXT: retq
@@ -64,13 +64,13 @@ define void @t3(i32* %R, <4 x i32>* %P1) nounwind {
define i32 @t4(<4 x i32>* %P1) nounwind {
; X32-LABEL: t4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 12(%eax), %eax
; X32-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl 12(%rdi), %eax
; X64-NEXT: retq
%X = load <4 x i32>, <4 x i32>* %P1
diff --git a/test/CodeGen/X86/vec_extract.ll b/test/CodeGen/X86/vec_extract.ll
index 58d8392b235..7c1a532ab7c 100644
--- a/test/CodeGen/X86/vec_extract.ll
+++ b/test/CodeGen/X86/vec_extract.ll
@@ -4,7 +4,7 @@
define void @test1(<4 x float>* %F, float* %f) nounwind {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movaps (%ecx), %xmm0
@@ -13,7 +13,7 @@ define void @test1(<4 x float>* %F, float* %f) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: movss %xmm0, (%rsi)
@@ -28,7 +28,7 @@ entry:
define float @test2(<4 x float>* %F, float* %f) nounwind {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
@@ -40,7 +40,7 @@ define float @test2(<4 x float>* %F, float* %f) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -54,7 +54,7 @@ entry:
define void @test3(float* %R, <4 x float>* %P1) nounwind {
; X32-LABEL: test3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -62,7 +62,7 @@ define void @test3(float* %R, <4 x float>* %P1) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movss %xmm0, (%rdi)
; X64-NEXT: retq
@@ -75,7 +75,7 @@ entry:
define double @test4(double %A) nounwind {
; X32-LABEL: test4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: subl $12, %esp
; X32-NEXT: calll foo
; X32-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -86,7 +86,7 @@ define double @test4(double %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; X64-NEXT: callq foo
diff --git a/test/CodeGen/X86/vec_fabs.ll b/test/CodeGen/X86/vec_fabs.ll
index aef62774e17..892599a3d7f 100644
--- a/test/CodeGen/X86/vec_fabs.ll
+++ b/test/CodeGen/X86/vec_fabs.ll
@@ -11,32 +11,32 @@
define <2 x double> @fabs_v2f64(<2 x double> %p) {
; X32_AVX-LABEL: fabs_v2f64:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v2f64:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v2f64:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v2f64:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v2f64:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v2f64:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
@@ -46,32 +46,32 @@ declare <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
define <4 x float> @fabs_v4f32(<4 x float> %p) {
; X32_AVX-LABEL: fabs_v4f32:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v4f32:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v4f32:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v4f32:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v4f32:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v4f32:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
@@ -81,32 +81,32 @@ declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
define <4 x double> @fabs_v4f64(<4 x double> %p) {
; X32_AVX-LABEL: fabs_v4f64:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v4f64:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandq {{\.LCPI.*}}{1to4}, %ymm0, %ymm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v4f64:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandpd {{\.LCPI.*}}{1to4}, %ymm0, %ymm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v4f64:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v4f64:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandq {{.*}}(%rip){1to4}, %ymm0, %ymm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v4f64:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <4 x double> @llvm.fabs.v4f64(<4 x double> %p)
@@ -116,32 +116,32 @@ declare <4 x double> @llvm.fabs.v4f64(<4 x double> %p)
define <8 x float> @fabs_v8f32(<8 x float> %p) {
; X32_AVX-LABEL: fabs_v8f32:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v8f32:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v8f32:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v8f32:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v8f32:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v8f32:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
@@ -151,36 +151,36 @@ declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
define <8 x double> @fabs_v8f64(<8 x double> %p) {
; X32_AVX-LABEL: fabs_v8f64:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vmovaps {{.*#+}} ymm2 = [{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}}]
; X32_AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; X32_AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v8f64:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v8f64:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandpd {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v8f64:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vmovaps {{.*#+}} ymm2 = [{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}}]
; X64_AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; X64_AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v8f64:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v8f64:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
@@ -190,36 +190,36 @@ declare <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
define <16 x float> @fabs_v16f32(<16 x float> %p) {
; X32_AVX-LABEL: fabs_v16f32:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vmovaps {{.*#+}} ymm2 = [{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}}]
; X32_AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; X32_AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v16f32:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v16f32:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v16f32:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vmovaps {{.*#+}} ymm2 = [{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}}]
; X64_AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; X64_AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v16f32:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v16f32:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <16 x float> @llvm.fabs.v16f32(<16 x float> %p)
@@ -244,13 +244,13 @@ declare <16 x float> @llvm.fabs.v16f32(<16 x float> %p)
define i64 @fabs_v2f32_1() {
; X32-LABEL: fabs_v2f32_1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X32-NEXT: retl
;
; X64-LABEL: fabs_v2f32_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movabsq $9223372032559808512, %rax # imm = 0x7FFFFFFF00000000
; X64-NEXT: retq
%bitcast = bitcast i64 18446744069414584320 to <2 x float> ; 0xFFFF_FFFF_0000_0000
@@ -261,13 +261,13 @@ define i64 @fabs_v2f32_1() {
define i64 @fabs_v2f32_2() {
; X32-LABEL: fabs_v2f32_2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
;
; X64-LABEL: fabs_v2f32_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
; X64-NEXT: retq
%bitcast = bitcast i64 4294967295 to <2 x float> ; 0x0000_0000_FFFF_FFFF
diff --git a/test/CodeGen/X86/vec_floor.ll b/test/CodeGen/X86/vec_floor.ll
index 90763304d3f..d01c6f6ea90 100644
--- a/test/CodeGen/X86/vec_floor.ll
+++ b/test/CodeGen/X86/vec_floor.ll
@@ -5,17 +5,17 @@
define <2 x double> @floor_v2f64(<2 x double> %p) {
; SSE41-LABEL: floor_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $9, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $9, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
@@ -25,17 +25,17 @@ declare <2 x double> @llvm.floor.v2f64(<2 x double> %p)
define <4 x float> @floor_v4f32(<4 x float> %p) {
; SSE41-LABEL: floor_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $9, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $9, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
@@ -45,18 +45,18 @@ declare <4 x float> @llvm.floor.v4f32(<4 x float> %p)
define <4 x double> @floor_v4f64(<4 x double> %p){
; SSE41-LABEL: floor_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: roundpd $9, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $9, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $9, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
@@ -66,18 +66,18 @@ declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)
define <8 x float> @floor_v8f32(<8 x float> %p) {
; SSE41-LABEL: floor_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: roundps $9, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $9, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $9, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
@@ -87,7 +87,7 @@ declare <8 x float> @llvm.floor.v8f32(<8 x float> %p)
define <8 x double> @floor_v8f64(<8 x double> %p){
; SSE41-LABEL: floor_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: roundpd $9, %xmm1, %xmm1
; SSE41-NEXT: roundpd $9, %xmm2, %xmm2
@@ -95,13 +95,13 @@ define <8 x double> @floor_v8f64(<8 x double> %p){
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $9, %ymm0, %ymm0
; AVX-NEXT: vroundpd $9, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $9, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
@@ -111,7 +111,7 @@ declare <8 x double> @llvm.floor.v8f64(<8 x double> %p)
define <16 x float> @floor_v16f32(<16 x float> %p) {
; SSE41-LABEL: floor_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: roundps $9, %xmm1, %xmm1
; SSE41-NEXT: roundps $9, %xmm2, %xmm2
@@ -119,13 +119,13 @@ define <16 x float> @floor_v16f32(<16 x float> %p) {
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $9, %ymm0, %ymm0
; AVX-NEXT: vroundps $9, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $9, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
@@ -135,17 +135,17 @@ declare <16 x float> @llvm.floor.v16f32(<16 x float> %p)
define <2 x double> @ceil_v2f64(<2 x double> %p) {
; SSE41-LABEL: ceil_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $10, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $10, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
@@ -155,17 +155,17 @@ declare <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
define <4 x float> @ceil_v4f32(<4 x float> %p) {
; SSE41-LABEL: ceil_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $10, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $10, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
@@ -175,18 +175,18 @@ declare <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
define <4 x double> @ceil_v4f64(<4 x double> %p) {
; SSE41-LABEL: ceil_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: roundpd $10, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $10, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $10, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
@@ -196,18 +196,18 @@ declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
define <8 x float> @ceil_v8f32(<8 x float> %p) {
; SSE41-LABEL: ceil_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: roundps $10, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $10, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $10, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
@@ -217,7 +217,7 @@ declare <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
define <8 x double> @ceil_v8f64(<8 x double> %p){
; SSE41-LABEL: ceil_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: roundpd $10, %xmm1, %xmm1
; SSE41-NEXT: roundpd $10, %xmm2, %xmm2
@@ -225,13 +225,13 @@ define <8 x double> @ceil_v8f64(<8 x double> %p){
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $10, %ymm0, %ymm0
; AVX-NEXT: vroundpd $10, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $10, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
@@ -241,7 +241,7 @@ declare <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
define <16 x float> @ceil_v16f32(<16 x float> %p) {
; SSE41-LABEL: ceil_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: roundps $10, %xmm1, %xmm1
; SSE41-NEXT: roundps $10, %xmm2, %xmm2
@@ -249,13 +249,13 @@ define <16 x float> @ceil_v16f32(<16 x float> %p) {
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $10, %ymm0, %ymm0
; AVX-NEXT: vroundps $10, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $10, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
@@ -265,17 +265,17 @@ declare <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
define <2 x double> @trunc_v2f64(<2 x double> %p) {
; SSE41-LABEL: trunc_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $11, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
@@ -285,17 +285,17 @@ declare <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
define <4 x float> @trunc_v4f32(<4 x float> %p) {
; SSE41-LABEL: trunc_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $11, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $11, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
@@ -305,18 +305,18 @@ declare <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
define <4 x double> @trunc_v4f64(<4 x double> %p) {
; SSE41-LABEL: trunc_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $11, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
@@ -326,18 +326,18 @@ declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
define <8 x float> @trunc_v8f32(<8 x float> %p) {
; SSE41-LABEL: trunc_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: roundps $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $11, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $11, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
@@ -347,7 +347,7 @@ declare <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
define <8 x double> @trunc_v8f64(<8 x double> %p){
; SSE41-LABEL: trunc_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: roundpd $11, %xmm2, %xmm2
@@ -355,13 +355,13 @@ define <8 x double> @trunc_v8f64(<8 x double> %p){
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
; AVX-NEXT: vroundpd $11, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $11, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
@@ -371,7 +371,7 @@ declare <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
define <16 x float> @trunc_v16f32(<16 x float> %p) {
; SSE41-LABEL: trunc_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: roundps $11, %xmm1, %xmm1
; SSE41-NEXT: roundps $11, %xmm2, %xmm2
@@ -379,13 +379,13 @@ define <16 x float> @trunc_v16f32(<16 x float> %p) {
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $11, %ymm0, %ymm0
; AVX-NEXT: vroundps $11, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $11, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
@@ -395,17 +395,17 @@ declare <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
define <2 x double> @rint_v2f64(<2 x double> %p) {
; SSE41-LABEL: rint_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $4, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $4, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
@@ -415,17 +415,17 @@ declare <2 x double> @llvm.rint.v2f64(<2 x double> %p)
define <4 x float> @rint_v4f32(<4 x float> %p) {
; SSE41-LABEL: rint_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $4, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $4, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
@@ -435,18 +435,18 @@ declare <4 x float> @llvm.rint.v4f32(<4 x float> %p)
define <4 x double> @rint_v4f64(<4 x double> %p) {
; SSE41-LABEL: rint_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: roundpd $4, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $4, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $4, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
@@ -456,18 +456,18 @@ declare <4 x double> @llvm.rint.v4f64(<4 x double> %p)
define <8 x float> @rint_v8f32(<8 x float> %p) {
; SSE41-LABEL: rint_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: roundps $4, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $4, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $4, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
@@ -477,7 +477,7 @@ declare <8 x float> @llvm.rint.v8f32(<8 x float> %p)
define <8 x double> @rint_v8f64(<8 x double> %p){
; SSE41-LABEL: rint_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: roundpd $4, %xmm1, %xmm1
; SSE41-NEXT: roundpd $4, %xmm2, %xmm2
@@ -485,13 +485,13 @@ define <8 x double> @rint_v8f64(<8 x double> %p){
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $4, %ymm0, %ymm0
; AVX-NEXT: vroundpd $4, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $4, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p)
@@ -501,7 +501,7 @@ declare <8 x double> @llvm.rint.v8f64(<8 x double> %p)
define <16 x float> @rint_v16f32(<16 x float> %p) {
; SSE41-LABEL: rint_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: roundps $4, %xmm1, %xmm1
; SSE41-NEXT: roundps $4, %xmm2, %xmm2
@@ -509,13 +509,13 @@ define <16 x float> @rint_v16f32(<16 x float> %p) {
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $4, %ymm0, %ymm0
; AVX-NEXT: vroundps $4, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $4, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p)
@@ -525,17 +525,17 @@ declare <16 x float> @llvm.rint.v16f32(<16 x float> %p)
define <2 x double> @nearbyint_v2f64(<2 x double> %p) {
; SSE41-LABEL: nearbyint_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $12, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $12, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
@@ -545,17 +545,17 @@ declare <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
define <4 x float> @nearbyint_v4f32(<4 x float> %p) {
; SSE41-LABEL: nearbyint_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $12, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $12, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
@@ -565,18 +565,18 @@ declare <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
define <4 x double> @nearbyint_v4f64(<4 x double> %p) {
; SSE41-LABEL: nearbyint_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: roundpd $12, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $12, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $12, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
@@ -586,18 +586,18 @@ declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
define <8 x float> @nearbyint_v8f32(<8 x float> %p) {
; SSE41-LABEL: nearbyint_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: roundps $12, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $12, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $12, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
@@ -607,7 +607,7 @@ declare <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
define <8 x double> @nearbyint_v8f64(<8 x double> %p){
; SSE41-LABEL: nearbyint_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: roundpd $12, %xmm1, %xmm1
; SSE41-NEXT: roundpd $12, %xmm2, %xmm2
@@ -615,13 +615,13 @@ define <8 x double> @nearbyint_v8f64(<8 x double> %p){
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $12, %ymm0, %ymm0
; AVX-NEXT: vroundpd $12, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $12, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
@@ -631,7 +631,7 @@ declare <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
define <16 x float> @nearbyint_v16f32(<16 x float> %p) {
; SSE41-LABEL: nearbyint_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: roundps $12, %xmm1, %xmm1
; SSE41-NEXT: roundps $12, %xmm2, %xmm2
@@ -639,13 +639,13 @@ define <16 x float> @nearbyint_v16f32(<16 x float> %p) {
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $12, %ymm0, %ymm0
; AVX-NEXT: vroundps $12, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $12, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
@@ -659,17 +659,17 @@ declare <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
define <2 x double> @const_floor_v2f64() {
; SSE41-LABEL: const_floor_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_floor_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_floor_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.floor.v2f64(<2 x double> <double -1.5, double 2.5>)
@@ -678,17 +678,17 @@ define <2 x double> @const_floor_v2f64() {
define <4 x float> @const_floor_v4f32() {
; SSE41-LABEL: const_floor_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_floor_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_floor_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.floor.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
@@ -697,17 +697,17 @@ define <4 x float> @const_floor_v4f32() {
define <2 x double> @const_ceil_v2f64() {
; SSE41-LABEL: const_ceil_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_ceil_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_ceil_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> <double -1.5, double 2.5>)
@@ -716,17 +716,17 @@ define <2 x double> @const_ceil_v2f64() {
define <4 x float> @const_ceil_v4f32() {
; SSE41-LABEL: const_ceil_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_ceil_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_ceil_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
@@ -735,17 +735,17 @@ define <4 x float> @const_ceil_v4f32() {
define <2 x double> @const_trunc_v2f64() {
; SSE41-LABEL: const_trunc_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_trunc_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_trunc_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> <double -1.5, double 2.5>)
@@ -754,17 +754,17 @@ define <2 x double> @const_trunc_v2f64() {
define <4 x float> @const_trunc_v4f32() {
; SSE41-LABEL: const_trunc_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_trunc_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_trunc_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
diff --git a/test/CodeGen/X86/vec_fneg.ll b/test/CodeGen/X86/vec_fneg.ll
index 9804f0ef983..d198964bf1d 100644
--- a/test/CodeGen/X86/vec_fneg.ll
+++ b/test/CodeGen/X86/vec_fneg.ll
@@ -9,12 +9,12 @@
; This test verifies that we use an xor with a constant to flip the sign bits; no subtraction needed.
define <4 x float> @t1(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t1:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: t1:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: retq
%tmp = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %Q
@@ -24,14 +24,14 @@ define <4 x float> @t1(<4 x float> %Q) nounwind {
; This test verifies that we generate an FP subtraction because "0.0 - x" is not an fneg.
define <4 x float> @t2(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t2:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps %xmm1, %xmm1
; X32-SSE-NEXT: subps %xmm0, %xmm1
; X32-SSE-NEXT: movaps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: t2:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps %xmm1, %xmm1
; X64-SSE-NEXT: subps %xmm0, %xmm1
; X64-SSE-NEXT: movaps %xmm1, %xmm0
@@ -53,7 +53,7 @@ define <4 x float> @t2(<4 x float> %Q) nounwind {
define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X32-SSE1-LABEL: fneg_bitcast:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %ebp
; X32-SSE1-NEXT: movl %esp, %ebp
; X32-SSE1-NEXT: andl $-16, %esp
@@ -70,7 +70,7 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X32-SSE1-NEXT: retl
;
; X32-SSE2-LABEL: fneg_bitcast:
-; X32-SSE2: # BB#0:
+; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: xorl %eax, %ecx
@@ -81,7 +81,7 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X32-SSE2-NEXT: retl
;
; X64-SSE1-LABEL: fneg_bitcast:
-; X64-SSE1: # BB#0:
+; X64-SSE1: # %bb.0:
; X64-SSE1-NEXT: movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE1-NEXT: xorq %rdi, %rax
; X64-SSE1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
@@ -89,7 +89,7 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X64-SSE1-NEXT: retq
;
; X64-SSE2-LABEL: fneg_bitcast:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE2-NEXT: xorq %rdi, %rax
; X64-SSE2-NEXT: movq %rax, %xmm0
diff --git a/test/CodeGen/X86/vec_fp_to_int.ll b/test/CodeGen/X86/vec_fp_to_int.ll
index 1a6512d2f36..1f1575368af 100644
--- a/test/CodeGen/X86/vec_fp_to_int.ll
+++ b/test/CodeGen/X86/vec_fp_to_int.ll
@@ -18,7 +18,7 @@
define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
; SSE-LABEL: fptosi_2f64_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -29,7 +29,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptosi_2f64_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vcvttsd2si %xmm0, %rax
; VEX-NEXT: vmovq %rax, %xmm1
; VEX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -39,7 +39,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptosi_2f64_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttsd2si %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -49,7 +49,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_2f64_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttsd2si %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -59,7 +59,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_2f64_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -67,7 +67,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_2f64_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2qq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <2 x double> %a to <2 x i64>
@@ -76,12 +76,12 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
define <4 x i32> @fptosi_2f64_to_4i32(<2 x double> %a) {
; SSE-LABEL: fptosi_2f64_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f64_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = fptosi <2 x double> %a to <2 x i32>
@@ -91,13 +91,13 @@ define <4 x i32> @fptosi_2f64_to_4i32(<2 x double> %a) {
define <2 x i32> @fptosi_2f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptosi_2f64_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f64_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
@@ -107,14 +107,14 @@ define <2 x i32> @fptosi_2f64_to_2i32(<2 x double> %a) {
define <4 x i32> @fptosi_4f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptosi_4f64_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm1
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f64_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-NEXT: vzeroupper
@@ -126,7 +126,7 @@ define <4 x i32> @fptosi_4f64_to_2i32(<2 x double> %a) {
define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
; SSE-LABEL: fptosi_4f64_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -144,7 +144,7 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: fptosi_4f64_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vcvttsd2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
@@ -162,7 +162,7 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptosi_4f64_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vcvttsd2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
@@ -180,7 +180,7 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptosi_4f64_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vcvttsd2si %xmm1, %rax
; AVX512F-NEXT: vmovq %rax, %xmm2
@@ -198,7 +198,7 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_4f64_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vcvttsd2si %xmm1, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm2
@@ -216,14 +216,14 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_4f64_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f64_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2qq %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <4 x double> %a to <4 x i64>
@@ -232,14 +232,14 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
define <4 x i32> @fptosi_4f64_to_4i32(<4 x double> %a) {
; SSE-LABEL: fptosi_4f64_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm1, %xmm1
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f64_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -253,7 +253,7 @@ define <4 x i32> @fptosi_4f64_to_4i32(<4 x double> %a) {
define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: subsd %xmm2, %xmm1
@@ -278,7 +278,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f64_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; VEX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttsd2si %xmm2, %rax
@@ -300,7 +300,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f64_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttsd2usi %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -310,7 +310,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f64_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttsd2usi %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -320,7 +320,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -328,7 +328,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2uqq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <2 x double> %a to <2 x i64>
@@ -337,7 +337,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: subsd %xmm2, %xmm1
@@ -364,7 +364,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f64_to_4i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; VEX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttsd2si %xmm2, %rax
@@ -387,7 +387,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f64_to_4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -395,12 +395,12 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f64_to_4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f64_to_4i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -408,7 +408,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_4i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <2 x double> %a to <2 x i32>
@@ -418,7 +418,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: subsd %xmm1, %xmm2
@@ -443,7 +443,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f64_to_2i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; VEX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttsd2si %xmm2, %rax
@@ -466,7 +466,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f64_to_2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -474,12 +474,12 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f64_to_2i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -487,7 +487,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_2i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <2 x double> %a to <2 x i32>
@@ -497,7 +497,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: subsd %xmm2, %xmm1
@@ -529,7 +529,7 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f64_to_2i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; VEX-NEXT: vcvttsd2si %xmm1, %rax
; VEX-NEXT: vcvttsd2si %xmm0, %rcx
@@ -541,7 +541,7 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f64_to_2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -549,14 +549,14 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_2i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_2i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -564,7 +564,7 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_2i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
@@ -576,7 +576,7 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
; SSE-NEXT: subsd %xmm3, %xmm0
@@ -619,7 +619,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: fptoui_4f64_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vsubsd %xmm1, %xmm2, %xmm3
@@ -659,7 +659,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_4f64_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vsubsd %xmm1, %xmm2, %xmm3
@@ -699,7 +699,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f64_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vcvttsd2usi %xmm1, %rax
; AVX512F-NEXT: vmovq %rax, %xmm2
@@ -717,7 +717,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vcvttsd2usi %xmm1, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm2
@@ -735,14 +735,14 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2uqq %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x double> %a to <4 x i64>
@@ -751,7 +751,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm1, %xmm3
; SSE-NEXT: subsd %xmm2, %xmm3
@@ -795,7 +795,7 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f64_to_4i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; VEX-NEXT: vcvttsd2si %xmm1, %rax
; VEX-NEXT: vcvttsd2si %xmm0, %rcx
@@ -811,7 +811,7 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f64_to_4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -819,13 +819,13 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -833,7 +833,7 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_4i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -847,13 +847,13 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
define <2 x i32> @fptosi_2f32_to_2i32(<2 x float> %a) {
; SSE-LABEL: fptosi_2f32_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f32_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
@@ -863,12 +863,12 @@ define <2 x i32> @fptosi_2f32_to_2i32(<2 x float> %a) {
define <4 x i32> @fptosi_4f32_to_4i32(<4 x float> %a) {
; SSE-LABEL: fptosi_4f32_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f32_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = fptosi <4 x float> %a to <4 x i32>
@@ -877,7 +877,7 @@ define <4 x i32> @fptosi_4f32_to_4i32(<4 x float> %a) {
define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptosi_2f32_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -888,7 +888,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptosi_2f32_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vcvttss2si %xmm0, %rax
; VEX-NEXT: vmovq %rax, %xmm1
; VEX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -898,7 +898,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptosi_2f32_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttss2si %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -908,7 +908,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_2f32_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttss2si %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -918,7 +918,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_2f32_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttss2si %xmm0, %rax
; AVX512DQ-NEXT: vmovq %rax, %xmm1
; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -928,7 +928,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_2f32_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
@@ -938,7 +938,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptosi_4f32_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -949,7 +949,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptosi_4f32_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; VEX-NEXT: vcvttss2si %xmm1, %rax
; VEX-NEXT: vcvttss2si %xmm0, %rcx
@@ -959,7 +959,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptosi_4f32_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvttss2si %xmm1, %rax
; AVX512F-NEXT: vcvttss2si %xmm0, %rcx
@@ -969,7 +969,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_4f32_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvttss2si %xmm1, %rax
; AVX512VL-NEXT: vcvttss2si %xmm0, %rcx
@@ -979,7 +979,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_4f32_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -987,7 +987,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
@@ -999,13 +999,13 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
define <8 x i32> @fptosi_8f32_to_8i32(<8 x float> %a) {
; SSE-LABEL: fptosi_8f32_to_8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_8f32_to_8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX-NEXT: retq
%cvt = fptosi <8 x float> %a to <8 x i32>
@@ -1014,7 +1014,7 @@ define <8 x i32> @fptosi_8f32_to_8i32(<8 x float> %a) {
define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptosi_4f32_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
@@ -1034,7 +1034,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: fptosi_4f32_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvttss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
@@ -1052,7 +1052,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptosi_4f32_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
@@ -1070,7 +1070,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptosi_4f32_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512F-NEXT: vcvttss2si %xmm1, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
@@ -1088,7 +1088,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_4f32_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vcvttss2si %xmm1, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
@@ -1106,13 +1106,13 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_4f32_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1122,7 +1122,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptosi_8f32_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
@@ -1142,7 +1142,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: fptosi_8f32_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvttss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
@@ -1160,7 +1160,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptosi_8f32_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
@@ -1178,7 +1178,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptosi_8f32_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvttss2si %xmm1, %rax
; AVX512F-NEXT: vcvttss2si %xmm0, %rcx
@@ -1196,7 +1196,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_8f32_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvttss2si %xmm1, %rax
; AVX512VL-NEXT: vcvttss2si %xmm0, %rcx
@@ -1214,13 +1214,13 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_8f32_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_8f32_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512VLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VLDQ-NEXT: retq
@@ -1235,7 +1235,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
; SSE-LABEL: fptoui_2f32_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
@@ -1260,7 +1260,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f32_to_2i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; VEX-NEXT: vsubss %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttss2si %xmm2, %rax
@@ -1282,7 +1282,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f32_to_2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1290,13 +1290,13 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f32_to_2i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f32_to_2i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1304,7 +1304,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f32_to_2i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512VLDQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512VLDQ-NEXT: retq
@@ -1314,7 +1314,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
@@ -1335,7 +1335,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f32_to_4i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; VEX-NEXT: vcvttss2si %xmm1, %rax
; VEX-NEXT: vcvttss2si %xmm0, %rcx
@@ -1350,7 +1350,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f32_to_4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1358,12 +1358,12 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f32_to_4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f32_to_4i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1371,7 +1371,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_4i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x float> %a to <4 x i32>
@@ -1380,7 +1380,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptoui_2f32_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
@@ -1405,7 +1405,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f32_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; VEX-NEXT: vsubss %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttss2si %xmm2, %rax
@@ -1427,7 +1427,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f32_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttss2usi %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -1437,7 +1437,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f32_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttss2usi %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -1447,7 +1447,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f32_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttss2usi %xmm0, %rax
; AVX512DQ-NEXT: vmovq %rax, %xmm1
; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -1457,7 +1457,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f32_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
@@ -1467,7 +1467,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
@@ -1492,7 +1492,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f32_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; VEX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; VEX-NEXT: vsubss %xmm2, %xmm1, %xmm3
@@ -1514,7 +1514,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f32_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvttss2usi %xmm1, %rax
; AVX512F-NEXT: vcvttss2usi %xmm0, %rcx
@@ -1524,7 +1524,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f32_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvttss2usi %xmm1, %rax
; AVX512VL-NEXT: vcvttss2usi %xmm0, %rcx
@@ -1534,7 +1534,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f32_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1542,7 +1542,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
@@ -1554,7 +1554,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-LABEL: fptoui_8f32_to_8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm0, %rax
@@ -1591,7 +1591,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: fptoui_8f32_to_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vcvttss2si %xmm2, %rax
@@ -1619,7 +1619,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_8f32_to_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vcvttss2si %xmm2, %rax
@@ -1647,26 +1647,26 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptoui_8f32_to_8i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_8f32_to_8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttps2udq %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_8f32_to_8i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_8i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2udq %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <8 x float> %a to <8 x i32>
@@ -1675,7 +1675,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm1, %xmm2
@@ -1721,7 +1721,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: fptoui_4f32_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -1761,7 +1761,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_4f32_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -1801,7 +1801,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f32_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512F-NEXT: vcvttss2usi %xmm1, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
@@ -1819,7 +1819,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f32_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vcvttss2usi %xmm1, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
@@ -1837,13 +1837,13 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f32_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1853,7 +1853,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptoui_8f32_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm1, %xmm2
@@ -1899,7 +1899,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: fptoui_8f32_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -1939,7 +1939,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_8f32_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -1979,7 +1979,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptoui_8f32_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvttss2usi %xmm1, %rax
; AVX512F-NEXT: vcvttss2usi %xmm0, %rcx
@@ -1997,7 +1997,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_8f32_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvttss2usi %xmm1, %rax
; AVX512VL-NEXT: vcvttss2usi %xmm0, %rcx
@@ -2015,13 +2015,13 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_8f32_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512VLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VLDQ-NEXT: retq
@@ -2036,12 +2036,12 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
define <2 x i64> @fptosi_2f64_to_2i64_const() {
; SSE-LABEL: fptosi_2f64_to_2i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f64_to_2i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,18446744073709551615]
; AVX-NEXT: retq
%cvt = fptosi <2 x double> <double 1.0, double -1.0> to <2 x i64>
@@ -2050,12 +2050,12 @@ define <2 x i64> @fptosi_2f64_to_2i64_const() {
define <4 x i32> @fptosi_2f64_to_2i32_const() {
; SSE-LABEL: fptosi_2f64_to_2i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = <4294967295,1,u,u>
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f64_to_2i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <4294967295,1,u,u>
; AVX-NEXT: retq
%cvt = fptosi <2 x double> <double -1.0, double 1.0> to <2 x i32>
@@ -2065,13 +2065,13 @@ define <4 x i32> @fptosi_2f64_to_2i32_const() {
define <4 x i64> @fptosi_4f64_to_4i64_const() {
; SSE-LABEL: fptosi_4f64_to_4i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [2,18446744073709551613]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f64_to_4i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,18446744073709551615,2,18446744073709551613]
; AVX-NEXT: retq
%cvt = fptosi <4 x double> <double 1.0, double -1.0, double 2.0, double -3.0> to <4 x i64>
@@ -2080,12 +2080,12 @@ define <4 x i64> @fptosi_4f64_to_4i64_const() {
define <4 x i32> @fptosi_4f64_to_4i32_const() {
; SSE-LABEL: fptosi_4f64_to_4i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,1,4294967294,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f64_to_4i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,1,4294967294,3]
; AVX-NEXT: retq
%cvt = fptosi <4 x double> <double -1.0, double 1.0, double -2.0, double 3.0> to <4 x i32>
@@ -2094,12 +2094,12 @@ define <4 x i32> @fptosi_4f64_to_4i32_const() {
define <2 x i64> @fptoui_2f64_to_2i64_const() {
; SSE-LABEL: fptoui_2f64_to_2i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [2,4]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_2f64_to_2i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2,4]
; AVX-NEXT: retq
%cvt = fptoui <2 x double> <double 2.0, double 4.0> to <2 x i64>
@@ -2108,12 +2108,12 @@ define <2 x i64> @fptoui_2f64_to_2i64_const() {
define <4 x i32> @fptoui_2f64_to_2i32_const(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_2i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = <2,4,u,u>
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_2f64_to_2i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <2,4,u,u>
; AVX-NEXT: retq
%cvt = fptoui <2 x double> <double 2.0, double 4.0> to <2 x i32>
@@ -2123,13 +2123,13 @@ define <4 x i32> @fptoui_2f64_to_2i32_const(<2 x double> %a) {
define <4 x i64> @fptoui_4f64_to_4i64_const(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [2,4]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [6,8]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_4f64_to_4i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [2,4,6,8]
; AVX-NEXT: retq
%cvt = fptoui <4 x double> <double 2.0, double 4.0, double 6.0, double 8.0> to <4 x i64>
@@ -2138,12 +2138,12 @@ define <4 x i64> @fptoui_4f64_to_4i64_const(<4 x double> %a) {
define <4 x i32> @fptoui_4f64_to_4i32_const(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [2,4,6,8]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_4f64_to_4i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2,4,6,8]
; AVX-NEXT: retq
%cvt = fptoui <4 x double> <double 2.0, double 4.0, double 6.0, double 8.0> to <4 x i32>
@@ -2152,12 +2152,12 @@ define <4 x i32> @fptoui_4f64_to_4i32_const(<4 x double> %a) {
define <4 x i32> @fptosi_4f32_to_4i32_const() {
; SSE-LABEL: fptosi_4f32_to_4i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f32_to_4i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; AVX-NEXT: retq
%cvt = fptosi <4 x float> <float 1.0, float -1.0, float 2.0, float 3.0> to <4 x i32>
@@ -2166,13 +2166,13 @@ define <4 x i32> @fptosi_4f32_to_4i32_const() {
define <4 x i64> @fptosi_4f32_to_4i64_const() {
; SSE-LABEL: fptosi_4f32_to_4i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f32_to_4i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,18446744073709551615,2,3]
; AVX-NEXT: retq
%cvt = fptosi <4 x float> <float 1.0, float -1.0, float 2.0, float 3.0> to <4 x i64>
@@ -2181,13 +2181,13 @@ define <4 x i64> @fptosi_4f32_to_4i64_const() {
define <8 x i32> @fptosi_8f32_to_8i32_const(<8 x float> %a) {
; SSE-LABEL: fptosi_8f32_to_8i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [6,4294967288,2,4294967295]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_8f32_to_8i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,4294967295,2,3,6,4294967288,2,4294967295]
; AVX-NEXT: retq
%cvt = fptosi <8 x float> <float 1.0, float -1.0, float 2.0, float 3.0, float 6.0, float -8.0, float 2.0, float -1.0> to <8 x i32>
@@ -2196,12 +2196,12 @@ define <8 x i32> @fptosi_8f32_to_8i32_const(<8 x float> %a) {
define <4 x i32> @fptoui_4f32_to_4i32_const(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,2,4,6]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_4f32_to_4i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,2,4,6]
; AVX-NEXT: retq
%cvt = fptoui <4 x float> <float 1.0, float 2.0, float 4.0, float 6.0> to <4 x i32>
@@ -2210,13 +2210,13 @@ define <4 x i32> @fptoui_4f32_to_4i32_const(<4 x float> %a) {
define <4 x i64> @fptoui_4f32_to_4i64_const() {
; SSE-LABEL: fptoui_4f32_to_4i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,2]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [4,8]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_4f32_to_4i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,8]
; AVX-NEXT: retq
%cvt = fptoui <4 x float> <float 1.0, float 2.0, float 4.0, float 8.0> to <4 x i64>
@@ -2225,13 +2225,13 @@ define <4 x i64> @fptoui_4f32_to_4i64_const() {
define <8 x i32> @fptoui_8f32_to_8i32_const(<8 x float> %a) {
; SSE-LABEL: fptoui_8f32_to_8i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,2,4,6]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [8,6,4,1]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_8f32_to_8i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,6,8,6,4,1]
; AVX-NEXT: retq
%cvt = fptoui <8 x float> <float 1.0, float 2.0, float 4.0, float 6.0, float 8.0, float 6.0, float 4.0, float 1.0> to <8 x i32>
@@ -2244,7 +2244,7 @@ define <8 x i32> @fptoui_8f32_to_8i32_const(<8 x float> %a) {
define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
; SSE-LABEL: fptosi_2f16_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %rax
; SSE-NEXT: movss %xmm1, {{[0-9]+}}(%rsp) # 4-byte Spill
; SSE-NEXT: callq __gnu_f2h_ieee
@@ -2267,7 +2267,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
; SSE-NEXT: retq
;
; VEX-LABEL: fptosi_2f16_to_4i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: pushq %rax
; VEX-NEXT: vmovss %xmm1, {{[0-9]+}}(%rsp) # 4-byte Spill
; VEX-NEXT: callq __gnu_f2h_ieee
@@ -2289,7 +2289,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
; VEX-NEXT: retq
;
; AVX512-LABEL: fptosi_2f16_to_4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
@@ -2308,7 +2308,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
define <4 x i32> @fptosi_2f80_to_4i32(<2 x x86_fp80> %a) nounwind {
; SSE-LABEL: fptosi_2f80_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
; SSE-NEXT: fnstcw -{{[0-9]+}}(%rsp)
@@ -2333,7 +2333,7 @@ define <4 x i32> @fptosi_2f80_to_4i32(<2 x x86_fp80> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f80_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
; AVX-NEXT: fisttpll -{{[0-9]+}}(%rsp)
@@ -2350,7 +2350,7 @@ define <4 x i32> @fptosi_2f80_to_4i32(<2 x x86_fp80> %a) nounwind {
define <4 x i32> @fptosi_2f128_to_4i32(<2 x fp128> %a) nounwind {
; SSE-LABEL: fptosi_2f128_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
@@ -2375,7 +2375,7 @@ define <4 x i32> @fptosi_2f128_to_4i32(<2 x fp128> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f128_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
diff --git a/test/CodeGen/X86/vec_fpext.ll b/test/CodeGen/X86/vec_fpext.ll
index 609ed088209..6b546ea9e12 100644
--- a/test/CodeGen/X86/vec_fpext.ll
+++ b/test/CodeGen/X86/vec_fpext.ll
@@ -9,7 +9,7 @@
; PR11674
define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0 # encoding: [0x0f,0x5a,0x01]
@@ -17,7 +17,7 @@ define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT: vcvtps2pd (%ecx), %xmm0 # encoding: [0xc5,0xf8,0x5a,0x01]
@@ -25,7 +25,7 @@ define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2pd (%ecx), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0x01]
@@ -33,19 +33,19 @@ define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0 # encoding: [0x0f,0x5a,0x07]
; X64-SSE-NEXT: movups %xmm0, (%rsi) # encoding: [0x0f,0x11,0x06]
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x5a,0x07]
; X64-AVX-NEXT: vmovups %xmm0, (%rsi) # encoding: [0xc5,0xf8,0x11,0x06]
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2pd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0x07]
; X64-AVX512VL-NEXT: vmovups %xmm0, (%rsi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x06]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
@@ -58,7 +58,7 @@ entry:
define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem4:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0 # encoding: [0x0f,0x5a,0x01]
@@ -68,7 +68,7 @@ define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem4:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT: vcvtps2pd (%ecx), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x01]
@@ -77,7 +77,7 @@ define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem4:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2pd (%ecx), %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0x01]
@@ -86,7 +86,7 @@ define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem4:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0 # encoding: [0x0f,0x5a,0x07]
; X64-SSE-NEXT: cvtps2pd 8(%rdi), %xmm1 # encoding: [0x0f,0x5a,0x4f,0x08]
; X64-SSE-NEXT: movups %xmm1, 16(%rsi) # encoding: [0x0f,0x11,0x4e,0x10]
@@ -94,14 +94,14 @@ define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem4:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX-NEXT: vmovups %ymm0, (%rsi) # encoding: [0xc5,0xfc,0x11,0x06]
; X64-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem4:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2pd (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX512VL-NEXT: vmovups %ymm0, (%rsi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x06]
; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
@@ -115,7 +115,7 @@ entry:
define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem8:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0 # encoding: [0x0f,0x5a,0x01]
@@ -129,7 +129,7 @@ define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem8:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT: vcvtps2pd (%ecx), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x01]
@@ -140,7 +140,7 @@ define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem8:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2pd (%ecx), %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0x01]
@@ -149,7 +149,7 @@ define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem8:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0 # encoding: [0x0f,0x5a,0x07]
; X64-SSE-NEXT: cvtps2pd 8(%rdi), %xmm1 # encoding: [0x0f,0x5a,0x4f,0x08]
; X64-SSE-NEXT: cvtps2pd 16(%rdi), %xmm2 # encoding: [0x0f,0x5a,0x57,0x10]
@@ -161,7 +161,7 @@ define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem8:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX-NEXT: vcvtps2pd 16(%rdi), %ymm1 # encoding: [0xc5,0xfc,0x5a,0x4f,0x10]
; X64-AVX-NEXT: vmovups %ymm1, 32(%rsi) # encoding: [0xc5,0xfc,0x11,0x4e,0x20]
@@ -170,7 +170,7 @@ define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem8:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2pd (%rdi), %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0x07]
; X64-AVX512VL-NEXT: vmovups %zmm0, (%rsi) # encoding: [0x62,0xf1,0x7c,0x48,0x11,0x06]
; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
@@ -184,42 +184,42 @@ entry:
define <2 x double> @fpext_fromconst() {
; X32-SSE-LABEL: fpext_fromconst:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X32-SSE-NEXT: # encoding: [0x0f,0x28,0x05,A,A,A,A]
; X32-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_fromconst:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X32-AVX-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X32-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_fromconst:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [1.000000e+00,-2.000000e+00]
; X32-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X32-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_fromconst:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X64-SSE-NEXT: # encoding: [0x0f,0x28,0x05,A,A,A,A]
; X64-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_fromconst:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X64-AVX-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_fromconst:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [1.000000e+00,-2.000000e+00]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
diff --git a/test/CodeGen/X86/vec_fptrunc.ll b/test/CodeGen/X86/vec_fptrunc.ll
index e6a0d52c5ae..79abeb0c59f 100644
--- a/test/CodeGen/X86/vec_fptrunc.ll
+++ b/test/CodeGen/X86/vec_fptrunc.ll
@@ -6,7 +6,7 @@
define void @fptrunc_frommem2(<2 x double>* %in, <2 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem2:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtpd2ps (%ecx), %xmm0
@@ -15,7 +15,7 @@ define void @fptrunc_frommem2(<2 x double>* %in, <2 x float>* %out) {
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_frommem2:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtpd2psx (%ecx), %xmm0
@@ -24,13 +24,13 @@ define void @fptrunc_frommem2(<2 x double>* %in, <2 x float>* %out) {
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_frommem2:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm0
; X64-SSE-NEXT: movlpd %xmm0, (%rsi)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_frommem2:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtpd2psx (%rdi), %xmm0
; X64-AVX-NEXT: vmovlpd %xmm0, (%rsi)
; X64-AVX-NEXT: retq
@@ -43,7 +43,7 @@ entry:
define void @fptrunc_frommem4(<4 x double>* %in, <4 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem4:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtpd2ps 16(%ecx), %xmm0
@@ -53,7 +53,7 @@ define void @fptrunc_frommem4(<4 x double>* %in, <4 x float>* %out) {
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_frommem4:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtpd2psy (%ecx), %xmm0
@@ -61,7 +61,7 @@ define void @fptrunc_frommem4(<4 x double>* %in, <4 x float>* %out) {
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_frommem4:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtpd2ps 16(%rdi), %xmm0
; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm1
; X64-SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -69,7 +69,7 @@ define void @fptrunc_frommem4(<4 x double>* %in, <4 x float>* %out) {
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_frommem4:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtpd2psy (%rdi), %xmm0
; X64-AVX-NEXT: vmovupd %xmm0, (%rsi)
; X64-AVX-NEXT: retq
@@ -82,7 +82,7 @@ entry:
define void @fptrunc_frommem8(<8 x double>* %in, <8 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem8:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtpd2ps 16(%ecx), %xmm0
@@ -96,7 +96,7 @@ define void @fptrunc_frommem8(<8 x double>* %in, <8 x float>* %out) {
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_frommem8:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtpd2psy (%ecx), %xmm0
@@ -107,7 +107,7 @@ define void @fptrunc_frommem8(<8 x double>* %in, <8 x float>* %out) {
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_frommem8:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtpd2ps 16(%rdi), %xmm0
; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm1
; X64-SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -119,7 +119,7 @@ define void @fptrunc_frommem8(<8 x double>* %in, <8 x float>* %out) {
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_frommem8:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtpd2psy (%rdi), %xmm0
; X64-AVX-NEXT: vcvtpd2psy 32(%rdi), %xmm1
; X64-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -135,24 +135,24 @@ entry:
define <4 x float> @fptrunc_frommem2_zext(<2 x double> * %ld) {
; X32-SSE-LABEL: fptrunc_frommem2_zext:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: cvtpd2ps (%eax), %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_frommem2_zext:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vcvtpd2psx (%eax), %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_frommem2_zext:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_frommem2_zext:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtpd2psx (%rdi), %xmm0
; X64-AVX-NEXT: retq
%arg = load <2 x double>, <2 x double> * %ld, align 16
@@ -163,22 +163,22 @@ define <4 x float> @fptrunc_frommem2_zext(<2 x double> * %ld) {
define <4 x float> @fptrunc_fromreg2_zext(<2 x double> %arg) {
; X32-SSE-LABEL: fptrunc_fromreg2_zext:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: cvtpd2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_fromreg2_zext:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_fromreg2_zext:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtpd2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_fromreg2_zext:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
; X64-AVX-NEXT: retq
%cvt = fptrunc <2 x double> %arg to <2 x float>
@@ -189,26 +189,26 @@ define <4 x float> @fptrunc_fromreg2_zext(<2 x double> %arg) {
; FIXME: For exact truncations we should be able to fold this.
define <4 x float> @fptrunc_fromconst() {
; X32-SSE-LABEL: fptrunc_fromconst:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: cvtpd2ps {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: cvtpd2ps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_fromconst:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: vcvtpd2psy {{\.LCPI.*}}, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_fromconst:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtpd2ps {{.*}}(%rip), %xmm1
; X64-SSE-NEXT: cvtpd2ps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_fromconst:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtpd2psy {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/vec_i64.ll b/test/CodeGen/X86/vec_i64.ll
index 03d37889680..f9666a0cdef 100644
--- a/test/CodeGen/X86/vec_i64.ll
+++ b/test/CodeGen/X86/vec_i64.ll
@@ -6,13 +6,13 @@
define <2 x i64> @foo1(i64* %y) nounwind {
; X32-LABEL: foo1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: retl
;
; X64-LABEL: foo1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
entry:
@@ -25,13 +25,13 @@ entry:
define <4 x float> @foo2(i64* %p) nounwind {
; X32-LABEL: foo2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: retl
;
; X64-LABEL: foo2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/vec_ins_extract-1.ll b/test/CodeGen/X86/vec_ins_extract-1.ll
index 6b930649d15..66dd74acf1b 100644
--- a/test/CodeGen/X86/vec_ins_extract-1.ll
+++ b/test/CodeGen/X86/vec_ins_extract-1.ll
@@ -7,7 +7,7 @@
define i32 @t0(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-LABEL: t0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -21,7 +21,7 @@ define i32 @t0(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
@@ -35,7 +35,7 @@ define i32 @t0(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
define i32 @t1(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -50,7 +50,7 @@ define i32 @t1(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl $76, %eax
; X64-NEXT: pinsrd $0, %eax, %xmm0
@@ -65,7 +65,7 @@ define i32 @t1(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
define <4 x i32> @t2(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -78,7 +78,7 @@ define <4 x i32> @t2(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
@@ -91,7 +91,7 @@ define <4 x i32> @t2(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
define <4 x i32> @t3(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -105,7 +105,7 @@ define <4 x i32> @t3(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
diff --git a/test/CodeGen/X86/vec_insert-2.ll b/test/CodeGen/X86/vec_insert-2.ll
index eedb1d252ba..9fb0dc54f2a 100644
--- a/test/CodeGen/X86/vec_insert-2.ll
+++ b/test/CodeGen/X86/vec_insert-2.ll
@@ -4,14 +4,14 @@
define <4 x float> @t1(float %s, <4 x float> %tmp) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; X64-NEXT: movaps %xmm1, %xmm0
@@ -22,14 +22,14 @@ define <4 x float> @t1(float %s, <4 x float> %tmp) nounwind {
define <4 x i32> @t2(i32 %s, <4 x i32> %tmp) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
@@ -40,12 +40,12 @@ define <4 x i32> @t2(i32 %s, <4 x i32> %tmp) nounwind {
define <2 x double> @t3(double %s, <2 x double> %tmp) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -55,12 +55,12 @@ define <2 x double> @t3(double %s, <2 x double> %tmp) nounwind {
define <8 x i16> @t4(i16 %s, <8 x i16> %tmp) nounwind {
; X32-LABEL: t4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pinsrw $5, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pinsrw $5, %edi, %xmm0
; X64-NEXT: retq
%tmp1 = insertelement <8 x i16> %tmp, i16 %s, i32 5
diff --git a/test/CodeGen/X86/vec_insert-3.ll b/test/CodeGen/X86/vec_insert-3.ll
index ff8b1f14c52..8ec6fa1cf06 100644
--- a/test/CodeGen/X86/vec_insert-3.ll
+++ b/test/CodeGen/X86/vec_insert-3.ll
@@ -4,7 +4,7 @@
define <2 x i64> @t1(i64 %s, <2 x i64> %tmp) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
@@ -14,7 +14,7 @@ define <2 x i64> @t1(i64 %s, <2 x i64> %tmp) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm1
; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/vec_insert-4.ll b/test/CodeGen/X86/vec_insert-4.ll
index aebac7f9d44..674abbc39f7 100644
--- a/test/CodeGen/X86/vec_insert-4.ll
+++ b/test/CodeGen/X86/vec_insert-4.ll
@@ -4,7 +4,7 @@
define <8 x float> @f(<8 x float> %a, i32 %b) nounwind {
; X32-LABEL: f:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-32, %esp
@@ -21,7 +21,7 @@ define <8 x float> @f(<8 x float> %a, i32 %b) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: f:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: pushq %rbp
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: andq $-32, %rsp
diff --git a/test/CodeGen/X86/vec_insert-5.ll b/test/CodeGen/X86/vec_insert-5.ll
index 64e8bbf5456..17d66f99674 100644
--- a/test/CodeGen/X86/vec_insert-5.ll
+++ b/test/CodeGen/X86/vec_insert-5.ll
@@ -6,7 +6,7 @@
define void @t1(i32 %a, x86_mmx* %P) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: shll $12, %ecx
@@ -16,7 +16,7 @@ define void @t1(i32 %a, x86_mmx* %P) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $12, %edi
; X64-NEXT: movq %rdi, %xmm0
@@ -34,7 +34,7 @@ define void @t1(i32 %a, x86_mmx* %P) nounwind {
define <4 x float> @t2(<4 x float>* %P) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm1
; X32-NEXT: xorps %xmm0, %xmm0
@@ -43,7 +43,7 @@ define <4 x float> @t2(<4 x float>* %P) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm1
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
@@ -56,14 +56,14 @@ define <4 x float> @t2(<4 x float>* %P) nounwind {
define <4 x float> @t3(<4 x float>* %P) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; X64-NEXT: retq
@@ -74,7 +74,7 @@ define <4 x float> @t3(<4 x float>* %P) nounwind {
define <4 x float> @t4(<4 x float>* %P) nounwind {
; X32-LABEL: t4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
; X32-NEXT: xorps %xmm1, %xmm1
@@ -83,7 +83,7 @@ define <4 x float> @t4(<4 x float>* %P) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[1,0]
@@ -96,12 +96,12 @@ define <4 x float> @t4(<4 x float>* %P) nounwind {
define <16 x i8> @t5(<16 x i8> %x) nounwind {
; X32-LABEL: t5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlw $8, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlw $8, %xmm0
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 17>
@@ -110,12 +110,12 @@ define <16 x i8> @t5(<16 x i8> %x) nounwind {
define <16 x i8> @t6(<16 x i8> %x) nounwind {
; X32-LABEL: t6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlw $8, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlw $8, %xmm0
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -124,12 +124,12 @@ define <16 x i8> @t6(<16 x i8> %x) nounwind {
define <16 x i8> @t7(<16 x i8> %x) nounwind {
; X32-LABEL: t7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2]
; X32-NEXT: retl
;
; X64-LABEL: t7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2]
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2>
@@ -138,12 +138,12 @@ define <16 x i8> @t7(<16 x i8> %x) nounwind {
define <16 x i8> @t8(<16 x i8> %x) nounwind {
; X32-LABEL: t8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; X32-NEXT: retl
;
; X64-LABEL: t8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 17>
@@ -152,12 +152,12 @@ define <16 x i8> @t8(<16 x i8> %x) nounwind {
define <16 x i8> @t9(<16 x i8> %x) nounwind {
; X32-LABEL: t9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; X32-NEXT: retl
;
; X64-LABEL: t9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 7, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 14, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/vec_insert-7.ll b/test/CodeGen/X86/vec_insert-7.ll
index a600d20902d..bfced4b3877 100644
--- a/test/CodeGen/X86/vec_insert-7.ll
+++ b/test/CodeGen/X86/vec_insert-7.ll
@@ -7,7 +7,7 @@
define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
; X32-LABEL: mmx_movzl:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: subl $20, %esp
; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
; X32-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
@@ -21,7 +21,7 @@ define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: mmx_movzl:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
diff --git a/test/CodeGen/X86/vec_insert-8.ll b/test/CodeGen/X86/vec_insert-8.ll
index 5f80225a24d..71585474969 100644
--- a/test/CodeGen/X86/vec_insert-8.ll
+++ b/test/CodeGen/X86/vec_insert-8.ll
@@ -6,7 +6,7 @@
define <4 x i32> @var_insert(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
; X32-LABEL: var_insert:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -22,7 +22,7 @@ define <4 x i32> @var_insert(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: var_insert:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %esi
@@ -36,7 +36,7 @@ entry:
define i32 @var_extract(<4 x i32> %x, i32 %idx) nounwind {
; X32-LABEL: var_extract:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -50,7 +50,7 @@ define i32 @var_extract(<4 x i32> %x, i32 %idx) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: var_extract:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
diff --git a/test/CodeGen/X86/vec_insert-9.ll b/test/CodeGen/X86/vec_insert-9.ll
index ec4a0288e10..a750c6faac8 100644
--- a/test/CodeGen/X86/vec_insert-9.ll
+++ b/test/CodeGen/X86/vec_insert-9.ll
@@ -4,13 +4,13 @@
define <4 x i32> @var_insert2(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
; X32-LABEL: var_insert2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: var_insert2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: pinsrd $3, %esi, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/vec_insert-mmx.ll b/test/CodeGen/X86/vec_insert-mmx.ll
index ad857636ebb..81bb25a1e74 100644
--- a/test/CodeGen/X86/vec_insert-mmx.ll
+++ b/test/CodeGen/X86/vec_insert-mmx.ll
@@ -5,7 +5,7 @@
; This is not an MMX operation; promoted to xmm.
define x86_mmx @t0(i32 %A) nounwind {
; X32-LABEL: t0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
@@ -15,7 +15,7 @@ define x86_mmx @t0(i32 %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t0:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
@@ -28,12 +28,12 @@ define x86_mmx @t0(i32 %A) nounwind {
define <8 x i8> @t1(i8 zeroext %x) nounwind {
; X32-LABEL: t1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: retq
%r = insertelement <8 x i8> undef, i8 %x, i32 0
@@ -43,12 +43,12 @@ define <8 x i8> @t1(i8 zeroext %x) nounwind {
; PR2574
define <2 x float> @t2(<2 x float> %a0) {
; X32-LABEL: t2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: retq
%v1 = insertelement <2 x float> %a0, float 0.000000e+00, i32 0
@@ -62,7 +62,7 @@ define <2 x float> @t2(<2 x float> %a0) {
; PR2562
define void @t3() {
; X32-LABEL: t3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl L_g0$non_lazy_ptr, %eax
; X32-NEXT: movl L_g1$non_lazy_ptr, %ecx
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
@@ -77,7 +77,7 @@ define void @t3() {
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movq _g0@{{.*}}(%rip), %rax
; X64-NEXT: movq _g1@{{.*}}(%rip), %rcx
; X64-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll
index afcbc9a9d17..0ab320c63aa 100644
--- a/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/test/CodeGen/X86/vec_int_to_fp.ll
@@ -18,7 +18,7 @@
define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
; SSE-LABEL: sitofp_2i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: cvtsi2sdq %rax, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -30,7 +30,7 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_2i64_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
; VEX-NEXT: vmovq %xmm0, %rax
@@ -39,7 +39,7 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_2i64_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -48,7 +48,7 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_2i64_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -57,7 +57,7 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_2i64_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -65,7 +65,7 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <2 x i64> %a to <2 x double>
@@ -74,12 +74,12 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
define <2 x double> @sitofp_2i32_to_2f64(<4 x i32> %a) {
; SSE-LABEL: sitofp_2i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_2i32_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
%shuf = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
@@ -89,12 +89,12 @@ define <2 x double> @sitofp_2i32_to_2f64(<4 x i32> %a) {
define <2 x double> @sitofp_4i32_to_2f64(<4 x i32> %a) {
; SSE-LABEL: sitofp_4i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i32_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
@@ -106,14 +106,14 @@ define <2 x double> @sitofp_4i32_to_2f64(<4 x i32> %a) {
define <2 x double> @sitofp_2i16_to_2f64(<8 x i16> %a) {
; SSE-LABEL: sitofp_2i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_2i16_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
@@ -124,14 +124,14 @@ define <2 x double> @sitofp_2i16_to_2f64(<8 x i16> %a) {
define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
; SSE-LABEL: sitofp_8i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i16_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -139,7 +139,7 @@ define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i16_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -147,7 +147,7 @@ define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i16_to_2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -160,7 +160,7 @@ define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
define <2 x double> @sitofp_2i8_to_2f64(<16 x i8> %a) {
; SSE-LABEL: sitofp_2i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
@@ -168,7 +168,7 @@ define <2 x double> @sitofp_2i8_to_2f64(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_2i8_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
@@ -179,7 +179,7 @@ define <2 x double> @sitofp_2i8_to_2f64(<16 x i8> %a) {
define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; SSE-LABEL: sitofp_16i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
@@ -187,7 +187,7 @@ define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_16i8_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -195,7 +195,7 @@ define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_16i8_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -203,7 +203,7 @@ define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_16i8_to_2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -216,7 +216,7 @@ define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
; SSE-LABEL: sitofp_4i64_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: cvtsi2sdq %rax, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -236,7 +236,7 @@ define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_4i64_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -252,7 +252,7 @@ define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_4i64_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
; AVX2-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -268,7 +268,7 @@ define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_4i64_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
; AVX512F-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -284,7 +284,7 @@ define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_4i64_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
; AVX512VL-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -300,14 +300,14 @@ define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2pd %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <4 x i64> %a to <4 x double>
@@ -316,7 +316,7 @@ define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
define <4 x double> @sitofp_4i32_to_4f64(<4 x i32> %a) {
; SSE-LABEL: sitofp_4i32_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm1
@@ -324,7 +324,7 @@ define <4 x double> @sitofp_4i32_to_4f64(<4 x i32> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i32_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
%cvt = sitofp <4 x i32> %a to <4 x double>
@@ -333,7 +333,7 @@ define <4 x double> @sitofp_4i32_to_4f64(<4 x i32> %a) {
define <4 x double> @sitofp_4i16_to_4f64(<8 x i16> %a) {
; SSE-LABEL: sitofp_4i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm0
@@ -342,7 +342,7 @@ define <4 x double> @sitofp_4i16_to_4f64(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i16_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -353,7 +353,7 @@ define <4 x double> @sitofp_4i16_to_4f64(<8 x i16> %a) {
define <4 x double> @sitofp_8i16_to_4f64(<8 x i16> %a) {
; SSE-LABEL: sitofp_8i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm0
@@ -362,19 +362,19 @@ define <4 x double> @sitofp_8i16_to_4f64(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i16_to_4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -386,7 +386,7 @@ define <4 x double> @sitofp_8i16_to_4f64(<8 x i16> %a) {
define <4 x double> @sitofp_4i8_to_4f64(<16 x i8> %a) {
; SSE-LABEL: sitofp_4i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $24, %xmm1
@@ -396,7 +396,7 @@ define <4 x double> @sitofp_4i8_to_4f64(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i8_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -407,7 +407,7 @@ define <4 x double> @sitofp_4i8_to_4f64(<16 x i8> %a) {
define <4 x double> @sitofp_16i8_to_4f64(<16 x i8> %a) {
; SSE-LABEL: sitofp_16i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $24, %xmm1
@@ -417,19 +417,19 @@ define <4 x double> @sitofp_16i8_to_4f64(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_16i8_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_16i8_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_16i8_to_4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -445,7 +445,7 @@ define <4 x double> @sitofp_16i8_to_4f64(<16 x i8> %a) {
define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
; SSE-LABEL: uitofp_2i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -461,7 +461,7 @@ define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_2i64_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovapd {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
; VEX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; VEX-NEXT: vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
@@ -473,7 +473,7 @@ define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_2i64_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2sdq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -482,7 +482,7 @@ define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i64_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2sdq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -491,7 +491,7 @@ define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -499,7 +499,7 @@ define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2pd %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <2 x i64> %a to <2 x double>
@@ -508,7 +508,7 @@ define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
; SSE-LABEL: uitofp_2i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
@@ -519,7 +519,7 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_2i32_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VEX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; VEX-NEXT: vcvtdq2pd %xmm1, %xmm1
@@ -530,7 +530,7 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_2i32_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -538,12 +538,12 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i32_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_2i32_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -551,7 +551,7 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i32_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
@@ -561,7 +561,7 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; SSE-LABEL: uitofp_4i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
@@ -572,7 +572,7 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_4i32_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
@@ -585,7 +585,7 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_4i32_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [65536,65536,65536,65536]
@@ -599,7 +599,7 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -607,14 +607,14 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -622,7 +622,7 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
@@ -634,14 +634,14 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
define <2 x double> @uitofp_2i16_to_2f64(<8 x i16> %a) {
; SSE-LABEL: uitofp_2i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_2i16_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
@@ -652,14 +652,14 @@ define <2 x double> @uitofp_2i16_to_2f64(<8 x i16> %a) {
define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
; SSE-LABEL: uitofp_8i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i16_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -667,7 +667,7 @@ define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i16_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -675,7 +675,7 @@ define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i16_to_2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -688,7 +688,7 @@ define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
define <2 x double> @uitofp_2i8_to_2f64(<16 x i8> %a) {
; SSE-LABEL: uitofp_2i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -696,7 +696,7 @@ define <2 x double> @uitofp_2i8_to_2f64(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_2i8_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
@@ -707,7 +707,7 @@ define <2 x double> @uitofp_2i8_to_2f64(<16 x i8> %a) {
define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; SSE-LABEL: uitofp_16i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -715,7 +715,7 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_16i8_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -723,7 +723,7 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_16i8_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -731,7 +731,7 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_16i8_to_2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -744,7 +744,7 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
; SSE-LABEL: uitofp_4i64_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -770,7 +770,7 @@ define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_4i64_to_4f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vextractf128 $1, %ymm0, %xmm1
; VEX-NEXT: vmovapd {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; VEX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
@@ -790,7 +790,7 @@ define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i64_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
; AVX512F-NEXT: vcvtusi2sdq %rax, %xmm2, %xmm2
@@ -806,7 +806,7 @@ define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i64_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
; AVX512VL-NEXT: vcvtusi2sdq %rax, %xmm2, %xmm2
@@ -822,14 +822,14 @@ define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2pd %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i64> %a to <4 x double>
@@ -838,7 +838,7 @@ define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
; SSE-LABEL: uitofp_4i32_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
@@ -859,7 +859,7 @@ define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_4i32_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
@@ -870,7 +870,7 @@ define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_4i32_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [65536,65536,65536,65536]
@@ -882,26 +882,26 @@ define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i32> %a to <4 x double>
@@ -910,7 +910,7 @@ define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
define <4 x double> @uitofp_4i16_to_4f64(<8 x i16> %a) {
; SSE-LABEL: uitofp_4i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm2
@@ -920,7 +920,7 @@ define <4 x double> @uitofp_4i16_to_4f64(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_4i16_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -931,7 +931,7 @@ define <4 x double> @uitofp_4i16_to_4f64(<8 x i16> %a) {
define <4 x double> @uitofp_8i16_to_4f64(<8 x i16> %a) {
; SSE-LABEL: uitofp_8i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm2
@@ -941,19 +941,19 @@ define <4 x double> @uitofp_8i16_to_4f64(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i16_to_4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -965,7 +965,7 @@ define <4 x double> @uitofp_8i16_to_4f64(<8 x i16> %a) {
define <4 x double> @uitofp_4i8_to_4f64(<16 x i8> %a) {
; SSE-LABEL: uitofp_4i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -976,7 +976,7 @@ define <4 x double> @uitofp_4i8_to_4f64(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_4i8_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -987,7 +987,7 @@ define <4 x double> @uitofp_4i8_to_4f64(<16 x i8> %a) {
define <4 x double> @uitofp_16i8_to_4f64(<16 x i8> %a) {
; SSE-LABEL: uitofp_16i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -998,19 +998,19 @@ define <4 x double> @uitofp_16i8_to_4f64(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_16i8_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_16i8_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_16i8_to_4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -1026,7 +1026,7 @@ define <4 x double> @uitofp_16i8_to_4f64(<16 x i8> %a) {
define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-LABEL: sitofp_2i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1038,7 +1038,7 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_2i64_to_4f32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: vmovq %xmm0, %rax
@@ -1049,7 +1049,7 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_2i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1060,7 +1060,7 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_2i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1071,7 +1071,7 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1079,7 +1079,7 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <2 x i64> %a to <2 x float>
@@ -1089,7 +1089,7 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
; SSE-LABEL: sitofp_2i64_to_4f32_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: xorps %xmm1, %xmm1
@@ -1102,7 +1102,7 @@ define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_2i64_to_4f32_zero:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: vmovq %xmm0, %rax
@@ -1111,7 +1111,7 @@ define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_2i64_to_4f32_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1120,7 +1120,7 @@ define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_2i64_to_4f32_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1130,7 +1130,7 @@ define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32_zero:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -1138,7 +1138,7 @@ define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_4f32_zero:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <2 x i64> %a to <2 x float>
@@ -1148,7 +1148,7 @@ define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-LABEL: sitofp_4i64_to_4f32_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1163,7 +1163,7 @@ define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_4i64_to_4f32_undef:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: vmovq %xmm0, %rax
@@ -1174,7 +1174,7 @@ define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1185,7 +1185,7 @@ define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1196,7 +1196,7 @@ define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1204,7 +1204,7 @@ define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
@@ -1216,12 +1216,12 @@ define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
define <4 x float> @sitofp_4i32_to_4f32(<4 x i32> %a) {
; SSE-LABEL: sitofp_4i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i32_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp <4 x i32> %a to <4 x float>
@@ -1230,14 +1230,14 @@ define <4 x float> @sitofp_4i32_to_4f32(<4 x i32> %a) {
define <4 x float> @sitofp_4i16_to_4f32(<8 x i16> %a) {
; SSE-LABEL: sitofp_4i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i16_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1248,14 +1248,14 @@ define <4 x float> @sitofp_4i16_to_4f32(<8 x i16> %a) {
define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
; SSE-LABEL: sitofp_8i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
@@ -1266,7 +1266,7 @@ define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1274,7 +1274,7 @@ define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i16_to_4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1287,7 +1287,7 @@ define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
define <4 x float> @sitofp_4i8_to_4f32(<16 x i8> %a) {
; SSE-LABEL: sitofp_4i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
@@ -1295,7 +1295,7 @@ define <4 x float> @sitofp_4i8_to_4f32(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i8_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1306,7 +1306,7 @@ define <4 x float> @sitofp_4i8_to_4f32(<16 x i8> %a) {
define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; SSE-LABEL: sitofp_16i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
@@ -1314,7 +1314,7 @@ define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_16i8_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -1325,7 +1325,7 @@ define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_16i8_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1333,7 +1333,7 @@ define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_16i8_to_4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1346,7 +1346,7 @@ define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-LABEL: sitofp_4i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: cvtsi2ssq %rax, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -1367,7 +1367,7 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_4i64_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX1-NEXT: vmovq %xmm0, %rax
@@ -1384,7 +1384,7 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_4i64_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX2-NEXT: vmovq %xmm0, %rax
@@ -1401,7 +1401,7 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_4i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1418,7 +1418,7 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_4i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1435,7 +1435,7 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1443,7 +1443,7 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -1453,13 +1453,13 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
define <8 x float> @sitofp_8i32_to_8f32(<8 x i32> %a) {
; SSE-LABEL: sitofp_8i32_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_8i32_to_8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX-NEXT: retq
%cvt = sitofp <8 x i32> %a to <8 x float>
@@ -1468,7 +1468,7 @@ define <8 x float> @sitofp_8i32_to_8f32(<8 x i32> %a) {
define <8 x float> @sitofp_8i16_to_8f32(<8 x i16> %a) {
; SSE-LABEL: sitofp_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: cvtdq2ps %xmm1, %xmm2
@@ -1479,7 +1479,7 @@ define <8 x float> @sitofp_8i16_to_8f32(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
@@ -1488,13 +1488,13 @@ define <8 x float> @sitofp_8i16_to_8f32(<8 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -1504,7 +1504,7 @@ define <8 x float> @sitofp_8i16_to_8f32(<8 x i16> %a) {
define <8 x float> @sitofp_8i8_to_8f32(<16 x i8> %a) {
; SSE-LABEL: sitofp_8i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm1
@@ -1518,7 +1518,7 @@ define <8 x float> @sitofp_8i8_to_8f32(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -1527,13 +1527,13 @@ define <8 x float> @sitofp_8i8_to_8f32(<16 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -1544,7 +1544,7 @@ define <8 x float> @sitofp_8i8_to_8f32(<16 x i8> %a) {
define <8 x float> @sitofp_16i8_to_8f32(<16 x i8> %a) {
; SSE-LABEL: sitofp_16i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm1
@@ -1558,7 +1558,7 @@ define <8 x float> @sitofp_16i8_to_8f32(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_16i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -1567,13 +1567,13 @@ define <8 x float> @sitofp_16i8_to_8f32(<16 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_16i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_16i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -1589,12 +1589,12 @@ define <8 x float> @sitofp_16i8_to_8f32(<16 x i8> %a) {
define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-LABEL: uitofp_2i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB39_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB39_3
@@ -1611,7 +1611,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB39_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -1628,11 +1628,11 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_2i64_to_4f32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB39_1
-; VEX-NEXT: # BB#2:
+; VEX-NEXT: # %bb.2:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: jmp .LBB39_3
; VEX-NEXT: .LBB39_1:
@@ -1646,7 +1646,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; VEX-NEXT: vmovq %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB39_4
-; VEX-NEXT: # BB#5:
+; VEX-NEXT: # %bb.5:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
; VEX-NEXT: jmp .LBB39_6
; VEX-NEXT: .LBB39_4:
@@ -1661,14 +1661,14 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VEX-NEXT: js .LBB39_8
-; VEX-NEXT: # BB#7:
+; VEX-NEXT: # %bb.7:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm1
; VEX-NEXT: .LBB39_8:
; VEX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_2i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1679,7 +1679,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1690,7 +1690,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_2i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1698,7 +1698,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <2 x i64> %a to <2 x float>
@@ -1708,12 +1708,12 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; SSE-LABEL: uitofp_2i64_to_2f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB40_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB40_3
@@ -1729,7 +1729,7 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB40_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB40_6
@@ -1747,11 +1747,11 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_2i64_to_2f32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB40_1
-; VEX-NEXT: # BB#2:
+; VEX-NEXT: # %bb.2:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: jmp .LBB40_3
; VEX-NEXT: .LBB40_1:
@@ -1765,7 +1765,7 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; VEX-NEXT: vmovq %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB40_4
-; VEX-NEXT: # BB#5:
+; VEX-NEXT: # %bb.5:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; VEX-NEXT: retq
@@ -1780,7 +1780,7 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_2i64_to_2f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1789,7 +1789,7 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i64_to_2f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1799,7 +1799,7 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -1807,7 +1807,7 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_2f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <2 x i64> %a to <2 x float>
@@ -1817,12 +1817,12 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-LABEL: uitofp_4i64_to_4f32_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB41_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB41_3
@@ -1839,7 +1839,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB41_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB41_6
@@ -1856,7 +1856,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: js .LBB41_8
-; SSE-NEXT: # BB#7:
+; SSE-NEXT: # %bb.7:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: .LBB41_8:
@@ -1864,11 +1864,11 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_4i64_to_4f32_undef:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB41_1
-; VEX-NEXT: # BB#2:
+; VEX-NEXT: # %bb.2:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: jmp .LBB41_3
; VEX-NEXT: .LBB41_1:
@@ -1882,7 +1882,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; VEX-NEXT: vmovq %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB41_4
-; VEX-NEXT: # BB#5:
+; VEX-NEXT: # %bb.5:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
; VEX-NEXT: jmp .LBB41_6
; VEX-NEXT: .LBB41_4:
@@ -1897,14 +1897,14 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VEX-NEXT: js .LBB41_8
-; VEX-NEXT: # BB#7:
+; VEX-NEXT: # %bb.7:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm1
; VEX-NEXT: .LBB41_8:
; VEX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1915,7 +1915,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1926,7 +1926,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1934,7 +1934,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
@@ -1946,7 +1946,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; SSE-LABEL: uitofp_4i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por {{.*}}(%rip), %xmm1
@@ -1957,7 +1957,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_4i32_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
@@ -1966,7 +1966,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_4i32_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -1978,7 +1978,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1986,12 +1986,12 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1999,7 +1999,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i32> %a to <4 x float>
@@ -2008,14 +2008,14 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
define <4 x float> @uitofp_4i16_to_4f32(<8 x i16> %a) {
; SSE-LABEL: uitofp_4i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_4i16_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -2026,14 +2026,14 @@ define <4 x float> @uitofp_4i16_to_4f32(<8 x i16> %a) {
define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
; SSE-LABEL: uitofp_8i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -2044,7 +2044,7 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2052,7 +2052,7 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i16_to_4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2065,7 +2065,7 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
define <4 x float> @uitofp_4i8_to_4f32(<16 x i8> %a) {
; SSE-LABEL: uitofp_4i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -2073,7 +2073,7 @@ define <4 x float> @uitofp_4i8_to_4f32(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_4i8_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -2084,7 +2084,7 @@ define <4 x float> @uitofp_4i8_to_4f32(<16 x i8> %a) {
define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; SSE-LABEL: uitofp_16i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -2092,7 +2092,7 @@ define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_16i8_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2103,7 +2103,7 @@ define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_16i8_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2111,7 +2111,7 @@ define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_16i8_to_4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2124,11 +2124,11 @@ define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-LABEL: uitofp_4i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB47_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: cvtsi2ssq %rax, %xmm2
; SSE-NEXT: jmp .LBB47_3
; SSE-NEXT: .LBB47_1:
@@ -2143,7 +2143,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB47_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: cvtsi2ssq %rax, %xmm3
; SSE-NEXT: jmp .LBB47_6
; SSE-NEXT: .LBB47_4:
@@ -2157,7 +2157,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB47_7
-; SSE-NEXT: # BB#8:
+; SSE-NEXT: # %bb.8:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB47_9
@@ -2175,7 +2175,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB47_10
-; SSE-NEXT: # BB#11:
+; SSE-NEXT: # %bb.11:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB47_12
@@ -2194,11 +2194,11 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_4i64_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB47_1
-; AVX1-NEXT: # BB#2:
+; AVX1-NEXT: # %bb.2:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX1-NEXT: jmp .LBB47_3
; AVX1-NEXT: .LBB47_1:
@@ -2212,7 +2212,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB47_4
-; AVX1-NEXT: # BB#5:
+; AVX1-NEXT: # %bb.5:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
; AVX1-NEXT: jmp .LBB47_6
; AVX1-NEXT: .LBB47_4:
@@ -2228,7 +2228,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB47_7
-; AVX1-NEXT: # BB#8:
+; AVX1-NEXT: # %bb.8:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
; AVX1-NEXT: jmp .LBB47_9
; AVX1-NEXT: .LBB47_7:
@@ -2243,7 +2243,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB47_10
-; AVX1-NEXT: # BB#11:
+; AVX1-NEXT: # %bb.11:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vzeroupper
@@ -2260,11 +2260,11 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_4i64_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB47_1
-; AVX2-NEXT: # BB#2:
+; AVX2-NEXT: # %bb.2:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX2-NEXT: jmp .LBB47_3
; AVX2-NEXT: .LBB47_1:
@@ -2278,7 +2278,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB47_4
-; AVX2-NEXT: # BB#5:
+; AVX2-NEXT: # %bb.5:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
; AVX2-NEXT: jmp .LBB47_6
; AVX2-NEXT: .LBB47_4:
@@ -2294,7 +2294,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB47_7
-; AVX2-NEXT: # BB#8:
+; AVX2-NEXT: # %bb.8:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
; AVX2-NEXT: jmp .LBB47_9
; AVX2-NEXT: .LBB47_7:
@@ -2309,7 +2309,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB47_10
-; AVX2-NEXT: # BB#11:
+; AVX2-NEXT: # %bb.11:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vzeroupper
@@ -2326,7 +2326,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -2343,7 +2343,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -2360,7 +2360,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2368,7 +2368,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -2378,7 +2378,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
; SSE-LABEL: uitofp_8i32_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pand %xmm2, %xmm3
@@ -2399,7 +2399,7 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i32_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
@@ -2412,7 +2412,7 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i32_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
@@ -2424,26 +2424,26 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_8i32_to_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_8i32_to_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_8i32_to_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_8i32_to_8f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2ps %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <8 x i32> %a to <8 x float>
@@ -2452,7 +2452,7 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
define <8 x float> @uitofp_8i16_to_8f32(<8 x i16> %a) {
; SSE-LABEL: uitofp_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -2463,7 +2463,7 @@ define <8 x float> @uitofp_8i16_to_8f32(<8 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -2472,13 +2472,13 @@ define <8 x float> @uitofp_8i16_to_8f32(<8 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -2488,7 +2488,7 @@ define <8 x float> @uitofp_8i16_to_8f32(<8 x i16> %a) {
define <8 x float> @uitofp_8i8_to_8f32(<16 x i8> %a) {
; SSE-LABEL: uitofp_8i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -2500,7 +2500,7 @@ define <8 x float> @uitofp_8i8_to_8f32(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2509,13 +2509,13 @@ define <8 x float> @uitofp_8i8_to_8f32(<16 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -2526,7 +2526,7 @@ define <8 x float> @uitofp_8i8_to_8f32(<16 x i8> %a) {
define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
; SSE-LABEL: uitofp_16i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -2538,7 +2538,7 @@ define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_16i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2547,13 +2547,13 @@ define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_16i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_16i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -2569,7 +2569,7 @@ define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; SSE-LABEL: sitofp_load_2i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: cvtsi2sdq %rax, %xmm0
@@ -2581,7 +2581,7 @@ define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_load_2i64_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovdqa (%rdi), %xmm0
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
@@ -2591,7 +2591,7 @@ define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_load_2i64_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
@@ -2601,7 +2601,7 @@ define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_2i64_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
@@ -2611,7 +2611,7 @@ define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_2i64_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2619,7 +2619,7 @@ define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_2i64_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2pd (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <2 x i64>, <2 x i64> *%a
@@ -2629,12 +2629,12 @@ define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
define <2 x double> @sitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; SSE-LABEL: sitofp_load_2i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_2i32_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd (%rdi), %xmm0
; AVX-NEXT: retq
%ld = load <2 x i32>, <2 x i32> *%a
@@ -2644,7 +2644,7 @@ define <2 x double> @sitofp_load_2i32_to_2f64(<2 x i32> *%a) {
define <2 x double> @sitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; SSE-LABEL: sitofp_load_2i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; SSE-NEXT: psrad $16, %xmm0
@@ -2652,7 +2652,7 @@ define <2 x double> @sitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_2i16_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwq (%rdi), %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -2664,7 +2664,7 @@ define <2 x double> @sitofp_load_2i16_to_2f64(<2 x i16> *%a) {
define <2 x double> @sitofp_load_2i8_to_2f64(<2 x i8> *%a) {
; SSE-LABEL: sitofp_load_2i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2674,7 +2674,7 @@ define <2 x double> @sitofp_load_2i8_to_2f64(<2 x i8> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_2i8_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbq (%rdi), %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -2686,7 +2686,7 @@ define <2 x double> @sitofp_load_2i8_to_2f64(<2 x i8> *%a) {
define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; SSE-LABEL: sitofp_load_4i64_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm2
; SSE-NEXT: movq %xmm1, %rax
@@ -2707,7 +2707,7 @@ define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_4i64_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
@@ -2724,7 +2724,7 @@ define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_4i64_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
@@ -2741,7 +2741,7 @@ define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_load_4i64_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
@@ -2758,7 +2758,7 @@ define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_4i64_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
@@ -2775,14 +2775,14 @@ define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_4i64_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2pd (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i64>, <4 x i64> *%a
@@ -2792,7 +2792,7 @@ define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
define <4 x double> @sitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; SSE-LABEL: sitofp_load_4i32_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -2800,7 +2800,7 @@ define <4 x double> @sitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i32_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd (%rdi), %ymm0
; AVX-NEXT: retq
%ld = load <4 x i32>, <4 x i32> *%a
@@ -2810,7 +2810,7 @@ define <4 x double> @sitofp_load_4i32_to_4f64(<4 x i32> *%a) {
define <4 x double> @sitofp_load_4i16_to_4f64(<4 x i16> *%a) {
; SSE-LABEL: sitofp_load_4i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $16, %xmm1
@@ -2820,7 +2820,7 @@ define <4 x double> @sitofp_load_4i16_to_4f64(<4 x i16> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i16_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -2831,7 +2831,7 @@ define <4 x double> @sitofp_load_4i16_to_4f64(<4 x i16> *%a) {
define <4 x double> @sitofp_load_4i8_to_4f64(<4 x i8> *%a) {
; SSE-LABEL: sitofp_load_4i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -2842,7 +2842,7 @@ define <4 x double> @sitofp_load_4i8_to_4f64(<4 x i8> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i8_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -2857,7 +2857,7 @@ define <4 x double> @sitofp_load_4i8_to_4f64(<4 x i8> *%a) {
define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; SSE-LABEL: uitofp_load_2i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
@@ -2874,7 +2874,7 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_load_2i64_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovapd (%rdi), %xmm0
; VEX-NEXT: vmovapd {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
; VEX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -2887,7 +2887,7 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_2i64_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2sdq %rax, %xmm1, %xmm1
@@ -2897,7 +2897,7 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_2i64_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2sdq %rax, %xmm1, %xmm1
@@ -2907,7 +2907,7 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_2i64_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2915,7 +2915,7 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_2i64_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2pd (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <2 x i64>, <2 x i64> *%a
@@ -2925,7 +2925,7 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; SSE-LABEL: uitofp_load_2i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: pand %xmm0, %xmm1
@@ -2937,7 +2937,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_load_2i32_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; VEX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VEX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
@@ -2949,7 +2949,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_2i32_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2957,12 +2957,12 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_2i32_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd (%rdi), %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_2i32_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2970,7 +2970,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_2i32_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <2 x i32>, <2 x i32> *%a
@@ -2980,7 +2980,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
define <2 x double> @uitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; SSE-LABEL: uitofp_load_2i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -2988,7 +2988,7 @@ define <2 x double> @uitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_2i16_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -3000,7 +3000,7 @@ define <2 x double> @uitofp_load_2i16_to_2f64(<2 x i16> *%a) {
define <2 x double> @uitofp_load_2i8_to_2f64(<2 x i8> *%a) {
; SSE-LABEL: uitofp_load_2i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
@@ -3010,7 +3010,7 @@ define <2 x double> @uitofp_load_2i8_to_2f64(<2 x i8> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_2i8_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -3022,7 +3022,7 @@ define <2 x double> @uitofp_load_2i8_to_2f64(<2 x i8> *%a) {
define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; SSE-LABEL: uitofp_load_4i64_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1127219200,1160773632,0,0]
@@ -3050,7 +3050,7 @@ define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_load_4i64_to_4f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovapd (%rdi), %ymm0
; VEX-NEXT: vextractf128 $1, %ymm0, %xmm1
; VEX-NEXT: vmovapd {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
@@ -3071,7 +3071,7 @@ define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_4i64_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
@@ -3088,7 +3088,7 @@ define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i64_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
@@ -3105,14 +3105,14 @@ define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i64_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2pd (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i64>, <4 x i64> *%a
@@ -3122,7 +3122,7 @@ define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; SSE-LABEL: uitofp_load_4i32_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
@@ -3144,7 +3144,7 @@ define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_4i32_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
@@ -3156,7 +3156,7 @@ define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_4i32_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1
@@ -3169,26 +3169,26 @@ define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_4i32_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd (%rdi), %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i32_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i32>, <4 x i32> *%a
@@ -3198,7 +3198,7 @@ define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
define <4 x double> @uitofp_load_4i16_to_4f64(<4 x i16> *%a) {
; SSE-LABEL: uitofp_load_4i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -3208,7 +3208,7 @@ define <4 x double> @uitofp_load_4i16_to_4f64(<4 x i16> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_4i16_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -3219,7 +3219,7 @@ define <4 x double> @uitofp_load_4i16_to_4f64(<4 x i16> *%a) {
define <4 x double> @uitofp_load_4i8_to_4f64(<4 x i8> *%a) {
; SSE-LABEL: uitofp_load_4i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -3230,7 +3230,7 @@ define <4 x double> @uitofp_load_4i8_to_4f64(<4 x i8> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_4i8_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -3245,7 +3245,7 @@ define <4 x double> @uitofp_load_4i8_to_4f64(<4 x i8> *%a) {
define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-LABEL: sitofp_load_4i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movq %xmm0, %rax
@@ -3267,7 +3267,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_4i64_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -3285,7 +3285,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_4i64_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -3303,7 +3303,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_load_4i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -3321,7 +3321,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_4i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -3339,7 +3339,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_4i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3347,7 +3347,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2psy (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i64>, <4 x i64> *%a
@@ -3357,12 +3357,12 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
define <4 x float> @sitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; SSE-LABEL: sitofp_load_4i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i32_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps (%rdi), %xmm0
; AVX-NEXT: retq
%ld = load <4 x i32>, <4 x i32> *%a
@@ -3372,7 +3372,7 @@ define <4 x float> @sitofp_load_4i32_to_4f32(<4 x i32> *%a) {
define <4 x float> @sitofp_load_4i16_to_4f32(<4 x i16> *%a) {
; SSE-LABEL: sitofp_load_4i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
@@ -3380,7 +3380,7 @@ define <4 x float> @sitofp_load_4i16_to_4f32(<4 x i16> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i16_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -3391,7 +3391,7 @@ define <4 x float> @sitofp_load_4i16_to_4f32(<4 x i16> *%a) {
define <4 x float> @sitofp_load_4i8_to_4f32(<4 x i8> *%a) {
; SSE-LABEL: sitofp_load_4i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -3400,7 +3400,7 @@ define <4 x float> @sitofp_load_4i8_to_4f32(<4 x i8> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i8_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -3411,7 +3411,7 @@ define <4 x float> @sitofp_load_4i8_to_4f32(<4 x i8> *%a) {
define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-LABEL: sitofp_load_8i64_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movdqa 32(%rdi), %xmm2
@@ -3452,7 +3452,7 @@ define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_8i64_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
@@ -3483,7 +3483,7 @@ define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_8i64_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
@@ -3514,7 +3514,7 @@ define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_load_8i64_to_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
@@ -3545,7 +3545,7 @@ define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_8i64_to_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
@@ -3576,12 +3576,12 @@ define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_8i64_to_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtqq2ps (%rdi), %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_8i64_to_8f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2ps (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <8 x i64>, <8 x i64> *%a
@@ -3591,13 +3591,13 @@ define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) {
define <8 x float> @sitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; SSE-LABEL: sitofp_load_8i32_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps (%rdi), %xmm0
; SSE-NEXT: cvtdq2ps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_8i32_to_8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps (%rdi), %ymm0
; AVX-NEXT: retq
%ld = load <8 x i32>, <8 x i32> *%a
@@ -3607,7 +3607,7 @@ define <8 x float> @sitofp_load_8i32_to_8f32(<8 x i32> *%a) {
define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) {
; SSE-LABEL: sitofp_load_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
@@ -3619,7 +3619,7 @@ define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -3627,13 +3627,13 @@ define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd (%rdi), %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_load_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd (%rdi), %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -3644,7 +3644,7 @@ define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) {
define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) {
; SSE-LABEL: sitofp_load_8i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -3658,7 +3658,7 @@ define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_8i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -3668,13 +3668,13 @@ define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_8i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd (%rdi), %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_load_8i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd (%rdi), %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -3689,13 +3689,13 @@ define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) {
define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-LABEL: uitofp_load_4i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB76_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB76_3
; SSE-NEXT: .LBB76_1:
@@ -3710,7 +3710,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB76_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: cvtsi2ssq %rax, %xmm3
; SSE-NEXT: jmp .LBB76_6
; SSE-NEXT: .LBB76_4:
@@ -3724,7 +3724,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-NEXT: movq %xmm2, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB76_7
-; SSE-NEXT: # BB#8:
+; SSE-NEXT: # %bb.8:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB76_9
@@ -3742,7 +3742,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-NEXT: movq %xmm2, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB76_10
-; SSE-NEXT: # BB#11:
+; SSE-NEXT: # %bb.11:
; SSE-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: cvtsi2ssq %rax, %xmm2
; SSE-NEXT: jmp .LBB76_12
@@ -3760,12 +3760,12 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_4i64_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB76_1
-; AVX1-NEXT: # BB#2:
+; AVX1-NEXT: # %bb.2:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX1-NEXT: jmp .LBB76_3
; AVX1-NEXT: .LBB76_1:
@@ -3779,7 +3779,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB76_4
-; AVX1-NEXT: # BB#5:
+; AVX1-NEXT: # %bb.5:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
; AVX1-NEXT: jmp .LBB76_6
; AVX1-NEXT: .LBB76_4:
@@ -3795,7 +3795,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB76_7
-; AVX1-NEXT: # BB#8:
+; AVX1-NEXT: # %bb.8:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
; AVX1-NEXT: jmp .LBB76_9
; AVX1-NEXT: .LBB76_7:
@@ -3810,7 +3810,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB76_10
-; AVX1-NEXT: # BB#11:
+; AVX1-NEXT: # %bb.11:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vzeroupper
@@ -3827,12 +3827,12 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_4i64_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB76_1
-; AVX2-NEXT: # BB#2:
+; AVX2-NEXT: # %bb.2:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX2-NEXT: jmp .LBB76_3
; AVX2-NEXT: .LBB76_1:
@@ -3846,7 +3846,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB76_4
-; AVX2-NEXT: # BB#5:
+; AVX2-NEXT: # %bb.5:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
; AVX2-NEXT: jmp .LBB76_6
; AVX2-NEXT: .LBB76_4:
@@ -3862,7 +3862,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB76_7
-; AVX2-NEXT: # BB#8:
+; AVX2-NEXT: # %bb.8:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
; AVX2-NEXT: jmp .LBB76_9
; AVX2-NEXT: .LBB76_7:
@@ -3877,7 +3877,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB76_10
-; AVX2-NEXT: # BB#11:
+; AVX2-NEXT: # %bb.11:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vzeroupper
@@ -3894,7 +3894,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_4i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
@@ -3912,7 +3912,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
@@ -3930,7 +3930,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3938,7 +3938,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2psy (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i64>, <4 x i64> *%a
@@ -3948,7 +3948,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; SSE-LABEL: uitofp_load_4i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-NEXT: pand %xmm0, %xmm1
@@ -3960,7 +3960,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_4i32_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -3970,7 +3970,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_4i32_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
@@ -3983,7 +3983,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_4i32_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -3991,12 +3991,12 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps (%rdi), %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i32_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -4004,7 +4004,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2ps (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i32>, <4 x i32> *%a
@@ -4014,7 +4014,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
define <4 x float> @uitofp_load_4i16_to_4f32(<4 x i16> *%a) {
; SSE-LABEL: uitofp_load_4i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -4022,7 +4022,7 @@ define <4 x float> @uitofp_load_4i16_to_4f32(<4 x i16> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_4i16_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -4033,7 +4033,7 @@ define <4 x float> @uitofp_load_4i16_to_4f32(<4 x i16> *%a) {
define <4 x float> @uitofp_load_4i8_to_4f32(<4 x i8> *%a) {
; SSE-LABEL: uitofp_load_4i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -4042,7 +4042,7 @@ define <4 x float> @uitofp_load_4i8_to_4f32(<4 x i8> *%a) {
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_4i8_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -4053,7 +4053,7 @@ define <4 x float> @uitofp_load_4i8_to_4f32(<4 x i8> *%a) {
define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-LABEL: uitofp_load_8i64_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm5
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movdqa 32(%rdi), %xmm2
@@ -4061,7 +4061,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: cvtsi2ssq %rax, %xmm3
; SSE-NEXT: jmp .LBB80_3
; SSE-NEXT: .LBB80_1:
@@ -4076,7 +4076,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: cvtsi2ssq %rax, %xmm4
; SSE-NEXT: jmp .LBB80_6
; SSE-NEXT: .LBB80_4:
@@ -4090,7 +4090,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: movq %xmm5, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_7
-; SSE-NEXT: # BB#8:
+; SSE-NEXT: # %bb.8:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB80_9
@@ -4107,7 +4107,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: movq %xmm5, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_10
-; SSE-NEXT: # BB#11:
+; SSE-NEXT: # %bb.11:
; SSE-NEXT: cvtsi2ssq %rax, %xmm6
; SSE-NEXT: jmp .LBB80_12
; SSE-NEXT: .LBB80_10:
@@ -4121,7 +4121,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_13
-; SSE-NEXT: # BB#14:
+; SSE-NEXT: # %bb.14:
; SSE-NEXT: xorps %xmm5, %xmm5
; SSE-NEXT: cvtsi2ssq %rax, %xmm5
; SSE-NEXT: jmp .LBB80_15
@@ -4138,7 +4138,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_16
-; SSE-NEXT: # BB#17:
+; SSE-NEXT: # %bb.17:
; SSE-NEXT: cvtsi2ssq %rax, %xmm7
; SSE-NEXT: jmp .LBB80_18
; SSE-NEXT: .LBB80_16:
@@ -4154,7 +4154,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: movq %xmm2, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_19
-; SSE-NEXT: # BB#20:
+; SSE-NEXT: # %bb.20:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB80_21
@@ -4173,7 +4173,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: movq %xmm2, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_22
-; SSE-NEXT: # BB#23:
+; SSE-NEXT: # %bb.23:
; SSE-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: cvtsi2ssq %rax, %xmm2
; SSE-NEXT: jmp .LBB80_24
@@ -4191,13 +4191,13 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_8i64_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_1
-; AVX1-NEXT: # BB#2:
+; AVX1-NEXT: # %bb.2:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX1-NEXT: jmp .LBB80_3
; AVX1-NEXT: .LBB80_1:
@@ -4211,7 +4211,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_4
-; AVX1-NEXT: # BB#5:
+; AVX1-NEXT: # %bb.5:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
; AVX1-NEXT: jmp .LBB80_6
; AVX1-NEXT: .LBB80_4:
@@ -4226,7 +4226,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_7
-; AVX1-NEXT: # BB#8:
+; AVX1-NEXT: # %bb.8:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
; AVX1-NEXT: jmp .LBB80_9
; AVX1-NEXT: .LBB80_7:
@@ -4240,7 +4240,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_10
-; AVX1-NEXT: # BB#11:
+; AVX1-NEXT: # %bb.11:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm2
; AVX1-NEXT: jmp .LBB80_12
; AVX1-NEXT: .LBB80_10:
@@ -4254,7 +4254,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_13
-; AVX1-NEXT: # BB#14:
+; AVX1-NEXT: # %bb.14:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5
; AVX1-NEXT: jmp .LBB80_15
; AVX1-NEXT: .LBB80_13:
@@ -4269,7 +4269,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_16
-; AVX1-NEXT: # BB#17:
+; AVX1-NEXT: # %bb.17:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
; AVX1-NEXT: jmp .LBB80_18
; AVX1-NEXT: .LBB80_16:
@@ -4286,7 +4286,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vmovq %xmm4, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_19
-; AVX1-NEXT: # BB#20:
+; AVX1-NEXT: # %bb.20:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5
; AVX1-NEXT: jmp .LBB80_21
; AVX1-NEXT: .LBB80_19:
@@ -4302,7 +4302,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vpextrq $1, %xmm4, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_22
-; AVX1-NEXT: # BB#23:
+; AVX1-NEXT: # %bb.23:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
; AVX1-NEXT: jmp .LBB80_24
; AVX1-NEXT: .LBB80_22:
@@ -4318,13 +4318,13 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_8i64_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX2-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_1
-; AVX2-NEXT: # BB#2:
+; AVX2-NEXT: # %bb.2:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX2-NEXT: jmp .LBB80_3
; AVX2-NEXT: .LBB80_1:
@@ -4338,7 +4338,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vmovq %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_4
-; AVX2-NEXT: # BB#5:
+; AVX2-NEXT: # %bb.5:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
; AVX2-NEXT: jmp .LBB80_6
; AVX2-NEXT: .LBB80_4:
@@ -4353,7 +4353,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vmovq %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_7
-; AVX2-NEXT: # BB#8:
+; AVX2-NEXT: # %bb.8:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
; AVX2-NEXT: jmp .LBB80_9
; AVX2-NEXT: .LBB80_7:
@@ -4367,7 +4367,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_10
-; AVX2-NEXT: # BB#11:
+; AVX2-NEXT: # %bb.11:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm2
; AVX2-NEXT: jmp .LBB80_12
; AVX2-NEXT: .LBB80_10:
@@ -4381,7 +4381,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_13
-; AVX2-NEXT: # BB#14:
+; AVX2-NEXT: # %bb.14:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5
; AVX2-NEXT: jmp .LBB80_15
; AVX2-NEXT: .LBB80_13:
@@ -4396,7 +4396,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_16
-; AVX2-NEXT: # BB#17:
+; AVX2-NEXT: # %bb.17:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
; AVX2-NEXT: jmp .LBB80_18
; AVX2-NEXT: .LBB80_16:
@@ -4413,7 +4413,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_19
-; AVX2-NEXT: # BB#20:
+; AVX2-NEXT: # %bb.20:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5
; AVX2-NEXT: jmp .LBB80_21
; AVX2-NEXT: .LBB80_19:
@@ -4429,7 +4429,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vpextrq $1, %xmm4, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_22
-; AVX2-NEXT: # BB#23:
+; AVX2-NEXT: # %bb.23:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
; AVX2-NEXT: jmp .LBB80_24
; AVX2-NEXT: .LBB80_22:
@@ -4445,7 +4445,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_8i64_to_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
@@ -4476,7 +4476,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_8i64_to_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
@@ -4507,12 +4507,12 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_8i64_to_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtuqq2ps (%rdi), %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_8i64_to_8f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2ps (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <8 x i64>, <8 x i64> *%a
@@ -4522,7 +4522,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; SSE-LABEL: uitofp_load_8i32_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
@@ -4545,7 +4545,7 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_8i32_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -4559,7 +4559,7 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_8i32_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
@@ -4572,26 +4572,26 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_8i32_to_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %ymm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_8i32_to_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps (%rdi), %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_8i32_to_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_8i32_to_8f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2ps (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <8 x i32>, <8 x i32> *%a
@@ -4601,7 +4601,7 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
define <8 x float> @uitofp_load_8i16_to_8f32(<8 x i16> *%a) {
; SSE-LABEL: uitofp_load_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm0
@@ -4612,7 +4612,7 @@ define <8 x float> @uitofp_load_8i16_to_8f32(<8 x i16> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -4620,13 +4620,13 @@ define <8 x float> @uitofp_load_8i16_to_8f32(<8 x i16> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_load_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -4637,7 +4637,7 @@ define <8 x float> @uitofp_load_8i16_to_8f32(<8 x i16> *%a) {
define <8 x float> @uitofp_load_8i8_to_8f32(<8 x i8> *%a) {
; SSE-LABEL: uitofp_load_8i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -4649,7 +4649,7 @@ define <8 x float> @uitofp_load_8i8_to_8f32(<8 x i8> *%a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_8i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -4657,13 +4657,13 @@ define <8 x float> @uitofp_load_8i8_to_8f32(<8 x i8> *%a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_8i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_load_8i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -4679,7 +4679,7 @@ define <8 x float> @uitofp_load_8i8_to_8f32(<8 x i8> *%a) {
%Arguments = type <{ <8 x i8>, <8 x i16>, <8 x float>* }>
define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
; SSE-LABEL: aggregate_sitofp_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq 24(%rdi), %rax
; SSE-NEXT: movdqu 8(%rdi), %xmm0
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -4693,7 +4693,7 @@ define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
; SSE-NEXT: retq
;
; AVX1-LABEL: aggregate_sitofp_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movq 24(%rdi), %rax
; AVX1-NEXT: vmovdqu 8(%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
@@ -4706,7 +4706,7 @@ define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: aggregate_sitofp_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movq 24(%rdi), %rax
; AVX2-NEXT: vpmovsxwd 8(%rdi), %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
@@ -4715,7 +4715,7 @@ define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: aggregate_sitofp_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: movq 24(%rdi), %rax
; AVX512-NEXT: vpmovsxwd 8(%rdi), %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
@@ -4732,12 +4732,12 @@ define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
define <2 x double> @sitofp_i32_to_2f64(<2 x double> %a0, i32 %a1) nounwind {
; SSE-LABEL: sitofp_i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsi2sdl %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_i32_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp i32 %a1 to double
@@ -4747,12 +4747,12 @@ define <2 x double> @sitofp_i32_to_2f64(<2 x double> %a0, i32 %a1) nounwind {
define <4 x float> @sitofp_i32_to_4f32(<4 x float> %a0, i32 %a1) nounwind {
; SSE-LABEL: sitofp_i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsi2ssl %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_i32_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp i32 %a1 to float
@@ -4762,12 +4762,12 @@ define <4 x float> @sitofp_i32_to_4f32(<4 x float> %a0, i32 %a1) nounwind {
define <2 x double> @sitofp_i64_to_2f64(<2 x double> %a0, i64 %a1) nounwind {
; SSE-LABEL: sitofp_i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsi2sdq %rdi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_i64_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp i64 %a1 to double
@@ -4777,12 +4777,12 @@ define <2 x double> @sitofp_i64_to_2f64(<2 x double> %a0, i64 %a1) nounwind {
define <4 x float> @sitofp_i64_to_4f32(<4 x float> %a0, i64 %a1) nounwind {
; SSE-LABEL: sitofp_i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsi2ssq %rdi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_i64_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp i64 %a1 to float
diff --git a/test/CodeGen/X86/vec_loadsingles.ll b/test/CodeGen/X86/vec_loadsingles.ll
index b0d95c5d00d..8859270c5be 100644
--- a/test/CodeGen/X86/vec_loadsingles.ll
+++ b/test/CodeGen/X86/vec_loadsingles.ll
@@ -4,7 +4,7 @@
define <4 x float> @merge_2_floats(float* nocapture %p) nounwind readonly {
; ALL-LABEL: merge_2_floats:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
%tmp1 = load float, float* %p
@@ -19,7 +19,7 @@ define <4 x float> @merge_2_floats(float* nocapture %p) nounwind readonly {
; two i64s of a <4 x i64> as a load of two i32s.
define <4 x i64> @merge_2_floats_into_4() {
; ALL-LABEL: merge_2_floats_into_4:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq (%rax), %rax
; ALL-NEXT: vmovups (%rax), %xmm0
; ALL-NEXT: retq
@@ -37,7 +37,7 @@ define <4 x i64> @merge_2_floats_into_4() {
define <4 x float> @merge_4_floats(float* %ptr) {
; ALL-LABEL: merge_4_floats:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups (%rdi), %xmm0
; ALL-NEXT: retq
%a = load float, float* %ptr, align 8
@@ -61,12 +61,12 @@ define <4 x float> @merge_4_floats(float* %ptr) {
define <8 x float> @merge_8_floats(float* %ptr) {
; FAST32-LABEL: merge_8_floats:
-; FAST32: # BB#0:
+; FAST32: # %bb.0:
; FAST32-NEXT: vmovups (%rdi), %ymm0
; FAST32-NEXT: retq
;
; SLOW32-LABEL: merge_8_floats:
-; SLOW32: # BB#0:
+; SLOW32: # %bb.0:
; SLOW32-NEXT: vmovups (%rdi), %xmm0
; SLOW32-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; SLOW32-NEXT: retq
@@ -98,12 +98,12 @@ define <8 x float> @merge_8_floats(float* %ptr) {
define <4 x double> @merge_4_doubles(double* %ptr) {
; FAST32-LABEL: merge_4_doubles:
-; FAST32: # BB#0:
+; FAST32: # %bb.0:
; FAST32-NEXT: vmovups (%rdi), %ymm0
; FAST32-NEXT: retq
;
; SLOW32-LABEL: merge_4_doubles:
-; SLOW32: # BB#0:
+; SLOW32: # %bb.0:
; SLOW32-NEXT: vmovups (%rdi), %xmm0
; SLOW32-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; SLOW32-NEXT: retq
@@ -126,12 +126,12 @@ define <4 x double> @merge_4_doubles(double* %ptr) {
; first of the combined loads is offset from the base address.
define <4 x double> @merge_4_doubles_offset(double* %ptr) {
; FAST32-LABEL: merge_4_doubles_offset:
-; FAST32: # BB#0:
+; FAST32: # %bb.0:
; FAST32-NEXT: vmovups 32(%rdi), %ymm0
; FAST32-NEXT: retq
;
; SLOW32-LABEL: merge_4_doubles_offset:
-; SLOW32: # BB#0:
+; SLOW32: # %bb.0:
; SLOW32-NEXT: vmovups 32(%rdi), %xmm0
; SLOW32-NEXT: vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
; SLOW32-NEXT: retq
diff --git a/test/CodeGen/X86/vec_logical.ll b/test/CodeGen/X86/vec_logical.ll
index 92ec76009f6..ec29d4886a2 100644
--- a/test/CodeGen/X86/vec_logical.ll
+++ b/test/CodeGen/X86/vec_logical.ll
@@ -4,13 +4,13 @@
define void @t(<4 x float> %A) {
; SSE-LABEL: t:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
; SSE-NEXT: movaps %xmm0, 0
; SSE-NEXT: retl
;
; AVX-LABEL: t:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, 0
; AVX-NEXT: retl
@@ -21,12 +21,12 @@ define void @t(<4 x float> %A) {
define <4 x float> @t1(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: t1:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: xorps %xmm1, %xmm0
; SSE-NEXT: retl
;
; AVX-LABEL: t1:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
entry:
@@ -39,12 +39,12 @@ entry:
define <2 x double> @t2(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: t2:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: andps %xmm1, %xmm0
; SSE-NEXT: retl
;
; AVX-LABEL: t2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
entry:
@@ -57,7 +57,7 @@ entry:
define void @t3(<4 x float> %a, <4 x float> %b, <4 x float>* %c, <4 x float>* %d) {
; SSE-LABEL: t3:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-NEXT: andnps %xmm1, %xmm0
@@ -66,7 +66,7 @@ define void @t3(<4 x float> %a, <4 x float> %b, <4 x float>* %c, <4 x float>* %d
; SSE-NEXT: retl
;
; AVX-LABEL: t3:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX-NEXT: vandnps %xmm1, %xmm0, %xmm0
@@ -88,13 +88,13 @@ entry:
define <2 x i64> @andn_double_xor(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
; SSE-LABEL: andn_double_xor:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm2, %xmm1
; SSE-NEXT: andnps %xmm1, %xmm0
; SSE-NEXT: retl
;
; AVX-LABEL: andn_double_xor:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vandnps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
diff --git a/test/CodeGen/X86/vec_minmax_match.ll b/test/CodeGen/X86/vec_minmax_match.ll
index b377bbee3d7..a3cef49c6a4 100644
--- a/test/CodeGen/X86/vec_minmax_match.ll
+++ b/test/CodeGen/X86/vec_minmax_match.ll
@@ -6,7 +6,7 @@
define <4 x i32> @smin_vec1(<4 x i32> %x) {
; CHECK-LABEL: smin_vec1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
@@ -19,7 +19,7 @@ define <4 x i32> @smin_vec1(<4 x i32> %x) {
define <4 x i32> @smin_vec2(<4 x i32> %x) {
; CHECK-LABEL: smin_vec2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
@@ -34,7 +34,7 @@ define <4 x i32> @smin_vec2(<4 x i32> %x) {
; (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
define <4 x i32> @smin_vec3(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: smin_vec3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
@@ -49,7 +49,7 @@ define <4 x i32> @smin_vec3(<4 x i32> %x, <4 x i32> %y) {
; (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
define <4 x i32> @smin_vec4(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: smin_vec4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
@@ -62,7 +62,7 @@ define <4 x i32> @smin_vec4(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @smax_vec1(<4 x i32> %x) {
; CHECK-LABEL: smax_vec1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
@@ -75,7 +75,7 @@ define <4 x i32> @smax_vec1(<4 x i32> %x) {
define <4 x i32> @smax_vec2(<4 x i32> %x) {
; CHECK-LABEL: smax_vec2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
@@ -90,7 +90,7 @@ define <4 x i32> @smax_vec2(<4 x i32> %x) {
; (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
define <4 x i32> @smax_vec3(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: smax_vec3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
@@ -105,7 +105,7 @@ define <4 x i32> @smax_vec3(<4 x i32> %x, <4 x i32> %y) {
; (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
define <4 x i32> @smax_vec4(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: smax_vec4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
@@ -118,7 +118,7 @@ define <4 x i32> @smax_vec4(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @umax_vec1(<4 x i32> %x) {
; CHECK-LABEL: umax_vec1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp slt <4 x i32> %x, zeroinitializer
@@ -128,7 +128,7 @@ define <4 x i32> @umax_vec1(<4 x i32> %x) {
define <4 x i32> @umax_vec2(<4 x i32> %x) {
; CHECK-LABEL: umax_vec2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -138,7 +138,7 @@ define <4 x i32> @umax_vec2(<4 x i32> %x) {
define <4 x i32> @umin_vec1(<4 x i32> %x) {
; CHECK-LABEL: umin_vec1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp slt <4 x i32> %x, zeroinitializer
@@ -148,7 +148,7 @@ define <4 x i32> @umin_vec1(<4 x i32> %x) {
define <4 x i32> @umin_vec2(<4 x i32> %x) {
; CHECK-LABEL: umin_vec2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -163,7 +163,7 @@ define <4 x i32> @umin_vec2(<4 x i32> %x) {
define <4 x i32> @clamp_signed1(<4 x i32> %x) {
; CHECK-LABEL: clamp_signed1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -178,7 +178,7 @@ define <4 x i32> @clamp_signed1(<4 x i32> %x) {
define <4 x i32> @clamp_signed2(<4 x i32> %x) {
; CHECK-LABEL: clamp_signed2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -193,7 +193,7 @@ define <4 x i32> @clamp_signed2(<4 x i32> %x) {
define <4 x i32> @clamp_unsigned1(<4 x i32> %x) {
; CHECK-LABEL: clamp_unsigned1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -208,7 +208,7 @@ define <4 x i32> @clamp_unsigned1(<4 x i32> %x) {
define <4 x i32> @clamp_unsigned2(<4 x i32> %x) {
; CHECK-LABEL: clamp_unsigned2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -221,7 +221,7 @@ define <4 x i32> @clamp_unsigned2(<4 x i32> %x) {
define <4 x i32> @wrong_pred_for_smin_with_not(<4 x i32> %x) {
; CHECK-LABEL: wrong_pred_for_smin_with_not:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm1
; CHECK-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -237,7 +237,7 @@ define <4 x i32> @wrong_pred_for_smin_with_not(<4 x i32> %x) {
define <4 x i32> @wrong_pred_for_smin_with_subnsw(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: wrong_pred_for_smin_with_subnsw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm2
; CHECK-NEXT: vpminud %xmm1, %xmm0, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vec_minmax_sint.ll b/test/CodeGen/X86/vec_minmax_sint.ll
index 70d60b0075c..67887dbe8fc 100644
--- a/test/CodeGen/X86/vec_minmax_sint.ll
+++ b/test/CodeGen/X86/vec_minmax_sint.ll
@@ -13,7 +13,7 @@
define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: max_gt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -32,7 +32,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm3
@@ -51,7 +51,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -59,19 +59,19 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
@@ -85,7 +85,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: max_gt_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -119,7 +119,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -153,7 +153,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa %xmm1, %xmm5
; SSE42-NEXT: pcmpgtq %xmm3, %xmm5
@@ -166,7 +166,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -176,13 +176,13 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
@@ -195,7 +195,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: max_gt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -205,17 +205,17 @@ define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_gt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <4 x i32> %a, %b
@@ -225,7 +225,7 @@ define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: max_gt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
@@ -241,19 +241,19 @@ define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm2, %xmm0
; SSE41-NEXT: pmaxsd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsd %xmm2, %xmm0
; SSE42-NEXT: pmaxsd %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -262,12 +262,12 @@ define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <8 x i32> %a, %b
@@ -277,12 +277,12 @@ define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <8 x i16> @max_gt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: max_gt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <8 x i16> %a, %b
@@ -292,13 +292,13 @@ define <8 x i16> @max_gt_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: max_gt_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxsw %xmm2, %xmm0
; SSE-NEXT: pmaxsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: max_gt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -307,12 +307,12 @@ define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <16 x i16> %a, %b
@@ -322,7 +322,7 @@ define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <16 x i8> @max_gt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: max_gt_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -332,17 +332,17 @@ define <16 x i8> @max_gt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_gt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <16 x i8> %a, %b
@@ -352,7 +352,7 @@ define <16 x i8> @max_gt_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: max_gt_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
@@ -368,19 +368,19 @@ define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm2, %xmm0
; SSE41-NEXT: pmaxsb %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsb %xmm2, %xmm0
; SSE42-NEXT: pmaxsb %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -389,12 +389,12 @@ define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <32 x i8> %a, %b
@@ -408,7 +408,7 @@ define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: max_ge_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -430,7 +430,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -451,7 +451,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm3
; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
@@ -462,7 +462,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -470,7 +470,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -478,7 +478,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
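
Note: only AVX-512 has a native 64-bit signed max (vpmaxsq); the AVX1/AVX2 sequences above instead build the mask with vpcmpgtq, invert it for the ge form via vpcmpeqd/vpxor, and select with vblendvpd. The corresponding IR, as a minimal sketch (function name illustrative):

define <2 x i64> @smax_ge_sketch(<2 x i64> %a, <2 x i64> %b) {
  %c = icmp sge <2 x i64> %a, %b                        ; no pmaxsq below AVX-512
  %r = select <2 x i1> %c, <2 x i64> %a, <2 x i64> %b   ; becomes the vblendvpd select
  ret <2 x i64> %r
}
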
@@ -492,7 +492,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: max_ge_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -532,7 +532,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm5
@@ -569,7 +569,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa %xmm3, %xmm5
; SSE42-NEXT: pcmpgtq %xmm1, %xmm5
@@ -586,7 +586,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -599,7 +599,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
@@ -607,7 +607,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
@@ -620,7 +620,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: max_ge_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
@@ -632,17 +632,17 @@ define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_ge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sge <4 x i32> %a, %b
@@ -652,7 +652,7 @@ define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: max_ge_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
@@ -672,19 +672,19 @@ define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm2, %xmm0
; SSE41-NEXT: pmaxsd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsd %xmm2, %xmm0
; SSE42-NEXT: pmaxsd %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -693,12 +693,12 @@ define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sge <8 x i32> %a, %b
@@ -708,12 +708,12 @@ define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <8 x i16> @max_ge_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: max_ge_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sge <8 x i16> %a, %b
@@ -723,13 +723,13 @@ define <8 x i16> @max_ge_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: max_ge_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxsw %xmm2, %xmm0
; SSE-NEXT: pmaxsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: max_ge_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -738,12 +738,12 @@ define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sge <16 x i16> %a, %b
@@ -753,7 +753,7 @@ define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <16 x i8> @max_ge_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: max_ge_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtb %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
@@ -765,17 +765,17 @@ define <16 x i8> @max_ge_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_ge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sge <16 x i8> %a, %b
@@ -785,7 +785,7 @@ define <16 x i8> @max_ge_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: max_ge_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtb %xmm1, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
@@ -805,19 +805,19 @@ define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm2, %xmm0
; SSE41-NEXT: pmaxsb %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsb %xmm2, %xmm0
; SSE42-NEXT: pmaxsb %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -826,12 +826,12 @@ define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sge <32 x i8> %a, %b
@@ -845,7 +845,7 @@ define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: min_lt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -864,7 +864,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -883,7 +883,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm0
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
@@ -892,19 +892,19 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
@@ -918,7 +918,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: min_lt_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -952,7 +952,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm5
@@ -986,7 +986,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa %xmm3, %xmm5
; SSE42-NEXT: pcmpgtq %xmm1, %xmm5
@@ -1000,7 +1000,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1010,13 +1010,13 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
@@ -1029,7 +1029,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: min_lt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -1038,17 +1038,17 @@ define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_lt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp slt <4 x i32> %a, %b
@@ -1058,7 +1058,7 @@ define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: min_lt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1072,19 +1072,19 @@ define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsd %xmm2, %xmm0
; SSE41-NEXT: pminsd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsd %xmm2, %xmm0
; SSE42-NEXT: pminsd %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -1093,12 +1093,12 @@ define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp slt <8 x i32> %a, %b
@@ -1108,12 +1108,12 @@ define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <8 x i16> @min_lt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: min_lt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp slt <8 x i16> %a, %b
@@ -1123,13 +1123,13 @@ define <8 x i16> @min_lt_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: min_lt_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminsw %xmm2, %xmm0
; SSE-NEXT: pminsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: min_lt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -1138,12 +1138,12 @@ define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp slt <16 x i16> %a, %b
@@ -1153,7 +1153,7 @@ define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <16 x i8> @min_lt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: min_lt_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -1162,17 +1162,17 @@ define <16 x i8> @min_lt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_lt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp slt <16 x i8> %a, %b
@@ -1182,7 +1182,7 @@ define <16 x i8> @min_lt_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: min_lt_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1196,19 +1196,19 @@ define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsb %xmm2, %xmm0
; SSE41-NEXT: pminsb %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsb %xmm2, %xmm0
; SSE42-NEXT: pminsb %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -1217,12 +1217,12 @@ define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp slt <32 x i8> %a, %b
@@ -1236,7 +1236,7 @@ define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: min_le_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1258,7 +1258,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm3
@@ -1279,7 +1279,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm3, %xmm3
@@ -1289,7 +1289,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1297,7 +1297,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1305,7 +1305,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
@@ -1319,7 +1319,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: min_le_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -1359,7 +1359,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -1396,7 +1396,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa %xmm1, %xmm5
; SSE42-NEXT: pcmpgtq %xmm3, %xmm5
@@ -1412,7 +1412,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1425,7 +1425,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
@@ -1433,7 +1433,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
@@ -1446,7 +1446,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: min_le_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
@@ -1458,17 +1458,17 @@ define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_le_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sle <4 x i32> %a, %b
@@ -1478,7 +1478,7 @@ define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: min_le_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
@@ -1498,19 +1498,19 @@ define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsd %xmm2, %xmm0
; SSE41-NEXT: pminsd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsd %xmm2, %xmm0
; SSE42-NEXT: pminsd %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -1519,12 +1519,12 @@ define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sle <8 x i32> %a, %b
@@ -1534,12 +1534,12 @@ define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <8 x i16> @min_le_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: min_le_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sle <8 x i16> %a, %b
@@ -1549,13 +1549,13 @@ define <8 x i16> @min_le_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: min_le_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminsw %xmm2, %xmm0
; SSE-NEXT: pminsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: min_le_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -1564,12 +1564,12 @@ define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sle <16 x i16> %a, %b
@@ -1579,7 +1579,7 @@ define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <16 x i8> @min_le_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: min_le_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
@@ -1591,17 +1591,17 @@ define <16 x i8> @min_le_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_le_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sle <16 x i8> %a, %b
@@ -1611,7 +1611,7 @@ define <16 x i8> @min_le_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: min_le_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pcmpgtb %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
@@ -1631,19 +1631,19 @@ define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsb %xmm2, %xmm0
; SSE41-NEXT: pminsb %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsb %xmm2, %xmm0
; SSE42-NEXT: pminsb %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -1652,12 +1652,12 @@ define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sle <32 x i8> %a, %b
@@ -1671,12 +1671,12 @@ define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <2 x i64> @max_gt_v2i64c() {
; SSE-LABEL: max_gt_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
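
Note: the *c variants from here on use only constant operands, so the icmp+select folds away at compile time and the checks just expect a movaps of the folded vector. A sketch of the fold with illustrative constants (the tests' second operand is elided by the hunks above):

define <2 x i64> @smax_fold_sketch() {
  %c = icmp sgt <2 x i64> <i64 -7, i64 7>, <i64 -1, i64 1>
  %r = select <2 x i1> %c, <2 x i64> <i64 -7, i64 7>, <2 x i64> <i64 -1, i64 1>
  ret <2 x i64> %r   ; folds to <i64 -1, i64 7>, printed as [18446744073709551615,7]
}
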
@@ -1688,13 +1688,13 @@ define <2 x i64> @max_gt_v2i64c() {
define <4 x i64> @max_gt_v4i64c() {
; SSE-LABEL: max_gt_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1706,12 +1706,12 @@ define <4 x i64> @max_gt_v4i64c() {
define <4 x i32> @max_gt_v4i32c() {
; SSE-LABEL: max_gt_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1723,13 +1723,13 @@ define <4 x i32> @max_gt_v4i32c() {
define <8 x i32> @max_gt_v8i32c() {
; SSE-LABEL: max_gt_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -1741,12 +1741,12 @@ define <8 x i32> @max_gt_v8i32c() {
define <8 x i16> @max_gt_v8i16c() {
; SSE-LABEL: max_gt_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -1758,13 +1758,13 @@ define <8 x i16> @max_gt_v8i16c() {
define <16 x i16> @max_gt_v16i16c() {
; SSE-LABEL: max_gt_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -1776,12 +1776,12 @@ define <16 x i16> @max_gt_v16i16c() {
define <16 x i8> @max_gt_v16i8c() {
; SSE-LABEL: max_gt_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -1793,12 +1793,12 @@ define <16 x i8> @max_gt_v16i8c() {
define <2 x i64> @max_ge_v2i64c() {
; SSE-LABEL: max_ge_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1810,13 +1810,13 @@ define <2 x i64> @max_ge_v2i64c() {
define <4 x i64> @max_ge_v4i64c() {
; SSE-LABEL: max_ge_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1828,12 +1828,12 @@ define <4 x i64> @max_ge_v4i64c() {
define <4 x i32> @max_ge_v4i32c() {
; SSE-LABEL: max_ge_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1845,13 +1845,13 @@ define <4 x i32> @max_ge_v4i32c() {
define <8 x i32> @max_ge_v8i32c() {
; SSE-LABEL: max_ge_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -1863,12 +1863,12 @@ define <8 x i32> @max_ge_v8i32c() {
define <8 x i16> @max_ge_v8i16c() {
; SSE-LABEL: max_ge_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -1880,13 +1880,13 @@ define <8 x i16> @max_ge_v8i16c() {
define <16 x i16> @max_ge_v16i16c() {
; SSE-LABEL: max_ge_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -1898,12 +1898,12 @@ define <16 x i16> @max_ge_v16i16c() {
define <16 x i8> @max_ge_v16i8c() {
; SSE-LABEL: max_ge_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -1915,12 +1915,12 @@ define <16 x i8> @max_ge_v16i8c() {
define <2 x i64> @min_lt_v2i64c() {
; SSE-LABEL: min_lt_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1932,13 +1932,13 @@ define <2 x i64> @min_lt_v2i64c() {
define <4 x i64> @min_lt_v4i64c() {
; SSE-LABEL: min_lt_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1950,12 +1950,12 @@ define <4 x i64> @min_lt_v4i64c() {
define <4 x i32> @min_lt_v4i32c() {
; SSE-LABEL: min_lt_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1967,13 +1967,13 @@ define <4 x i32> @min_lt_v4i32c() {
define <8 x i32> @min_lt_v8i32c() {
; SSE-LABEL: min_lt_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -1985,12 +1985,12 @@ define <8 x i32> @min_lt_v8i32c() {
define <8 x i16> @min_lt_v8i16c() {
; SSE-LABEL: min_lt_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2002,13 +2002,13 @@ define <8 x i16> @min_lt_v8i16c() {
define <16 x i16> @min_lt_v16i16c() {
; SSE-LABEL: min_lt_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65529,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2020,12 +2020,12 @@ define <16 x i16> @min_lt_v16i16c() {
define <16 x i8> @min_lt_v16i8c() {
; SSE-LABEL: min_lt_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -2037,12 +2037,12 @@ define <16 x i8> @min_lt_v16i8c() {
define <2 x i64> @min_le_v2i64c() {
; SSE-LABEL: min_le_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -2054,13 +2054,13 @@ define <2 x i64> @min_le_v2i64c() {
define <4 x i64> @min_le_v4i64c() {
; SSE-LABEL: min_le_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -2072,12 +2072,12 @@ define <4 x i64> @min_le_v4i64c() {
define <4 x i32> @min_le_v4i32c() {
; SSE-LABEL: min_le_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -2089,13 +2089,13 @@ define <4 x i32> @min_le_v4i32c() {
define <8 x i32> @min_le_v8i32c() {
; SSE-LABEL: min_le_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -2107,12 +2107,12 @@ define <8 x i32> @min_le_v8i32c() {
define <8 x i16> @min_le_v8i16c() {
; SSE-LABEL: min_le_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2124,13 +2124,13 @@ define <8 x i16> @min_le_v8i16c() {
define <16 x i16> @min_le_v16i16c() {
; SSE-LABEL: min_le_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65529,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2142,12 +2142,12 @@ define <16 x i16> @min_le_v16i16c() {
define <16 x i8> @min_le_v16i8c() {
; SSE-LABEL: min_le_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
diff --git a/test/CodeGen/X86/vec_minmax_uint.ll b/test/CodeGen/X86/vec_minmax_uint.ll
index 9782384ebe1..cf764a2f346 100644
--- a/test/CodeGen/X86/vec_minmax_uint.ll
+++ b/test/CodeGen/X86/vec_minmax_uint.ll
@@ -13,7 +13,7 @@
define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: max_gt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -32,7 +32,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm3
@@ -51,7 +51,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm1, %xmm3
@@ -63,7 +63,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -72,7 +72,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -81,7 +81,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
@@ -95,7 +95,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: max_gt_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -129,7 +129,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -163,7 +163,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm3, %xmm6
@@ -183,7 +183,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -198,7 +198,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
@@ -207,7 +207,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
@@ -220,7 +220,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: max_gt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -232,17 +232,17 @@ define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_gt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ugt <4 x i32> %a, %b
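
Note: SSE2 has no unsigned vector compare, so the SSE2 checks above bias both operands with pxor against the 2147483648 splat and then compare signed: ugt on the original values equals sgt on the biased values. A minimal IR sketch of that rewrite (function name illustrative):

define <4 x i1> @ugt_via_sgt(<4 x i32> %a, <4 x i32> %b) {
  ; flipping the sign bit maps unsigned order onto signed order
  %fa = xor <4 x i32> %a, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  %fb = xor <4 x i32> %b, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  %c = icmp sgt <4 x i32> %fa, %fb
  ret <4 x i1> %c
}
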
@@ -252,7 +252,7 @@ define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: max_gt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pxor %xmm5, %xmm6
@@ -273,19 +273,19 @@ define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm2, %xmm0
; SSE41-NEXT: pmaxud %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm2, %xmm0
; SSE42-NEXT: pmaxud %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -294,12 +294,12 @@ define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ugt <8 x i32> %a, %b
@@ -309,7 +309,7 @@ define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <8 x i16> @max_gt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: max_gt_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -321,17 +321,17 @@ define <8 x i16> @max_gt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_gt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ugt <8 x i16> %a, %b
@@ -341,7 +341,7 @@ define <8 x i16> @max_gt_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: max_gt_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pxor %xmm5, %xmm6
@@ -362,19 +362,19 @@ define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm2, %xmm0
; SSE41-NEXT: pmaxuw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm2, %xmm0
; SSE42-NEXT: pmaxuw %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -383,12 +383,12 @@ define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ugt <16 x i16> %a, %b
@@ -398,12 +398,12 @@ define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <16 x i8> @max_gt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: max_gt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ugt <16 x i8> %a, %b
@@ -413,13 +413,13 @@ define <16 x i8> @max_gt_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: max_gt_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm2, %xmm0
; SSE-NEXT: pmaxub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: max_gt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -428,12 +428,12 @@ define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ugt <32 x i8> %a, %b
@@ -447,7 +447,7 @@ define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: max_ge_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -469,7 +469,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -490,7 +490,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm3, %xmm0
@@ -503,7 +503,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -514,7 +514,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -525,7 +525,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
@@ -539,7 +539,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: max_ge_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -579,7 +579,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm5
@@ -616,7 +616,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm1, %xmm6
@@ -639,7 +639,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -657,7 +657,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
@@ -668,7 +668,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
@@ -681,7 +681,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: max_ge_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
@@ -696,17 +696,17 @@ define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_ge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp uge <4 x i32> %a, %b
@@ -716,7 +716,7 @@ define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: max_ge_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm4
@@ -742,19 +742,19 @@ define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm2, %xmm0
; SSE41-NEXT: pmaxud %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm2, %xmm0
; SSE42-NEXT: pmaxud %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -763,12 +763,12 @@ define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp uge <8 x i32> %a, %b
@@ -778,7 +778,7 @@ define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <8 x i16> @max_ge_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: max_ge_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psubusw %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -789,17 +789,17 @@ define <8 x i16> @max_ge_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_ge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp uge <8 x i16> %a, %b
@@ -809,7 +809,7 @@ define <8 x i16> @max_ge_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: max_ge_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psubusw %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
@@ -826,19 +826,19 @@ define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm2, %xmm0
; SSE41-NEXT: pmaxuw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm2, %xmm0
; SSE42-NEXT: pmaxuw %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -847,12 +847,12 @@ define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp uge <16 x i16> %a, %b
@@ -862,12 +862,12 @@ define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <16 x i8> @max_ge_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: max_ge_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp uge <16 x i8> %a, %b
@@ -877,13 +877,13 @@ define <16 x i8> @max_ge_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: max_ge_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm2, %xmm0
; SSE-NEXT: pmaxub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: max_ge_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -892,12 +892,12 @@ define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp uge <32 x i8> %a, %b
@@ -911,7 +911,7 @@ define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: min_lt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -930,7 +930,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -949,7 +949,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm2, %xmm3
@@ -961,7 +961,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -970,7 +970,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -979,7 +979,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
@@ -993,7 +993,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: min_lt_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1027,7 +1027,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm5
@@ -1061,7 +1061,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm1, %xmm6
@@ -1081,7 +1081,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1096,7 +1096,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
@@ -1105,7 +1105,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
@@ -1118,7 +1118,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: min_lt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1130,17 +1130,17 @@ define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_lt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ult <4 x i32> %a, %b
@@ -1150,7 +1150,7 @@ define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: min_lt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1170,19 +1170,19 @@ define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm2, %xmm0
; SSE41-NEXT: pminud %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm2, %xmm0
; SSE42-NEXT: pminud %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -1191,12 +1191,12 @@ define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ult <8 x i32> %a, %b
@@ -1206,7 +1206,7 @@ define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <8 x i16> @min_lt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: min_lt_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1218,17 +1218,17 @@ define <8 x i16> @min_lt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_lt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ult <8 x i16> %a, %b
@@ -1238,7 +1238,7 @@ define <8 x i16> @min_lt_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: min_lt_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1258,19 +1258,19 @@ define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm2, %xmm0
; SSE41-NEXT: pminuw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm2, %xmm0
; SSE42-NEXT: pminuw %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -1279,12 +1279,12 @@ define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ult <16 x i16> %a, %b
@@ -1294,12 +1294,12 @@ define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <16 x i8> @min_lt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: min_lt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ult <16 x i8> %a, %b
@@ -1309,13 +1309,13 @@ define <16 x i8> @min_lt_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: min_lt_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm2, %xmm0
; SSE-NEXT: pminub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: min_lt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -1324,12 +1324,12 @@ define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ult <32 x i8> %a, %b
@@ -1343,7 +1343,7 @@ define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: min_le_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1365,7 +1365,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm3
@@ -1386,7 +1386,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm1, %xmm0
@@ -1400,7 +1400,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -1411,7 +1411,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -1422,7 +1422,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
@@ -1436,7 +1436,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: min_le_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -1476,7 +1476,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -1513,7 +1513,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm3, %xmm6
@@ -1536,7 +1536,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1554,7 +1554,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
@@ -1565,7 +1565,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
@@ -1578,7 +1578,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: min_le_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
@@ -1593,17 +1593,17 @@ define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_le_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ule <4 x i32> %a, %b
@@ -1613,7 +1613,7 @@ define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: min_le_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm4
@@ -1639,19 +1639,19 @@ define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm2, %xmm0
; SSE41-NEXT: pminud %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm2, %xmm0
; SSE42-NEXT: pminud %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -1660,12 +1660,12 @@ define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ule <8 x i32> %a, %b
@@ -1675,7 +1675,7 @@ define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <8 x i16> @min_le_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: min_le_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psubusw %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -1686,17 +1686,17 @@ define <8 x i16> @min_le_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_le_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ule <8 x i16> %a, %b
@@ -1706,7 +1706,7 @@ define <8 x i16> @min_le_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: min_le_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubusw %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm6
@@ -1725,19 +1725,19 @@ define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm2, %xmm0
; SSE41-NEXT: pminuw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm2, %xmm0
; SSE42-NEXT: pminuw %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -1746,12 +1746,12 @@ define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ule <16 x i16> %a, %b
@@ -1761,12 +1761,12 @@ define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <16 x i8> @min_le_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: min_le_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ule <16 x i8> %a, %b
@@ -1776,13 +1776,13 @@ define <16 x i8> @min_le_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: min_le_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm2, %xmm0
; SSE-NEXT: pminub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: min_le_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -1791,12 +1791,12 @@ define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ule <32 x i8> %a, %b
@@ -1810,12 +1810,12 @@ define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <2 x i64> @max_gt_v2i64c() {
; SSE-LABEL: max_gt_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1827,13 +1827,13 @@ define <2 x i64> @max_gt_v2i64c() {
define <4 x i64> @max_gt_v4i64c() {
; SSE-LABEL: max_gt_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1845,12 +1845,12 @@ define <4 x i64> @max_gt_v4i64c() {
define <4 x i32> @max_gt_v4i32c() {
; SSE-LABEL: max_gt_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1862,13 +1862,13 @@ define <4 x i32> @max_gt_v4i32c() {
define <8 x i32> @max_gt_v8i32c() {
; SSE-LABEL: max_gt_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -1880,12 +1880,12 @@ define <8 x i32> @max_gt_v8i32c() {
define <8 x i16> @max_gt_v8i16c() {
; SSE-LABEL: max_gt_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -1897,13 +1897,13 @@ define <8 x i16> @max_gt_v8i16c() {
define <16 x i16> @max_gt_v16i16c() {
; SSE-LABEL: max_gt_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -1915,12 +1915,12 @@ define <16 x i16> @max_gt_v16i16c() {
define <16 x i8> @max_gt_v16i8c() {
; SSE-LABEL: max_gt_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -1932,12 +1932,12 @@ define <16 x i8> @max_gt_v16i8c() {
define <2 x i64> @max_ge_v2i64c() {
; SSE-LABEL: max_ge_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1949,13 +1949,13 @@ define <2 x i64> @max_ge_v2i64c() {
define <4 x i64> @max_ge_v4i64c() {
; SSE-LABEL: max_ge_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1967,12 +1967,12 @@ define <4 x i64> @max_ge_v4i64c() {
define <4 x i32> @max_ge_v4i32c() {
; SSE-LABEL: max_ge_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1984,13 +1984,13 @@ define <4 x i32> @max_ge_v4i32c() {
define <8 x i32> @max_ge_v8i32c() {
; SSE-LABEL: max_ge_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -2002,12 +2002,12 @@ define <8 x i32> @max_ge_v8i32c() {
define <8 x i16> @max_ge_v8i16c() {
; SSE-LABEL: max_ge_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2019,13 +2019,13 @@ define <8 x i16> @max_ge_v8i16c() {
define <16 x i16> @max_ge_v16i16c() {
; SSE-LABEL: max_ge_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2037,12 +2037,12 @@ define <16 x i16> @max_ge_v16i16c() {
define <16 x i8> @max_ge_v16i8c() {
; SSE-LABEL: max_ge_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -2054,12 +2054,12 @@ define <16 x i8> @max_ge_v16i8c() {
define <2 x i64> @min_lt_v2i64c() {
; SSE-LABEL: min_lt_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -2071,13 +2071,13 @@ define <2 x i64> @min_lt_v2i64c() {
define <4 x i64> @min_lt_v4i64c() {
; SSE-LABEL: min_lt_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -2089,12 +2089,12 @@ define <4 x i64> @min_lt_v4i64c() {
define <4 x i32> @min_lt_v4i32c() {
; SSE-LABEL: min_lt_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -2106,13 +2106,13 @@ define <4 x i32> @min_lt_v4i32c() {
define <8 x i32> @min_lt_v8i32c() {
; SSE-LABEL: min_lt_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -2124,12 +2124,12 @@ define <8 x i32> @min_lt_v8i32c() {
define <8 x i16> @min_lt_v8i16c() {
; SSE-LABEL: min_lt_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,65531,65531,65529,1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,65531,65531,65529,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2141,13 +2141,13 @@ define <8 x i16> @min_lt_v8i16c() {
define <16 x i16> @min_lt_v16i16c() {
; SSE-LABEL: min_lt_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2159,12 +2159,12 @@ define <16 x i16> @min_lt_v16i16c() {
define <16 x i8> @min_lt_v16i8c() {
; SSE-LABEL: min_lt_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -2176,12 +2176,12 @@ define <16 x i8> @min_lt_v16i8c() {
define <2 x i64> @min_le_v2i64c() {
; SSE-LABEL: min_le_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -2193,13 +2193,13 @@ define <2 x i64> @min_le_v2i64c() {
define <4 x i64> @min_le_v4i64c() {
; SSE-LABEL: min_le_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -2211,12 +2211,12 @@ define <4 x i64> @min_le_v4i64c() {
define <4 x i32> @min_le_v4i32c() {
; SSE-LABEL: min_le_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -2228,13 +2228,13 @@ define <4 x i32> @min_le_v4i32c() {
define <8 x i32> @min_le_v8i32c() {
; SSE-LABEL: min_le_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -2246,12 +2246,12 @@ define <8 x i32> @min_le_v8i32c() {
define <8 x i16> @min_le_v8i16c() {
; SSE-LABEL: min_le_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2263,13 +2263,13 @@ define <8 x i16> @min_le_v8i16c() {
define <16 x i16> @min_le_v16i16c() {
; SSE-LABEL: min_le_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65529,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2281,12 +2281,12 @@ define <16 x i16> @min_le_v16i16c() {
define <16 x i8> @min_le_v16i8c() {
; SSE-LABEL: min_le_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
diff --git a/test/CodeGen/X86/vec_partial.ll b/test/CodeGen/X86/vec_partial.ll
index ee15c2af6dd..a9044c6ffb5 100644
--- a/test/CodeGen/X86/vec_partial.ll
+++ b/test/CodeGen/X86/vec_partial.ll
@@ -5,12 +5,12 @@
; PR11580
define <3 x float> @addf3(<3 x float> %x) {
; X86-LABEL: addf3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: addps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: addf3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: addps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
entry:
@@ -21,11 +21,11 @@ entry:
; PR11580
define <4 x float> @cvtf3_f4(<3 x float> %x) {
; X86-LABEL: cvtf3_f4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: retl
;
; X64-LABEL: cvtf3_f4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%extractVec = shufflevector <3 x float> %x, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
@@ -35,11 +35,11 @@ entry:
; PR11580
define <3 x float> @cvtf4_f3(<4 x float> %x) {
; X86-LABEL: cvtf4_f3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: retl
;
; X64-LABEL: cvtf4_f3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%extractVec = shufflevector <4 x float> %x, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
diff --git a/test/CodeGen/X86/vec_reassociate.ll b/test/CodeGen/X86/vec_reassociate.ll
index 5234b0c8a77..c8b61809d31 100644
--- a/test/CodeGen/X86/vec_reassociate.ll
+++ b/test/CodeGen/X86/vec_reassociate.ll
@@ -4,12 +4,12 @@
define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: add_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: add_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: retq
%1 = add <4 x i32> %a0, <i32 1, i32 -2, i32 3, i32 -4>
@@ -20,12 +20,12 @@ define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: add_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: add_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: retq
%1 = add <4 x i32> <i32 1, i32 -2, i32 3, i32 -4>, %a0
@@ -36,13 +36,13 @@ define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld %xmm1, %xmm0
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld %xmm1, %xmm0
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -54,13 +54,13 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld %xmm1, %xmm0
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld %xmm1, %xmm0
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -72,13 +72,13 @@ define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: and_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps %xmm1, %xmm0
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: and_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps %xmm1, %xmm0
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -90,13 +90,13 @@ define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: and_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps %xmm1, %xmm0
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: and_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps %xmm1, %xmm0
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -108,13 +108,13 @@ define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: or_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: orps %xmm1, %xmm0
; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: or_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: orps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -126,13 +126,13 @@ define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: or_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: orps %xmm1, %xmm0
; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: or_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: orps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -144,13 +144,13 @@ define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: xor_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: xorps %xmm1, %xmm0
; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: xor_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm0
; X64-NEXT: xorps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -162,13 +162,13 @@ define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
define <4 x i32> @xor_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: xor_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: xorps %xmm1, %xmm0
; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: xor_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm0
; X64-NEXT: xorps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/vec_return.ll b/test/CodeGen/X86/vec_return.ll
index 556e32d0c87..f33b6a1c126 100644
--- a/test/CodeGen/X86/vec_return.ll
+++ b/test/CodeGen/X86/vec_return.ll
@@ -4,7 +4,7 @@
; Without any typed operations, always use the smaller xorps.
define <2 x double> @test() {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retl
ret <2 x double> zeroinitializer
@@ -13,7 +13,7 @@ define <2 x double> @test() {
; Prefer a constant pool load here.
define <4 x i32> @test2() nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [0,0,1,0]
; CHECK-NEXT: retl
ret <4 x i32> < i32 0, i32 0, i32 1, i32 0 >
diff --git a/test/CodeGen/X86/vec_sdiv_to_shift.ll b/test/CodeGen/X86/vec_sdiv_to_shift.ll
index f0c9069d8c7..b8c3bfcd996 100644
--- a/test/CodeGen/X86/vec_sdiv_to_shift.ll
+++ b/test/CodeGen/X86/vec_sdiv_to_shift.ll
@@ -5,7 +5,7 @@
define <8 x i16> @sdiv_vec8x16(<8 x i16> %var) {
; SSE-LABEL: sdiv_vec8x16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: psrlw $11, %xmm1
@@ -15,7 +15,7 @@ define <8 x i16> @sdiv_vec8x16(<8 x i16> %var) {
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_vec8x16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX-NEXT: vpsrlw $11, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
@@ -28,7 +28,7 @@ entry:
define <8 x i16> @sdiv_vec8x16_minsize(<8 x i16> %var) minsize {
; SSE-LABEL: sdiv_vec8x16_minsize:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: psrlw $11, %xmm1
@@ -38,7 +38,7 @@ define <8 x i16> @sdiv_vec8x16_minsize(<8 x i16> %var) minsize {
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_vec8x16_minsize:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX-NEXT: vpsrlw $11, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
@@ -51,7 +51,7 @@ entry:
define <4 x i32> @sdiv_vec4x32(<4 x i32> %var) {
; SSE-LABEL: sdiv_vec4x32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $28, %xmm1
@@ -61,7 +61,7 @@ define <4 x i32> @sdiv_vec4x32(<4 x i32> %var) {
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_vec4x32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX-NEXT: vpsrld $28, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -74,7 +74,7 @@ ret <4 x i32> %0
define <4 x i32> @sdiv_negative(<4 x i32> %var) {
; SSE-LABEL: sdiv_negative:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $28, %xmm1
@@ -85,7 +85,7 @@ define <4 x i32> @sdiv_negative(<4 x i32> %var) {
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_negative:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX-NEXT: vpsrld $28, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -100,7 +100,7 @@ ret <4 x i32> %0
define <8 x i32> @sdiv8x32(<8 x i32> %var) {
; SSE-LABEL: sdiv8x32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: psrld $26, %xmm2
@@ -116,7 +116,7 @@ define <8 x i32> @sdiv8x32(<8 x i32> %var) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sdiv8x32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $26, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
@@ -130,7 +130,7 @@ define <8 x i32> @sdiv8x32(<8 x i32> %var) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sdiv8x32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1
; AVX2-NEXT: vpsrld $26, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
@@ -143,7 +143,7 @@ ret <8 x i32> %0
define <16 x i16> @sdiv16x16(<16 x i16> %var) {
; SSE-LABEL: sdiv16x16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psraw $15, %xmm2
; SSE-NEXT: psrlw $14, %xmm2
@@ -159,7 +159,7 @@ define <16 x i16> @sdiv16x16(<16 x i16> %var) {
; SSE-NEXT: retq
;
; AVX1-LABEL: sdiv16x16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX1-NEXT: vpsrlw $14, %xmm1, %xmm1
; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm1
@@ -173,7 +173,7 @@ define <16 x i16> @sdiv16x16(<16 x i16> %var) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sdiv16x16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsraw $15, %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $14, %ymm1, %ymm1
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
@@ -188,11 +188,11 @@ entry:
define <4 x i32> @sdiv_non_splat(<4 x i32> %x) {
; SSE-LABEL: sdiv_non_splat:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_non_splat:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%y = sdiv <4 x i32> %x, <i32 2, i32 0, i32 0, i32 0>
ret <4 x i32> %y
diff --git a/test/CodeGen/X86/vec_set-2.ll b/test/CodeGen/X86/vec_set-2.ll
index 51c8b211110..058e924aa4f 100644
--- a/test/CodeGen/X86/vec_set-2.ll
+++ b/test/CodeGen/X86/vec_set-2.ll
@@ -4,12 +4,12 @@
define <4 x float> @test1(float %a) nounwind {
; X86-LABEL: test1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: movaps %xmm1, %xmm0
@@ -23,12 +23,12 @@ define <4 x float> @test1(float %a) nounwind {
define <2 x i64> @test(i32 %a) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: retq
%tmp = insertelement <4 x i32> zeroinitializer, i32 %a, i32 0
diff --git a/test/CodeGen/X86/vec_set-3.ll b/test/CodeGen/X86/vec_set-3.ll
index b34f30924a8..14f1587a6d4 100644
--- a/test/CodeGen/X86/vec_set-3.ll
+++ b/test/CodeGen/X86/vec_set-3.ll
@@ -4,12 +4,12 @@
define <4 x float> @test(float %a) {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: insertps {{.*#+}} xmm0 = zero,mem[0],zero,zero
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
; X64-NEXT: retq
%tmp = insertelement <4 x float> zeroinitializer, float %a, i32 1
@@ -20,13 +20,13 @@ define <4 x float> @test(float %a) {
define <2 x i64> @test2(i32 %a) {
; X86-LABEL: test2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
; X64-NEXT: retq
@@ -38,12 +38,12 @@ define <2 x i64> @test2(i32 %a) {
define <4 x float> @test3(<4 x float> %A) {
; X86-LABEL: test3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
; X64-NEXT: retq
%tmp0 = extractelement <4 x float> %A, i32 0
diff --git a/test/CodeGen/X86/vec_set-4.ll b/test/CodeGen/X86/vec_set-4.ll
index 09142e16aa6..d01a913ea8a 100644
--- a/test/CodeGen/X86/vec_set-4.ll
+++ b/test/CodeGen/X86/vec_set-4.ll
@@ -4,13 +4,13 @@
define <2 x i64> @test(i16 %a) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pxor %xmm0, %xmm0
; X86-NEXT: pinsrw $3, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: pinsrw $3, %edi, %xmm0
; X64-NEXT: retq
@@ -25,14 +25,14 @@ define <2 x i64> @test(i16 %a) nounwind {
define <2 x i64> @test2(i8 %a) nounwind {
; X86-LABEL: test2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: pxor %xmm0, %xmm0
; X86-NEXT: pinsrw $5, %eax, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: pinsrw $5, %eax, %xmm0
diff --git a/test/CodeGen/X86/vec_set-6.ll b/test/CodeGen/X86/vec_set-6.ll
index 3c9aca3a02d..3f8997faf39 100644
--- a/test/CodeGen/X86/vec_set-6.ll
+++ b/test/CodeGen/X86/vec_set-6.ll
@@ -4,14 +4,14 @@
define <4 x float> @test(float %a, float %b, float %c) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-NEXT: xorps %xmm2, %xmm2
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
diff --git a/test/CodeGen/X86/vec_set-7.ll b/test/CodeGen/X86/vec_set-7.ll
index 757a0d44cd4..fced7e4c079 100644
--- a/test/CodeGen/X86/vec_set-7.ll
+++ b/test/CodeGen/X86/vec_set-7.ll
@@ -4,13 +4,13 @@
define <2 x i64> @test(<2 x i64>* %p) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
%tmp = bitcast <2 x i64>* %p to double*
diff --git a/test/CodeGen/X86/vec_set-8.ll b/test/CodeGen/X86/vec_set-8.ll
index a9dceb90855..e8cded6b216 100644
--- a/test/CodeGen/X86/vec_set-8.ll
+++ b/test/CodeGen/X86/vec_set-8.ll
@@ -4,12 +4,12 @@
define <2 x i64> @test(i64 %i) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
%tmp10 = insertelement <2 x i64> undef, i64 %i, i32 0
diff --git a/test/CodeGen/X86/vec_set-A.ll b/test/CodeGen/X86/vec_set-A.ll
index 259ace98d36..9c0e9388b25 100644
--- a/test/CodeGen/X86/vec_set-A.ll
+++ b/test/CodeGen/X86/vec_set-A.ll
@@ -4,13 +4,13 @@
define <2 x i64> @test1() nounwind {
; X86-LABEL: test1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $1, %eax
; X86-NEXT: movd %eax, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $1, %eax
; X64-NEXT: movq %rax, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/vec_set-B.ll b/test/CodeGen/X86/vec_set-B.ll
index ecd9b57cfd0..cd5ce9fbb07 100644
--- a/test/CodeGen/X86/vec_set-B.ll
+++ b/test/CodeGen/X86/vec_set-B.ll
@@ -11,14 +11,14 @@
define <2 x i64> @test3(i64 %arg) nounwind {
; X86-LABEL: test3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $1234567, %eax # imm = 0x12D687
; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movd %eax, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $1234567, %edi # imm = 0x12D687
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
@@ -29,14 +29,14 @@ define <2 x i64> @test3(i64 %arg) nounwind {
define <2 x i64> @test2(i64 %arg) nounwind {
; X86-LABEL: test2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $1234567, %eax # imm = 0x12D687
; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movd %eax, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $1234567, %edi # imm = 0x12D687
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/vec_set-C.ll b/test/CodeGen/X86/vec_set-C.ll
index 865e2fb83f1..877d99abbb9 100644
--- a/test/CodeGen/X86/vec_set-C.ll
+++ b/test/CodeGen/X86/vec_set-C.ll
@@ -4,12 +4,12 @@
define <2 x i64> @t1(i64 %x) nounwind {
; X86-LABEL: t1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
%tmp8 = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
diff --git a/test/CodeGen/X86/vec_set-D.ll b/test/CodeGen/X86/vec_set-D.ll
index 56499412d7d..3dde040d9ba 100644
--- a/test/CodeGen/X86/vec_set-D.ll
+++ b/test/CodeGen/X86/vec_set-D.ll
@@ -3,7 +3,7 @@
define <4 x i32> @t(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: t:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retl
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
diff --git a/test/CodeGen/X86/vec_set-F.ll b/test/CodeGen/X86/vec_set-F.ll
index a5239914b44..75ec319c103 100644
--- a/test/CodeGen/X86/vec_set-F.ll
+++ b/test/CodeGen/X86/vec_set-F.ll
@@ -3,7 +3,7 @@
define <2 x i64> @t1(<2 x i64>* %ptr) nounwind {
; CHECK-LABEL: t1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retl
@@ -16,7 +16,7 @@ define <2 x i64> @t1(<2 x i64>* %ptr) nounwind {
define <2 x i64> @t2(i64 %x) nounwind {
; CHECK-LABEL: t2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retl
%tmp717 = bitcast i64 %x to double
diff --git a/test/CodeGen/X86/vec_set-H.ll b/test/CodeGen/X86/vec_set-H.ll
index af8ac70c5b3..03324f02a4f 100644
--- a/test/CodeGen/X86/vec_set-H.ll
+++ b/test/CodeGen/X86/vec_set-H.ll
@@ -3,7 +3,7 @@
define <2 x i64> @doload64(i16 signext %x) nounwind {
; CHECK-LABEL: doload64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
diff --git a/test/CodeGen/X86/vec_set.ll b/test/CodeGen/X86/vec_set.ll
index 918430efea1..5e1f09c9bae 100644
--- a/test/CodeGen/X86/vec_set.ll
+++ b/test/CodeGen/X86/vec_set.ll
@@ -4,7 +4,7 @@
define void @test(<8 x i16>* %b, i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -25,7 +25,7 @@ define void @test(<8 x i16>* %b, i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i1
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
diff --git a/test/CodeGen/X86/vec_setcc.ll b/test/CodeGen/X86/vec_setcc.ll
index 1eef0be2dbb..e9494d845b7 100644
--- a/test/CodeGen/X86/vec_setcc.ll
+++ b/test/CodeGen/X86/vec_setcc.ll
@@ -5,13 +5,13 @@
define <16 x i8> @v16i8_icmp_uge(<16 x i8> %a, <16 x i8> %b) nounwind readnone ssp uwtable {
; SSE-LABEL: v16i8_icmp_uge:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: v16i8_icmp_uge:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -22,13 +22,13 @@ define <16 x i8> @v16i8_icmp_uge(<16 x i8> %a, <16 x i8> %b) nounwind readnone s
define <16 x i8> @v16i8_icmp_ule(<16 x i8> %a, <16 x i8> %b) nounwind readnone ssp uwtable {
; SSE-LABEL: v16i8_icmp_ule:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: v16i8_icmp_ule:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -39,20 +39,20 @@ define <16 x i8> @v16i8_icmp_ule(<16 x i8> %a, <16 x i8> %b) nounwind readnone s
define <8 x i16> @v8i16_icmp_uge(<8 x i16> %a, <8 x i16> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v8i16_icmp_uge:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psubusw %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: v8i16_icmp_uge:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v8i16_icmp_uge:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -63,20 +63,20 @@ define <8 x i16> @v8i16_icmp_uge(<8 x i16> %a, <8 x i16> %b) nounwind readnone s
define <8 x i16> @v8i16_icmp_ule(<8 x i16> %a, <8 x i16> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v8i16_icmp_ule:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psubusw %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: v8i16_icmp_ule:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v8i16_icmp_ule:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -87,7 +87,7 @@ define <8 x i16> @v8i16_icmp_ule(<8 x i16> %a, <8 x i16> %b) nounwind readnone s
define <4 x i32> @v4i32_icmp_uge(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v4i32_icmp_uge:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm2
@@ -97,13 +97,13 @@ define <4 x i32> @v4i32_icmp_uge(<4 x i32> %a, <4 x i32> %b) nounwind readnone s
; SSE2-NEXT: retq
;
; SSE41-LABEL: v4i32_icmp_uge:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v4i32_icmp_uge:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -114,7 +114,7 @@ define <4 x i32> @v4i32_icmp_uge(<4 x i32> %a, <4 x i32> %b) nounwind readnone s
define <4 x i32> @v4i32_icmp_ule(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v4i32_icmp_ule:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -124,13 +124,13 @@ define <4 x i32> @v4i32_icmp_ule(<4 x i32> %a, <4 x i32> %b) nounwind readnone s
; SSE2-NEXT: retq
;
; SSE41-LABEL: v4i32_icmp_ule:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v4i32_icmp_ule:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -144,12 +144,12 @@ define <4 x i32> @v4i32_icmp_ule(<4 x i32> %a, <4 x i32> %b) nounwind readnone s
; should set all bits to 1.
define <16 x i8> @test_setcc_constfold_vi8(<16 x i8> %l, <16 x i8> %r) {
; SSE-LABEL: test_setcc_constfold_vi8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_setcc_constfold_vi8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%test1 = icmp eq <16 x i8> %l, %r
@@ -163,12 +163,12 @@ define <16 x i8> @test_setcc_constfold_vi8(<16 x i8> %l, <16 x i8> %r) {
; Make sure sensible results come from doing extension afterwards
define <16 x i8> @test_setcc_constfold_vi1(<16 x i8> %l, <16 x i8> %r) {
; SSE-LABEL: test_setcc_constfold_vi1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_setcc_constfold_vi1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%test1 = icmp eq <16 x i8> %l, %r
@@ -182,12 +182,12 @@ define <16 x i8> @test_setcc_constfold_vi1(<16 x i8> %l, <16 x i8> %r) {
; just 32-bits wide.
define <2 x i64> @test_setcc_constfold_vi64(<2 x i64> %l, <2 x i64> %r) {
; SSE-LABEL: test_setcc_constfold_vi64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_setcc_constfold_vi64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%test1 = icmp eq <2 x i64> %l, %r
diff --git a/test/CodeGen/X86/vec_shift.ll b/test/CodeGen/X86/vec_shift.ll
index 55b55936634..66cf8a9c3dc 100644
--- a/test/CodeGen/X86/vec_shift.ll
+++ b/test/CodeGen/X86/vec_shift.ll
@@ -4,12 +4,12 @@
define <2 x i64> @t1(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: psllw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllw %xmm1, %xmm0
; X64-NEXT: retq
entry:
@@ -22,13 +22,13 @@ entry:
define <2 x i64> @t3(<2 x i64> %b1, i32 %c) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psraw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: psraw %xmm1, %xmm0
; X64-NEXT: retq
@@ -45,12 +45,12 @@ declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @t2(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: psrlq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrlq %xmm1, %xmm0
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/vec_shift2.ll b/test/CodeGen/X86/vec_shift2.ll
index 21d599fead0..7a1ade72a38 100644
--- a/test/CodeGen/X86/vec_shift2.ll
+++ b/test/CodeGen/X86/vec_shift2.ll
@@ -4,14 +4,14 @@
define <2 x i64> @t1(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $14, %eax
; X32-NEXT: movd %eax, %xmm1
; X32-NEXT: psrlw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $14, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psrlw %xmm1, %xmm0
@@ -24,14 +24,14 @@ define <2 x i64> @t1(<2 x i64> %b1, <2 x i64> %c) nounwind {
define <4 x i32> @t2(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $14, %eax
; X32-NEXT: movd %eax, %xmm1
; X32-NEXT: pslld %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $14, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: pslld %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vec_shift3.ll b/test/CodeGen/X86/vec_shift3.ll
index 071f0d38b96..b5fc1fafb61 100644
--- a/test/CodeGen/X86/vec_shift3.ll
+++ b/test/CodeGen/X86/vec_shift3.ll
@@ -4,13 +4,13 @@
define <2 x i64> @t1(<2 x i64> %x1, i32 %bits) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psllq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: psllq %xmm1, %xmm0
; X64-NEXT: retq
@@ -21,12 +21,12 @@ entry:
define <2 x i64> @t2(<2 x i64> %x1) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: psllq $10, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllq $10, %xmm0
; X64-NEXT: retq
entry:
@@ -36,13 +36,13 @@ entry:
define <2 x i64> @t3(<2 x i64> %x1, i32 %bits) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psraw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: psraw %xmm1, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/vec_shift4.ll b/test/CodeGen/X86/vec_shift4.ll
index bef2438aecd..04b4cb658f1 100644
--- a/test/CodeGen/X86/vec_shift4.ll
+++ b/test/CodeGen/X86/vec_shift4.ll
@@ -4,7 +4,7 @@
define <2 x i64> @shl1(<4 x i32> %r, <4 x i32> %a) nounwind readnone ssp {
; X32-LABEL: shl1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pslld $23, %xmm1
; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X32-NEXT: cvttps2dq %xmm1, %xmm1
@@ -12,7 +12,7 @@ define <2 x i64> @shl1(<4 x i32> %r, <4 x i32> %a) nounwind readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: shl1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pslld $23, %xmm1
; X64-NEXT: paddd {{.*}}(%rip), %xmm1
; X64-NEXT: cvttps2dq %xmm1, %xmm1
@@ -32,7 +32,7 @@ entry:
define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X32-LABEL: shl2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm2
; X32-NEXT: psllw $5, %xmm1
; X32-NEXT: movdqa %xmm2, %xmm3
@@ -55,7 +55,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X32-NEXT: retl
;
; X64-LABEL: shl2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $5, %xmm1
; X64-NEXT: movdqa %xmm2, %xmm3
diff --git a/test/CodeGen/X86/vec_shift5.ll b/test/CodeGen/X86/vec_shift5.ll
index c0226d0a4c0..873de4b0834 100644
--- a/test/CodeGen/X86/vec_shift5.ll
+++ b/test/CodeGen/X86/vec_shift5.ll
@@ -8,12 +8,12 @@
define <8 x i16> @test1() {
; X32-LABEL: test1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [8,16,32,64,8,16,32,64]
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [8,16,32,64,8,16,32,64]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> <i16 1, i16 2, i16 4, i16 8, i16 1, i16 2, i16 4, i16 8>, i32 3)
@@ -22,12 +22,12 @@ define <8 x i16> @test1() {
define <8 x i16> @test2() {
; X32-LABEL: test2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4,0,1,2,4]
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4,0,1,2,4]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 4, i16 8, i16 16, i16 32, i16 4, i16 8, i16 16, i16 32>, i32 3)
@@ -36,12 +36,12 @@ define <8 x i16> @test2() {
define <8 x i16> @test3() {
; X32-LABEL: test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4,0,1,2,4]
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4,0,1,2,4]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 4, i16 8, i16 16, i16 32, i16 4, i16 8, i16 16, i16 32>, i32 3)
@@ -50,12 +50,12 @@ define <8 x i16> @test3() {
define <4 x i32> @test4() {
; X32-LABEL: test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [8,16,32,64]
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [8,16,32,64]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 1, i32 2, i32 4, i32 8>, i32 3)
@@ -64,12 +64,12 @@ define <4 x i32> @test4() {
define <4 x i32> @test5() {
; X32-LABEL: test5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4]
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 4, i32 8, i32 16, i32 32>, i32 3)
@@ -78,12 +78,12 @@ define <4 x i32> @test5() {
define <4 x i32> @test6() {
; X32-LABEL: test6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4]
; X32-NEXT: retl
;
; X64-LABEL: test6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 4, i32 8, i32 16, i32 32>, i32 3)
@@ -92,12 +92,12 @@ define <4 x i32> @test6() {
define <2 x i64> @test7() {
; X32-LABEL: test7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0]
; X32-NEXT: retl
;
; X64-LABEL: test7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [8,16]
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 1, i64 2>, i32 3)
@@ -106,12 +106,12 @@ define <2 x i64> @test7() {
define <2 x i64> @test8() {
; X32-LABEL: test8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [1,0,2,0]
; X32-NEXT: retl
;
; X64-LABEL: test8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [1,2]
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 8, i64 16>, i32 3)
@@ -120,12 +120,12 @@ define <2 x i64> @test8() {
define <8 x i16> @test9() {
; X32-LABEL: test9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X32-NEXT: retl
;
; X64-LABEL: test9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
@@ -134,12 +134,12 @@ define <8 x i16> @test9() {
define <4 x i32> @test10() {
; X32-LABEL: test10:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
; X32-NEXT: retl
;
; X64-LABEL: test10:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
@@ -148,12 +148,12 @@ define <4 x i32> @test10() {
define <2 x i64> @test11() {
; X32-LABEL: test11:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,u,3,0>
; X32-NEXT: retl
;
; X64-LABEL: test11:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,3>
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
@@ -162,12 +162,12 @@ define <2 x i64> @test11() {
define <8 x i16> @test12() {
; X32-LABEL: test12:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X32-NEXT: retl
;
; X64-LABEL: test12:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
@@ -176,12 +176,12 @@ define <8 x i16> @test12() {
define <4 x i32> @test13() {
; X32-LABEL: test13:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
; X32-NEXT: retl
;
; X64-LABEL: test13:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
@@ -190,12 +190,12 @@ define <4 x i32> @test13() {
define <8 x i16> @test14() {
; X32-LABEL: test14:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X32-NEXT: retl
;
; X64-LABEL: test14:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
@@ -204,12 +204,12 @@ define <8 x i16> @test14() {
define <4 x i32> @test15() {
; X32-LABEL: test15:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,64,u,256>
; X32-NEXT: retl
;
; X64-LABEL: test15:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,64,u,256>
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
@@ -218,12 +218,12 @@ define <4 x i32> @test15() {
define <2 x i64> @test16() {
; X32-LABEL: test16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,u,248,0>
; X32-NEXT: retl
;
; X64-LABEL: test16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,248>
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
diff --git a/test/CodeGen/X86/vec_shift6.ll b/test/CodeGen/X86/vec_shift6.ll
index 731760a4ea5..db8ef0b213c 100644
--- a/test/CodeGen/X86/vec_shift6.ll
+++ b/test/CodeGen/X86/vec_shift6.ll
@@ -9,17 +9,17 @@
define <8 x i16> @test1(<8 x i16> %a) {
; SSE-LABEL: test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: test1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
@@ -28,17 +28,17 @@ define <8 x i16> @test1(<8 x i16> %a) {
define <8 x i16> @test2(<8 x i16> %a) {
; SSE-LABEL: test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: test2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test2:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%shl = shl <8 x i16> %a, <i16 0, i16 undef, i16 0, i16 0, i16 1, i16 undef, i16 -1, i16 1>
@@ -51,17 +51,17 @@ define <8 x i16> @test2(<8 x i16> %a) {
define <4 x i32> @test3(<4 x i32> %a) {
; SSE-LABEL: test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: test3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%shl = shl <4 x i32> %a, <i32 1, i32 -1, i32 2, i32 -3>
@@ -70,17 +70,17 @@ define <4 x i32> @test3(<4 x i32> %a) {
define <4 x i32> @test4(<4 x i32> %a) {
; SSE-LABEL: test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: test4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%shl = shl <4 x i32> %a, <i32 0, i32 0, i32 1, i32 1>
@@ -93,19 +93,19 @@ define <4 x i32> @test4(<4 x i32> %a) {
define <16 x i16> @test5(<16 x i16> %a) {
; SSE-LABEL: test5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,2,4,8,128,1,512,2048]
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX2-LABEL: test5:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test5:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
%shl = shl <16 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
@@ -118,19 +118,19 @@ define <16 x i16> @test5(<16 x i16> %a) {
define <8 x i32> @test6(<8 x i32> %a) {
; SSE-LABEL: test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,2,4,8]
; SSE-NEXT: pmulld %xmm2, %xmm0
; SSE-NEXT: pmulld %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX2-LABEL: test6:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test6:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
%shl = shl <8 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
@@ -143,7 +143,7 @@ define <8 x i32> @test6(<8 x i32> %a) {
define <32 x i16> @test7(<32 x i16> %a) {
; SSE-LABEL: test7:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2,2,4,8,128,1,512,2048]
; SSE-NEXT: pmullw %xmm4, %xmm0
; SSE-NEXT: pmullw %xmm4, %xmm1
@@ -152,7 +152,7 @@ define <32 x i16> @test7(<32 x i16> %a) {
; SSE-NEXT: retq
;
; AVX2-LABEL: test7:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [2,2,4,8,128,1,512,2048,2,2,4,8,128,1,512,2048]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
@@ -160,7 +160,7 @@ define <32 x i16> @test7(<32 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test7:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [2,2,4,8,128,1,512,2048,2,2,4,8,128,1,512,2048]
; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512-NEXT: vpmullw %ymm2, %ymm0, %ymm0
@@ -175,7 +175,7 @@ define <32 x i16> @test7(<32 x i16> %a) {
define <16 x i32> @test8(<16 x i32> %a) {
; SSE-LABEL: test8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2,2,4,8]
; SSE-NEXT: pmulld %xmm4, %xmm0
; SSE-NEXT: pmulld %xmm4, %xmm1
@@ -184,7 +184,7 @@ define <16 x i32> @test8(<16 x i32> %a) {
; SSE-NEXT: retq
;
; AVX2-LABEL: test8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,1,2,3,1,1,2,3]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
@@ -192,7 +192,7 @@ define <16 x i32> @test8(<16 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <16 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
@@ -203,7 +203,7 @@ define <16 x i32> @test8(<16 x i32> %a) {
define <8 x i64> @test9(<8 x i64> %a) {
; SSE-LABEL: test9:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psllq $3, %xmm4
; SSE-NEXT: psllq $2, %xmm1
@@ -217,14 +217,14 @@ define <8 x i64> @test9(<8 x i64> %a) {
; SSE-NEXT: retq
;
; AVX2-LABEL: test9:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,2,3]
; AVX2-NEXT: vpsllvq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsllvq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test9:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <8 x i64> %a, <i64 1, i64 1, i64 2, i64 3, i64 1, i64 1, i64 2, i64 3>
diff --git a/test/CodeGen/X86/vec_shift7.ll b/test/CodeGen/X86/vec_shift7.ll
index c13299b9cb3..1624ae7346c 100644
--- a/test/CodeGen/X86/vec_shift7.ll
+++ b/test/CodeGen/X86/vec_shift7.ll
@@ -6,7 +6,7 @@
define i64 @test1(<2 x i64> %a) {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psllq $2, %xmm1
; X32-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
@@ -16,7 +16,7 @@ define i64 @test1(<2 x i64> %a) {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/vec_ss_load_fold.ll b/test/CodeGen/X86/vec_ss_load_fold.ll
index f861391f166..ef8afbe934e 100644
--- a/test/CodeGen/X86/vec_ss_load_fold.ll
+++ b/test/CodeGen/X86/vec_ss_load_fold.ll
@@ -8,7 +8,7 @@
define i16 @test1(float %f) nounwind {
; X32-LABEL: test1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: addss LCPI0_0, %xmm0
; X32-NEXT: mulss LCPI0_1, %xmm0
@@ -21,7 +21,7 @@ define i16 @test1(float %f) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: addss {{.*}}(%rip), %xmm0
; X64-NEXT: mulss {{.*}}(%rip), %xmm0
; X64-NEXT: xorps %xmm1, %xmm1
@@ -33,7 +33,7 @@ define i16 @test1(float %f) nounwind {
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test1:
-; X32_AVX1: ## BB#0:
+; X32_AVX1: ## %bb.0:
; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32_AVX1-NEXT: vaddss LCPI0_0, %xmm0, %xmm0
; X32_AVX1-NEXT: vmulss LCPI0_1, %xmm0, %xmm0
@@ -46,7 +46,7 @@ define i16 @test1(float %f) nounwind {
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test1:
-; X64_AVX1: ## BB#0:
+; X64_AVX1: ## %bb.0:
; X64_AVX1-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -58,7 +58,7 @@ define i16 @test1(float %f) nounwind {
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test1:
-; X32_AVX512: ## BB#0:
+; X32_AVX512: ## %bb.0:
; X32_AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32_AVX512-NEXT: vaddss LCPI0_0, %xmm0, %xmm0
; X32_AVX512-NEXT: vmulss LCPI0_1, %xmm0, %xmm0
@@ -71,7 +71,7 @@ define i16 @test1(float %f) nounwind {
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test1:
-; X64_AVX512: ## BB#0:
+; X64_AVX512: ## %bb.0:
; X64_AVX512-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -96,7 +96,7 @@ define i16 @test1(float %f) nounwind {
define i16 @test2(float %f) nounwind {
; X32-LABEL: test2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: addss LCPI1_0, %xmm0
; X32-NEXT: mulss LCPI1_1, %xmm0
@@ -108,7 +108,7 @@ define i16 @test2(float %f) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: addss {{.*}}(%rip), %xmm0
; X64-NEXT: mulss {{.*}}(%rip), %xmm0
; X64-NEXT: minss {{.*}}(%rip), %xmm0
@@ -119,7 +119,7 @@ define i16 @test2(float %f) nounwind {
; X64-NEXT: retq
;
; X32_AVX-LABEL: test2:
-; X32_AVX: ## BB#0:
+; X32_AVX: ## %bb.0:
; X32_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32_AVX-NEXT: vaddss LCPI1_0, %xmm0, %xmm0
; X32_AVX-NEXT: vmulss LCPI1_1, %xmm0, %xmm0
@@ -131,7 +131,7 @@ define i16 @test2(float %f) nounwind {
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: test2:
-; X64_AVX: ## BB#0:
+; X64_AVX: ## %bb.0:
; X64_AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
@@ -166,35 +166,35 @@ declare <4 x float> @f()
define <4 x float> @test3(<4 x float> %A, float *%b, i32 %C) nounwind {
; X32-LABEL: test3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: roundss $4, (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: roundss $4, (%rdi), %xmm0
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test3:
-; X32_AVX1: ## BB#0:
+; X32_AVX1: ## %bb.0:
; X32_AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX1-NEXT: vroundss $4, (%eax), %xmm0, %xmm0
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test3:
-; X64_AVX1: ## BB#0:
+; X64_AVX1: ## %bb.0:
; X64_AVX1-NEXT: vroundss $4, (%rdi), %xmm0, %xmm0
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test3:
-; X32_AVX512: ## BB#0:
+; X32_AVX512: ## %bb.0:
; X32_AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX512-NEXT: vrndscaless $4, (%eax), %xmm0, %xmm0
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test3:
-; X64_AVX512: ## BB#0:
+; X64_AVX512: ## %bb.0:
; X64_AVX512-NEXT: vrndscaless $4, (%rdi), %xmm0, %xmm0
; X64_AVX512-NEXT: retq
%a = load float , float *%b
@@ -205,7 +205,7 @@ define <4 x float> @test3(<4 x float> %A, float *%b, i32 %C) nounwind {
define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
; X32-LABEL: test4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: subl $28, %esp
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -216,7 +216,7 @@ define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
@@ -226,7 +226,7 @@ define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test4:
-; X32_AVX1: ## BB#0:
+; X32_AVX1: ## %bb.0:
; X32_AVX1-NEXT: subl $28, %esp
; X32_AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -237,7 +237,7 @@ define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test4:
-; X64_AVX1: ## BB#0:
+; X64_AVX1: ## %bb.0:
; X64_AVX1-NEXT: subq $24, %rsp
; X64_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64_AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
@@ -247,7 +247,7 @@ define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test4:
-; X32_AVX512: ## BB#0:
+; X32_AVX512: ## %bb.0:
; X32_AVX512-NEXT: subl $28, %esp
; X32_AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -259,7 +259,7 @@ define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test4:
-; X64_AVX512: ## BB#0:
+; X64_AVX512: ## %bb.0:
; X64_AVX512-NEXT: subq $24, %rsp
; X64_AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64_AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
@@ -278,28 +278,28 @@ define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
; PR13576
define <2 x double> @test5() nounwind uwtable readnone noinline {
; X32-LABEL: test5:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
; X32-NEXT: movl $128, %eax
; X32-NEXT: cvtsi2sdl %eax, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
; X64-NEXT: movl $128, %eax
; X64-NEXT: cvtsi2sdl %eax, %xmm0
; X64-NEXT: retq
;
; X32_AVX-LABEL: test5:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
; X32_AVX-NEXT: movl $128, %eax
; X32_AVX-NEXT: vcvtsi2sdl %eax, %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: test5:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
; X64_AVX-NEXT: movl $128, %eax
; X64_AVX-NEXT: vcvtsi2sdl %eax, %xmm0, %xmm0
@@ -313,24 +313,24 @@ declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnon
define <4 x float> @minss_fold(float* %x, <4 x float> %y) {
; X32-LABEL: minss_fold:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: minss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: minss_fold:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: minss (%rdi), %xmm0
; X64-NEXT: retq
;
; X32_AVX-LABEL: minss_fold:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX-NEXT: vminss (%eax), %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: minss_fold:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vminss (%rdi), %xmm0, %xmm0
; X64_AVX-NEXT: retq
entry:
@@ -345,24 +345,24 @@ entry:
define <4 x float> @maxss_fold(float* %x, <4 x float> %y) {
; X32-LABEL: maxss_fold:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: maxss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: maxss_fold:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: maxss (%rdi), %xmm0
; X64-NEXT: retq
;
; X32_AVX-LABEL: maxss_fold:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX-NEXT: vmaxss (%eax), %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: maxss_fold:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vmaxss (%rdi), %xmm0, %xmm0
; X64_AVX-NEXT: retq
entry:
@@ -377,24 +377,24 @@ entry:
define <4 x float> @cmpss_fold(float* %x, <4 x float> %y) {
; X32-LABEL: cmpss_fold:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: cmpeqss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: cmpss_fold:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: cmpeqss (%rdi), %xmm0
; X64-NEXT: retq
;
; X32_AVX-LABEL: cmpss_fold:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX-NEXT: vcmpeqss (%eax), %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: cmpss_fold:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0
; X64_AVX-NEXT: retq
entry:
@@ -411,7 +411,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
define <4 x float> @double_fold(float* %x, <4 x float> %y) {
; X32-LABEL: double_fold:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movaps %xmm0, %xmm2
@@ -421,7 +421,7 @@ define <4 x float> @double_fold(float* %x, <4 x float> %y) {
; X32-NEXT: retl
;
; X64-LABEL: double_fold:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: movaps %xmm0, %xmm2
; X64-NEXT: minss %xmm1, %xmm2
@@ -430,7 +430,7 @@ define <4 x float> @double_fold(float* %x, <4 x float> %y) {
; X64-NEXT: retq
;
; X32_AVX-LABEL: double_fold:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32_AVX-NEXT: vminss %xmm1, %xmm0, %xmm2
@@ -439,7 +439,7 @@ define <4 x float> @double_fold(float* %x, <4 x float> %y) {
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: double_fold:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64_AVX-NEXT: vminss %xmm1, %xmm0, %xmm2
; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vec_trunc_sext.ll b/test/CodeGen/X86/vec_trunc_sext.ll
index 66af87c7818..1e2de8e20c3 100644
--- a/test/CodeGen/X86/vec_trunc_sext.ll
+++ b/test/CodeGen/X86/vec_trunc_sext.ll
@@ -10,7 +10,7 @@
define <4 x i32> @trunc_sext(<4 x i16>* %in) {
; NO_SSE_41-LABEL: trunc_sext:
-; NO_SSE_41: # BB#0:
+; NO_SSE_41: # %bb.0:
; NO_SSE_41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; NO_SSE_41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; NO_SSE_41-NEXT: pslld $24, %xmm0
@@ -18,7 +18,7 @@ define <4 x i32> @trunc_sext(<4 x i16>* %in) {
; NO_SSE_41-NEXT: retq
;
; SSE_41-LABEL: trunc_sext:
-; SSE_41: # BB#0:
+; SSE_41: # %bb.0:
; SSE_41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE_41-NEXT: pslld $24, %xmm0
; SSE_41-NEXT: psrad $24, %xmm0
diff --git a/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
index 51f5b41051a..bf468995307 100644
--- a/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
+++ b/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -28,7 +28,7 @@
define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; SSE2-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
@@ -39,7 +39,7 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
@@ -50,7 +50,7 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; SSE41-NEXT: retq
;
; AVX-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: vcvtdq2ps %xmm1, %xmm1
@@ -61,7 +61,7 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; AVX-NEXT: retq
;
; AVX2-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2ps %xmm1, %xmm1
; AVX2-NEXT: vbroadcastss [[FPMASKCSTADDR]](%rip), %xmm2
@@ -73,7 +73,7 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill
@@ -81,7 +81,7 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512VL-NEXT: retq
%tmp = uitofp <4 x i32> %arg to <4 x float>
@@ -105,7 +105,7 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrld $16, %xmm2
; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
@@ -125,7 +125,7 @@ define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrld $16, %xmm2
; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
@@ -145,7 +145,7 @@ define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
; SSE41-NEXT: retq
;
; AVX-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX-NEXT: vpsrld $16, %xmm2, %xmm2
@@ -158,7 +158,7 @@ define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
; AVX-NEXT: retq
;
; AVX2-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm1
; AVX2-NEXT: vcvtdq2ps %ymm1, %ymm1
; AVX2-NEXT: vbroadcastss [[FPMASKCSTADDR_v8]](%rip), %ymm2
@@ -170,14 +170,14 @@ define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %ymm0, %ymm0
; AVX512VL-NEXT: retq
%tmp = uitofp <8 x i32> %arg to <8 x float>
diff --git a/test/CodeGen/X86/vec_unsafe-fp-math.ll b/test/CodeGen/X86/vec_unsafe-fp-math.ll
index 745316effc9..340177ec49a 100644
--- a/test/CodeGen/X86/vec_unsafe-fp-math.ll
+++ b/test/CodeGen/X86/vec_unsafe-fp-math.ll
@@ -6,7 +6,7 @@
; Subtracting zero is free.
define <4 x float> @vec_fsub_zero(<4 x float> %x) {
; CHECK-LABEL: vec_fsub_zero:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sub = fsub <4 x float> %x, zeroinitializer
ret <4 x float> %sub
@@ -15,7 +15,7 @@ define <4 x float> @vec_fsub_zero(<4 x float> %x) {
; Negating doesn't require subtraction.
define <4 x float> @vec_fneg(<4 x float> %x) {
; CHECK-LABEL: vec_fneg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%sub = fsub <4 x float> zeroinitializer, %x
diff --git a/test/CodeGen/X86/vec_zero_cse.ll b/test/CodeGen/X86/vec_zero_cse.ll
index 75e85348ba8..b868cc01407 100644
--- a/test/CodeGen/X86/vec_zero_cse.ll
+++ b/test/CodeGen/X86/vec_zero_cse.ll
@@ -12,7 +12,7 @@
define void @test1() {
; X32-LABEL: test1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $0, M1+4
; X32-NEXT: movl $0, M1
; X32-NEXT: xorps %xmm0, %xmm0
@@ -20,7 +20,7 @@ define void @test1() {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq $0, {{.*}}(%rip)
; X64-NEXT: movq $0, {{.*}}(%rip)
; X64-NEXT: retq
@@ -31,7 +31,7 @@ define void @test1() {
define void @test2() {
; X32-LABEL: test2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $-1, M1+4
; X32-NEXT: movl $-1, M1
; X32-NEXT: pcmpeqd %xmm0, %xmm0
@@ -39,7 +39,7 @@ define void @test2() {
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq $-1, {{.*}}(%rip)
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movq %rax, {{.*}}(%rip)
@@ -51,14 +51,14 @@ define void @test2() {
define void @test3() {
; X32-LABEL: test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: movaps %xmm0, S1
; X32-NEXT: movaps %xmm0, S2
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
@@ -70,14 +70,14 @@ define void @test3() {
define void @test4() {
; X32-LABEL: test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpeqd %xmm0, %xmm0
; X32-NEXT: movdqa %xmm0, S1
; X32-NEXT: movdqa %xmm0, S2
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqd %xmm0, %xmm0
; X64-NEXT: movdqa %xmm0, {{.*}}(%rip)
; X64-NEXT: movdqa %xmm0, {{.*}}(%rip)
diff --git a/test/CodeGen/X86/vector-bitreverse.ll b/test/CodeGen/X86/vector-bitreverse.ll
index 646e5c3c306..0eb3a64311a 100644
--- a/test/CodeGen/X86/vector-bitreverse.ll
+++ b/test/CodeGen/X86/vector-bitreverse.ll
@@ -10,7 +10,7 @@
define i8 @test_bitreverse_i8(i8 %a) nounwind {
; SSE-LABEL: test_bitreverse_i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rolb $4, %dil
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andb $51, %al
@@ -28,7 +28,7 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: rolb $4, %dil
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andb $51, %al
@@ -46,7 +46,7 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vpextrb $0, %xmm0, %eax
@@ -58,7 +58,7 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-LABEL: test_bitreverse_i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: rolw $8, %di
; SSE-NEXT: movl %edi, %eax
@@ -81,7 +81,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: rolw $8, %di
; AVX-NEXT: movl %edi, %eax
@@ -104,7 +104,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
@@ -116,7 +116,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-LABEL: test_bitreverse_i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: bswapl %edi
; SSE-NEXT: movl %edi, %eax
@@ -138,7 +138,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: bswapl %edi
; AVX-NEXT: movl %edi, %eax
@@ -160,7 +160,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
@@ -171,7 +171,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
define i64 @test_bitreverse_i64(i64 %a) nounwind {
; SSE-LABEL: test_bitreverse_i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: bswapq %rdi
; SSE-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
; SSE-NEXT: andq %rdi, %rax
@@ -195,7 +195,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: bswapq %rdi
; AVX-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
; AVX-NEXT: andq %rdi, %rax
@@ -219,7 +219,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovq %rdi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovq %xmm0, %rax
@@ -230,7 +230,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
@@ -259,7 +259,7 @@ define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
@@ -274,7 +274,7 @@ define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -287,7 +287,7 @@ define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
@@ -296,7 +296,7 @@ define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -334,7 +334,7 @@ define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
@@ -350,7 +350,7 @@ define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
@@ -364,7 +364,7 @@ define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a)
@@ -373,7 +373,7 @@ define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -411,7 +411,7 @@ define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
@@ -427,7 +427,7 @@ define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
@@ -441,7 +441,7 @@ define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
@@ -450,7 +450,7 @@ define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -490,7 +490,7 @@ define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
@@ -506,7 +506,7 @@ define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
@@ -520,7 +520,7 @@ define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
@@ -529,7 +529,7 @@ define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm3
@@ -586,7 +586,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v32i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm4, %xmm2
@@ -611,7 +611,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -632,7 +632,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -645,7 +645,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -658,7 +658,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -667,7 +667,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -680,7 +680,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
@@ -754,7 +754,7 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -782,7 +782,7 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -806,7 +806,7 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -820,7 +820,7 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -834,7 +834,7 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -843,7 +843,7 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -856,7 +856,7 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
@@ -930,7 +930,7 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -958,7 +958,7 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -982,7 +982,7 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -996,7 +996,7 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -1010,7 +1010,7 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -1019,7 +1019,7 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -1032,7 +1032,7 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
@@ -1110,7 +1110,7 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v4i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -1138,7 +1138,7 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1162,7 +1162,7 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -1176,7 +1176,7 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -1190,7 +1190,7 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -1199,7 +1199,7 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -1212,7 +1212,7 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v64i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm13, %xmm5
@@ -1315,7 +1315,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v64i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: pand %xmm8, %xmm0
@@ -1359,7 +1359,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
@@ -1394,7 +1394,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -1413,7 +1413,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -1432,7 +1432,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -1445,7 +1445,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v64i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -1458,7 +1458,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v64i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -1475,7 +1475,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
@@ -1611,7 +1611,7 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v32i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
@@ -1661,7 +1661,7 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -1701,7 +1701,7 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -1723,7 +1723,7 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX512F-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -1745,7 +1745,7 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30,33,32,35,34,37,36,39,38,41,40,43,42,45,44,47,46,49,48,51,50,53,52,55,54,57,56,59,58,61,60,63,62]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
@@ -1759,7 +1759,7 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v32i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -1772,7 +1772,7 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v32i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -1789,7 +1789,7 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
@@ -1925,7 +1925,7 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
@@ -1975,7 +1975,7 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -2015,7 +2015,7 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -2037,7 +2037,7 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v16i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrld $24, %zmm0, %zmm1
; AVX512F-NEXT: vpsrld $8, %zmm0, %zmm2
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm2, %zmm2
@@ -2065,7 +2065,7 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28,35,34,33,32,39,38,37,36,43,42,41,40,47,46,45,44,51,50,49,48,55,54,53,52,59,58,57,56,63,62,61,60]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
@@ -2079,7 +2079,7 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v16i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -2092,7 +2092,7 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v16i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -2109,7 +2109,7 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
@@ -2253,7 +2253,7 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
@@ -2303,7 +2303,7 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -2343,7 +2343,7 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -2365,7 +2365,7 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v8i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlq $56, %zmm0, %zmm1
; AVX512F-NEXT: vpsrlq $40, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
@@ -2405,7 +2405,7 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24,39,38,37,36,35,34,33,32,47,46,45,44,43,42,41,40,55,54,53,52,51,50,49,48,63,62,61,60,59,58,57,56]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
@@ -2419,7 +2419,7 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v8i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -2432,7 +2432,7 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v8i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -2453,7 +2453,7 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
define i32 @fold_bitreverse_i32() nounwind {
; ALL-LABEL: fold_bitreverse_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl $16711935, %eax # imm = 0xFF00FF
; ALL-NEXT: retq
%b = call i32 @llvm.bitreverse.i32(i32 4278255360)
@@ -2462,17 +2462,17 @@ define i32 @fold_bitreverse_i32() nounwind {
define <16 x i8> @fold_bitreverse_v16i8() nounwind {
; SSE-LABEL: fold_bitreverse_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; SSE-NEXT: retq
;
; AVX-LABEL: fold_bitreverse_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; AVX-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; XOP-NEXT: retq
%b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> <i8 0, i8 -1, i8 2, i8 -3, i8 4, i8 -5, i8 6, i8 -7, i8 8, i8 -9, i8 10, i8 -11, i8 12, i8 -13, i8 14, i8 -15>)
@@ -2481,18 +2481,18 @@ define <16 x i8> @fold_bitreverse_v16i8() nounwind {
define <16 x i16> @fold_bitreverse_v16i16() nounwind {
; SSE-LABEL: fold_bitreverse_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,65535,16384,49151,8192,57343,24576,40959]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [4096,61439,20480,45055,12288,53247,28672,36863]
; SSE-NEXT: retq
;
; AVX-LABEL: fold_bitreverse_v16i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
; AVX-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
; XOP-NEXT: retq
%b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> <i16 0, i16 -1, i16 2, i16 -3, i16 4, i16 -5, i16 6, i16 -7, i16 8, i16 -9, i16 10, i16 -11, i16 12, i16 -13, i16 14, i16 -15>)
@@ -2501,7 +2501,7 @@ define <16 x i16> @fold_bitreverse_v16i16() nounwind {
define <16 x i32> @fold_bitreverse_v16i32() nounwind {
; SSE-LABEL: fold_bitreverse_v16i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,1073741824,3221225471]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [536870912,3758096383,1610612736,2684354559]
; SSE-NEXT: movaps {{.*#+}} xmm2 = [268435456,4026531839,1342177280,2952790015]
@@ -2509,24 +2509,24 @@ define <16 x i32> @fold_bitreverse_v16i32() nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: fold_bitreverse_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX1-NEXT: retq
;
; AVX2-LABEL: fold_bitreverse_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; AVX2-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX2-NEXT: retq
;
; AVX512-LABEL: fold_bitreverse_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} zmm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559,268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX512-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; XOP-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; XOP-NEXT: retq
diff --git a/test/CodeGen/X86/vector-blend.ll b/test/CodeGen/X86/vector-blend.ll
index 831d2a7970b..30c72f76029 100644
--- a/test/CodeGen/X86/vector-blend.ll
+++ b/test/CodeGen/X86/vector-blend.ll
@@ -9,24 +9,24 @@
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
; SSE2-LABEL: vsel_float:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_float:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_float:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
entry:
@@ -36,24 +36,24 @@ entry:
define <4 x float> @vsel_float2(<4 x float> %v1, <4 x float> %v2) {
; SSE2-LABEL: vsel_float2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_float2:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_float2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
entry:
@@ -63,29 +63,29 @@ entry:
define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
; SSE2-LABEL: vsel_4xi8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_4xi8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_4xi8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_4xi8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_4xi8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX2-NEXT: retq
entry:
@@ -95,31 +95,31 @@ entry:
define <4 x i16> @vsel_4xi16(<4 x i16> %v1, <4 x i16> %v2) {
; SSE2-LABEL: vsel_4xi16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_4xi16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_4xi16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_4xi16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_4xi16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-NEXT: retq
entry:
@@ -129,31 +129,31 @@ entry:
define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: vsel_i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
entry:
@@ -163,24 +163,24 @@ entry:
define <2 x double> @vsel_double(<2 x double> %v1, <2 x double> %v2) {
; SSE2-LABEL: vsel_double:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_double:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_double:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
entry:
@@ -190,29 +190,29 @@ entry:
define <2 x i64> @vsel_i64(<2 x i64> %v1, <2 x i64> %v2) {
; SSE2-LABEL: vsel_i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
entry:
@@ -222,7 +222,7 @@ entry:
define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
; SSE2-LABEL: vsel_8xi16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535]
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: andnps %xmm0, %xmm2
@@ -231,7 +231,7 @@ define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_8xi16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535]
; SSSE3-NEXT: andps %xmm2, %xmm1
; SSSE3-NEXT: andnps %xmm0, %xmm2
@@ -240,12 +240,12 @@ define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_8xi16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_8xi16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-NEXT: retq
entry:
@@ -255,7 +255,7 @@ entry:
define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
; SSE2-LABEL: vsel_i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: andnps %xmm0, %xmm2
@@ -264,14 +264,14 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[12],zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[1,2,3],zero,xmm1[5,6,7],zero,xmm1[9,10,11],zero,xmm1[13,14,15]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
@@ -279,7 +279,7 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_i8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -293,7 +293,7 @@ entry:
define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
; SSE2-LABEL: vsel_float8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
@@ -301,7 +301,7 @@ define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_float8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSSE3-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSSE3-NEXT: movaps %xmm2, %xmm0
@@ -309,13 +309,13 @@ define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_float8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX-NEXT: retq
entry:
@@ -325,7 +325,7 @@ entry:
define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
; SSE2-LABEL: vsel_i328:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
@@ -333,7 +333,7 @@ define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i328:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSSE3-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSSE3-NEXT: movaps %xmm2, %xmm0
@@ -341,13 +341,13 @@ define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i328:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_i328:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX-NEXT: retq
entry:
@@ -357,7 +357,7 @@ entry:
define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
; SSE2-LABEL: vsel_double8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
; SSE2-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSE2-NEXT: movapd %xmm4, %xmm0
@@ -367,7 +367,7 @@ define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_double8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
; SSSE3-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSSE3-NEXT: movapd %xmm4, %xmm0
@@ -377,7 +377,7 @@ define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_double8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm4[1]
; SSE41-NEXT: blendpd {{.*#+}} xmm2 = xmm2[0],xmm6[1]
; SSE41-NEXT: movaps %xmm5, %xmm1
@@ -385,7 +385,7 @@ define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3]
; AVX-NEXT: retq
@@ -396,7 +396,7 @@ entry:
define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
; SSE2-LABEL: vsel_i648:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
; SSE2-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSE2-NEXT: movapd %xmm4, %xmm0
@@ -406,7 +406,7 @@ define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i648:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
; SSSE3-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSSE3-NEXT: movapd %xmm4, %xmm0
@@ -416,7 +416,7 @@ define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i648:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
; SSE41-NEXT: movaps %xmm5, %xmm1
@@ -424,13 +424,13 @@ define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_i648:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_i648:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
; AVX2-NEXT: retq
@@ -441,7 +441,7 @@ entry:
define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
; SSE2-LABEL: vsel_double4:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSE2-NEXT: movapd %xmm2, %xmm0
@@ -449,7 +449,7 @@ define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_double4:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSSE3-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSSE3-NEXT: movapd %xmm2, %xmm0
@@ -457,13 +457,13 @@ define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_double4:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm3[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double4:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX-NEXT: retq
entry:
@@ -473,7 +473,7 @@ entry:
define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: testa:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: cmplepd %xmm0, %xmm2
; SSE2-NEXT: andpd %xmm2, %xmm0
@@ -482,7 +482,7 @@ define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: testa:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movapd %xmm1, %xmm2
; SSSE3-NEXT: cmplepd %xmm0, %xmm2
; SSSE3-NEXT: andpd %xmm2, %xmm0
@@ -491,7 +491,7 @@ define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testa:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmplepd %xmm2, %xmm0
@@ -500,7 +500,7 @@ define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
; SSE41-NEXT: retq
;
; AVX-LABEL: testa:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -512,7 +512,7 @@ entry:
define <2 x double> @testb(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: testb:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: cmpnlepd %xmm0, %xmm2
; SSE2-NEXT: andpd %xmm2, %xmm0
@@ -521,7 +521,7 @@ define <2 x double> @testb(<2 x double> %x, <2 x double> %y) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: testb:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movapd %xmm1, %xmm2
; SSSE3-NEXT: cmpnlepd %xmm0, %xmm2
; SSSE3-NEXT: andpd %xmm2, %xmm0
@@ -530,7 +530,7 @@ define <2 x double> @testb(<2 x double> %x, <2 x double> %y) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testb:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmpnlepd %xmm2, %xmm0
@@ -539,7 +539,7 @@ define <2 x double> @testb(<2 x double> %x, <2 x double> %y) {
; SSE41-NEXT: retq
;
; AVX-LABEL: testb:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcmpnlepd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -553,27 +553,27 @@ entry:
; blend instruction with an immediate mask
define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) {
; SSE2-LABEL: constant_blendvpd_avx:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: movapd %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_blendvpd_avx:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSSE3-NEXT: movaps %xmm2, %xmm0
; SSSE3-NEXT: movapd %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_blendvpd_avx:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm3[1]
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_blendvpd_avx:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX-NEXT: retq
entry:
@@ -583,7 +583,7 @@ entry:
define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) {
; SSE2-LABEL: constant_blendvps_avx:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm2[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm3[2,0]
@@ -593,7 +593,7 @@ define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_blendvps_avx:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm2[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm3[2,0]
@@ -603,13 +603,13 @@ define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_blendvps_avx:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_blendvps_avx:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
; AVX-NEXT: retq
entry:
@@ -619,7 +619,7 @@ entry:
define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; SSE2-LABEL: constant_pblendvb_avx2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps {{.*#+}} xmm4 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255]
; SSE2-NEXT: movaps %xmm4, %xmm5
; SSE2-NEXT: andnps %xmm0, %xmm5
@@ -633,7 +633,7 @@ define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_pblendvb_avx2:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [128,128,2,128,4,5,6,128,128,128,10,128,12,13,14,128]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [0,1,128,3,128,128,128,7,8,9,128,11,128,128,128,15]
@@ -645,7 +645,7 @@ define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_pblendvb_avx2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
@@ -655,7 +655,7 @@ define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_pblendvb_avx2:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255]
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
@@ -663,7 +663,7 @@ define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_pblendvb_avx2:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
@@ -678,24 +678,24 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
;; 4 tests for shufflevectors that optimize to blend + immediate
define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: blend_shufflevector_4xfloat:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xfloat:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_4xfloat:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_4xfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
entry:
@@ -705,7 +705,7 @@ entry:
define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b) {
; SSE2-LABEL: blend_shufflevector_8xfloat:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
@@ -714,7 +714,7 @@ define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_8xfloat:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
@@ -723,13 +723,13 @@ define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_8xfloat:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_8xfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
entry:
@@ -739,24 +739,24 @@ entry:
define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double> %b) {
; SSE2-LABEL: blend_shufflevector_4xdouble:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE2-NEXT: movapd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xdouble:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSSE3-NEXT: movapd %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_4xdouble:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_4xdouble:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
; AVX-NEXT: retq
entry:
@@ -766,30 +766,30 @@ entry:
define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: blend_shufflevector_4xi64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xi64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSSE3-NEXT: movaps %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_4xi64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: movaps %xmm3, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: blend_shufflevector_4xi64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: blend_shufflevector_4xi64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
entry:
@@ -799,7 +799,7 @@ entry:
define <4 x i32> @blend_logic_v4i32(<4 x i32> %b, <4 x i32> %a, <4 x i32> %c) {
; SSE2-LABEL: blend_logic_v4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pandn %xmm2, %xmm0
@@ -807,7 +807,7 @@ define <4 x i32> @blend_logic_v4i32(<4 x i32> %b, <4 x i32> %a, <4 x i32> %c) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_logic_v4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: pand %xmm0, %xmm1
; SSSE3-NEXT: pandn %xmm2, %xmm0
@@ -815,14 +815,14 @@ define <4 x i32> @blend_logic_v4i32(<4 x i32> %b, <4 x i32> %a, <4 x i32> %c) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_logic_v4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_logic_v4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -838,7 +838,7 @@ entry:
define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSE2-LABEL: blend_logic_v8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
@@ -850,7 +850,7 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_logic_v8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: pand %xmm1, %xmm3
@@ -862,7 +862,7 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_logic_v8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm4
@@ -873,7 +873,7 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: blend_logic_v8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
@@ -884,7 +884,7 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: blend_logic_v8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -900,14 +900,14 @@ entry:
define <4 x i32> @blend_neg_logic_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: blend_neg_logic_v4i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_neg_logic_v4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrad $31, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
@@ -924,7 +924,7 @@ entry:
define <8 x i32> @blend_neg_logic_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: blend_neg_logic_v8i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: pxor %xmm2, %xmm0
@@ -934,7 +934,7 @@ define <8 x i32> @blend_neg_logic_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: blend_neg_logic_v8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
@@ -950,7 +950,7 @@ define <8 x i32> @blend_neg_logic_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: blend_neg_logic_v8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
@@ -967,7 +967,7 @@ entry:
define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
; SSE2-LABEL: blend_neg_logic_v4i32_2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: psubd %xmm0, %xmm1
@@ -975,7 +975,7 @@ define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_neg_logic_v4i32_2:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: pxor %xmm1, %xmm0
; SSSE3-NEXT: psubd %xmm0, %xmm1
@@ -983,7 +983,7 @@ define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_neg_logic_v4i32_2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: psubd %xmm2, %xmm3
@@ -993,7 +993,7 @@ define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_neg_logic_v4i32_2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0
diff --git a/test/CodeGen/X86/vector-compare-all_of.ll b/test/CodeGen/X86/vector-compare-all_of.ll
index d9339299ea1..e45c88837c6 100644
--- a/test/CodeGen/X86/vector-compare-all_of.ll
+++ b/test/CodeGen/X86/vector-compare-all_of.ll
@@ -6,7 +6,7 @@
define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE-NEXT: pand %xmm1, %xmm0
@@ -14,7 +14,7 @@ define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
@@ -22,7 +22,7 @@ define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -40,7 +40,7 @@ define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: andpd %xmm3, %xmm2
@@ -50,7 +50,7 @@ define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vmovmskpd %ymm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -61,7 +61,7 @@ define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -84,7 +84,7 @@ define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -97,7 +97,7 @@ define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_legal_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -111,7 +111,7 @@ define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -136,7 +136,7 @@ define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_v4f32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm0, %xmm1
; SSE-NEXT: movmskps %xmm1, %eax
; SSE-NEXT: xorl %ecx, %ecx
@@ -146,7 +146,7 @@ define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -156,7 +156,7 @@ define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -178,7 +178,7 @@ define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: test_v8f32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: andps %xmm3, %xmm2
@@ -190,7 +190,7 @@ define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vmovmskps %ymm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -201,7 +201,7 @@ define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -228,7 +228,7 @@ define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: test_v8f32_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -240,7 +240,7 @@ define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_legal_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -253,7 +253,7 @@ define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -281,7 +281,7 @@ define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_v2i64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: pand %xmm0, %xmm1
@@ -289,7 +289,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -297,7 +297,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -315,7 +315,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-LABEL: test_v4i64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm3, %xmm1
; SSE-NEXT: pcmpgtq %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
@@ -325,7 +325,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -340,7 +340,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: xorl %ecx, %ecx
@@ -351,7 +351,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -374,7 +374,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-LABEL: test_v4i64_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm3, %xmm1
; SSE-NEXT: pcmpgtq %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -387,7 +387,7 @@ define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -403,7 +403,7 @@ define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -417,7 +417,7 @@ define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -442,7 +442,7 @@ define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_v4i32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: xorl %ecx, %ecx
@@ -452,7 +452,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -462,7 +462,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -484,7 +484,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: test_v8i32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
@@ -496,7 +496,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i32_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -511,7 +511,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: xorl %ecx, %ecx
@@ -522,7 +522,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -549,7 +549,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: test_v8i32_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -561,7 +561,7 @@ define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i32_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -576,7 +576,7 @@ define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -589,7 +589,7 @@ define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -617,7 +617,7 @@ define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_v8i16_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: xorl %ecx, %ecx
@@ -628,7 +628,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -639,7 +639,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -665,7 +665,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-LABEL: test_v16i16_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
@@ -678,7 +678,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -697,7 +697,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: xorl %eax, %eax
@@ -708,7 +708,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -739,7 +739,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-LABEL: test_v16i16_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
@@ -752,7 +752,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -768,7 +768,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -782,7 +782,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -815,7 +815,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_v16i8_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: xorl %ecx, %ecx
@@ -826,7 +826,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -837,7 +837,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -867,7 +867,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; SSE-LABEL: test_v32i8_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm3, %xmm1
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
@@ -880,7 +880,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -901,7 +901,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: xorl %eax, %eax
@@ -912,7 +912,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i8_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/vector-compare-any_of.ll b/test/CodeGen/X86/vector-compare-any_of.ll
index 1a6a1c17bc0..d49e4b7ae8b 100644
--- a/test/CodeGen/X86/vector-compare-any_of.ll
+++ b/test/CodeGen/X86/vector-compare-any_of.ll
@@ -6,7 +6,7 @@
define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE-NEXT: por %xmm1, %xmm0
@@ -14,7 +14,7 @@ define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
@@ -22,7 +22,7 @@ define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -40,7 +40,7 @@ define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: orpd %xmm3, %xmm2
@@ -50,7 +50,7 @@ define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vmovmskpd %ymm0, %eax
; AVX-NEXT: negl %eax
@@ -59,7 +59,7 @@ define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -82,7 +82,7 @@ define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -93,7 +93,7 @@ define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_legal_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -105,7 +105,7 @@ define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -130,7 +130,7 @@ define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_v4f32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm0, %xmm1
; SSE-NEXT: movmskps %xmm1, %eax
; SSE-NEXT: negl %eax
@@ -138,7 +138,7 @@ define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: negl %eax
@@ -146,7 +146,7 @@ define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -168,7 +168,7 @@ define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: test_v8f32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: orps %xmm3, %xmm2
@@ -178,7 +178,7 @@ define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vmovmskps %ymm0, %eax
; AVX-NEXT: negl %eax
@@ -187,7 +187,7 @@ define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -214,7 +214,7 @@ define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: test_v8f32_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -224,7 +224,7 @@ define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_legal_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -235,7 +235,7 @@ define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -263,7 +263,7 @@ define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_v2i64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: por %xmm0, %xmm1
@@ -271,7 +271,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -279,7 +279,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -297,7 +297,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-LABEL: test_v4i64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm3, %xmm1
; SSE-NEXT: pcmpgtq %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
@@ -307,7 +307,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -320,7 +320,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: negl %eax
@@ -329,7 +329,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -352,7 +352,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-LABEL: test_v4i64_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm3, %xmm1
; SSE-NEXT: pcmpgtq %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -363,7 +363,7 @@ define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -377,7 +377,7 @@ define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -389,7 +389,7 @@ define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -414,7 +414,7 @@ define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_v4i32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: negl %eax
@@ -422,7 +422,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: negl %eax
@@ -430,7 +430,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -452,7 +452,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: test_v8i32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
@@ -462,7 +462,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i32_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -475,7 +475,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: negl %eax
@@ -484,7 +484,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -511,7 +511,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: test_v8i32_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -521,7 +521,7 @@ define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i32_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -534,7 +534,7 @@ define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -545,7 +545,7 @@ define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -573,7 +573,7 @@ define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_v8i16_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
@@ -582,7 +582,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
@@ -591,7 +591,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -617,7 +617,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-LABEL: test_v16i16_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
@@ -628,7 +628,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -647,7 +647,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
@@ -657,7 +657,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -688,7 +688,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-LABEL: test_v16i16_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
@@ -699,7 +699,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -713,7 +713,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -725,7 +725,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -758,7 +758,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_v16i8_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
@@ -767,7 +767,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
@@ -776,7 +776,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -806,7 +806,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; SSE-LABEL: test_v32i8_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm3, %xmm1
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
@@ -817,7 +817,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -838,7 +838,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
@@ -848,7 +848,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i8_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/vector-compare-combines.ll b/test/CodeGen/X86/vector-compare-combines.ll
index bd7cbfb4bac..722de100948 100644
--- a/test/CodeGen/X86/vector-compare-combines.ll
+++ b/test/CodeGen/X86/vector-compare-combines.ll
@@ -9,12 +9,12 @@ declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>)
define <4 x i32> @PR27924_cmpeq(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: PR27924_cmpeq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: PR27924_cmpeq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = icmp sgt <4 x i32> %a, %b
@@ -27,12 +27,12 @@ define <4 x i32> @PR27924_cmpeq(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @PR27924_cmpgt(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: PR27924_cmpgt:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: PR27924_cmpgt:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = icmp sgt <4 x i32> %a, %b
diff --git a/test/CodeGen/X86/vector-compare-results.ll b/test/CodeGen/X86/vector-compare-results.ll
index 3ceef9e8a2c..5ceb4b1cb88 100644
--- a/test/CodeGen/X86/vector-compare-results.ll
+++ b/test/CodeGen/X86/vector-compare-results.ll
@@ -13,13 +13,13 @@
define <2 x i1> @test_cmp_v2f64(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fcmp ogt <2 x double> %a0, %a1
@@ -28,13 +28,13 @@ define <2 x i1> @test_cmp_v2f64(<2 x double> %a0, <2 x double> %a1) nounwind {
define <4 x i1> @test_cmp_v4f32(<4 x float> %a0, <4 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fcmp ogt <4 x float> %a0, %a1
@@ -43,7 +43,7 @@ define <4 x i1> @test_cmp_v4f32(<4 x float> %a0, <4 x float> %a1) nounwind {
define <2 x i1> @test_cmp_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -58,12 +58,12 @@ define <2 x i1> @test_cmp_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: test_cmp_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <2 x i64> %a0, %a1
@@ -72,12 +72,12 @@ define <2 x i1> @test_cmp_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <4 x i1> @test_cmp_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <4 x i32> %a0, %a1
@@ -86,12 +86,12 @@ define <4 x i1> @test_cmp_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
define <8 x i1> @test_cmp_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE-LABEL: test_cmp_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <8 x i16> %a0, %a1
@@ -100,12 +100,12 @@ define <8 x i1> @test_cmp_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
define <16 x i1> @test_cmp_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <16 x i8> %a0, %a1
@@ -118,7 +118,7 @@ define <16 x i1> @test_cmp_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -126,7 +126,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -134,7 +134,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -142,7 +142,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -154,7 +154,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -162,7 +162,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -170,7 +170,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -178,7 +178,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -190,7 +190,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm4, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm1
@@ -216,14 +216,14 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm3, %xmm1
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
; SSE42-NEXT: packssdw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -233,7 +233,7 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -241,7 +241,7 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -253,14 +253,14 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -270,7 +270,7 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -278,7 +278,7 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -290,14 +290,14 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -307,7 +307,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -315,7 +315,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -323,7 +323,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -331,7 +331,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -343,7 +343,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtb %xmm2, %xmm0
; SSE2-NEXT: pcmpgtb %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
@@ -448,7 +448,7 @@ define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtb %xmm2, %xmm0
; SSE42-NEXT: pcmpgtb %xmm3, %xmm1
; SSE42-NEXT: pextrb $15, %xmm1, %eax
@@ -551,7 +551,7 @@ define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -560,12 +560,12 @@ define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <32 x i8> %a0, %a1
@@ -578,7 +578,7 @@ define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v8f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm3, %xmm7
; SSE-NEXT: cmpltpd %xmm2, %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
@@ -590,7 +590,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -602,7 +602,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
@@ -613,7 +613,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v8f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
@@ -621,7 +621,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v8f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -629,7 +629,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v8f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -641,7 +641,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v16f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm3, %xmm7
; SSE-NEXT: cmpltps %xmm2, %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
@@ -653,7 +653,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -665,7 +665,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -676,7 +676,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -684,7 +684,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -692,7 +692,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -704,7 +704,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v8i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm3
@@ -752,7 +752,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v8i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm7, %xmm3
; SSE42-NEXT: pcmpgtq %xmm6, %xmm2
; SSE42-NEXT: packssdw %xmm3, %xmm2
@@ -763,7 +763,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -779,7 +779,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
@@ -790,7 +790,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v8i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
@@ -798,7 +798,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v8i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -806,7 +806,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -818,7 +818,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -829,7 +829,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
@@ -845,7 +845,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -856,7 +856,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -864,7 +864,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -872,7 +872,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -884,7 +884,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtw %xmm5, %xmm1
; SSE2-NEXT: pcmpgtw %xmm4, %xmm0
; SSE2-NEXT: packsswb %xmm1, %xmm0
@@ -993,7 +993,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtw %xmm4, %xmm0
; SSE42-NEXT: pcmpgtw %xmm5, %xmm1
; SSE42-NEXT: pcmpgtw %xmm6, %xmm2
@@ -1098,7 +1098,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
@@ -1113,7 +1113,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -1121,7 +1121,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -1132,7 +1132,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -1143,7 +1143,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -1154,7 +1154,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE2-LABEL: test_cmp_v64i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtb %xmm4, %xmm0
; SSE2-NEXT: pcmpgtb %xmm5, %xmm1
; SSE2-NEXT: pcmpgtb %xmm6, %xmm2
@@ -1359,7 +1359,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v64i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtb %xmm4, %xmm0
; SSE42-NEXT: pcmpgtb %xmm5, %xmm1
; SSE42-NEXT: pcmpgtb %xmm6, %xmm2
@@ -1560,7 +1560,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1766,7 +1766,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1968,7 +1968,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1979,7 +1979,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
; AVX512DQ-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1990,7 +1990,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: retq
@@ -2004,7 +2004,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v16f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm8
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11
@@ -2032,7 +2032,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm3, %ymm7, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm3, %xmm3
@@ -2052,7 +2052,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm3, %ymm7, %ymm3
; AVX2-NEXT: vcmpltpd %ymm2, %ymm6, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -2069,7 +2069,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512F-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512F-NEXT: kunpckbw %k0, %k1, %k1
@@ -2079,7 +2079,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512DQ-NEXT: kunpckbw %k0, %k1, %k0
@@ -2089,7 +2089,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
@@ -2103,7 +2103,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
; SSE2-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11
; SSE2-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
@@ -2228,7 +2228,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32f32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm15
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm14
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm13
@@ -2345,7 +2345,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm3, %ymm7, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm3, %xmm3
@@ -2364,7 +2364,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm3, %ymm7, %ymm3
; AVX2-NEXT: vcmpltps %ymm2, %ymm6, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -2378,7 +2378,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm1, %zmm3, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k1
; AVX512F-NEXT: kshiftrw $15, %k1, %k1
@@ -2515,7 +2515,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltps %zmm1, %zmm3, %k0
; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
@@ -2652,7 +2652,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm2, %k0
; AVX512BW-NEXT: vcmpltps %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
@@ -2665,7 +2665,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v16i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
@@ -2764,7 +2764,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v16i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm7
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: packssdw %xmm7, %xmm6
@@ -2783,7 +2783,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm8
@@ -2811,7 +2811,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm7, %ymm3, %ymm3
; AVX2-NEXT: vpcmpgtq %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -2828,7 +2828,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512F-NEXT: kunpckbw %k0, %k1, %k1
@@ -2838,7 +2838,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512DQ-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512DQ-NEXT: kunpckbw %k0, %k1, %k0
@@ -2848,7 +2848,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
@@ -2862,7 +2862,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm2
; SSE2-NEXT: packssdw %xmm3, %xmm2
@@ -2979,7 +2979,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm2
@@ -3088,7 +3088,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm8
@@ -3115,7 +3115,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm7, %ymm3, %ymm3
; AVX2-NEXT: vpcmpgtd %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -3129,7 +3129,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm1, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k1
; AVX512F-NEXT: kshiftrw $15, %k1, %k1
@@ -3266,7 +3266,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtd %zmm3, %zmm1, %k0
; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
@@ -3403,7 +3403,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
@@ -3416,7 +3416,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; SSE2-LABEL: test_cmp_v64i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: packsswb %xmm1, %xmm0
@@ -3629,7 +3629,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v64i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm2
@@ -3834,7 +3834,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v64i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -4048,7 +4048,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v64i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
@@ -4254,7 +4254,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v64i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm3
; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3
@@ -4541,7 +4541,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v64i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
; AVX512DQ-NEXT: vpmovsxwd %ymm3, %zmm3
; AVX512DQ-NEXT: vpslld $31, %zmm3, %zmm3
@@ -4828,7 +4828,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v64i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtw %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckdq %k0, %k1, %k0
@@ -4840,7 +4840,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; SSE2-LABEL: test_cmp_v128i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm1
@@ -5247,7 +5247,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v128i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm2
@@ -5644,7 +5644,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v128i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtb %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -6050,7 +6050,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v128i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
@@ -6448,7 +6448,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v128i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
@@ -6494,7 +6494,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v128i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
; AVX512DQ-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
@@ -6540,7 +6540,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v128i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm3, %zmm1, %k0
; AVX512BW-NEXT: vpcmpgtb %zmm2, %zmm0, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
@@ -6556,7 +6556,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
; SSE2-NEXT: cmpltpd %xmm5, %xmm8
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
@@ -6725,7 +6725,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32f64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pushq %rbp
; SSE42-NEXT: pushq %r15
; SSE42-NEXT: pushq %r14
@@ -6904,7 +6904,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -6953,7 +6953,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -6993,7 +6993,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm2, %zmm6, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k1
; AVX512F-NEXT: kshiftrw $15, %k1, %k1
@@ -7134,7 +7134,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm2, %zmm6, %k0
; AVX512DQ-NEXT: kshiftlb $6, %k0, %k1
; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1
@@ -7271,7 +7271,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm4, %k0
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm5, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
@@ -7288,7 +7288,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
@@ -7622,7 +7622,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
@@ -7781,7 +7781,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -7846,7 +7846,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -7878,7 +7878,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm6, %zmm2, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k1
; AVX512F-NEXT: kshiftrw $15, %k1, %k1
@@ -8019,7 +8019,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm6, %zmm2, %k0
; AVX512DQ-NEXT: kshiftlb $6, %k0, %k1
; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1
@@ -8156,7 +8156,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm4, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtq %zmm5, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
diff --git a/test/CodeGen/X86/vector-extend-inreg.ll b/test/CodeGen/X86/vector-extend-inreg.ll
index e111dcb4102..6741e2abb11 100644
--- a/test/CodeGen/X86/vector-extend-inreg.ll
+++ b/test/CodeGen/X86/vector-extend-inreg.ll
@@ -6,7 +6,7 @@
define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) nounwind {
; X32-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pushl %ebp
; X32-SSE-NEXT: movl %esp, %ebp
; X32-SSE-NEXT: andl $-128, %esp
@@ -42,7 +42,7 @@ define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) noun
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pushq %rbp
; X64-SSE-NEXT: movq %rsp, %rbp
; X64-SSE-NEXT: andq $-128, %rsp
@@ -65,7 +65,7 @@ define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) noun
; X64-SSE-NEXT: retq
;
; X32-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: pushl %ebp
; X32-AVX-NEXT: movl %esp, %ebp
; X32-AVX-NEXT: andl $-128, %esp
@@ -94,7 +94,7 @@ define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) noun
; X32-AVX-NEXT: retl
;
; X64-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: pushq %rbp
; X64-AVX-NEXT: movq %rsp, %rbp
; X64-AVX-NEXT: andq $-128, %rsp
diff --git a/test/CodeGen/X86/vector-half-conversions.ll b/test/CodeGen/X86/vector-half-conversions.ll
index dba0b084629..0df55fee6ee 100644
--- a/test/CodeGen/X86/vector-half-conversions.ll
+++ b/test/CodeGen/X86/vector-half-conversions.ll
@@ -10,7 +10,7 @@
define float @cvt_i16_to_f32(i16 %a0) nounwind {
; ALL-LABEL: cvt_i16_to_f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl %di, %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -22,7 +22,7 @@ define float @cvt_i16_to_f32(i16 %a0) nounwind {
define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_4i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %rcx
@@ -49,7 +49,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %rcx
@@ -76,7 +76,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4i16_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
@@ -103,7 +103,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4i16_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512VL-NEXT: movq %rax, %rcx
@@ -135,7 +135,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
@@ -161,7 +161,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
@@ -187,7 +187,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_8i16_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
@@ -213,7 +213,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8i16_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
@@ -247,7 +247,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind {
; ALL-LABEL: cvt_8i16_to_8f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpextrq $1, %xmm0, %rdx
; ALL-NEXT: movq %rdx, %r8
; ALL-NEXT: movq %rdx, %r10
@@ -301,7 +301,7 @@ define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind {
define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_16i16_to_16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vmovq %xmm4, %rax
; AVX1-NEXT: movq %rax, %rcx
@@ -396,7 +396,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_16i16_to_16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: movq %rax, %rcx
@@ -491,7 +491,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_16i16_to_16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm10
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
@@ -587,7 +587,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_16i16_to_16f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm10
; AVX512VL-NEXT: vmovq %xmm0, %rax
; AVX512VL-NEXT: movq %rax, %rcx
@@ -692,7 +692,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
define float @load_cvt_i16_to_f32(i16* %a0) nounwind {
; ALL-LABEL: load_cvt_i16_to_f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -705,7 +705,7 @@ define float @load_cvt_i16_to_f32(i16* %a0) nounwind {
define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) nounwind {
; ALL-LABEL: load_cvt_4i16_to_4f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl 6(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -730,7 +730,7 @@ define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) nounwind {
define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX1-LABEL: load_cvt_8i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
@@ -756,7 +756,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
@@ -782,7 +782,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_cvt_8i16_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movq (%rdi), %rax
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
@@ -808,7 +808,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_8i16_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
@@ -843,7 +843,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) nounwind {
; ALL-LABEL: load_cvt_8i16_to_8f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl 6(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -884,7 +884,7 @@ define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) nounwind {
define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
; AVX1-LABEL: load_cvt_16i16_to_16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movswl 22(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm8
@@ -950,7 +950,7 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_16i16_to_16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movswl 22(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm8
@@ -1016,7 +1016,7 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_cvt_16i16_to_16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movswl 6(%rdi), %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm8
@@ -1083,7 +1083,7 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_16i16_to_16f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movswl 6(%rdi), %eax
; AVX512VL-NEXT: vmovd %eax, %xmm0
; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm8
@@ -1160,7 +1160,7 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
define double @cvt_i16_to_f64(i16 %a0) nounwind {
; ALL-LABEL: cvt_i16_to_f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl %di, %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -1173,7 +1173,7 @@ define double @cvt_i16_to_f64(i16 %a0) nounwind {
define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_2i16_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vmovd %xmm0, %eax
@@ -1190,7 +1190,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_2i16_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: vmovd %xmm0, %eax
@@ -1207,7 +1207,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_2i16_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, %eax
@@ -1224,7 +1224,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_2i16_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovqw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; AVX512VL-NEXT: movswl %ax, %ecx
@@ -1245,7 +1245,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_4i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %rcx
@@ -1275,7 +1275,7 @@ define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %rcx
@@ -1305,7 +1305,7 @@ define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4i16_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
@@ -1335,7 +1335,7 @@ define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4i16_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512VL-NEXT: movq %rax, %rcx
@@ -1370,7 +1370,7 @@ define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: movswl %ax, %ecx
; AVX1-NEXT: shrl $16, %eax
@@ -1385,7 +1385,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: movswl %ax, %ecx
; AVX2-NEXT: shrl $16, %eax
@@ -1400,7 +1400,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_8i16_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovd %xmm0, %eax
; AVX512F-NEXT: movswl %ax, %ecx
; AVX512F-NEXT: shrl $16, %eax
@@ -1415,7 +1415,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8i16_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512VL-NEXT: vpmovqw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movl -{{[0-9]+}}(%rsp), %eax
@@ -1438,7 +1438,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movl %eax, %edx
@@ -1467,7 +1467,7 @@ define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movl %eax, %edx
@@ -1496,7 +1496,7 @@ define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_8i16_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movl %eax, %edx
@@ -1525,7 +1525,7 @@ define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8i16_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
@@ -1562,7 +1562,7 @@ define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %rdx
; AVX1-NEXT: movq %rdx, %r9
; AVX1-NEXT: movl %edx, %r10d
@@ -1616,7 +1616,7 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm0, %rdx
; AVX2-NEXT: movq %rdx, %r9
; AVX2-NEXT: movl %edx, %r10d
@@ -1670,7 +1670,7 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
; AVX512-NEXT: movq %rdx, %r9
; AVX512-NEXT: movl %edx, %r10d
@@ -1734,7 +1734,7 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
define double @load_cvt_i16_to_f64(i16* %a0) nounwind {
; ALL-LABEL: load_cvt_i16_to_f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -1748,7 +1748,7 @@ define double @load_cvt_i16_to_f64(i16* %a0) nounwind {
define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind {
; ALL-LABEL: load_cvt_2i16_to_2f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -1767,7 +1767,7 @@ define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind {
define <4 x double> @load_cvt_4i16_to_4f64(<4 x i16>* %a0) nounwind {
; ALL-LABEL: load_cvt_4i16_to_4f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -1796,7 +1796,7 @@ define <4 x double> @load_cvt_4i16_to_4f64(<4 x i16>* %a0) nounwind {
define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
; AVX1-LABEL: load_cvt_8i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movl %eax, %edx
@@ -1825,7 +1825,7 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movl %eax, %edx
@@ -1854,7 +1854,7 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_cvt_8i16_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movq (%rdi), %rax
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movl %eax, %edx
@@ -1883,7 +1883,7 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_8i16_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
@@ -1921,7 +1921,7 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind {
; AVX1-LABEL: load_cvt_8i16_to_8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movswl 8(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm1
@@ -1963,7 +1963,7 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movswl 8(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
@@ -2005,7 +2005,7 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_cvt_8i16_to_8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: movswl (%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -2058,7 +2058,7 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind {
define i16 @cvt_f32_to_i16(float %a0) nounwind {
; ALL-LABEL: cvt_f32_to_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -2070,7 +2070,7 @@ define i16 @cvt_f32_to_i16(float %a0) nounwind {
define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind {
; ALL-LABEL: cvt_4f32_to_4i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
@@ -2099,7 +2099,7 @@ define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind {
define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX1-LABEL: cvt_4f32_to_8i16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
@@ -2124,7 +2124,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f32_to_8i16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
@@ -2149,7 +2149,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f32_to_8i16_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
@@ -2174,7 +2174,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f32_to_8i16_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
@@ -2206,7 +2206,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX1-LABEL: cvt_4f32_to_8i16_zero:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
@@ -2231,7 +2231,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f32_to_8i16_zero:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
@@ -2256,7 +2256,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f32_to_8i16_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
@@ -2281,7 +2281,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f32_to_8i16_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
@@ -2315,7 +2315,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind {
; ALL-LABEL: cvt_8f32_to_8i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
@@ -2367,7 +2367,7 @@ define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind {
define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind {
; AVX1-LABEL: cvt_16f32_to_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX1-NEXT: vmovd %xmm2, %eax
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
@@ -2434,7 +2434,7 @@ define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_16f32_to_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX2-NEXT: vmovd %xmm2, %eax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
@@ -2501,7 +2501,7 @@ define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_16f32_to_16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX512-NEXT: vmovd %xmm2, %eax
@@ -2578,7 +2578,7 @@ define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind {
define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind {
; ALL-LABEL: store_cvt_f32_to_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: movw %ax, (%rdi)
@@ -2591,7 +2591,7 @@ define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind {
define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) nounwind {
; ALL-LABEL: store_cvt_4f32_to_4i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
@@ -2616,7 +2616,7 @@ define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) nounwind {
define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f32_to_8i16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
@@ -2642,7 +2642,7 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f32_to_8i16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
@@ -2668,7 +2668,7 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_cvt_4f32_to_8i16_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
@@ -2694,7 +2694,7 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f32_to_8i16_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
@@ -2728,7 +2728,7 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f32_to_8i16_zero:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
@@ -2754,7 +2754,7 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f32_to_8i16_zero:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
@@ -2780,7 +2780,7 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_cvt_4f32_to_8i16_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
@@ -2806,7 +2806,7 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f32_to_8i16_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
@@ -2842,7 +2842,7 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind {
; ALL-LABEL: store_cvt_8f32_to_8i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %r8d
@@ -2884,7 +2884,7 @@ define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind {
define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_16f32_to_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm4
@@ -2951,7 +2951,7 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_16f32_to_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm4
@@ -3018,7 +3018,7 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_16f32_to_16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -3096,7 +3096,7 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin
define i16 @cvt_f64_to_i16(double %a0) nounwind {
; ALL-LABEL: cvt_f64_to_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: jmp __truncdfhf2 # TAILCALL
%1 = fptrunc double %a0 to half
%2 = bitcast half %1 to i16
@@ -3105,7 +3105,7 @@ define i16 @cvt_f64_to_i16(double %a0) nounwind {
define <2 x i16> @cvt_2f64_to_2i16(<2 x double> %a0) nounwind {
; ALL-LABEL: cvt_2f64_to_2i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbx
; ALL-NEXT: subq $16, %rsp
; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
@@ -3128,7 +3128,7 @@ define <2 x i16> @cvt_2f64_to_2i16(<2 x double> %a0) nounwind {
define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX1-LABEL: cvt_4f64_to_4i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $40, %rsp
@@ -3165,7 +3165,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_4i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $40, %rsp
@@ -3202,7 +3202,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_4f64_to_4i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $40, %rsp
@@ -3244,7 +3244,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX1-LABEL: cvt_4f64_to_8i16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $40, %rsp
@@ -3282,7 +3282,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_8i16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $40, %rsp
@@ -3320,7 +3320,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f64_to_8i16_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %r14
; AVX512F-NEXT: pushq %rbx
; AVX512F-NEXT: subq $40, %rsp
@@ -3358,7 +3358,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f64_to_8i16_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %r14
; AVX512VL-NEXT: pushq %rbx
; AVX512VL-NEXT: subq $40, %rsp
@@ -3403,7 +3403,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX1-LABEL: cvt_4f64_to_8i16_zero:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $40, %rsp
@@ -3441,7 +3441,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_8i16_zero:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $40, %rsp
@@ -3479,7 +3479,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f64_to_8i16_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %r14
; AVX512F-NEXT: pushq %rbx
; AVX512F-NEXT: subq $40, %rsp
@@ -3517,7 +3517,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f64_to_8i16_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %r14
; AVX512VL-NEXT: pushq %rbx
; AVX512VL-NEXT: subq $40, %rsp
@@ -3564,7 +3564,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX1-LABEL: cvt_8f64_to_8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
@@ -3631,7 +3631,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8f64_to_8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -3698,7 +3698,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_8f64_to_8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
@@ -3776,7 +3776,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
define void @store_cvt_f64_to_i16(double %a0, i16* %a1) nounwind {
; ALL-LABEL: store_cvt_f64_to_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbx
; ALL-NEXT: movq %rdi, %rbx
; ALL-NEXT: callq __truncdfhf2
@@ -3791,7 +3791,7 @@ define void @store_cvt_f64_to_i16(double %a0, i16* %a1) nounwind {
define void @store_cvt_2f64_to_2i16(<2 x double> %a0, <2 x i16>* %a1) nounwind {
; ALL-LABEL: store_cvt_2f64_to_2i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: pushq %rbx
; ALL-NEXT: subq $24, %rsp
@@ -3816,7 +3816,7 @@ define void @store_cvt_2f64_to_2i16(<2 x double> %a0, <2 x i16>* %a1) nounwind {
define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_4i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
@@ -3854,7 +3854,7 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_4i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
@@ -3892,7 +3892,7 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f64_to_4i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: pushq %r14
@@ -3936,7 +3936,7 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
@@ -3978,7 +3978,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -4020,7 +4020,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: pushq %r14
; AVX512F-NEXT: pushq %rbx
@@ -4062,7 +4062,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: pushq %r14
; AVX512VL-NEXT: pushq %rbx
@@ -4112,7 +4112,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
@@ -4154,7 +4154,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -4196,7 +4196,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: pushq %r14
; AVX512F-NEXT: pushq %rbx
@@ -4238,7 +4238,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: pushq %r14
; AVX512VL-NEXT: pushq %rbx
@@ -4290,7 +4290,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_8f64_to_8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
@@ -4358,7 +4358,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_8f64_to_8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
@@ -4426,7 +4426,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_8f64_to_8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: pushq %r14
diff --git a/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/test/CodeGen/X86/vector-idiv-sdiv-128.ll
index 4fa7f747ed4..61787fc19df 100644
--- a/test/CodeGen/X86/vector-idiv-sdiv-128.ll
+++ b/test/CodeGen/X86/vector-idiv-sdiv-128.ll
@@ -11,7 +11,7 @@
define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_div7_2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
; SSE2-NEXT: imulq %rcx
@@ -33,7 +33,7 @@ define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
; SSE41-NEXT: imulq %rcx
@@ -53,7 +53,7 @@ define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test_div7_2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rax
; AVX-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
; AVX-NEXT: imulq %rcx
@@ -77,7 +77,7 @@ define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_div7_4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
@@ -103,7 +103,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -120,7 +120,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_div7_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -135,7 +135,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -154,7 +154,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_div7_8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $15, %xmm1
@@ -163,7 +163,7 @@ define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: test_div7_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlw $15, %xmm0, %xmm1
; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
@@ -175,7 +175,7 @@ define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
@@ -202,7 +202,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -226,7 +226,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_div7_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -248,7 +248,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_div7_16i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -267,7 +267,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_div7_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -293,7 +293,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_rem7_2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rcx
; SSE2-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
; SSE2-NEXT: movq %rcx, %rax
@@ -323,7 +323,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rcx
; SSE41-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
; SSE41-NEXT: movq %rcx, %rax
@@ -351,7 +351,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test_rem7_2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rcx
; AVX-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
; AVX-NEXT: movq %rcx, %rax
@@ -383,7 +383,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_rem7_4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
@@ -416,7 +416,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -434,7 +434,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -451,7 +451,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -473,7 +473,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_rem7_8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [18725,18725,18725,18725,18725,18725,18725,18725]
; SSE-NEXT: pmulhw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
@@ -485,7 +485,7 @@ define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: test_rem7_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vpsrlw $15, %xmm1, %xmm2
; AVX-NEXT: vpsraw $1, %xmm1, %xmm1
@@ -499,7 +499,7 @@ define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
@@ -538,7 +538,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -572,7 +572,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -605,7 +605,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_16i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -632,7 +632,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
diff --git a/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/test/CodeGen/X86/vector-idiv-sdiv-256.ll
index 28e7194bdc4..a9d5976ee7d 100644
--- a/test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ b/test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -9,7 +9,7 @@
define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_div7_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
@@ -46,7 +46,7 @@ define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
; AVX2-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
@@ -87,7 +87,7 @@ define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_div7_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
@@ -115,7 +115,7 @@ define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
@@ -134,7 +134,7 @@ define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: test_div7_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
; AVX1-NEXT: vpmulhw %xmm2, %xmm1, %xmm1
@@ -149,7 +149,7 @@ define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm1
; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
@@ -161,7 +161,7 @@ define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_div7_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
@@ -203,7 +203,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_div7_32i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
@@ -227,7 +227,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_div7_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
@@ -252,7 +252,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_rem7_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
; AVX1-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
@@ -305,7 +305,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
@@ -362,7 +362,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_rem7_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
@@ -395,7 +395,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
@@ -417,7 +417,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: test_rem7_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
; AVX1-NEXT: vpmulhw %xmm2, %xmm1, %xmm3
@@ -437,7 +437,7 @@ define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $15, %ymm1, %ymm2
; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
@@ -451,7 +451,7 @@ define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_rem7_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65427,65427,65427,65427,65427,65427,65427,65427]
@@ -513,7 +513,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_32i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
@@ -554,7 +554,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
diff --git a/test/CodeGen/X86/vector-idiv-sdiv-512.ll b/test/CodeGen/X86/vector-idiv-sdiv-512.ll
index 9f8bd4a90a2..d01c79f4c05 100644
--- a/test/CodeGen/X86/vector-idiv-sdiv-512.ll
+++ b/test/CodeGen/X86/vector-idiv-sdiv-512.ll
@@ -8,7 +8,7 @@
define <8 x i64> @test_div7_8i64(<8 x i64> %a) nounwind {
; AVX-LABEL: test_div7_8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrq $1, %xmm1, %rax
; AVX-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
@@ -83,7 +83,7 @@ define <8 x i64> @test_div7_8i64(<8 x i64> %a) nounwind {
define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-LABEL: test_div7_16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} zmm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX-NEXT: vpmuldq %zmm1, %zmm0, %zmm2
; AVX-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -102,7 +102,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: test_div7_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725]
; AVX512F-NEXT: vpmulhw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $15, %ymm0, %ymm3
@@ -115,7 +115,7 @@ define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_div7_32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmulhw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $15, %zmm0, %zmm1
; AVX512BW-NEXT: vpsraw $1, %zmm0, %zmm0
@@ -127,7 +127,7 @@ define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind {
define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: test_div7_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
@@ -171,7 +171,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_div7_64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
@@ -203,7 +203,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
define <8 x i64> @test_rem7_8i64(<8 x i64> %a) nounwind {
; AVX-LABEL: test_rem7_8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
; AVX-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
@@ -310,7 +310,7 @@ define <8 x i64> @test_rem7_8i64(<8 x i64> %a) nounwind {
define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-LABEL: test_rem7_16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} zmm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX-NEXT: vpmuldq %zmm1, %zmm0, %zmm2
; AVX-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -331,7 +331,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: test_rem7_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725]
; AVX512F-NEXT: vpmulhw %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vpsrlw $15, %ymm3, %ymm4
@@ -349,7 +349,7 @@ define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmulhw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlw $15, %zmm1, %zmm2
; AVX512BW-NEXT: vpsraw $1, %zmm1, %zmm1
@@ -363,7 +363,7 @@ define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind {
define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: test_rem7_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
@@ -430,7 +430,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
diff --git a/test/CodeGen/X86/vector-idiv-udiv-128.ll b/test/CodeGen/X86/vector-idiv-udiv-128.ll
index ede9c9fe9bd..9788cc037d4 100644
--- a/test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ b/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -11,7 +11,7 @@
define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_div7_2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rcx
; SSE2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; SSE2-NEXT: movq %rcx, %rax
@@ -35,7 +35,7 @@ define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rcx
; SSE41-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; SSE41-NEXT: movq %rcx, %rax
@@ -57,7 +57,7 @@ define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test_div7_2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rcx
; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; AVX-NEXT: movq %rcx, %rax
@@ -83,7 +83,7 @@ define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_div7_4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmuludq %xmm1, %xmm2
@@ -100,7 +100,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -115,7 +115,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_div7_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -130,7 +130,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -149,7 +149,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_div7_8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
; SSE-NEXT: pmulhuw %xmm0, %xmm1
; SSE-NEXT: psubw %xmm1, %xmm0
@@ -159,7 +159,7 @@ define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: test_div7_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
@@ -172,7 +172,7 @@ define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -193,7 +193,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -212,7 +212,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_div7_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -231,7 +231,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_div7_16i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -247,7 +247,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_div7_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -270,7 +270,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_rem7_2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rcx
; SSE2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; SSE2-NEXT: movq %rcx, %rax
@@ -302,7 +302,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rcx
; SSE41-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; SSE41-NEXT: movq %rcx, %rax
@@ -332,7 +332,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: test_rem7_2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rcx
; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; AVX-NEXT: movq %rcx, %rax
@@ -366,7 +366,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_rem7_4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmuludq %xmm1, %xmm2
@@ -392,7 +392,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -410,7 +410,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -427,7 +427,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -449,7 +449,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_rem7_8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
; SSE-NEXT: pmulhuw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -462,7 +462,7 @@ define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: test_rem7_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpsrlw $1, %xmm2, %xmm2
@@ -477,7 +477,7 @@ define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -512,7 +512,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -543,7 +543,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -573,7 +573,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_16i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -597,7 +597,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
diff --git a/test/CodeGen/X86/vector-idiv-udiv-256.ll b/test/CodeGen/X86/vector-idiv-udiv-256.ll
index e2a7f7cf16d..602f050935d 100644
--- a/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -9,7 +9,7 @@
define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_div7_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
; AVX1-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -50,7 +50,7 @@ define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -95,7 +95,7 @@ define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_div7_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -123,7 +123,7 @@ define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
@@ -142,7 +142,7 @@ define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: test_div7_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm3
@@ -159,7 +159,7 @@ define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
@@ -172,7 +172,7 @@ define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_div7_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
@@ -209,7 +209,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_div7_32i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
@@ -230,7 +230,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_div7_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
@@ -252,7 +252,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_rem7_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
; AVX1-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -309,7 +309,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -370,7 +370,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_rem7_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
@@ -403,7 +403,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
@@ -425,7 +425,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: test_rem7_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363]
; AVX1-NEXT: vpmulhuw %xmm2, %xmm1, %xmm3
@@ -447,7 +447,7 @@ define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm2
@@ -462,7 +462,7 @@ define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_rem7_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37]
@@ -519,7 +519,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_32i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
@@ -557,7 +557,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
diff --git a/test/CodeGen/X86/vector-idiv-udiv-512.ll b/test/CodeGen/X86/vector-idiv-udiv-512.ll
index 9066dd91df8..cd38e37fc25 100644
--- a/test/CodeGen/X86/vector-idiv-udiv-512.ll
+++ b/test/CodeGen/X86/vector-idiv-udiv-512.ll
@@ -8,7 +8,7 @@
define <8 x i64> @test_div7_8i64(<8 x i64> %a) nounwind {
; AVX-LABEL: test_div7_8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -91,7 +91,7 @@ define <8 x i64> @test_div7_8i64(<8 x i64> %a) nounwind {
define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-LABEL: test_div7_16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} zmm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX-NEXT: vpmuludq %zmm1, %zmm0, %zmm2
; AVX-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -110,7 +110,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: test_div7_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363]
; AVX512F-NEXT: vpmulhuw %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vpsubw %ymm3, %ymm0, %ymm0
@@ -125,7 +125,7 @@ define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_div7_32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmulhuw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm0
@@ -138,7 +138,7 @@ define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind {
define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: test_div7_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
@@ -177,7 +177,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_div7_64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
@@ -206,7 +206,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
define <8 x i64> @test_rem7_8i64(<8 x i64> %a) nounwind {
; AVX-LABEL: test_rem7_8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -321,7 +321,7 @@ define <8 x i64> @test_rem7_8i64(<8 x i64> %a) nounwind {
define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-LABEL: test_rem7_16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} zmm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX-NEXT: vpmuludq %zmm1, %zmm0, %zmm2
; AVX-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -342,7 +342,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: test_rem7_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363]
; AVX512F-NEXT: vpmulhuw %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vpsubw %ymm3, %ymm0, %ymm4
@@ -362,7 +362,7 @@ define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmulhuw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2
@@ -377,7 +377,7 @@ define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind {
define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: test_rem7_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
@@ -439,7 +439,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll
index c65c3e7fd00..e2f769761e1 100644
--- a/test/CodeGen/X86/vector-idiv.ll
+++ b/test/CodeGen/X86/vector-idiv.ll
@@ -6,12 +6,12 @@
define <2 x i16> @test_urem_unary_v2i16() nounwind {
; SSE-LABEL: test_urem_unary_v2i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_urem_unary_v2i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%I8 = insertelement <2 x i16> zeroinitializer, i16 -1, i32 0
@@ -22,7 +22,7 @@ define <2 x i16> @test_urem_unary_v2i16() nounwind {
define <4 x i32> @PR20355(<4 x i32> %a) nounwind {
; SSE2-LABEL: PR20355:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
@@ -45,7 +45,7 @@ define <4 x i32> @PR20355(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: PR20355:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -59,7 +59,7 @@ define <4 x i32> @PR20355(<4 x i32> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: PR20355:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -72,7 +72,7 @@ define <4 x i32> @PR20355(<4 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR20355:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
diff --git a/test/CodeGen/X86/vector-interleave.ll b/test/CodeGen/X86/vector-interleave.ll
index 9324398ff0e..04e6ccc0003 100644
--- a/test/CodeGen/X86/vector-interleave.ll
+++ b/test/CodeGen/X86/vector-interleave.ll
@@ -9,7 +9,7 @@
; PR21281
define <64 x i16> @interleave8x8(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e, <8 x i16> %f, <8 x i16> %h, <8 x i16> %g) {
; SSE-LABEL: interleave8x8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm8
; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -58,7 +58,7 @@ define <64 x i16> @interleave8x8(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x
; SSE-NEXT: retq
;
; AVX1-LABEL: interleave8x8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
@@ -90,7 +90,7 @@ define <64 x i16> @interleave8x8(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave8x8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
diff --git a/test/CodeGen/X86/vector-lzcnt-128.ll b/test/CodeGen/X86/vector-lzcnt-128.ll
index b23730f57ff..59f8fe4e103 100644
--- a/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -15,7 +15,7 @@
define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -55,7 +55,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv2i64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlq $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -95,7 +95,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -131,7 +131,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -167,7 +167,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -197,7 +197,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv2i64:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -227,12 +227,12 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv2i64:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv2i64:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -240,7 +240,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -281,7 +281,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -321,7 +321,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv2i64u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlq $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -361,7 +361,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv2i64u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -397,7 +397,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv2i64u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -433,7 +433,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv2i64u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -463,7 +463,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv2i64u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -493,12 +493,12 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv2i64u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv2i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -506,7 +506,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv2i64u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -547,7 +547,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -589,7 +589,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv4i32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrld $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -631,7 +631,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -661,7 +661,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -691,7 +691,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -716,7 +716,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv4i32:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -741,12 +741,12 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv4i32:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -754,7 +754,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -789,7 +789,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -831,7 +831,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv4i32u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrld $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -873,7 +873,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv4i32u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -903,7 +903,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv4i32u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -933,7 +933,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv4i32u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -958,7 +958,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv4i32u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -983,12 +983,12 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv4i32u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -996,7 +996,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv4i32u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -1031,7 +1031,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -1067,7 +1067,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv8i16:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -1103,7 +1103,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -1127,7 +1127,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -1151,7 +1151,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1171,7 +1171,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv8i16:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1191,7 +1191,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv8i16:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0
@@ -1200,7 +1200,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i16:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -1209,7 +1209,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -1237,7 +1237,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -1273,7 +1273,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv8i16u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -1309,7 +1309,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv8i16u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -1333,7 +1333,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv8i16u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -1357,7 +1357,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv8i16u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1377,7 +1377,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv8i16u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1397,7 +1397,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv8i16u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0
@@ -1406,7 +1406,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i16u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -1415,7 +1415,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv8i16u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -1443,7 +1443,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1476,7 +1476,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv16i8:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1509,7 +1509,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pand %xmm2, %xmm3
@@ -1527,7 +1527,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pand %xmm2, %xmm3
@@ -1545,7 +1545,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1560,7 +1560,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv16i8:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1575,7 +1575,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1584,7 +1584,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: testv16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: pand %xmm2, %xmm3
@@ -1606,7 +1606,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1639,7 +1639,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv16i8u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1672,7 +1672,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv16i8u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pand %xmm2, %xmm3
@@ -1690,7 +1690,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv16i8u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pand %xmm2, %xmm3
@@ -1708,7 +1708,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv16i8u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1723,7 +1723,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv16i8u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1738,7 +1738,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv16i8u:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1747,7 +1747,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: testv16i8u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: pand %xmm2, %xmm3
@@ -1769,25 +1769,25 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
define <2 x i64> @foldv2i64() nounwind {
; SSE-LABEL: foldv2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $55, %eax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv2i64:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: movl $55, %eax
; NOBW-NEXT: vmovq %rax, %xmm0
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv2i64:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: movl $55, %eax
; AVX512VLBWDQ-NEXT: vmovq %rax, %xmm0
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl $55, %eax
; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
@@ -1797,25 +1797,25 @@ define <2 x i64> @foldv2i64() nounwind {
define <2 x i64> @foldv2i64u() nounwind {
; SSE-LABEL: foldv2i64u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $55, %eax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv2i64u:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: movl $55, %eax
; NOBW-NEXT: vmovq %rax, %xmm0
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv2i64u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: movl $55, %eax
; AVX512VLBWDQ-NEXT: vmovq %rax, %xmm0
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv2i64u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl $55, %eax
; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
@@ -1825,22 +1825,22 @@ define <2 x i64> @foldv2i64u() nounwind {
define <4 x i32> @foldv4i32() nounwind {
; SSE-LABEL: foldv4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv4i32:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv4i32:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; X32-SSE-NEXT: retl
%out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
@@ -1849,22 +1849,22 @@ define <4 x i32> @foldv4i32() nounwind {
define <4 x i32> @foldv4i32u() nounwind {
; SSE-LABEL: foldv4i32u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv4i32u:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv4i32u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; X32-SSE-NEXT: retl
%out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
@@ -1873,22 +1873,22 @@ define <4 x i32> @foldv4i32u() nounwind {
define <8 x i16> @foldv8i16() nounwind {
; SSE-LABEL: foldv8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv8i16:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv8i16:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; X32-SSE-NEXT: retl
%out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
@@ -1897,22 +1897,22 @@ define <8 x i16> @foldv8i16() nounwind {
define <8 x i16> @foldv8i16u() nounwind {
; SSE-LABEL: foldv8i16u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv8i16u:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv8i16u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; X32-SSE-NEXT: retl
%out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
@@ -1921,22 +1921,22 @@ define <8 x i16> @foldv8i16u() nounwind {
define <16 x i8> @foldv16i8() nounwind {
; SSE-LABEL: foldv16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv16i8:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv16i8:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; X32-SSE-NEXT: retl
%out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
@@ -1945,22 +1945,22 @@ define <16 x i8> @foldv16i8() nounwind {
define <16 x i8> @foldv16i8u() nounwind {
; SSE-LABEL: foldv16i8u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv16i8u:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv16i8u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; X32-SSE-NEXT: retl
%out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)
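(Note: the hunks in this file are mechanical. llc now prints the comment at the start of each machine basic block as "# %bb.0:" instead of "# BB#0:", so every autogenerated CHECK line matching the old spelling is regenerated rather than hand-edited. A minimal sketch of that regeneration step, assuming a built llc at build/bin/llc and the in-tree update script with its --llc-binary option:

    $ python utils/update_llc_test_checks.py \
        --llc-binary build/bin/llc \
        test/CodeGen/X86/vector-lzcnt-128.ll

The script re-runs each RUN line in the test and rewrites the ;-prefixed CHECK blocks to match the new assembly output, which is why only the "# BB#0:" lines change in these hunks.)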
diff --git a/test/CodeGen/X86/vector-lzcnt-256.ll b/test/CodeGen/X86/vector-lzcnt-256.ll
index 54b53272288..55f797a2cc1 100644
--- a/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -11,7 +11,7 @@
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm1
@@ -66,7 +66,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -96,7 +96,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -126,7 +126,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv4i64:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -156,19 +156,19 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv4i64:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntq %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i64:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -203,7 +203,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm1
@@ -258,7 +258,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i64u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -288,7 +288,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv4i64u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -318,7 +318,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv4i64u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -348,19 +348,19 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv4i64u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntq %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -395,7 +395,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -440,7 +440,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -465,7 +465,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -490,7 +490,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv8i32:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -515,19 +515,19 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv8i32:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -557,7 +557,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -602,7 +602,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv8i32u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -627,7 +627,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv8i32u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -652,7 +652,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv8i32u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -677,19 +677,19 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv8i32u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -719,7 +719,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -754,7 +754,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -774,7 +774,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -794,7 +794,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv16i16:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -814,7 +814,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
@@ -822,7 +822,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX512-NEXT: retq
;
; X32-AVX-LABEL: testv16i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -846,7 +846,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -881,7 +881,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv16i16u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -901,7 +901,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv16i16u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -921,7 +921,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv16i16u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -941,7 +941,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv16i16u:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
@@ -949,7 +949,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX512-NEXT: retq
;
; X32-AVX-LABEL: testv16i16u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -973,7 +973,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -998,7 +998,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1013,7 +1013,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1028,7 +1028,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv32i8:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1043,7 +1043,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm1, %zmm1
@@ -1058,7 +1058,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX512-NEXT: retq
;
; X32-AVX-LABEL: testv32i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1077,7 +1077,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -1102,7 +1102,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv32i8u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1117,7 +1117,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv32i8u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1132,7 +1132,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv32i8u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1147,7 +1147,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv32i8u:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm1, %zmm1
@@ -1162,7 +1162,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX512-NEXT: retq
;
; X32-AVX-LABEL: testv32i8u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1181,12 +1181,12 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
define <4 x i64> @foldv4i64() nounwind {
; X64-LABEL: foldv4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv4i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
@@ -1195,12 +1195,12 @@ define <4 x i64> @foldv4i64() nounwind {
define <4 x i64> @foldv4i64u() nounwind {
; X64-LABEL: foldv4i64u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv4i64u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
@@ -1209,12 +1209,12 @@ define <4 x i64> @foldv4i64u() nounwind {
define <8 x i32> @foldv8i32() nounwind {
; X64-LABEL: foldv8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv8i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X32-AVX-NEXT: retl
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
@@ -1223,12 +1223,12 @@ define <8 x i32> @foldv8i32() nounwind {
define <8 x i32> @foldv8i32u() nounwind {
; X64-LABEL: foldv8i32u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv8i32u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X32-AVX-NEXT: retl
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
@@ -1237,12 +1237,12 @@ define <8 x i32> @foldv8i32u() nounwind {
define <16 x i16> @foldv16i16() nounwind {
; X64-LABEL: foldv16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv16i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X32-AVX-NEXT: retl
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
@@ -1251,12 +1251,12 @@ define <16 x i16> @foldv16i16() nounwind {
define <16 x i16> @foldv16i16u() nounwind {
; X64-LABEL: foldv16i16u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv16i16u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X32-AVX-NEXT: retl
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
@@ -1265,12 +1265,12 @@ define <16 x i16> @foldv16i16u() nounwind {
define <32 x i8> @foldv32i8() nounwind {
; X64-LABEL: foldv32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv32i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X32-AVX-NEXT: retl
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
@@ -1279,12 +1279,12 @@ define <32 x i8> @foldv32i8() nounwind {
define <32 x i8> @foldv32i8u() nounwind {
; X64-LABEL: foldv32i8u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv32i8u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X32-AVX-NEXT: retl
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
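(For reference, a minimal sketch of the renamed label itself, using a hypothetical function that is not part of these tests; the exact instructions emitted vary with optimization level and subtarget:

    define i32 @sum(i32 %a, i32 %b) nounwind {
      %r = add i32 %a, %b
      ret i32 %r
    }

Compiled with llc -mtriple=x86_64--, the entry-block comment that previously read "# BB#0:" now prints as:

    sum:                                    # @sum
    # %bb.0:
            leal    (%rdi,%rsi), %eax
            retq

The "%bb.N" spelling matches how MIR refers to machine basic blocks, so block references read the same in MIR dumps and in final assembly comments.)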
diff --git a/test/CodeGen/X86/vector-lzcnt-512.ll b/test/CodeGen/X86/vector-lzcnt-512.ll
index 997992c0dab..9b66983cae3 100644
--- a/test/CodeGen/X86/vector-lzcnt-512.ll
+++ b/test/CodeGen/X86/vector-lzcnt-512.ll
@@ -6,17 +6,17 @@
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv8i64:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1
@@ -44,7 +44,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv8i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1
@@ -85,17 +85,17 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv8i64u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv8i64u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1
@@ -123,7 +123,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv8i64u:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1
@@ -164,17 +164,17 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv16i32:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1
@@ -204,7 +204,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv16i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1
@@ -251,17 +251,17 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv16i32u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv16i32u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1
@@ -291,7 +291,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv16i32u:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1
@@ -338,7 +338,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CD-LABEL: testv32i16:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -351,7 +351,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv32i16:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1
@@ -366,7 +366,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -388,7 +388,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -425,7 +425,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CD-LABEL: testv32i16u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -438,7 +438,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv32i16u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1
@@ -453,7 +453,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv32i16u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -475,7 +475,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv32i16u:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -512,7 +512,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CD-LABEL: testv64i8:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512CD-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; AVX512CD-NEXT: vplzcntd %zmm2, %zmm2
@@ -537,7 +537,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv64i8:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
@@ -564,7 +564,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -579,7 +579,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -606,7 +606,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CD-LABEL: testv64i8u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512CD-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; AVX512CD-NEXT: vplzcntd %zmm2, %zmm2
@@ -631,7 +631,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv64i8u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
@@ -658,7 +658,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv64i8u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -673,7 +673,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv64i8u:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
diff --git a/test/CodeGen/X86/vector-merge-store-fp-constants.ll b/test/CodeGen/X86/vector-merge-store-fp-constants.ll
index 94fd4df09cf..c7b3a89e9ff 100644
--- a/test/CodeGen/X86/vector-merge-store-fp-constants.ll
+++ b/test/CodeGen/X86/vector-merge-store-fp-constants.ll
@@ -4,7 +4,7 @@
define void @merge_8_float_zero_stores(float* %ptr) {
; DEFAULTCPU-LABEL: merge_8_float_zero_stores:
-; DEFAULTCPU: # BB#0:
+; DEFAULTCPU: # %bb.0:
; DEFAULTCPU-NEXT: movq $0, (%rdi)
; DEFAULTCPU-NEXT: movq $0, 8(%rdi)
; DEFAULTCPU-NEXT: movq $0, 16(%rdi)
@@ -12,7 +12,7 @@ define void @merge_8_float_zero_stores(float* %ptr) {
; DEFAULTCPU-NEXT: retq
;
; X64CPU-LABEL: merge_8_float_zero_stores:
-; X64CPU: # BB#0:
+; X64CPU: # %bb.0:
; X64CPU-NEXT: xorps %xmm0, %xmm0
; X64CPU-NEXT: movups %xmm0, (%rdi)
; X64CPU-NEXT: movups %xmm0, 16(%rdi)
diff --git a/test/CodeGen/X86/vector-mul.ll b/test/CodeGen/X86/vector-mul.ll
index 88f31e8b347..642da7c0137 100644
--- a/test/CodeGen/X86/vector-mul.ll
+++ b/test/CodeGen/X86/vector-mul.ll
@@ -10,17 +10,17 @@
define <2 x i64> @mul_v2i64_8(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: psllq $3, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllq $3, %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllq $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 8, i64 8>
@@ -29,17 +29,17 @@ define <2 x i64> @mul_v2i64_8(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_8(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pslld $3, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pslld $3, %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v4i32_8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpslld $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 8, i32 8, i32 8, i32 8>
@@ -48,17 +48,17 @@ define <4 x i32> @mul_v4i32_8(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_8(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: psllw $3, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllw $3, %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -67,24 +67,24 @@ define <8 x i16> @mul_v8i16_8(<8 x i16> %a0) nounwind {
define <16 x i8> @mul_v16i8_32(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: psllw $5, %xmm0
; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllw $5, %xmm0
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_32:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_32:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
@@ -98,7 +98,7 @@ define <16 x i8> @mul_v16i8_32(<16 x i8> %a0) nounwind {
define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_32_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psllq $3, %xmm1
; X86-NEXT: psllq $5, %xmm0
@@ -106,7 +106,7 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_32_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psllq $3, %xmm1
; X64-NEXT: psllq $5, %xmm0
@@ -114,12 +114,12 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v2i64_32_8:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v2i64_32_8:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 32, i64 8>
@@ -128,22 +128,22 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_1_2_4_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_1_2_4_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_1_2_4_8:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v4i32_1_2_4_8:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 4, i32 8>
@@ -152,22 +152,22 @@ define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
@@ -176,7 +176,7 @@ define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm2
; X86-NEXT: psllw $4, %xmm2
@@ -196,7 +196,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: psllw $4, %xmm2
@@ -216,12 +216,12 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllw $4, %xmm0, %xmm1
; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,8192,24640,8192,24640,8192,24640]
@@ -244,7 +244,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw
define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [17,0,17,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -255,7 +255,7 @@ define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [17,17]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -266,7 +266,7 @@ define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_17:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [17,17]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -280,22 +280,22 @@ define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_17:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v4i32_17:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [17,17,17,17]
; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
@@ -305,17 +305,17 @@ define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_17(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_17:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17>
@@ -324,7 +324,7 @@ define <8 x i16> @mul_v8i16_17(<8 x i16> %a0) nounwind {
define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmovsxbw %xmm0, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
; X86-NEXT: pmullw %xmm2, %xmm1
@@ -339,7 +339,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
; X64-NEXT: pmullw %xmm2, %xmm1
@@ -354,7 +354,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_17:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -365,7 +365,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_17:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -385,7 +385,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_17_65:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [17,0,65,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -396,7 +396,7 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_17_65:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [17,65]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -407,7 +407,7 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_17_65:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [17,65]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -421,17 +421,17 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_5_17_33_65:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_5_17_33_65:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v4i32_5_17_33_65:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 5, i32 17, i32 33, i32 65>
@@ -440,17 +440,17 @@ define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_2_3_9_17_33_65_129_257(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 2, i16 3, i16 9, i16 17, i16 33, i16 65, i16 129, i16 257>
@@ -459,7 +459,7 @@ define <8 x i16> @mul_v8i16_2_3_9_17_33_65_129_257(<8 x i16> %a0) nounwind {
define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmovsxbw %xmm0, %xmm1
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -473,7 +473,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8>
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm1
; X64-NEXT: pmullw {{.*}}(%rip), %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -487,7 +487,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8>
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
; X64-XOP-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -497,7 +497,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8>
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -517,7 +517,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8>
define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [7,0,7,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -528,7 +528,7 @@ define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [7,7]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -539,7 +539,7 @@ define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_7:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [7,7]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -553,22 +553,22 @@ define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_7:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v4i32_7:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
@@ -578,17 +578,17 @@ define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_7(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_7:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
@@ -597,7 +597,7 @@ define <8 x i16> @mul_v8i16_7(<8 x i16> %a0) nounwind {
define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_31:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmovsxbw %xmm0, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
; X86-NEXT: pmullw %xmm2, %xmm1
@@ -612,7 +612,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_31:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
; X64-NEXT: pmullw %xmm2, %xmm1
@@ -627,7 +627,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_31:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -638,7 +638,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_31:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -658,7 +658,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_15_63:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [15,0,63,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -669,7 +669,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_15_63:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [15,63]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -680,7 +680,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_15_63:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,63]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -694,7 +694,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_neg_15_63:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967281,4294967295,4294967233,4294967295]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -708,7 +708,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_neg_15_63:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -722,7 +722,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_neg_15_63:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -738,7 +738,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_neg_17_65:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967279,4294967295,4294967231,4294967295]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -752,7 +752,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_neg_17_65:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -766,7 +766,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_neg_17_65:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -782,7 +782,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_0_1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [0,0,1,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -793,7 +793,7 @@ define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_0_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $1, %eax
; X64-NEXT: movq %rax, %xmm1
; X64-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
@@ -806,7 +806,7 @@ define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_0_1:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl $1, %eax
; X64-AVX-NEXT: vmovq %rax, %xmm1
; X64-AVX-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
@@ -822,7 +822,7 @@ define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_neg_0_1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,4294967295]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -836,7 +836,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_neg_0_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrlq $32, %xmm1
; X64-NEXT: movq $-1, %rax
@@ -854,7 +854,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_neg_0_1:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm1
; X64-AVX-NEXT: movq $-1, %rax
; X64-AVX-NEXT: vmovq %rax, %xmm2
@@ -875,7 +875,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_15_neg_63:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [15,0,4294967233,4294967295]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -889,7 +889,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_15_neg_63:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrlq $32, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [15,18446744073709551553]
@@ -905,7 +905,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_15_neg_63:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm1
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [15,18446744073709551553]
; X64-AVX-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
@@ -924,17 +924,17 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_0_15_31_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_0_15_31_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v4i32_0_15_31_7:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 0, i32 15, i32 31, i32 7>
@@ -943,17 +943,17 @@ define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_0_1_7_15_31_63_127_255(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 0, i16 1, i16 7, i16 15, i16 31, i16 63, i16 127, i16 255>
@@ -962,7 +962,7 @@ define <8 x i16> @mul_v8i16_0_1_7_15_31_63_127_255(<8 x i16> %a0) nounwind {
define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmovsxbw %xmm0, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
; X86-NEXT: pmullw %xmm2, %xmm1
@@ -977,7 +977,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8>
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
; X64-NEXT: pmullw %xmm2, %xmm1
@@ -992,7 +992,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8>
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -1003,7 +1003,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8>
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1019,7 +1019,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8>
define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
; X86-LABEL: mul_v2i64_68_132:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [68,0,132,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -1030,7 +1030,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_68_132:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [68,132]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -1041,7 +1041,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_68_132:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [68,132]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -1055,7 +1055,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
; X86-LABEL: mul_v2i64_60_120:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [60,0,124,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -1066,7 +1066,7 @@ define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_60_120:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [60,124]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -1077,7 +1077,7 @@ define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_60_120:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [60,124]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vector-narrow-binop.ll b/test/CodeGen/X86/vector-narrow-binop.ll
index 4d183f3172b..9b05ce4485e 100644
--- a/test/CodeGen/X86/vector-narrow-binop.ll
+++ b/test/CodeGen/X86/vector-narrow-binop.ll
@@ -11,7 +11,7 @@
define <8 x i32> @PR32790(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE-LABEL: PR32790:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: paddd %xmm3, %xmm1
; SSE-NEXT: pand %xmm5, %xmm1
@@ -21,7 +21,7 @@ define <8 x i32> @PR32790(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d
; SSE-NEXT: retq
;
; AVX1-LABEL: PR32790:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -36,14 +36,14 @@ define <8 x i32> @PR32790(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR32790:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR32790:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vpsubd %ymm3, %ymm0, %ymm0
@@ -59,14 +59,14 @@ define <8 x i32> @PR32790(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d
define <4 x i32> @do_not_use_256bit_op(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; SSE-LABEL: do_not_use_256bit_op:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: do_not_use_256bit_op:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vector-pcmp.ll b/test/CodeGen/X86/vector-pcmp.ll
index 478bc0c9476..782c72e2a4d 100644
--- a/test/CodeGen/X86/vector-pcmp.ll
+++ b/test/CodeGen/X86/vector-pcmp.ll
@@ -9,13 +9,13 @@
define <16 x i8> @test_pcmpgtb(<16 x i8> %x) {
; SSE-LABEL: test_pcmpgtb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_pcmpgtb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -26,13 +26,13 @@ define <16 x i8> @test_pcmpgtb(<16 x i8> %x) {
define <8 x i16> @test_pcmpgtw(<8 x i16> %x) {
; SSE-LABEL: test_pcmpgtw:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_pcmpgtw:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -43,13 +43,13 @@ define <8 x i16> @test_pcmpgtw(<8 x i16> %x) {
define <4 x i32> @test_pcmpgtd(<4 x i32> %x) {
; SSE-LABEL: test_pcmpgtd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_pcmpgtd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -60,7 +60,7 @@ define <4 x i32> @test_pcmpgtd(<4 x i32> %x) {
define <2 x i64> @test_pcmpgtq(<2 x i64> %x) {
; SSE2-LABEL: test_pcmpgtq:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
@@ -68,13 +68,13 @@ define <2 x i64> @test_pcmpgtq(<2 x i64> %x) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_pcmpgtq:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: test_pcmpgtq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -85,7 +85,7 @@ define <2 x i64> @test_pcmpgtq(<2 x i64> %x) {
define <1 x i128> @test_strange_type(<1 x i128> %x) {
; SSE2-LABEL: test_strange_type:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: sarq $63, %rsi
; SSE2-NEXT: movq %rsi, %xmm0
; SSE2-NEXT: notq %rsi
@@ -97,7 +97,7 @@ define <1 x i128> @test_strange_type(<1 x i128> %x) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_strange_type:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: sarq $63, %rsi
; SSE42-NEXT: movq %rsi, %xmm0
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -108,7 +108,7 @@ define <1 x i128> @test_strange_type(<1 x i128> %x) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_strange_type:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: sarq $63, %rsi
; AVX1-NEXT: vmovq %rsi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -119,7 +119,7 @@ define <1 x i128> @test_strange_type(<1 x i128> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_strange_type:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: sarq $63, %rsi
; AVX2-NEXT: vmovq %rsi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
@@ -135,14 +135,14 @@ define <1 x i128> @test_strange_type(<1 x i128> %x) {
define <32 x i8> @test_pcmpgtb_256(<32 x i8> %x) {
; SSE-LABEL: test_pcmpgtb_256:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: pcmpgtb %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_pcmpgtb_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
@@ -154,7 +154,7 @@ define <32 x i8> @test_pcmpgtb_256(<32 x i8> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_pcmpgtb_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -165,14 +165,14 @@ define <32 x i8> @test_pcmpgtb_256(<32 x i8> %x) {
define <16 x i16> @test_pcmpgtw_256(<16 x i16> %x) {
; SSE-LABEL: test_pcmpgtw_256:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: pcmpgtw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_pcmpgtw_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
@@ -183,7 +183,7 @@ define <16 x i16> @test_pcmpgtw_256(<16 x i16> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_pcmpgtw_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -194,14 +194,14 @@ define <16 x i16> @test_pcmpgtw_256(<16 x i16> %x) {
define <8 x i32> @test_pcmpgtd_256(<8 x i32> %x) {
; SSE-LABEL: test_pcmpgtd_256:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: pcmpgtd %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_pcmpgtd_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
@@ -212,7 +212,7 @@ define <8 x i32> @test_pcmpgtd_256(<8 x i32> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_pcmpgtd_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -223,7 +223,7 @@ define <8 x i32> @test_pcmpgtd_256(<8 x i32> %x) {
define <4 x i64> @test_pcmpgtq_256(<4 x i64> %x) {
; SSE2-LABEL: test_pcmpgtq_256:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm0
@@ -234,14 +234,14 @@ define <4 x i64> @test_pcmpgtq_256(<4 x i64> %x) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_pcmpgtq_256:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqd %xmm2, %xmm2
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
; SSE42-NEXT: pcmpgtq %xmm2, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_pcmpgtq_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
@@ -253,7 +253,7 @@ define <4 x i64> @test_pcmpgtq_256(<4 x i64> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_pcmpgtq_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -264,13 +264,13 @@ define <4 x i64> @test_pcmpgtq_256(<4 x i64> %x) {
define <16 x i8> @cmpeq_zext_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: cmpeq_zext_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmpeq_zext_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -281,7 +281,7 @@ define <16 x i8> @cmpeq_zext_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <16 x i16> @cmpeq_zext_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: cmpeq_zext_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm2, %xmm0
; SSE-NEXT: psrlw $15, %xmm0
; SSE-NEXT: pcmpeqw %xmm3, %xmm1
@@ -289,7 +289,7 @@ define <16 x i16> @cmpeq_zext_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: cmpeq_zext_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpeqw %xmm2, %xmm3, %xmm2
@@ -299,7 +299,7 @@ define <16 x i16> @cmpeq_zext_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpeq_zext_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -310,13 +310,13 @@ define <16 x i16> @cmpeq_zext_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <4 x i32> @cmpeq_zext_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: cmpeq_zext_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmpeq_zext_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -327,7 +327,7 @@ define <4 x i32> @cmpeq_zext_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <4 x i64> @cmpeq_zext_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: cmpeq_zext_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,0,3,2]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
@@ -340,7 +340,7 @@ define <4 x i64> @cmpeq_zext_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: cmpeq_zext_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm2, %xmm0
; SSE42-NEXT: psrlq $63, %xmm0
; SSE42-NEXT: pcmpeqq %xmm3, %xmm1
@@ -348,7 +348,7 @@ define <4 x i64> @cmpeq_zext_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: retq
;
; AVX1-LABEL: cmpeq_zext_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm3, %xmm2
@@ -358,7 +358,7 @@ define <4 x i64> @cmpeq_zext_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpeq_zext_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlq $63, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -369,7 +369,7 @@ define <4 x i64> @cmpeq_zext_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <32 x i8> @cmpgt_zext_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: cmpgt_zext_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE-NEXT: pand %xmm2, %xmm0
@@ -378,7 +378,7 @@ define <32 x i8> @cmpgt_zext_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: cmpgt_zext_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -388,7 +388,7 @@ define <32 x i8> @cmpgt_zext_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpgt_zext_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -399,13 +399,13 @@ define <32 x i8> @cmpgt_zext_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <8 x i16> @cmpgt_zext_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: cmpgt_zext_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: psrlw $15, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmpgt_zext_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -416,7 +416,7 @@ define <8 x i16> @cmpgt_zext_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <8 x i32> @cmpgt_zext_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: cmpgt_zext_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
@@ -424,7 +424,7 @@ define <8 x i32> @cmpgt_zext_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: cmpgt_zext_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -434,7 +434,7 @@ define <8 x i32> @cmpgt_zext_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpgt_zext_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -445,7 +445,7 @@ define <8 x i32> @cmpgt_zext_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <2 x i64> @cmpgt_zext_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: cmpgt_zext_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -461,13 +461,13 @@ define <2 x i64> @cmpgt_zext_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE42-LABEL: cmpgt_zext_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: psrlq $63, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: cmpgt_zext_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $63, %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/vector-popcnt-128.ll b/test/CodeGen/X86/vector-popcnt-128.ll
index 86a3c6e6856..c316121e24e 100644
--- a/test/CodeGen/X86/vector-popcnt-128.ll
+++ b/test/CodeGen/X86/vector-popcnt-128.ll
@@ -11,7 +11,7 @@
define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
@@ -32,7 +32,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv2i64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlq $1, %xmm1
; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
@@ -53,7 +53,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
@@ -69,7 +69,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pand %xmm1, %xmm2
@@ -85,7 +85,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: testv2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -99,7 +99,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -113,7 +113,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv2i64:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -121,7 +121,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv2i64:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG_NOVLX-NEXT: vpand %xmm1, %xmm0, %xmm2
; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -135,7 +135,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv2i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG-NEXT: vpand %xmm1, %xmm0, %xmm2
; BITALG-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -153,7 +153,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
@@ -179,7 +179,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv4i32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrld $1, %xmm1
; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
@@ -205,7 +205,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pand %xmm2, %xmm3
@@ -227,7 +227,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pand %xmm1, %xmm2
@@ -247,7 +247,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: testv4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -265,7 +265,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -283,7 +283,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv4i32:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -291,7 +291,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i32:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG_NOVLX-NEXT: vpand %xmm1, %xmm0, %xmm2
; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -309,7 +309,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv4i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG-NEXT: vpand %xmm1, %xmm0, %xmm2
; BITALG-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -331,7 +331,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
@@ -353,7 +353,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv8i16:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
@@ -375,7 +375,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
@@ -393,7 +393,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pand %xmm1, %xmm2
@@ -411,7 +411,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: testv8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -426,7 +426,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -441,7 +441,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i16:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -449,7 +449,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i16:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -457,7 +457,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv8i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpopcntw %xmm0, %xmm0
; BITALG-NEXT: retq
%out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %in)
@@ -466,7 +466,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
@@ -485,7 +485,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv16i8:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
@@ -504,7 +504,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pand %xmm2, %xmm3
@@ -519,7 +519,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pand %xmm2, %xmm3
@@ -534,7 +534,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: testv16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -546,7 +546,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -558,7 +558,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i8:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -566,7 +566,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv16i8:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -574,7 +574,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpopcntb %xmm0, %xmm0
; BITALG-NEXT: retq
%out = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %in)
@@ -583,22 +583,22 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
define <2 x i64> @foldv2i64() nounwind {
; SSE-LABEL: foldv2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,64]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,64]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv2i64:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [1,64]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv2i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [1,64]
; BITALG-NEXT: retq
%out = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> <i64 256, i64 -1>)
@@ -607,22 +607,22 @@ define <2 x i64> @foldv2i64() nounwind {
define <4 x i32> @foldv4i32() nounwind {
; SSE-LABEL: foldv4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,32,0,8]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,32,0,8]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv4i32:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [1,32,0,8]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv4i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [1,32,0,8]
; BITALG-NEXT: retq
%out = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>)
@@ -631,22 +631,22 @@ define <4 x i32> @foldv4i32() nounwind {
define <8 x i16> @foldv8i16() nounwind {
; SSE-LABEL: foldv8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,16,0,8,0,3,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,16,0,8,0,3,2,3]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv8i16:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [1,16,0,8,0,3,2,3]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv8i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [1,16,0,8,0,3,2,3]
; BITALG-NEXT: retq
%out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>)
@@ -655,22 +655,22 @@ define <8 x i16> @foldv8i16() nounwind {
define <16 x i8> @foldv16i8() nounwind {
; SSE-LABEL: foldv16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv16i8:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv16i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1]
; BITALG-NEXT: retq
%out = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>)
diff --git a/test/CodeGen/X86/vector-popcnt-256.ll b/test/CodeGen/X86/vector-popcnt-256.ll
index a4d101f4fd3..48d16601e4f 100644
--- a/test/CodeGen/X86/vector-popcnt-256.ll
+++ b/test/CodeGen/X86/vector-popcnt-256.ll
@@ -7,7 +7,7 @@
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -30,7 +30,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -44,14 +44,14 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv4i64:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG_NOVLX-NEXT: vpand %ymm1, %ymm0, %ymm2
; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -65,7 +65,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv4i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG-NEXT: vpand %ymm1, %ymm0, %ymm2
; BITALG-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -83,7 +83,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -114,7 +114,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -132,14 +132,14 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i32:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG_NOVLX-NEXT: vpand %ymm1, %ymm0, %ymm2
; BITALG_NOVLX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -157,7 +157,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv8i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG-NEXT: vpand %ymm1, %ymm0, %ymm2
; BITALG-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -179,7 +179,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -205,7 +205,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -220,21 +220,21 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i16:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv16i16:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpopcntw %ymm0, %ymm0
; BITALG-NEXT: retq
%out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %in)
@@ -243,7 +243,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -263,7 +263,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -275,7 +275,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv32i8:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -287,14 +287,14 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv32i8:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpopcntb %ymm0, %ymm0
; BITALG-NEXT: retq
%out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %in)
@@ -303,7 +303,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
define <4 x i64> @foldv4i64() nounwind {
; ALL-LABEL: foldv4i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [1,64,0,8]
; ALL-NEXT: retq
%out = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>)
@@ -312,7 +312,7 @@ define <4 x i64> @foldv4i64() nounwind {
define <8 x i32> @foldv8i32() nounwind {
; ALL-LABEL: foldv8i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [1,32,0,8,16,3,2,3]
; ALL-NEXT: retq
%out = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>)
@@ -321,7 +321,7 @@ define <8 x i32> @foldv8i32() nounwind {
define <16 x i16> @foldv16i16() nounwind {
; ALL-LABEL: foldv16i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [1,16,0,8,0,3,2,3,15,7,1,1,1,1,1,1]
; ALL-NEXT: retq
%out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>)
@@ -330,7 +330,7 @@ define <16 x i16> @foldv16i16() nounwind {
define <32 x i8> @foldv32i8() nounwind {
; ALL-LABEL: foldv32i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1,1,1,0,0,1,2,3,4,5,6,7,8,2,2,3,7]
; ALL-NEXT: retq
%out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>)
diff --git a/test/CodeGen/X86/vector-popcnt-512.ll b/test/CodeGen/X86/vector-popcnt-512.ll
index cf61f55eb52..df5edc13c3e 100644
--- a/test/CodeGen/X86/vector-popcnt-512.ll
+++ b/test/CodeGen/X86/vector-popcnt-512.ll
@@ -7,7 +7,7 @@
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512F-LABEL: testv8i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm3
@@ -30,7 +30,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: testv8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -44,12 +44,12 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i64:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv8i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG-NEXT: vpandq %zmm1, %zmm0, %zmm2
; BITALG-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -67,7 +67,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512F-LABEL: testv16i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm3
@@ -98,7 +98,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: testv16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -116,12 +116,12 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i32:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv16i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; BITALG-NEXT: vpandq %zmm1, %zmm0, %zmm2
; BITALG-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -143,7 +143,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512F-LABEL: testv32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -167,7 +167,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: testv32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -182,7 +182,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-NOBW-LABEL: testv32i16:
-; AVX512VPOPCNTDQ-NOBW: # BB#0:
+; AVX512VPOPCNTDQ-NOBW: # %bb.0:
; AVX512VPOPCNTDQ-NOBW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VPOPCNTDQ-NOBW-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NOBW-NEXT: vpmovdw %zmm0, %ymm0
@@ -192,7 +192,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NOBW-NEXT: retq
;
; AVX512VPOPCNTDQ-BW-LABEL: testv32i16:
-; AVX512VPOPCNTDQ-BW: # BB#0:
+; AVX512VPOPCNTDQ-BW: # %bb.0:
; AVX512VPOPCNTDQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512VPOPCNTDQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -207,7 +207,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-BW-NEXT: retq
;
; BITALG-LABEL: testv32i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpopcntw %zmm0, %zmm0
; BITALG-NEXT: retq
%out = call <32 x i16> @llvm.ctpop.v32i16(<32 x i16> %in)
@@ -216,7 +216,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512F-LABEL: testv64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -234,7 +234,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: testv64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -246,7 +246,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-NOBW-LABEL: testv64i8:
-; AVX512VPOPCNTDQ-NOBW: # BB#0:
+; AVX512VPOPCNTDQ-NOBW: # %bb.0:
; AVX512VPOPCNTDQ-NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NOBW-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512VPOPCNTDQ-NOBW-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -264,7 +264,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NOBW-NEXT: retq
;
; AVX512VPOPCNTDQ-BW-LABEL: testv64i8:
-; AVX512VPOPCNTDQ-BW: # BB#0:
+; AVX512VPOPCNTDQ-BW: # %bb.0:
; AVX512VPOPCNTDQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512VPOPCNTDQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -276,7 +276,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-BW-NEXT: retq
;
; BITALG-LABEL: testv64i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
; BITALG-NEXT: retq
%out = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %in)
diff --git a/test/CodeGen/X86/vector-rem.ll b/test/CodeGen/X86/vector-rem.ll
index aa8d42ae20a..3f57bd833c0 100644
--- a/test/CodeGen/X86/vector-rem.ll
+++ b/test/CodeGen/X86/vector-rem.ll
@@ -3,7 +3,7 @@
define <4 x i32> @foo(<4 x i32> %t, <4 x i32> %u) nounwind {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
@@ -41,7 +41,7 @@ define <4 x i32> @foo(<4 x i32> %t, <4 x i32> %u) nounwind {
define <4 x i32> @bar(<4 x i32> %t, <4 x i32> %u) nounwind {
; CHECK-LABEL: bar:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
@@ -79,7 +79,7 @@ define <4 x i32> @bar(<4 x i32> %t, <4 x i32> %u) nounwind {
define <4 x float> @qux(<4 x float> %t, <4 x float> %u) nounwind {
; CHECK-LABEL: qux:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq $72, %rsp
; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
diff --git a/test/CodeGen/X86/vector-rotate-128.ll b/test/CodeGen/X86/vector-rotate-128.ll
index fcdb6cb61f6..8b2fbf7c0b1 100644
--- a/test/CodeGen/X86/vector-rotate-128.ll
+++ b/test/CodeGen/X86/vector-rotate-128.ll
@@ -17,7 +17,7 @@
define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: var_rotate_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [64,64]
; SSE2-NEXT: psubq %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -35,7 +35,7 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [64,64]
; SSE41-NEXT: psubq %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
@@ -53,7 +53,7 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_rotate_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm3
@@ -68,7 +68,7 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
; AVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm1
@@ -77,7 +77,7 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v2i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
@@ -86,17 +86,17 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: var_rotate_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_rotate_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [64,0,64,0]
; X32-SSE-NEXT: psubq %xmm1, %xmm2
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
@@ -121,7 +121,7 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_rotate_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
; SSE2-NEXT: psubd %xmm1, %xmm2
; SSE2-NEXT: pslld $23, %xmm1
@@ -158,7 +158,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
; SSE41-NEXT: psubd %xmm1, %xmm2
; SSE41-NEXT: pslld $23, %xmm1
@@ -186,7 +186,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_rotate_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32]
; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
@@ -209,7 +209,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm1
@@ -218,7 +218,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v4i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
@@ -227,17 +227,17 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: var_rotate_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_rotate_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
; X32-SSE-NEXT: psubd %xmm1, %xmm2
; X32-SSE-NEXT: pslld $23, %xmm1
@@ -281,7 +281,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: var_rotate_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; SSE2-NEXT: psubw %xmm1, %xmm3
; SSE2-NEXT: psllw $12, %xmm1
@@ -350,7 +350,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; SSE41-NEXT: psubw %xmm1, %xmm2
@@ -408,7 +408,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_rotate_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
@@ -443,7 +443,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
@@ -461,7 +461,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
@@ -473,7 +473,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm1, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllvw %xmm1, %xmm0, %xmm1
@@ -482,12 +482,12 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512VL-NEXT: retq
;
; XOP-LABEL: var_rotate_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_rotate_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; X32-SSE-NEXT: psubw %xmm1, %xmm3
; X32-SSE-NEXT: psllw $12, %xmm1
@@ -563,7 +563,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: var_rotate_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE2-NEXT: psubb %xmm1, %xmm4
; SSE2-NEXT: psllw $5, %xmm1
@@ -623,7 +623,7 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
@@ -670,7 +670,7 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: var_rotate_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
@@ -700,7 +700,7 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: retq
;
; AVX512-LABEL: var_rotate_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
@@ -715,12 +715,12 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-NEXT: retq
;
; XOP-LABEL: var_rotate_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_rotate_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X32-SSE-NEXT: psubb %xmm1, %xmm4
; X32-SSE-NEXT: psllw $5, %xmm1
@@ -791,7 +791,7 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: constant_rotate_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psllq $4, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -805,7 +805,7 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psllq $14, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
@@ -819,7 +819,7 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $14, %xmm0, %xmm1
; AVX1-NEXT: vpsllq $4, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -830,14 +830,14 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v2i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,14]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
@@ -846,17 +846,17 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvq {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: constant_rotate_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllq $4, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
@@ -876,7 +876,7 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_rotate_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmuludq %xmm1, %xmm2
@@ -902,7 +902,7 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
@@ -919,7 +919,7 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vpsrld $25, %xmm0, %xmm2
; AVX1-NEXT: vpsrld $27, %xmm0, %xmm3
@@ -932,14 +932,14 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v4i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
@@ -948,17 +948,17 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: constant_rotate_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: pmuludq %xmm1, %xmm2
@@ -990,7 +990,7 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: constant_rotate_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
; SSE2-NEXT: pmullw %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
@@ -1022,7 +1022,7 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; SSE41-NEXT: pmullw %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
@@ -1041,7 +1041,7 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7]
@@ -1055,7 +1055,7 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
@@ -1066,7 +1066,7 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
@@ -1077,19 +1077,19 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvw {{.*}}(%rip), %xmm0, %xmm1
; AVX512VL-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: constant_rotate_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
; X32-SSE-NEXT: pmullw %xmm0, %xmm2
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
@@ -1127,7 +1127,7 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_rotate_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,57600,41152,24704,8256]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -1185,7 +1185,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psllw $4, %xmm3
@@ -1222,7 +1222,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_rotate_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
@@ -1250,7 +1250,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: retq
;
; AVX512-LABEL: constant_rotate_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm1
; AVX512-NEXT: vpmovdb %zmm1, %xmm1
@@ -1261,12 +1261,12 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; XOP-LABEL: constant_rotate_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,57600,41152,24704,8256]
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: pxor %xmm1, %xmm1
@@ -1334,7 +1334,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
define <2 x i64> @splatconstant_rotate_v2i64(<2 x i64> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psllq $14, %xmm1
; SSE-NEXT: psrlq $50, %xmm0
@@ -1342,14 +1342,14 @@ define <2 x i64> @splatconstant_rotate_v2i64(<2 x i64> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq $14, %xmm0, %xmm1
; AVX-NEXT: vpsrlq $50, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v2i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1357,17 +1357,17 @@ define <2 x i64> @splatconstant_rotate_v2i64(<2 x i64> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolq $14, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotq $14, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllq $14, %xmm1
; X32-SSE-NEXT: psrlq $50, %xmm0
@@ -1381,7 +1381,7 @@ define <2 x i64> @splatconstant_rotate_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @splatconstant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pslld $4, %xmm1
; SSE-NEXT: psrld $28, %xmm0
@@ -1389,14 +1389,14 @@ define <4 x i32> @splatconstant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $4, %xmm0, %xmm1
; AVX-NEXT: vpsrld $28, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v4i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1404,17 +1404,17 @@ define <4 x i32> @splatconstant_rotate_v4i32(<4 x i32> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprold $4, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotd $4, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pslld $4, %xmm1
; X32-SSE-NEXT: psrld $28, %xmm0
@@ -1428,7 +1428,7 @@ define <4 x i32> @splatconstant_rotate_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @splatconstant_rotate_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psllw $7, %xmm1
; SSE-NEXT: psrlw $9, %xmm0
@@ -1436,26 +1436,26 @@ define <8 x i16> @splatconstant_rotate_v8i16(<8 x i16> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $7, %xmm0, %xmm1
; AVX-NEXT: vpsrlw $9, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $7, %xmm0, %xmm1
; AVX512-NEXT: vpsrlw $9, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotw $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllw $7, %xmm1
; X32-SSE-NEXT: psrlw $9, %xmm0
@@ -1469,7 +1469,7 @@ define <8 x i16> @splatconstant_rotate_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psllw $4, %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1479,7 +1479,7 @@ define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -1488,7 +1488,7 @@ define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -1497,12 +1497,12 @@ define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotb $4, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllw $4, %xmm1
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
@@ -1522,19 +1522,19 @@ define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind {
define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_mask_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $49, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $49, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v2i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -1542,19 +1542,19 @@ define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolq $15, %xmm0, %xmm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotq $15, %xmm0, %xmm0
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_mask_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrlq $49, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
@@ -1568,7 +1568,7 @@ define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_mask_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pslld $4, %xmm1
; SSE-NEXT: psrld $28, %xmm0
@@ -1578,7 +1578,7 @@ define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $4, %xmm0, %xmm1
; AVX-NEXT: vpsrld $28, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
@@ -1586,7 +1586,7 @@ define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -1594,19 +1594,19 @@ define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprold $4, %xmm0, %xmm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotd $4, %xmm0, %xmm0
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_mask_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pslld $4, %xmm1
; X32-SSE-NEXT: psrld $28, %xmm0
@@ -1624,7 +1624,7 @@ define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @splatconstant_rotate_mask_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_mask_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psllw $5, %xmm1
; SSE-NEXT: psrlw $11, %xmm0
@@ -1635,7 +1635,7 @@ define <8 x i16> @splatconstant_rotate_mask_v8i16(<8 x i16> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $5, %xmm0, %xmm1
; AVX-NEXT: vpsrlw $11, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -1644,7 +1644,7 @@ define <8 x i16> @splatconstant_rotate_mask_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_mask_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $5, %xmm0, %xmm1
; AVX512-NEXT: vpsrlw $11, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -1653,13 +1653,13 @@ define <8 x i16> @splatconstant_rotate_mask_v8i16(<8 x i16> %a) nounwind {
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotw $5, %xmm0, %xmm0
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_mask_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllw $5, %xmm1
; X32-SSE-NEXT: psrlw $11, %xmm0
@@ -1678,7 +1678,7 @@ define <8 x i16> @splatconstant_rotate_mask_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @splatconstant_rotate_mask_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_mask_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psllw $4, %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1691,7 +1691,7 @@ define <16 x i8> @splatconstant_rotate_mask_v16i8(<16 x i8> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -1702,7 +1702,7 @@ define <16 x i8> @splatconstant_rotate_mask_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_mask_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -1713,13 +1713,13 @@ define <16 x i8> @splatconstant_rotate_mask_v16i8(<16 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vprotb $4, %xmm0, %xmm0
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_mask_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllw $4, %xmm1
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
diff --git a/test/CodeGen/X86/vector-rotate-256.ll b/test/CodeGen/X86/vector-rotate-256.ll
index 1af190f1665..954d0b0f31c 100644
--- a/test/CodeGen/X86/vector-rotate-256.ll
+++ b/test/CodeGen/X86/vector-rotate-256.ll
@@ -12,7 +12,7 @@
define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: var_rotate_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
@@ -40,7 +40,7 @@ define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [64,64,64,64]
; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm1
@@ -49,7 +49,7 @@ define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v4i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
@@ -57,12 +57,12 @@ define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: var_rotate_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vprotq %xmm2, %xmm3, %xmm2
@@ -71,7 +71,7 @@ define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_rotate_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vprotq %xmm2, %xmm3, %xmm2
@@ -87,7 +87,7 @@ define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: var_rotate_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
@@ -131,7 +131,7 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm1
@@ -140,7 +140,7 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v8i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
@@ -148,12 +148,12 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: var_rotate_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vprotd %xmm2, %xmm3, %xmm2
@@ -162,7 +162,7 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_rotate_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vprotd %xmm2, %xmm3, %xmm2
@@ -178,7 +178,7 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: var_rotate_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
@@ -246,7 +246,7 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -270,7 +270,7 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -281,7 +281,7 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
@@ -290,7 +290,7 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: var_rotate_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vprotw %xmm2, %xmm3, %xmm2
@@ -299,7 +299,7 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_rotate_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vprotw %xmm2, %xmm3, %xmm2
@@ -315,7 +315,7 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: var_rotate_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
@@ -378,7 +378,7 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
@@ -408,7 +408,7 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_rotate_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
@@ -422,7 +422,7 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: var_rotate_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vprotb %xmm2, %xmm3, %xmm2
@@ -431,7 +431,7 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_rotate_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vprotb %xmm2, %xmm3, %xmm2
@@ -451,7 +451,7 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: constant_rotate_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $60, %xmm1, %xmm2
; AVX1-NEXT: vpsllq $50, %xmm1, %xmm3
@@ -471,14 +471,14 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v4i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
@@ -486,12 +486,12 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvq {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: constant_rotate_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm0
@@ -499,7 +499,7 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm0
@@ -513,7 +513,7 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_rotate_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm2, %xmm3
@@ -537,14 +537,14 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v8i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
@@ -552,12 +552,12 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: constant_rotate_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
@@ -565,7 +565,7 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
@@ -579,7 +579,7 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: constant_rotate_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm2, %xmm3
@@ -605,7 +605,7 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
@@ -622,7 +622,7 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
@@ -632,14 +632,14 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm1
; AVX512VL-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: constant_rotate_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
@@ -647,7 +647,7 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
@@ -661,7 +661,7 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_rotate_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
@@ -714,7 +714,7 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
@@ -742,7 +742,7 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: constant_rotate_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512-NEXT: vpmovwb %zmm1, %ymm1
@@ -752,7 +752,7 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: constant_rotate_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
; XOPAVX1-NEXT: vprotb %xmm2, %xmm1, %xmm1
@@ -761,7 +761,7 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
; XOPAVX2-NEXT: vprotb %xmm2, %xmm1, %xmm1
@@ -780,7 +780,7 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
define <4 x i64> @splatconstant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $14, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllq $14, %xmm2, %xmm3
@@ -792,26 +792,26 @@ define <4 x i64> @splatconstant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $14, %ymm0, %ymm1
; AVX2-NEXT: vpsrlq $50, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v4i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolq $14, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotq $14, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotq $14, %xmm0, %xmm0
@@ -819,7 +819,7 @@ define <4 x i64> @splatconstant_rotate_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_rotate_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotq $14, %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotq $14, %xmm0, %xmm0
@@ -833,7 +833,7 @@ define <4 x i64> @splatconstant_rotate_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @splatconstant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $4, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpslld $4, %xmm2, %xmm3
@@ -845,26 +845,26 @@ define <8 x i32> @splatconstant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $4, %ymm0, %ymm1
; AVX2-NEXT: vpsrld $28, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v8i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprold $4, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotd $4, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotd $4, %xmm0, %xmm0
@@ -872,7 +872,7 @@ define <8 x i32> @splatconstant_rotate_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_rotate_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotd $4, %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotd $4, %xmm0, %xmm0
@@ -886,7 +886,7 @@ define <8 x i32> @splatconstant_rotate_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @splatconstant_rotate_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllw $7, %xmm2, %xmm3
@@ -898,21 +898,21 @@ define <16 x i16> @splatconstant_rotate_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $9, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $7, %ymm0, %ymm1
; AVX512-NEXT: vpsrlw $9, %ymm0, %ymm0
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotw $7, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotw $7, %xmm0, %xmm0
@@ -920,7 +920,7 @@ define <16 x i16> @splatconstant_rotate_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_rotate_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotw $7, %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotw $7, %xmm0, %xmm0
@@ -934,7 +934,7 @@ define <16 x i16> @splatconstant_rotate_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
@@ -952,7 +952,7 @@ define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
@@ -961,7 +961,7 @@ define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
@@ -970,7 +970,7 @@ define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotb $4, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotb $4, %xmm0, %xmm0
@@ -978,7 +978,7 @@ define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_rotate_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotb $4, %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotb $4, %xmm0, %xmm0
@@ -996,7 +996,7 @@ define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind {
define <4 x i64> @splatconstant_rotate_mask_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_mask_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq $49, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrlq $49, %xmm0, %xmm0
@@ -1005,26 +1005,26 @@ define <4 x i64> @splatconstant_rotate_mask_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_mask_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlq $49, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolq $15, %ymm0, %ymm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotq $15, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotq $15, %xmm0, %xmm0
@@ -1033,7 +1033,7 @@ define <4 x i64> @splatconstant_rotate_mask_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_rotate_mask_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotq $15, %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotq $15, %xmm0, %xmm0
@@ -1050,7 +1050,7 @@ define <4 x i64> @splatconstant_rotate_mask_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_mask_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $4, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpslld $4, %xmm2, %xmm3
@@ -1064,7 +1064,7 @@ define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_mask_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $4, %ymm0, %ymm1
; AVX2-NEXT: vpsrld $28, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
@@ -1073,20 +1073,20 @@ define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v8i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprold $4, %ymm0, %ymm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotd $4, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotd $4, %xmm0, %xmm0
@@ -1095,7 +1095,7 @@ define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_rotate_mask_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotd $4, %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotd $4, %xmm0, %xmm0
@@ -1112,7 +1112,7 @@ define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @splatconstant_rotate_mask_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_mask_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllw $5, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllw $5, %xmm2, %xmm3
@@ -1126,7 +1126,7 @@ define <16 x i16> @splatconstant_rotate_mask_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_mask_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $5, %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $11, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
@@ -1135,7 +1135,7 @@ define <16 x i16> @splatconstant_rotate_mask_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_mask_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $5, %ymm0, %ymm1
; AVX512-NEXT: vpsrlw $11, %ymm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
@@ -1144,7 +1144,7 @@ define <16 x i16> @splatconstant_rotate_mask_v16i16(<16 x i16> %a) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotw $5, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotw $5, %xmm0, %xmm0
@@ -1153,7 +1153,7 @@ define <16 x i16> @splatconstant_rotate_mask_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_rotate_mask_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotw $5, %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotw $5, %xmm0, %xmm0
@@ -1170,7 +1170,7 @@ define <16 x i16> @splatconstant_rotate_mask_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @splatconstant_rotate_mask_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_mask_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
@@ -1190,7 +1190,7 @@ define <32 x i8> @splatconstant_rotate_mask_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_mask_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
@@ -1201,7 +1201,7 @@ define <32 x i8> @splatconstant_rotate_mask_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_mask_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
@@ -1212,7 +1212,7 @@ define <32 x i8> @splatconstant_rotate_mask_v32i8(<32 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vprotb $4, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vprotb $4, %xmm0, %xmm0
@@ -1221,7 +1221,7 @@ define <32 x i8> @splatconstant_rotate_mask_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_rotate_mask_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vprotb $4, %xmm0, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; XOPAVX2-NEXT: vprotb $4, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vector-rotate-512.ll b/test/CodeGen/X86/vector-rotate-512.ll
index bf02f94b161..8941be35c05 100644
--- a/test/CodeGen/X86/vector-rotate-512.ll
+++ b/test/CodeGen/X86/vector-rotate-512.ll
@@ -10,7 +10,7 @@
define <8 x i64> @var_rotate_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; AVX512-LABEL: var_rotate_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%b64 = sub <8 x i64> <i64 64, i64 64, i64 64, i64 64, i64 64, i64 64, i64 64, i64 64>, %b
@@ -22,7 +22,7 @@ define <8 x i64> @var_rotate_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
define <16 x i32> @var_rotate_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; AVX512-LABEL: var_rotate_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%b32 = sub <16 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %b
@@ -34,7 +34,7 @@ define <16 x i32> @var_rotate_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512F-LABEL: var_rotate_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %ymm2, %ymm4, %ymm5
; AVX512F-NEXT: vpsubw %ymm3, %ymm4, %ymm4
@@ -57,7 +57,7 @@ define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v32i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %ymm2, %ymm4, %ymm5
; AVX512VL-NEXT: vpsubw %ymm3, %ymm4, %ymm4
@@ -80,7 +80,7 @@ define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
@@ -89,7 +89,7 @@ define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v32i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %zmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
@@ -105,7 +105,7 @@ define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: var_rotate_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %ymm2, %ymm5, %ymm4
; AVX512F-NEXT: vpsubb %ymm3, %ymm5, %ymm5
@@ -165,7 +165,7 @@ define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v64i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %ymm2, %ymm5, %ymm4
; AVX512VL-NEXT: vpsubb %ymm3, %ymm5, %ymm5
@@ -225,7 +225,7 @@ define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm3
@@ -260,7 +260,7 @@ define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v64i8:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm3
@@ -306,7 +306,7 @@ define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
define <8 x i64> @constant_rotate_v8i64(<8 x i64> %a) nounwind {
; AVX512-LABEL: constant_rotate_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprolvq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <8 x i64> %a, <i64 4, i64 14, i64 50, i64 60, i64 4, i64 14, i64 50, i64 60>
@@ -317,7 +317,7 @@ define <8 x i64> @constant_rotate_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @constant_rotate_v16i32(<16 x i32> %a) nounwind {
; AVX512-LABEL: constant_rotate_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprolvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
@@ -328,7 +328,7 @@ define <16 x i32> @constant_rotate_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @constant_rotate_v32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: constant_rotate_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm3
; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm2
@@ -344,7 +344,7 @@ define <32 x i16> @constant_rotate_v32i16(<32 x i16> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v32i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
; AVX512VL-NEXT: vpmullw %ymm2, %ymm1, %ymm3
; AVX512VL-NEXT: vpmullw %ymm2, %ymm0, %ymm2
@@ -360,14 +360,14 @@ define <32 x i16> @constant_rotate_v32i16(<32 x i16> %a) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v32i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
@@ -380,7 +380,7 @@ define <32 x i16> @constant_rotate_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: constant_rotate_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -431,7 +431,7 @@ define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v64i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -482,7 +482,7 @@ define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
@@ -515,7 +515,7 @@ define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v64i8:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm2
@@ -558,7 +558,7 @@ define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
define <8 x i64> @splatconstant_rotate_v8i64(<8 x i64> %a) nounwind {
; AVX512-LABEL: splatconstant_rotate_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprolq $14, %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <8 x i64> %a, <i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14>
@@ -569,7 +569,7 @@ define <8 x i64> @splatconstant_rotate_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @splatconstant_rotate_v16i32(<16 x i32> %a) nounwind {
; AVX512-LABEL: splatconstant_rotate_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprold $4, %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <16 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -580,7 +580,7 @@ define <16 x i32> @splatconstant_rotate_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @splatconstant_rotate_v32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllw $7, %ymm1, %ymm2
; AVX512F-NEXT: vpsllw $7, %ymm0, %ymm3
; AVX512F-NEXT: vpsrlw $9, %ymm1, %ymm1
@@ -590,7 +590,7 @@ define <32 x i16> @splatconstant_rotate_v32i16(<32 x i16> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v32i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $7, %ymm1, %ymm2
; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm3
; AVX512VL-NEXT: vpsrlw $9, %ymm1, %ymm1
@@ -600,14 +600,14 @@ define <32 x i16> @splatconstant_rotate_v32i16(<32 x i16> %a) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlw $9, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_v32i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $7, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlw $9, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
@@ -620,7 +620,7 @@ define <32 x i16> @splatconstant_rotate_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -636,7 +636,7 @@ define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v64i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -652,7 +652,7 @@ define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
@@ -661,7 +661,7 @@ define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_v64i8:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm0
@@ -680,7 +680,7 @@ define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
define <8 x i64> @splatconstant_rotate_mask_v8i64(<8 x i64> %a) nounwind {
; AVX512-LABEL: splatconstant_rotate_mask_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
@@ -694,7 +694,7 @@ define <8 x i64> @splatconstant_rotate_mask_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @splatconstant_rotate_mask_v16i32(<16 x i32> %a) nounwind {
; AVX512-LABEL: splatconstant_rotate_mask_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vprold $4, %zmm0, %zmm0
; AVX512-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
@@ -708,7 +708,7 @@ define <16 x i32> @splatconstant_rotate_mask_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @splatconstant_rotate_mask_v32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_mask_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllw $5, %ymm0, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm3
; AVX512F-NEXT: vpsrlw $11, %ymm0, %ymm0
@@ -724,7 +724,7 @@ define <32 x i16> @splatconstant_rotate_mask_v32i16(<32 x i16> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v32i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $5, %ymm0, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm3
; AVX512VL-NEXT: vpsrlw $11, %ymm0, %ymm0
@@ -740,7 +740,7 @@ define <32 x i16> @splatconstant_rotate_mask_v32i16(<32 x i16> %a) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $5, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlw $11, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
@@ -749,7 +749,7 @@ define <32 x i16> @splatconstant_rotate_mask_v32i16(<32 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_mask_v32i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $5, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlw $11, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
@@ -766,7 +766,7 @@ define <32 x i16> @splatconstant_rotate_mask_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @splatconstant_rotate_mask_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_mask_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm3
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm0
@@ -784,7 +784,7 @@ define <64 x i8> @splatconstant_rotate_mask_v64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v64i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm3
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
@@ -802,7 +802,7 @@ define <64 x i8> @splatconstant_rotate_mask_v64i8(<64 x i8> %a) nounwind {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
@@ -813,7 +813,7 @@ define <64 x i8> @splatconstant_rotate_mask_v64i8(<64 x i8> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_mask_v64i8:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm0
diff --git a/test/CodeGen/X86/vector-sext.ll b/test/CodeGen/X86/vector-sext.ll
index 009a4cf501a..88fc588d27e 100644
--- a/test/CodeGen/X86/vector-sext.ll
+++ b/test/CodeGen/X86/vector-sext.ll
@@ -12,29 +12,29 @@
define <8 x i16> @sext_16i8_to_8i16(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_16i8_to_8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_8i16:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; X32-SSE41-NEXT: retl
entry:
@@ -45,7 +45,7 @@ entry:
define <16 x i16> @sext_16i8_to_16i16(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_16i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -54,7 +54,7 @@ define <16 x i16> @sext_16i8_to_16i16(<16 x i8> %A) nounwind uwtable readnone ss
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_16i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSSE3-NEXT: psraw $8, %xmm2
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -63,7 +63,7 @@ define <16 x i16> @sext_16i8_to_16i16(<16 x i8> %A) nounwind uwtable readnone ss
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_16i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
@@ -71,7 +71,7 @@ define <16 x i16> @sext_16i8_to_16i16(<16 x i8> %A) nounwind uwtable readnone ss
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i8_to_16i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
@@ -79,17 +79,17 @@ define <16 x i16> @sext_16i8_to_16i16(<16 x i8> %A) nounwind uwtable readnone ss
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_16i8_to_16i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_16i8_to_16i16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_16i16:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm1
@@ -102,7 +102,7 @@ entry:
define <32 x i16> @sext_32i8_to_32i16(<32 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_32i8_to_32i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
@@ -116,7 +116,7 @@ define <32 x i16> @sext_32i8_to_32i16(<32 x i8> %A) nounwind uwtable readnone ss
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_32i8_to_32i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; SSSE3-NEXT: psraw $8, %xmm4
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
@@ -130,7 +130,7 @@ define <32 x i16> @sext_32i8_to_32i16(<32 x i8> %A) nounwind uwtable readnone ss
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_32i8_to_32i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm5
; SSE41-NEXT: pmovsxbw %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -142,7 +142,7 @@ define <32 x i16> @sext_32i8_to_32i16(<32 x i8> %A) nounwind uwtable readnone ss
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_32i8_to_32i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
@@ -156,7 +156,7 @@ define <32 x i16> @sext_32i8_to_32i16(<32 x i8> %A) nounwind uwtable readnone ss
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_32i8_to_32i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1
@@ -164,7 +164,7 @@ define <32 x i16> @sext_32i8_to_32i16(<32 x i8> %A) nounwind uwtable readnone ss
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sext_32i8_to_32i16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm2
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm1
@@ -172,12 +172,12 @@ define <32 x i16> @sext_32i8_to_32i16(<32 x i8> %A) nounwind uwtable readnone ss
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sext_32i8_to_32i16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: sext_32i8_to_32i16:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm5
; X32-SSE41-NEXT: pmovsxbw %xmm1, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -194,31 +194,31 @@ entry:
define <4 x i32> @sext_16i8_to_4i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbd %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_16i8_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_4i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm0
; X32-SSE41-NEXT: retl
entry:
@@ -229,7 +229,7 @@ entry:
define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT: psrad $24, %xmm2
@@ -241,7 +241,7 @@ define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -251,7 +251,7 @@ define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbd %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovsxbd %xmm0, %xmm1
@@ -259,7 +259,7 @@ define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -267,17 +267,17 @@ define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_16i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_16i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_8i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm1
@@ -291,7 +291,7 @@ entry:
define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_16i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; SSE2-NEXT: psrad $24, %xmm4
@@ -310,7 +310,7 @@ define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_16i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -326,7 +326,7 @@ define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_16i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovsxbd %xmm1, %xmm1
@@ -338,7 +338,7 @@ define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i8_to_16i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm2, %xmm2
@@ -352,7 +352,7 @@ define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_16i8_to_16i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm1
@@ -360,12 +360,12 @@ define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_16i8_to_16i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_16i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm4
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; X32-SSE41-NEXT: pmovsxbd %xmm1, %xmm1
@@ -382,7 +382,7 @@ entry:
define <2 x i64> @sext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -392,7 +392,7 @@ define <2 x i64> @sext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
@@ -402,17 +402,17 @@ define <2 x i64> @sext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbq %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_16i8_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxbq %xmm0, %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_2i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm0
; X32-SSE41-NEXT: retl
entry:
@@ -423,7 +423,7 @@ entry:
define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT: movdqa %xmm2, %xmm1
@@ -441,7 +441,7 @@ define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -457,7 +457,7 @@ define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbq %xmm0, %xmm2
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pmovsxbq %xmm0, %xmm1
@@ -465,7 +465,7 @@ define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i8_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbq %xmm0, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxbq %xmm0, %xmm0
@@ -473,17 +473,17 @@ define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_16i8_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_16i8_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbq %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_4i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm2
; X32-SSE41-NEXT: psrld $16, %xmm0
; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm1
@@ -497,7 +497,7 @@ entry:
define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -529,7 +529,7 @@ define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <u,u,u,2,u,u,u,3,u,u,u,255,u,u,u,255>
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
@@ -558,7 +558,7 @@ define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbq %xmm0, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $16, %xmm1
@@ -571,7 +571,7 @@ define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i8_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbq %xmm0, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
; AVX1-NEXT: vpmovsxbq %xmm2, %xmm2
@@ -585,7 +585,7 @@ define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_16i8_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbq %xmm0, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmovsxbq %xmm0, %ymm1
@@ -593,12 +593,12 @@ define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_16i8_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbq %xmm0, %zmm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_8i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm4
; X32-SSE41-NEXT: movdqa %xmm0, %xmm1
; X32-SSE41-NEXT: psrld $16, %xmm1
@@ -617,29 +617,29 @@ entry:
define <4 x i32> @sext_8i16_to_4i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_8i16_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_8i16_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwd %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_8i16_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_8i16_to_4i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm0
; X32-SSE41-NEXT: retl
entry:
@@ -650,7 +650,7 @@ entry:
define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -659,7 +659,7 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: psrad $16, %xmm2
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -668,7 +668,7 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwd %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxwd %xmm0, %xmm1
@@ -676,7 +676,7 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
@@ -684,17 +684,17 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_8i16_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_8i16_to_8i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm1
@@ -707,7 +707,7 @@ entry:
define <16 x i32> @sext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i16_to_16i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
@@ -721,7 +721,7 @@ define <16 x i32> @sext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i16_to_16i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
; SSSE3-NEXT: psrad $16, %xmm4
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
@@ -735,7 +735,7 @@ define <16 x i32> @sext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i16_to_16i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwd %xmm0, %xmm5
; SSE41-NEXT: pmovsxwd %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -747,7 +747,7 @@ define <16 x i32> @sext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i16_to_16i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
@@ -761,7 +761,7 @@ define <16 x i32> @sext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_16i16_to_16i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm1
@@ -769,12 +769,12 @@ define <16 x i32> @sext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_16i16_to_16i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i16_to_16i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm5
; X32-SSE41-NEXT: pmovsxwd %xmm1, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -791,7 +791,7 @@ entry:
define <2 x i64> @sext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
@@ -800,7 +800,7 @@ define <2 x i64> @sext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_8i16_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
@@ -809,17 +809,17 @@ define <2 x i64> @sext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_8i16_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwq %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_8i16_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxwq %xmm0, %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_8i16_to_2i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm0
; X32-SSE41-NEXT: retl
entry:
@@ -830,7 +830,7 @@ entry:
define <4 x i64> @sext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
@@ -846,7 +846,7 @@ define <4 x i64> @sext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_8i16_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
@@ -862,7 +862,7 @@ define <4 x i64> @sext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_8i16_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwq %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovsxwq %xmm0, %xmm1
@@ -870,7 +870,7 @@ define <4 x i64> @sext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_8i16_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxwq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxwq %xmm0, %xmm0
@@ -878,17 +878,17 @@ define <4 x i64> @sext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_8i16_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxwq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_8i16_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxwq %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_8i16_to_4i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm1
@@ -902,7 +902,7 @@ entry:
define <8 x i64> @sext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm4, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
@@ -929,7 +929,7 @@ define <8 x i64> @sext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_8i16_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
; SSSE3-NEXT: movdqa %xmm4, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
@@ -956,7 +956,7 @@ define <8 x i64> @sext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_8i16_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwq %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovsxwq %xmm1, %xmm1
@@ -968,7 +968,7 @@ define <8 x i64> @sext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_8i16_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxwq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxwq %xmm2, %xmm2
@@ -982,7 +982,7 @@ define <8 x i64> @sext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_8i16_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxwq %xmm0, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxwq %xmm0, %ymm1
@@ -990,12 +990,12 @@ define <8 x i64> @sext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_8i16_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_8i16_to_8i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm4
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; X32-SSE41-NEXT: pmovsxwq %xmm1, %xmm1
@@ -1012,31 +1012,31 @@ entry:
define <2 x i64> @sext_4i32_to_2i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_4i32_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i32_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i32_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxdq %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_4i32_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i32_to_2i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm0
; X32-SSE41-NEXT: retl
entry:
@@ -1047,7 +1047,7 @@ entry:
define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1058,7 +1058,7 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1069,7 +1069,7 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxdq %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm1
@@ -1077,7 +1077,7 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
@@ -1085,17 +1085,17 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_4i32_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i32_to_4i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm1
@@ -1108,7 +1108,7 @@ entry:
define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i32_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
@@ -1127,7 +1127,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_8i32_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
@@ -1146,7 +1146,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_8i32_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxdq %xmm0, %xmm5
; SSE41-NEXT: pmovsxdq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1158,7 +1158,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_8i32_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
@@ -1172,7 +1172,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_8i32_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm1
@@ -1180,12 +1180,12 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_8i32_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_8i32_to_8i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm5
; X32-SSE41-NEXT: pmovsxdq %xmm1, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1202,7 +1202,7 @@ entry:
define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; SSE-LABEL: load_sext_2i1_to_2i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movzbl (%rdi), %eax
; SSE-NEXT: movq %rax, %rcx
; SSE-NEXT: shlq $62, %rcx
@@ -1215,7 +1215,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; SSE-NEXT: retq
;
; AVX1-LABEL: load_sext_2i1_to_2i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: movzbl (%rdi), %eax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $62, %rcx
@@ -1228,7 +1228,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_2i1_to_2i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movzbl (%rdi), %eax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $62, %rcx
@@ -1241,7 +1241,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_2i1_to_2i64:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1250,7 +1250,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_2i1_to_2i64:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1259,7 +1259,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_2i1_to_2i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movzbl (%eax), %eax
; X32-SSE41-NEXT: movl %eax, %ecx
@@ -1280,7 +1280,7 @@ entry:
define <2 x i64> @load_sext_2i8_to_2i64(<2 x i8> *%ptr) {
; SSE2-LABEL: load_sext_2i8_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1292,7 +1292,7 @@ define <2 x i64> @load_sext_2i8_to_2i64(<2 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_2i8_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movzwl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1304,17 +1304,17 @@ define <2 x i64> @load_sext_2i8_to_2i64(<2 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_2i8_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_2i8_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxbq (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_2i8_to_2i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -1326,7 +1326,7 @@ entry:
define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; SSE2-LABEL: load_sext_4i1_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movzbl (%rdi), %eax
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shlq $60, %rcx
@@ -1349,7 +1349,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i1_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movzbl (%rdi), %eax
; SSSE3-NEXT: movq %rax, %rcx
; SSSE3-NEXT: shlq $60, %rcx
@@ -1372,7 +1372,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i1_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movzbl (%rdi), %eax
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $62, %rcx
@@ -1392,7 +1392,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i1_to_4i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: movzbl (%rdi), %eax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $62, %rcx
@@ -1412,7 +1412,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i1_to_4i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movzbl (%rdi), %eax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $62, %rcx
@@ -1432,7 +1432,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_4i1_to_4i32:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1442,7 +1442,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_4i1_to_4i32:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1452,7 +1452,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i1_to_4i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movl (%eax), %eax
; X32-SSE41-NEXT: movl %eax, %ecx
@@ -1479,7 +1479,7 @@ entry:
define <4 x i32> @load_sext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSE2-LABEL: load_sext_4i8_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1487,7 +1487,7 @@ define <4 x i32> @load_sext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i8_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1495,17 +1495,17 @@ define <4 x i32> @load_sext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i8_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_4i8_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i8_to_4i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbd (%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -1517,7 +1517,7 @@ entry:
define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; SSE2-LABEL: load_sext_4i1_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movl (%rdi), %eax
; SSE2-NEXT: movl %eax, %ecx
; SSE2-NEXT: shrl $3, %ecx
@@ -1543,7 +1543,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i1_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movl (%rdi), %eax
; SSSE3-NEXT: movl %eax, %ecx
; SSSE3-NEXT: shrl $3, %ecx
@@ -1569,7 +1569,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i1_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movl (%rdi), %eax
; SSE41-NEXT: movl %eax, %ecx
; SSE41-NEXT: shrl %ecx
@@ -1592,7 +1592,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i1_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: movzbl (%rdi), %eax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $62, %rcx
@@ -1616,7 +1616,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i1_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movzbl (%rdi), %eax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $60, %rcx
@@ -1639,7 +1639,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_4i1_to_4i64:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1647,7 +1647,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_4i1_to_4i64:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1655,7 +1655,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i1_to_4i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movzbl (%eax), %eax
; X32-SSE41-NEXT: movl %eax, %ecx
@@ -1685,7 +1685,7 @@ entry:
define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-LABEL: load_sext_4i8_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsbq 1(%rdi), %rax
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: movsbq (%rdi), %rax
@@ -1699,7 +1699,7 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i8_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsbq 1(%rdi), %rax
; SSSE3-NEXT: movq %rax, %xmm1
; SSSE3-NEXT: movsbq (%rdi), %rax
@@ -1713,13 +1713,13 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i8_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: pmovsxbq 2(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i8_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1728,17 +1728,17 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i8_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbq (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_sext_4i8_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbq (%rdi), %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i8_to_4i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxbq 2(%eax), %xmm1
@@ -1751,7 +1751,7 @@ entry:
define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
; SSE2-LABEL: load_sext_4i8_to_4i64_extract:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsbq 3(%rdi), %rax
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: movsbq 2(%rdi), %rax
@@ -1760,7 +1760,7 @@ define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i8_to_4i64_extract:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsbq 3(%rdi), %rax
; SSSE3-NEXT: movq %rax, %xmm1
; SSSE3-NEXT: movsbq 2(%rdi), %rax
@@ -1769,33 +1769,33 @@ define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i8_to_4i64_extract:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbq 2(%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i8_to_4i64_extract:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i8_to_4i64_extract:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbq (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_sext_4i8_to_4i64_extract:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbq (%rdi), %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i8_to_4i64_extract:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbq 2(%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -1807,7 +1807,7 @@ define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; SSE2-LABEL: load_sext_8i1_to_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsbq (%rdi), %rax
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq $7, %rcx
@@ -1849,7 +1849,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_8i1_to_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsbq (%rdi), %rax
; SSSE3-NEXT: movq %rax, %rcx
; SSSE3-NEXT: shrq $7, %rcx
@@ -1891,7 +1891,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_8i1_to_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movsbq (%rdi), %rax
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $62, %rcx
@@ -1926,7 +1926,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_8i1_to_8i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: movsbq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $62, %rcx
@@ -1961,7 +1961,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_8i1_to_8i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movsbq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $62, %rcx
@@ -1996,7 +1996,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_8i1_to_8i16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2005,7 +2005,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_8i1_to_8i16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
@@ -2014,7 +2014,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i1_to_8i16:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsbl (%eax), %eax
; X32-SSE41-NEXT: movl %eax, %ecx
@@ -2056,31 +2056,31 @@ entry:
define <8 x i16> @load_sext_8i8_to_8i16(<8 x i8> *%ptr) {
; SSE2-LABEL: load_sext_8i8_to_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_8i8_to_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_8i8_to_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_8i8_to_8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i8_to_8i16:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -2092,7 +2092,7 @@ entry:
define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE2-LABEL: load_sext_8i8_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsbq 1(%rdi), %rax
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: movsbq (%rdi), %rax
@@ -2116,7 +2116,7 @@ define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_8i8_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsbq 1(%rdi), %rax
; SSSE3-NEXT: movq %rax, %xmm1
; SSSE3-NEXT: movsbq (%rdi), %rax
@@ -2140,7 +2140,7 @@ define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_8i8_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: pmovsxbq 2(%rdi), %xmm1
; SSE41-NEXT: pmovsxbq 4(%rdi), %xmm2
@@ -2148,7 +2148,7 @@ define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_8i8_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -2162,18 +2162,18 @@ define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_8i8_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbq (%rdi), %ymm0
; AVX2-NEXT: vpmovsxbq 4(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_sext_8i8_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbq (%rdi), %zmm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i8_to_8i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxbq 2(%eax), %xmm1
@@ -2188,7 +2188,7 @@ entry:
define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; SSE2-LABEL: load_sext_8i1_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movzbl (%rdi), %eax
; SSE2-NEXT: movl %eax, %ecx
; SSE2-NEXT: shrl $3, %ecx
@@ -2237,7 +2237,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_8i1_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movzbl (%rdi), %eax
; SSSE3-NEXT: movl %eax, %ecx
; SSSE3-NEXT: shrl $3, %ecx
@@ -2286,7 +2286,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_8i1_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movzbl (%rdi), %eax
; SSE41-NEXT: movl %eax, %ecx
; SSE41-NEXT: shrl %ecx
@@ -2327,7 +2327,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_8i1_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: movsbq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $58, %rcx
@@ -2363,7 +2363,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_8i1_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movsbq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $58, %rcx
@@ -2399,7 +2399,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_8i1_to_8i32:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2407,7 +2407,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_8i1_to_8i32:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2415,7 +2415,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i1_to_8i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movzbl (%eax), %eax
; X32-SSE41-NEXT: movl %eax, %ecx
@@ -2462,7 +2462,7 @@ entry:
define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSE2-LABEL: load_sext_8i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -2474,7 +2474,7 @@ define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_8i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -2486,13 +2486,13 @@ define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_8i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
; SSE41-NEXT: pmovsxbd 4(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_8i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -2501,17 +2501,17 @@ define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_8i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_sext_8i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbd (%rdi), %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i8_to_8i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbd (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxbd 4(%eax), %xmm1
@@ -2524,7 +2524,7 @@ entry:
define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; SSE2-LABEL: load_sext_16i1_to_16i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
@@ -2618,7 +2618,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_16i1_to_16i8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pushq %rbp
; SSSE3-NEXT: pushq %r15
; SSSE3-NEXT: pushq %r14
@@ -2712,7 +2712,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_16i1_to_16i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movswq (%rdi), %rax
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $62, %rcx
@@ -2778,7 +2778,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_16i1_to_16i8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: movswq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $62, %rcx
@@ -2844,7 +2844,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_16i1_to_16i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movswq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $62, %rcx
@@ -2910,7 +2910,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_16i1_to_16i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: kmovw (%rdi), %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -2918,7 +2918,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_16i1_to_16i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2926,7 +2926,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i1_to_16i8:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movswl (%eax), %eax
; X32-SSE41-NEXT: movl %eax, %ecx
@@ -2999,7 +2999,7 @@ entry:
define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; SSE2-LABEL: load_sext_16i1_to_16i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movl %eax, %ecx
; SSE2-NEXT: shrl $7, %ecx
@@ -3088,7 +3088,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_16i1_to_16i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movzwl (%rdi), %eax
; SSSE3-NEXT: movl %eax, %ecx
; SSSE3-NEXT: shrl $7, %ecx
@@ -3177,7 +3177,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_16i1_to_16i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movzwl (%rdi), %eax
; SSE41-NEXT: movl %eax, %ecx
; SSE41-NEXT: shrl %ecx
@@ -3250,7 +3250,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_16i1_to_16i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %r15
@@ -3341,7 +3341,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_16i1_to_16i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %r15
@@ -3432,21 +3432,21 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_16i1_to_16i16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: kmovw (%rdi), %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_16i1_to_16i16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i1_to_16i16:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movzwl (%eax), %eax
; X32-SSE41-NEXT: movl %eax, %ecx
@@ -3525,7 +3525,7 @@ entry:
define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; SSE2-LABEL: load_sext_32i1_to_32i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
@@ -3697,7 +3697,7 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_32i1_to_32i8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pushq %rbp
; SSSE3-NEXT: pushq %r15
; SSSE3-NEXT: pushq %r14
@@ -3869,7 +3869,7 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_32i1_to_32i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movswq (%rdi), %rax
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $62, %rcx
@@ -3997,7 +3997,7 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_32i1_to_32i8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
@@ -4140,7 +4140,7 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_32i1_to_32i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
@@ -4283,7 +4283,7 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_32i1_to_32i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: kmovw (%rdi), %k1
; AVX512F-NEXT: kmovw 2(%rdi), %k2
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -4294,14 +4294,14 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_32i1_to_32i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovd (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_32i1_to_32i8:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pushl %esi
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movswl (%eax), %ecx
@@ -4438,7 +4438,7 @@ entry:
define <16 x i16> @load_sext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-LABEL: load_sext_16i8_to_16i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
@@ -4448,7 +4448,7 @@ define <16 x i16> @load_sext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_16i8_to_16i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm0
@@ -4458,30 +4458,30 @@ define <16 x i16> @load_sext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_16i8_to_16i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: pmovsxbw 8(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_16i8_to_16i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX1-NEXT: vpmovsxbw 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_16i8_to_16i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_sext_16i8_to_16i16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxbw (%rdi), %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i8_to_16i16:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxbw 8(%eax), %xmm1
@@ -4494,7 +4494,7 @@ entry:
define <2 x i64> @load_sext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSE2-LABEL: load_sext_2i16_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -4504,7 +4504,7 @@ define <2 x i64> @load_sext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_2i16_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
@@ -4514,17 +4514,17 @@ define <2 x i64> @load_sext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_2i16_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_2i16_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxwq (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_2i16_to_2i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -4536,31 +4536,31 @@ entry:
define <4 x i32> @load_sext_4i16_to_4i32(<4 x i16> *%ptr) {
; SSE2-LABEL: load_sext_4i16_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i16_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i16_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_4i16_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i16_to_4i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwd (%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -4572,7 +4572,7 @@ entry:
define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-LABEL: load_sext_4i16_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movswq 2(%rdi), %rax
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: movswq (%rdi), %rax
@@ -4586,7 +4586,7 @@ define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i16_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movswq 2(%rdi), %rax
; SSSE3-NEXT: movq %rax, %xmm1
; SSSE3-NEXT: movswq (%rdi), %rax
@@ -4600,13 +4600,13 @@ define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i16_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: pmovsxwq 4(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i16_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -4615,17 +4615,17 @@ define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i16_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxwq (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_sext_4i16_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxwq (%rdi), %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i16_to_4i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxwq 4(%eax), %xmm1
@@ -4638,7 +4638,7 @@ entry:
define <8 x i32> @load_sext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSE2-LABEL: load_sext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
@@ -4648,7 +4648,7 @@ define <8 x i32> @load_sext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $16, %xmm0
@@ -4658,30 +4658,30 @@ define <8 x i32> @load_sext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: pmovsxwd 8(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxwd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_sext_8i16_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxwd (%rdi), %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i16_to_8i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwd (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxwd 8(%eax), %xmm1
@@ -4694,7 +4694,7 @@ entry:
define <2 x i64> @load_sext_2i32_to_2i64(<2 x i32> *%ptr) {
; SSE2-LABEL: load_sext_2i32_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
@@ -4702,7 +4702,7 @@ define <2 x i64> @load_sext_2i32_to_2i64(<2 x i32> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_2i32_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
@@ -4710,17 +4710,17 @@ define <2 x i64> @load_sext_2i32_to_2i64(<2 x i32> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_2i32_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_2i32_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxdq (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_2i32_to_2i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxdq (%eax), %xmm0
; X32-SSE41-NEXT: retl
@@ -4732,7 +4732,7 @@ entry:
define <4 x i64> @load_sext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSE2-LABEL: load_sext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
@@ -4744,7 +4744,7 @@ define <4 x i64> @load_sext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
@@ -4756,30 +4756,30 @@ define <4 x i64> @load_sext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: pmovsxdq 8(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovsxdq (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxdq (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_sext_4i32_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovsxdq (%rdi), %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i32_to_4i64:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxdq (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxdq 8(%eax), %xmm1
@@ -4792,33 +4792,33 @@ entry:
define i32 @sext_2i8_to_i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_2i8_to_i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_2i8_to_i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm0
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_2i8_to_i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_2i8_to_i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_2i8_to_i32:
-; X32-SSE41: # BB#0: # %entry
+; X32-SSE41: # %bb.0: # %entry
; X32-SSE41-NEXT: pushl %eax
; X32-SSE41-NEXT: .cfi_def_cfa_offset 8
; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0
@@ -4834,7 +4834,7 @@ entry:
define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
; SSE2-LABEL: sext_4i1_to_4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -4847,7 +4847,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i1_to_4i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
@@ -4860,7 +4860,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i1_to_4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pmovsxdq %xmm0, %xmm2
@@ -4870,7 +4870,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i1_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
@@ -4880,21 +4880,21 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_4i1_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_4i1_to_4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
; AVX512-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i1_to_4i64:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: pslld $31, %xmm0
; X32-SSE41-NEXT: psrad $31, %xmm0
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm2
@@ -4908,7 +4908,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
; SSE2-LABEL: sext_4i8_to_4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pslld $24, %xmm0
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -4921,7 +4921,7 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i8_to_4i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pslld $24, %xmm0
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
@@ -4934,7 +4934,7 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i8_to_4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pslld $24, %xmm0
; SSE41-NEXT: psrad $24, %xmm0
; SSE41-NEXT: pmovsxdq %xmm0, %xmm2
@@ -4944,7 +4944,7 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i8_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $24, %xmm0, %xmm0
; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
@@ -4954,21 +4954,21 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_4i8_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $24, %xmm0, %xmm0
; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sext_4i8_to_4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpslld $24, %xmm0, %xmm0
; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i8_to_4i64:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: pslld $24, %xmm0
; X32-SSE41-NEXT: psrad $24, %xmm0
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm2
@@ -4982,7 +4982,7 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2)nounwind {
; SSE-LABEL: sext_32xi1_to_32xi8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm5, %xmm1
; SSE-NEXT: pcmpeqw %xmm4, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
@@ -4993,7 +4993,7 @@ define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2)nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: sext_32xi1_to_32xi8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpeqw %xmm4, %xmm5, %xmm4
@@ -5008,7 +5008,7 @@ define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2)nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_32xi1_to_32xi8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -5016,7 +5016,7 @@ define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2)nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sext_32xi1_to_32xi8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -5027,14 +5027,14 @@ define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2)nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sext_32xi1_to_32xi8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: sext_32xi1_to_32xi8:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: pushl %ebp
; X32-SSE41-NEXT: movl %esp, %ebp
; X32-SSE41-NEXT: andl $-16, %esp
@@ -5057,7 +5057,7 @@ define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2)nounwind {
define <2 x i32> @sext_2i8_to_2i32(<2 x i8>* %addr) {
; SSE2-LABEL: sext_2i8_to_2i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -5068,7 +5068,7 @@ define <2 x i32> @sext_2i8_to_2i32(<2 x i8>* %addr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_2i8_to_2i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movzwl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -5079,19 +5079,19 @@ define <2 x i32> @sext_2i8_to_2i32(<2 x i8>* %addr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_2i8_to_2i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: paddq %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_2i8_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbq (%rdi), %xmm0
; AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_2i8_to_2i32:
-; X32-SSE41: # BB#0:
+; X32-SSE41: # %bb.0:
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
; X32-SSE41-NEXT: paddq %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vector-shift-ashr-128.ll b/test/CodeGen/X86/vector-shift-ashr-128.ll
index 3cfe0003807..a37b8602459 100644
--- a/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -19,7 +19,7 @@
define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: var_shift_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: psrlq %xmm1, %xmm3
@@ -35,7 +35,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psrlq %xmm1, %xmm3
@@ -51,7 +51,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
@@ -65,7 +65,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -74,14 +74,14 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: var_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
@@ -90,12 +90,12 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X32-SSE-NEXT: movdqa %xmm2, %xmm3
; X32-SSE-NEXT: psrlq %xmm1, %xmm3
@@ -115,7 +115,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlq $32, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -139,7 +139,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: movdqa %xmm0, %xmm3
@@ -160,7 +160,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
@@ -176,34 +176,34 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psrlq $32, %xmm2
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
@@ -231,7 +231,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: var_shift_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psllw $12, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
@@ -266,7 +266,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psllw $12, %xmm0
@@ -296,7 +296,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
@@ -314,7 +314,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -324,14 +324,14 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v8i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -341,7 +341,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
@@ -350,7 +350,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v8i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -359,12 +359,12 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllw $12, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psraw $15, %xmm2
@@ -403,7 +403,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: var_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psllw $5, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
@@ -461,7 +461,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
@@ -497,7 +497,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
@@ -525,14 +525,14 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: var_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: var_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vpsravd %zmm1, %zmm0, %zmm0
@@ -541,7 +541,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512VL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
@@ -550,7 +550,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; X32-SSE-NEXT: psllw $5, %xmm1
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
@@ -616,7 +616,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE-LABEL: splatvar_shift_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE-NEXT: psrlq %xmm1, %xmm2
; SSE-NEXT: psrlq %xmm1, %xmm0
@@ -625,7 +625,7 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
@@ -634,7 +634,7 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v2i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
@@ -642,7 +642,7 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v2i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
@@ -650,7 +650,7 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -658,12 +658,12 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X32-SSE-NEXT: psrlq %xmm1, %xmm2
; X32-SSE-NEXT: psrlq %xmm1, %xmm0
@@ -677,44 +677,44 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: psrad %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: psrad %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOP-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-SSE-NEXT: psrad %xmm2, %xmm0
@@ -726,44 +726,44 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psraw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE41-NEXT: psraw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pextrw $0, %xmm1, %eax
; X32-SSE-NEXT: movd %eax, %xmm1
; X32-SSE-NEXT: psraw %xmm1, %xmm0
@@ -775,7 +775,7 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
@@ -836,7 +836,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pshufb %xmm0, %xmm1
@@ -874,7 +874,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_shift_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
@@ -904,7 +904,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
@@ -933,7 +933,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
@@ -941,7 +941,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
@@ -949,7 +949,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
@@ -959,7 +959,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
@@ -969,7 +969,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
@@ -1039,7 +1039,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: constant_shift_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: psrlq $7, %xmm0
@@ -1050,7 +1050,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlq $7, %xmm1
; SSE41-NEXT: psrlq $1, %xmm0
@@ -1061,7 +1061,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -1071,7 +1071,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -1079,14 +1079,14 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: constant_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [1,7]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
@@ -1095,12 +1095,12 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psrlq $1, %xmm2
@@ -1119,7 +1119,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $5, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -1135,7 +1135,7 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $7, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
@@ -1149,7 +1149,7 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -1160,32 +1160,32 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrad $5, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
@@ -1205,7 +1205,7 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: constant_shift_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psraw $4, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
@@ -1222,7 +1222,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psraw $4, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -1235,7 +1235,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
@@ -1245,7 +1245,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1254,14 +1254,14 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v8i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
@@ -1270,7 +1270,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
@@ -1279,7 +1279,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v8i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
@@ -1287,12 +1287,12 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psraw $4, %xmm1
; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
@@ -1313,7 +1313,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
@@ -1371,7 +1371,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
@@ -1407,7 +1407,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [8192,24640,41088,57536,49376,32928,16480,32]
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
@@ -1435,14 +1435,14 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: constant_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1450,7 +1450,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
@@ -1458,7 +1458,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
@@ -1524,7 +1524,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: splatconstant_shift_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $7, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
@@ -1534,7 +1534,7 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatconstant_shift_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $7, %xmm1
; SSE41-NEXT: psrlq $7, %xmm0
@@ -1542,28 +1542,28 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatconstant_shift_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $7, %xmm0, %xmm1
; AVX2-NEXT: vpsrlq $7, %xmm0, %xmm0
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1571,12 +1571,12 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraq $7, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrad $7, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
@@ -1590,32 +1590,32 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrad $5, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrad $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsrad $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $5, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrad $5, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrad $5, %xmm0
; X32-SSE-NEXT: retl
%shift = ashr <4 x i32> %a, <i32 5, i32 5, i32 5, i32 5>
@@ -1624,32 +1624,32 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psraw $3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsraw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $3, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraw $3, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psraw $3, %xmm0
; X32-SSE-NEXT: retl
%shift = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -1658,7 +1658,7 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $3, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -1667,7 +1667,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -1676,14 +1676,14 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -1692,7 +1692,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -1701,7 +1701,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrlw $3, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
diff --git a/test/CodeGen/X86/vector-shift-ashr-256.ll b/test/CodeGen/X86/vector-shift-ashr-256.ll
index d29518d3640..cb2c816758c 100644
--- a/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -18,7 +18,7 @@
define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: var_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
@@ -44,7 +44,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -53,7 +53,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
@@ -65,7 +65,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -74,7 +74,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
@@ -82,12 +82,12 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
@@ -113,7 +113,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X32-AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
; X32-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -126,7 +126,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: var_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -156,12 +156,12 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
@@ -173,22 +173,22 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; X32-AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -218,7 +218,7 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = ashr <8 x i32> %a, %b
@@ -227,7 +227,7 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: var_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
@@ -262,7 +262,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -276,7 +276,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
@@ -288,7 +288,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm3, %xmm2
@@ -300,7 +300,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v16i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
@@ -308,7 +308,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
@@ -316,7 +316,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
@@ -324,12 +324,12 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v16i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
@@ -364,7 +364,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -382,7 +382,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: var_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
@@ -437,7 +437,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -465,7 +465,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2
@@ -477,7 +477,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
@@ -489,7 +489,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -517,7 +517,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
@@ -525,7 +525,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -553,7 +553,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpsravw %zmm1, %zmm0, %zmm0
@@ -561,7 +561,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
@@ -616,7 +616,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -652,7 +652,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -666,7 +666,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
@@ -675,7 +675,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
@@ -686,7 +686,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
@@ -695,19 +695,19 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraq %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -721,7 +721,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
@@ -735,7 +735,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
@@ -744,13 +744,13 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
@@ -759,25 +759,25 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X32-AVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
@@ -786,7 +786,7 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X32-AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
@@ -797,7 +797,7 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
@@ -806,13 +806,13 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
@@ -821,25 +821,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X32-AVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
@@ -848,7 +848,7 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X32-AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
@@ -859,7 +859,7 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
@@ -908,7 +908,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
@@ -937,7 +937,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
@@ -948,7 +948,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -961,7 +961,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: splatvar_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
@@ -990,7 +990,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
@@ -999,7 +999,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: splatvar_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
@@ -1028,7 +1028,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatvar_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
@@ -1037,7 +1037,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
@@ -1086,7 +1086,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
@@ -1124,7 +1124,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: constant_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlq $62, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $31, %xmm1, %xmm1
@@ -1142,7 +1142,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -1150,7 +1150,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -1161,7 +1161,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -1169,7 +1169,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [1,7,31,62]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
@@ -1177,12 +1177,12 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
@@ -1209,7 +1209,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X32-AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
@@ -1223,7 +1223,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -1241,12 +1241,12 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
@@ -1254,22 +1254,22 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
; X32-AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -1287,7 +1287,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
@@ -1296,7 +1296,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: constant_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
@@ -1315,7 +1315,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
@@ -1330,7 +1330,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -1341,7 +1341,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -1352,14 +1352,14 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v16i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
@@ -1367,19 +1367,19 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
; AVX512DQVL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v16i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; X32-AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
@@ -1398,7 +1398,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
@@ -1417,7 +1417,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [8192,24640,41088,57536,49376,32928,16480,32]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -1464,7 +1464,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -1492,7 +1492,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1502,7 +1502,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
@@ -1512,7 +1512,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -1540,14 +1540,14 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -1575,14 +1575,14 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -1629,7 +1629,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -1665,7 +1665,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrad $7, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $7, %xmm1, %xmm1
@@ -1677,14 +1677,14 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $7, %ymm0, %ymm1
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1694,26 +1694,26 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrad $7, %ymm0, %ymm1
; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraq $7, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsrad $7, %xmm1, %xmm2
; X32-AVX1-NEXT: vpsrlq $7, %xmm1, %xmm1
@@ -1725,7 +1725,7 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrad $7, %ymm0, %ymm1
; X32-AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; X32-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
@@ -1736,7 +1736,7 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $5, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $5, %xmm0, %xmm0
@@ -1744,12 +1744,12 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsrad $5, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrad $5, %xmm0, %xmm0
@@ -1757,22 +1757,22 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsrad $5, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpsrad $5, %xmm0, %xmm0
@@ -1780,7 +1780,7 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = ashr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -1789,7 +1789,7 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $3, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $3, %xmm0, %xmm0
@@ -1797,12 +1797,12 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsraw $3, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsraw $3, %xmm0, %xmm0
@@ -1810,22 +1810,22 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsraw $3, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpsraw $3, %xmm0, %xmm0
@@ -1833,7 +1833,7 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -1842,7 +1842,7 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
@@ -1858,7 +1858,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -1867,7 +1867,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1877,7 +1877,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -1886,7 +1886,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -1895,7 +1895,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -1904,7 +1904,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsrlw $3, %xmm1, %xmm1
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
@@ -1920,7 +1920,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
diff --git a/test/CodeGen/X86/vector-shift-ashr-512.ll b/test/CodeGen/X86/vector-shift-ashr-512.ll
index 9442206d20b..77fb34a95a3 100644
--- a/test/CodeGen/X86/vector-shift-ashr-512.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-512.ll
@@ -8,7 +8,7 @@
define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: var_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsravq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = ashr <8 x i64> %a, %b
@@ -17,7 +17,7 @@ define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: var_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = ashr <16 x i32> %a, %b
@@ -26,7 +26,7 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpsravd %zmm2, %zmm0, %zmm0
@@ -38,7 +38,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = ashr <32 x i16> %a, %b
@@ -47,7 +47,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -99,7 +99,7 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsraw $4, %zmm2, %zmm3
; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
@@ -141,7 +141,7 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: splatvar_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsraq %xmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
@@ -151,7 +151,7 @@ define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: splatvar_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; ALL-NEXT: vpsrad %xmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -162,14 +162,14 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512DQ-NEXT: vpsraw %xmm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsraw %xmm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
@@ -180,7 +180,7 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
@@ -226,7 +226,7 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsraw $4, %zmm2, %zmm3
@@ -270,7 +270,7 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: constant_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsravq {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
%shift = ashr <8 x i64> %a, <i64 1, i64 7, i64 31, i64 62, i64 1, i64 7, i64 31, i64 62>
@@ -279,7 +279,7 @@ define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: constant_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
%shift = ashr <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
@@ -288,7 +288,7 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpsravd %zmm2, %zmm0, %zmm0
@@ -299,7 +299,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = ashr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
@@ -308,7 +308,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -353,7 +353,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsraw $4, %zmm1, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -395,7 +395,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsraq $7, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = ashr <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
@@ -404,7 +404,7 @@ define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrad $5, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = ashr <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -413,13 +413,13 @@ define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsraw $3, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = ashr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -428,7 +428,7 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -442,7 +442,7 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -455,14 +455,14 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
define <64 x i8> @ashr_const7_v64i8(<64 x i8> %a) {
; AVX512DQ-LABEL: ashr_const7_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512DQ-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
; AVX512DQ-NEXT: vpcmpgtb %ymm1, %ymm2, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: ashr_const7_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpcmpgtb %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
diff --git a/test/CodeGen/X86/vector-shift-lshr-128.ll b/test/CodeGen/X86/vector-shift-lshr-128.ll
index 2511cfedf7e..9dc332799eb 100644
--- a/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -19,7 +19,7 @@
define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: var_shift_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrlq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -28,7 +28,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrlq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -37,7 +37,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
@@ -45,34 +45,34 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v2i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v2i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: psrlq %xmm1, %xmm2
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -85,7 +85,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlq $32, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -109,7 +109,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: movdqa %xmm0, %xmm3
@@ -130,7 +130,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
@@ -146,34 +146,34 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psrlq $32, %xmm2
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
@@ -201,7 +201,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: var_shift_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psllw $12, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
@@ -236,7 +236,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psllw $12, %xmm0
@@ -266,7 +266,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
@@ -284,7 +284,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -295,14 +295,14 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v8i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -312,7 +312,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
@@ -321,7 +321,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v8i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQVL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -330,12 +330,12 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlvw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllw $12, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psraw $15, %xmm2
@@ -374,7 +374,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: var_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psllw $5, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -405,7 +405,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -429,7 +429,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
@@ -445,14 +445,14 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: var_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: var_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
@@ -461,7 +461,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
@@ -470,7 +470,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllw $5, %xmm1
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: pxor %xmm3, %xmm3
@@ -509,32 +509,32 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE-LABEL: splatvar_shift_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrlq %xmm1, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -544,44 +544,44 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: psrld %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: psrld %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOP-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-SSE-NEXT: psrld %xmm2, %xmm0
@@ -593,44 +593,44 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psrlw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE41-NEXT: psrlw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOP-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pextrw $0, %xmm1, %eax
; X32-SSE-NEXT: movd %eax, %xmm1
; X32-SSE-NEXT: psrlw %xmm1, %xmm0
@@ -642,7 +642,7 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
@@ -676,7 +676,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pshufb %xmm0, %xmm1
@@ -703,7 +703,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_shift_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
@@ -721,7 +721,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm2
@@ -738,7 +738,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
@@ -746,7 +746,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
@@ -754,7 +754,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
@@ -764,7 +764,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
@@ -774,7 +774,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
@@ -817,7 +817,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: constant_shift_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: psrlq $7, %xmm0
@@ -825,7 +825,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlq $7, %xmm1
; SSE41-NEXT: psrlq $1, %xmm0
@@ -833,41 +833,41 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v2i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v2i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlq $1, %xmm1
; X32-SSE-NEXT: psrlq $7, %xmm0
@@ -879,7 +879,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $5, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -895,7 +895,7 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $7, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
@@ -909,7 +909,7 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $5, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -920,32 +920,32 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrld $5, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
@@ -965,7 +965,7 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: constant_shift_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
@@ -982,7 +982,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlw $4, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -995,7 +995,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
@@ -1005,7 +1005,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -1015,14 +1015,14 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v8i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
@@ -1031,7 +1031,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
@@ -1040,7 +1040,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v8i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
@@ -1048,12 +1048,12 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
@@ -1074,7 +1074,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -1105,7 +1105,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlw $4, %xmm2
@@ -1126,7 +1126,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
@@ -1142,14 +1142,14 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: constant_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1157,7 +1157,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
@@ -1165,7 +1165,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm3, %xmm3
@@ -1204,32 +1204,32 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $7, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $7, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsrlq $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlq $7, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $7, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrlq $7, %xmm0
; X32-SSE-NEXT: retl
%shift = lshr <2 x i64> %a, <i64 7, i64 7>
@@ -1238,32 +1238,32 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $5, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsrld $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrld $5, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $5, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrld $5, %xmm0
; X32-SSE-NEXT: retl
%shift = lshr <4 x i32> %a, <i32 5, i32 5, i32 5, i32 5>
@@ -1272,32 +1272,32 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsrlw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrlw $3, %xmm0
; X32-SSE-NEXT: retl
%shift = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -1306,38 +1306,38 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $3, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrlw $3, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
diff --git a/test/CodeGen/X86/vector-shift-lshr-256.ll b/test/CodeGen/X86/vector-shift-lshr-256.ll
index 1d84a1f500e..7429ae003ea 100644
--- a/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -18,7 +18,7 @@
define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: var_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
@@ -33,12 +33,12 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
@@ -50,22 +50,22 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
@@ -80,7 +80,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = lshr <4 x i64> %a, %b
@@ -89,7 +89,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: var_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -119,12 +119,12 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
@@ -136,22 +136,22 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; X32-AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -181,7 +181,7 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = lshr <8 x i32> %a, %b
@@ -190,7 +190,7 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: var_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
@@ -225,7 +225,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -239,7 +239,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
@@ -251,7 +251,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm3, %xmm2
@@ -263,7 +263,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v16i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
@@ -271,7 +271,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
@@ -279,7 +279,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQVL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
@@ -287,12 +287,12 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v16i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
@@ -327,7 +327,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -345,7 +345,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: var_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -379,7 +379,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -395,7 +395,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2
@@ -407,7 +407,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
@@ -419,7 +419,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -435,7 +435,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
@@ -443,7 +443,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -459,7 +459,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BWVL-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
@@ -467,7 +467,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -501,7 +501,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
@@ -525,7 +525,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
@@ -533,12 +533,12 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
@@ -546,22 +546,22 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
@@ -569,7 +569,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -579,7 +579,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrld %xmm1, %xmm2, %xmm2
@@ -588,13 +588,13 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX1-NEXT: vpsrld %xmm1, %xmm2, %xmm2
@@ -603,25 +603,25 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X32-AVX1-NEXT: vpsrld %xmm1, %xmm2, %xmm2
@@ -630,7 +630,7 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X32-AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
@@ -641,7 +641,7 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
@@ -650,13 +650,13 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
@@ -665,25 +665,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
@@ -692,7 +692,7 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X32-AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
@@ -703,7 +703,7 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -735,7 +735,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -752,7 +752,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
@@ -763,7 +763,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -776,7 +776,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: splatvar_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -793,7 +793,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
@@ -802,7 +802,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: splatvar_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512DQVL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -819,7 +819,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatvar_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
@@ -828,7 +828,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -860,7 +860,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; X32-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
@@ -886,7 +886,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: constant_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlq $62, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $31, %xmm1, %xmm1
@@ -898,12 +898,12 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -914,22 +914,22 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -945,7 +945,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = lshr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
@@ -954,7 +954,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $5, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -972,12 +972,12 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
@@ -985,22 +985,22 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsrld $7, %xmm0, %xmm1
; X32-AVX1-NEXT: vpsrld $5, %xmm0, %xmm2
; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -1018,7 +1018,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = lshr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
@@ -1027,7 +1027,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: constant_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
@@ -1046,7 +1046,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
@@ -1061,7 +1061,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -1072,7 +1072,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -1083,14 +1083,14 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v16i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
@@ -1098,19 +1098,19 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512DQVL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v16i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; X32-AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
@@ -1129,7 +1129,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
@@ -1148,7 +1148,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -1178,7 +1178,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -1194,7 +1194,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1204,7 +1204,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
@@ -1214,7 +1214,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -1230,14 +1230,14 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -1253,14 +1253,14 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -1290,7 +1290,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -1314,7 +1314,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
@@ -1322,12 +1322,12 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
@@ -1335,22 +1335,22 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
@@ -1358,7 +1358,7 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = lshr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
@@ -1367,7 +1367,7 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $5, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrld $5, %xmm0, %xmm0
@@ -1375,12 +1375,12 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $5, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsrld $5, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrld $5, %xmm0, %xmm0
@@ -1388,22 +1388,22 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrld $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrld $5, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $5, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsrld $5, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpsrld $5, %xmm0, %xmm0
@@ -1411,7 +1411,7 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrld $5, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = lshr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -1420,7 +1420,7 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlw $3, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
@@ -1428,12 +1428,12 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsrlw $3, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
@@ -1441,22 +1441,22 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsrlw $3, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
@@ -1464,7 +1464,7 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -1473,7 +1473,7 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
@@ -1484,13 +1484,13 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1500,25 +1500,25 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsrlw $3, %xmm1, %xmm1
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
@@ -1529,7 +1529,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
diff --git a/test/CodeGen/X86/vector-shift-lshr-512.ll b/test/CodeGen/X86/vector-shift-lshr-512.ll
index cc00dd6250a..1157c0a6696 100644
--- a/test/CodeGen/X86/vector-shift-lshr-512.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-512.ll
@@ -8,7 +8,7 @@
define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: var_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <8 x i64> %a, %b
@@ -17,7 +17,7 @@ define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: var_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <16 x i32> %a, %b
@@ -26,7 +26,7 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
@@ -38,7 +38,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = lshr <32 x i16> %a, %b
@@ -47,7 +47,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm5, %ymm4, %ymm4
@@ -78,7 +78,7 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm2
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
@@ -105,7 +105,7 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: splatvar_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrlq %xmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
@@ -115,7 +115,7 @@ define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: splatvar_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; ALL-NEXT: vpsrld %xmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -126,14 +126,14 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
@@ -144,7 +144,7 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -173,7 +173,7 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm2
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
@@ -202,7 +202,7 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: constant_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrlvq {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <8 x i64> %a, <i64 1, i64 7, i64 31, i64 62, i64 1, i64 7, i64 31, i64 62>
@@ -211,7 +211,7 @@ define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: constant_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
@@ -220,7 +220,7 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
@@ -231,7 +231,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = lshr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
@@ -240,7 +240,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -268,7 +268,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm2
@@ -295,7 +295,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrlq $7, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
@@ -304,7 +304,7 @@ define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrld $5, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -313,13 +313,13 @@ define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = lshr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -328,7 +328,7 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -337,7 +337,7 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shift-shl-128.ll b/test/CodeGen/X86/vector-shift-shl-128.ll
index 33b479f96ee..ef4c8855182 100644
--- a/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -19,7 +19,7 @@
define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: var_shift_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psllq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -28,7 +28,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -37,7 +37,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
@@ -45,32 +45,32 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v2i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v2i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: psllq %xmm1, %xmm2
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -83,7 +83,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pslld $23, %xmm1
; SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
@@ -98,7 +98,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pslld $23, %xmm1
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
@@ -106,7 +106,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
@@ -114,32 +114,32 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pslld $23, %xmm1
; X32-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -158,7 +158,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: var_shift_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psllw $12, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
@@ -193,7 +193,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psllw $12, %xmm0
@@ -223,7 +223,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
@@ -241,7 +241,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -252,12 +252,12 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v8i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -267,7 +267,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
@@ -276,7 +276,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v8i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQVL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -285,12 +285,12 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsllvw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllw $12, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psraw $15, %xmm2
@@ -329,7 +329,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: var_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psllw $5, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -359,7 +359,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -382,7 +382,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX-NEXT: vpsllw $4, %xmm0, %xmm2
; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
@@ -397,12 +397,12 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: var_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: var_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
@@ -411,7 +411,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
@@ -420,7 +420,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllw $5, %xmm1
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: pxor %xmm3, %xmm3
@@ -458,32 +458,32 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE-LABEL: splatvar_shift_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllq %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllq %xmm1, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -493,44 +493,44 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: pslld %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pslld %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT: vpslld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOP-NEXT: vpslld %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpslld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpslld %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-SSE-NEXT: pslld %xmm2, %xmm0
@@ -542,44 +542,44 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psllw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE41-NEXT: psllw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOP-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pextrw $0, %xmm1, %eax
; X32-SSE-NEXT: movd %eax, %xmm1
; X32-SSE-NEXT: psllw %xmm1, %xmm0
@@ -591,7 +591,7 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
@@ -624,7 +624,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pshufb %xmm0, %xmm1
@@ -650,7 +650,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_shift_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
@@ -667,7 +667,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX2-NEXT: vpsllw $4, %xmm0, %xmm2
@@ -683,20 +683,20 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
@@ -706,7 +706,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
@@ -716,7 +716,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
@@ -758,7 +758,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: constant_shift_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psllq $1, %xmm1
; SSE2-NEXT: psllq $7, %xmm0
@@ -766,7 +766,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psllq $7, %xmm1
; SSE41-NEXT: psllq $1, %xmm0
@@ -774,39 +774,39 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $7, %xmm0, %xmm1
; AVX1-NEXT: vpsllq $1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v2i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v2i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllq $1, %xmm1
; X32-SSE-NEXT: psllq $7, %xmm0
@@ -818,7 +818,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_shift_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -830,42 +830,42 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-SSE-NEXT: pmuludq %xmm1, %xmm0
@@ -881,27 +881,27 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: constant_shift_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: constant_shift_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v8i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
@@ -910,17 +910,17 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v8i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v8i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsllvw {{.*}}(%rip), %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
%shift = shl <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
@@ -929,7 +929,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -959,7 +959,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psllw $4, %xmm2
@@ -979,7 +979,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
@@ -994,12 +994,12 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: constant_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1007,7 +1007,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
@@ -1015,7 +1015,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm3, %xmm3
@@ -1053,32 +1053,32 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllq $7, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq $7, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsllq $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllq $7, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllq $7, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllq $7, %xmm0
; X32-SSE-NEXT: retl
%shift = shl <2 x i64> %a, <i64 7, i64 7>
@@ -1087,32 +1087,32 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $5, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpslld $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpslld $5, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpslld $5, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pslld $5, %xmm0
; X32-SSE-NEXT: retl
%shift = shl <4 x i32> %a, <i32 5, i32 5, i32 5, i32 5>
@@ -1121,32 +1121,32 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllw $3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpsllw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $3, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v8i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $3, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllw $3, %xmm0
; X32-SSE-NEXT: retl
%shift = shl <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -1155,36 +1155,36 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllw $3, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_shift_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $3, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v16i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $3, %xmm0, %xmm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psllw $3, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
diff --git a/test/CodeGen/X86/vector-shift-shl-256.ll b/test/CodeGen/X86/vector-shift-shl-256.ll
index a2cb3621d7b..712d9dbeef6 100644
--- a/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -18,7 +18,7 @@
define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: var_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm4
@@ -33,12 +33,12 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshlq %xmm2, %xmm3, %xmm2
@@ -47,22 +47,22 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; X32-AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm4
@@ -77,7 +77,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = shl <4 x i64> %a, %b
@@ -86,7 +86,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: var_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
@@ -102,12 +102,12 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshld %xmm2, %xmm3, %xmm2
@@ -116,22 +116,22 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vpslld $23, %xmm2, %xmm2
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
@@ -147,7 +147,7 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = shl <8 x i32> %a, %b
@@ -156,7 +156,7 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: var_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
@@ -191,7 +191,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -205,7 +205,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshlw %xmm2, %xmm3, %xmm2
@@ -214,7 +214,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vpshlw %xmm2, %xmm3, %xmm2
@@ -223,7 +223,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v16i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
@@ -231,7 +231,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
@@ -239,7 +239,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQVL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
@@ -247,12 +247,12 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v16i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsllvw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
@@ -287,7 +287,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -305,7 +305,7 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: var_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
@@ -336,7 +336,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -351,7 +351,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm3, %xmm2
@@ -360,7 +360,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm3, %xmm2
@@ -369,7 +369,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: var_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -384,7 +384,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
@@ -392,7 +392,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -407,7 +407,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: var_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BWVL-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
@@ -415,7 +415,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: var_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm3
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
@@ -446,7 +446,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: var_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
@@ -469,7 +469,7 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
@@ -477,12 +477,12 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
@@ -490,22 +490,22 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
; X32-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
@@ -513,7 +513,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -523,7 +523,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpslld %xmm1, %xmm2, %xmm2
@@ -532,13 +532,13 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX1-NEXT: vpslld %xmm1, %xmm2, %xmm2
@@ -547,25 +547,25 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpslld %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpslld %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X32-AVX1-NEXT: vpslld %xmm1, %xmm2, %xmm2
@@ -574,7 +574,7 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X32-AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
@@ -585,7 +585,7 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
@@ -594,13 +594,13 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
@@ -609,25 +609,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X32-AVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
@@ -636,7 +636,7 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X32-AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
@@ -647,7 +647,7 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -676,7 +676,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -692,7 +692,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -702,7 +702,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
@@ -712,7 +712,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: splatvar_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -728,7 +728,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
@@ -737,7 +737,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: splatvar_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512DQVL-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -753,7 +753,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatvar_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
@@ -762,7 +762,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: splatvar_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -791,7 +791,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
@@ -816,7 +816,7 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: constant_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $62, %xmm1, %xmm2
; AVX1-NEXT: vpsllq $31, %xmm1, %xmm1
@@ -828,12 +828,12 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
@@ -841,22 +841,22 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -872,7 +872,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = shl <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
@@ -881,7 +881,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -889,12 +889,12 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
@@ -902,22 +902,22 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
@@ -925,7 +925,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
@@ -934,7 +934,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: constant_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -942,12 +942,12 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
@@ -955,17 +955,17 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v16i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
@@ -973,17 +973,17 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v16i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpmullw {{\.LCPI.*}}, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpmullw {{\.LCPI.*}}, %xmm0, %xmm0
@@ -991,7 +991,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
@@ -1000,7 +1000,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
@@ -1027,7 +1027,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -1042,7 +1042,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm1, %xmm1
@@ -1051,7 +1051,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm1, %xmm1
@@ -1060,7 +1060,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX2-NEXT: retq
;
; AVX512DQ-LABEL: constant_shift_v32i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -1075,14 +1075,14 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v32i8:
-; AVX512DQVL: # BB#0:
+; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -1097,14 +1097,14 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v32i8:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BWVL-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
@@ -1131,7 +1131,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
@@ -1154,7 +1154,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $7, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsllq $7, %xmm0, %xmm0
@@ -1162,12 +1162,12 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllq $7, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsllq $7, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsllq $7, %xmm0, %xmm0
@@ -1175,22 +1175,22 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllq $7, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllq $7, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v4i64:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsllq $7, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpsllq $7, %xmm0, %xmm0
@@ -1198,7 +1198,7 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v4i64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllq $7, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = shl <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
@@ -1207,7 +1207,7 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $5, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpslld $5, %xmm0, %xmm0
@@ -1215,12 +1215,12 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $5, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpslld $5, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpslld $5, %xmm0, %xmm0
@@ -1228,22 +1228,22 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpslld $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpslld $5, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpslld $5, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v8i32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpslld $5, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpslld $5, %xmm0, %xmm0
@@ -1251,7 +1251,7 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v8i32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpslld $5, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = shl <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -1260,7 +1260,7 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllw $3, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsllw $3, %xmm0, %xmm0
@@ -1268,12 +1268,12 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpsllw $3, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsllw $3, %xmm0, %xmm0
@@ -1281,22 +1281,22 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v16i16:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vpsllw $3, %xmm0, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; X32-AVX1-NEXT: vpsllw $3, %xmm0, %xmm0
@@ -1304,7 +1304,7 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v16i16:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%shift = shl <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -1313,7 +1313,7 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: splatconstant_shift_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
@@ -1324,13 +1324,13 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_shift_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm1, %xmm1
@@ -1339,25 +1339,25 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_shift_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: splatconstant_shift_v32i8:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsllw $3, %xmm1, %xmm1
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
@@ -1368,7 +1368,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatconstant_shift_v32i8:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
diff --git a/test/CodeGen/X86/vector-shift-shl-512.ll b/test/CodeGen/X86/vector-shift-shl-512.ll
index 66a12024332..807319a4b24 100644
--- a/test/CodeGen/X86/vector-shift-shl-512.ll
+++ b/test/CodeGen/X86/vector-shift-shl-512.ll
@@ -8,7 +8,7 @@
define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: var_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsllvq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = shl <8 x i64> %a, %b
@@ -17,7 +17,7 @@ define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: var_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = shl <16 x i32> %a, %b
@@ -26,7 +26,7 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpsllvd %zmm2, %zmm0, %zmm0
@@ -38,7 +38,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = shl <32 x i16> %a, %b
@@ -47,7 +47,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm4
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512DQ-NEXT: vpand %ymm5, %ymm4, %ymm4
@@ -75,7 +75,7 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
@@ -100,7 +100,7 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: splatvar_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsllq %xmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
@@ -110,7 +110,7 @@ define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: splatvar_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; ALL-NEXT: vpslld %xmm1, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -121,14 +121,14 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512DQ-NEXT: vpsllw %xmm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsllw %xmm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
@@ -139,7 +139,7 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
@@ -165,7 +165,7 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
@@ -192,7 +192,7 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: constant_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsllvq {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
%shift = shl <8 x i64> %a, <i64 1, i64 7, i64 31, i64 62, i64 1, i64 7, i64 31, i64 62>
@@ -201,7 +201,7 @@ define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: constant_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
%shift = shl <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
@@ -210,14 +210,14 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
@@ -226,7 +226,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -251,7 +251,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
@@ -276,7 +276,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v8i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsllq $7, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = shl <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
@@ -285,7 +285,7 @@ define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpslld $5, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = shl <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -294,13 +294,13 @@ define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsllw $3, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_shift_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = shl <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -309,7 +309,7 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -318,7 +318,7 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_shift_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-128-v16.ll b/test/CodeGen/X86/vector-shuffle-128-v16.ll
index 065cfff5fc8..2fcbd89b857 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -8,32 +8,32 @@
define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: pshufb %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pshufb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -42,29 +42,29 @@ define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(
define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -75,7 +75,7 @@ define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01(
define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -84,22 +84,22 @@ define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,1,1,8,8,9,9,8,8,9,9,10,10,11,11]
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,2,4,5,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -110,13 +110,13 @@ define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08(
define <16 x i8> @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; AVX-NEXT: retq
@@ -126,13 +126,13 @@ define <16 x i8> @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03(
define <16 x i8> @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX-NEXT: retq
@@ -142,7 +142,7 @@ define <16 x i8> @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07(
define <16 x i8> @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -152,17 +152,17 @@ define <16 x i8> @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12>
@@ -171,12 +171,12 @@ define <16 x i8> @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12(
define <16 x i8> @shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
@@ -185,19 +185,19 @@ define <16 x i8> @shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07(
define <16 x i8> @shuffle_v16i8_0101010101010101(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_0101010101010101:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v16i8_0101010101010101:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i8_0101010101010101:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -206,12 +206,12 @@ define <16 x i8> @shuffle_v16i8_0101010101010101(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -220,12 +220,12 @@ define <16 x i8> @shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23(
define <16 x i8> @shuffle_v16i8_08_24_09_25_10_26_11_27_12_28_13_29_14_30_15_31(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_08_24_09_25_10_26_11_27_12_28_13_29_14_30_15_31:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_08_24_09_25_10_26_11_27_12_28_13_29_14_30_15_31:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -234,7 +234,7 @@ define <16 x i8> @shuffle_v16i8_08_24_09_25_10_26_11_27_12_28_13_29_14_30_15_31(
define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
@@ -244,7 +244,7 @@ define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -252,7 +252,7 @@ define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -260,14 +260,14 @@ define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2OR512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX2OR512VL-NEXT: retq
@@ -277,7 +277,7 @@ define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(
define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -290,17 +290,17 @@ define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
@@ -309,7 +309,7 @@ define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12(
define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
@@ -321,19 +321,19 @@ define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,4,2,0,14,12,10,8,7,5,3,1,15,13,11,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,4,2,0,14,12,10,8,7,5,3,1,15,13,11,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,4,2,0,14,12,10,8,7,5,3,1,15,13,11,9]
; AVX-NEXT: retq
@@ -343,7 +343,7 @@ define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20(
define <16 x i8> @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -363,21 +363,21 @@ define <16 x i8> @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[15,14,13,12,7,6,5,4,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,11,10,9,8,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[15,14,13,12,7,6,5,4,u,u,u,u,u,u,u,u]
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,11,10,9,8,u,u,u,u,u,u,u,u]
; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,14,13,12,7,6,5,4,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,11,10,9,8,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -388,7 +388,7 @@ define <16 x i8> @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20(
define <16 x i8> @shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andnps %xmm1, %xmm2
@@ -396,14 +396,14 @@ define <16 x i8> @shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
@@ -411,13 +411,13 @@ define <16 x i8> @shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX1OR2-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movw $-21846, %ax # imm = 0xAAAA
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
@@ -428,7 +428,7 @@ define <16 x i8> @shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(
define <16 x i8> @shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andnps %xmm1, %xmm2
@@ -436,14 +436,14 @@ define <16 x i8> @shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[15]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2],zero,xmm0[4,5,6],zero,xmm0[8,9,10],zero,xmm0[12,13,14],zero
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
@@ -451,13 +451,13 @@ define <16 x i8> @shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31(
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; AVX1OR2-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movw $-30584, %ax # imm = 0x8888
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
@@ -468,17 +468,17 @@ define <16 x i8> @shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31(
define <16 x i8> @shuffle_v16i8_00_01_02_zz_04_05_06_zz_08_09_10_zz_12_13_14_zz(<16 x i8> %a) {
; SSE-LABEL: shuffle_v16i8_00_01_02_zz_04_05_06_zz_08_09_10_zz_12_13_14_zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v16i8_00_01_02_zz_04_05_06_zz_08_09_10_zz_12_13_14_zz:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_00_01_02_zz_04_05_06_zz_08_09_10_zz_12_13_14_zz:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 4, i32 5, i32 6, i32 23, i32 8, i32 9, i32 10, i32 27, i32 12, i32 13, i32 14, i32 31>
@@ -487,7 +487,7 @@ define <16 x i8> @shuffle_v16i8_00_01_02_zz_04_05_06_zz_08_09_10_zz_12_13_14_zz(
define <16 x i8> @shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,255,255,255,0,255,255,0,255,255,255,255,0,255,255,0]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andnps %xmm1, %xmm2
@@ -495,14 +495,14 @@ define <16 x i8> @shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[4],zero,zero,xmm1[7],zero,zero,zero,zero,xmm1[12],zero,zero,xmm1[15]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,xmm0[5,6],zero,xmm0[8,9,10,11],zero,xmm0[13,14],zero
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,255,0,255,255,0,255,255,255,255,0,255,255,0]
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
@@ -510,13 +510,13 @@ define <16 x i8> @shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31(
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,0,255,255,0,255,255,255,255,0,255,255,0]
; AVX1OR2-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movw $-28528, %ax # imm = 0x9090
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
@@ -527,7 +527,7 @@ define <16 x i8> @shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31(
define <16 x i8> @shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,255,255,255,0,0,0,0,255,255,0,0,255,0,255,0]
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: andnps %xmm0, %xmm2
@@ -536,14 +536,14 @@ define <16 x i8> @shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[4,5,6,7],zero,zero,xmm0[10,11],zero,xmm0[13],zero,xmm0[15]
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,2,3],zero,zero,zero,zero,xmm1[8,9],zero,zero,xmm1[12],zero,xmm1[14],zero
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,255,0,0,0,0,255,255,0,0,255,0,255,0]
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
@@ -551,13 +551,13 @@ define <16 x i8> @shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15(
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,0,0,0,0,255,255,0,0,255,0,255,0]
; AVX1OR2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movw $-21264, %ax # imm = 0xACF0
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
@@ -568,24 +568,24 @@ define <16 x i8> @shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15(
define <16 x i8> @trunc_v4i32_shuffle(<16 x i8> %a) {
; SSE2-LABEL: trunc_v4i32_shuffle:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc_v4i32_shuffle:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc_v4i32_shuffle:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v4i32_shuffle:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -617,7 +617,7 @@ define <16 x i8> @undef_test1(<16 x i8> %s.0.5, <16 x i8> %s.0.8, <16 x i8> %s.0
; them because the result is 'undef'.
;
; ALL-LABEL: undef_test1:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: retq
entry:
%s.1.8 = shufflevector <16 x i8> %s.0.8, <16 x i8> undef, <16 x i32> <i32 9, i32 9, i32 undef, i32 undef, i32 undef, i32 2, i32 undef, i32 6, i32 undef, i32 6, i32 undef, i32 14, i32 14, i32 undef, i32 undef, i32 0>
@@ -639,24 +639,24 @@ entry:
define <16 x i8> @PR20540(<8 x i8> %a) {
; SSE2-LABEL: PR20540:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR20540:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR20540:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: PR20540:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
@@ -665,13 +665,13 @@ define <16 x i8> @PR20540(<8 x i8> %a) {
define <16 x i8> @shuffle_v16i8_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(i8 %i) {
; SSE-LABEL: shuffle_v16i8_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzbl %dil, %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movzbl %dil, %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: retq
@@ -682,27 +682,27 @@ define <16 x i8> @shuffle_v16i8_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(i8 %i) {
; SSE2-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shll $8, %edi
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pinsrw $2, %edi, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shll $8, %edi
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: pinsrw $2, %edi, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrb $5, %edi, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -713,27 +713,27 @@ define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
define <16 x i8> @shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16(i8 %i) {
; SSE2-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shll $8, %edi
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pinsrw $7, %edi, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shll $8, %edi
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: pinsrw $7, %edi, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrb $15, %edi, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $15, %edi, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -744,27 +744,27 @@ define <16 x i8> @shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16(
define <16 x i8> @shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(i8 %i) {
; SSE2-LABEL: shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzbl %dil, %eax
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pinsrw $1, %eax, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movzbl %dil, %eax
; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: pinsrw $1, %eax, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrb $2, %edi, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $2, %edi, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -775,12 +775,12 @@ define <16 x i8> @shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu(<16 x i8> %a) {
; SSE-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 undef, i32 18, i32 undef>
@@ -789,12 +789,12 @@ define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu(
define <16 x i8> @shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(<16 x i8> %a) {
; SSE-LABEL: shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 28, i32 undef, i32 30, i32 31, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 09, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -803,24 +803,24 @@ define <16 x i8> @shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
define <16 x i8> @shuffle_v16i8_31_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_31_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_31_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_31_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_31_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -829,7 +829,7 @@ define <16 x i8> @shuffle_v16i8_31_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14(
define <16 x i8> @shuffle_v16i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
@@ -837,17 +837,17 @@ define <16 x i8> @shuffle_v16i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -856,24 +856,24 @@ define <16 x i8> @shuffle_v16i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14(
define <16 x i8> @shuffle_v16i8_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_00(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_00:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_00:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_00:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_00:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0>
@@ -882,26 +882,26 @@ define <16 x i8> @shuffle_v16i8_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_00(
define <16 x i8> @shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
@@ -910,7 +910,7 @@ define <16 x i8> @shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16(
define <16 x i8> @shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
@@ -918,17 +918,17 @@ define <16 x i8> @shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0>
@@ -937,26 +937,26 @@ define <16 x i8> @shuffle_v16i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00(
define <16 x i8> @shuffle_v16i8_15_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_15_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_15_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_15_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_15_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
@@ -966,7 +966,7 @@ define <16 x i8> @shuffle_v16i8_15_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(
; PR31151
define <16 x i8> @shuffle_v16i8_00_16_01_17_04_20_05_21_02_18_03_19_06_22_07_23(<16 x i8> %val1, <16 x i8> %val2) {
; SSE2-LABEL: shuffle_v16i8_00_16_01_17_04_20_05_21_02_18_03_19_06_22_07_23:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,1,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
@@ -979,19 +979,19 @@ define <16 x i8> @shuffle_v16i8_00_16_01_17_04_20_05_21_02_18_03_19_06_22_07_23(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_16_01_17_04_20_05_21_02_18_03_19_06_22_07_23:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_16_01_17_04_20_05_21_02_18_03_19_06_22_07_23:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_16_01_17_04_20_05_21_02_18_03_19_06_22_07_23:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-NEXT: retq
@@ -1001,24 +1001,24 @@ define <16 x i8> @shuffle_v16i8_00_16_01_17_04_20_05_21_02_18_03_19_06_22_07_23(
define <16 x i8> @shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %a) {
; SSE2-LABEL: shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1027,7 +1027,7 @@ define <16 x i8> @shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu(
define <16 x i8> @shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz(<16 x i8> %a) {
; SSE2-LABEL: shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1035,17 +1035,17 @@ define <16 x i8> @shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 1, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -1054,24 +1054,24 @@ define <16 x i8> @shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz(
define <16 x i8> @shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu(<16 x i8> %a) {
; SSE2-LABEL: shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef, i32 2, i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 undef>
@@ -1080,26 +1080,26 @@ define <16 x i8> @shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu(
define <16 x i8> @shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz(<16 x i8> %a) {
; SSE2-LABEL: shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
@@ -1108,22 +1108,22 @@ define <16 x i8> @shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz(
define <16 x i8> @shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu(<16 x i8> %a) {
; SSE2-LABEL: shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef, i32 3, i32 undef, i32 4, i32 undef, i32 5, i32 undef, i32 6, i32 undef, i32 7, i32 undef>
@@ -1132,24 +1132,24 @@ define <16 x i8> @shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu(
define <16 x i8> @shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz(<16 x i8> %a) {
; SSE2-LABEL: shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 1, i32 19, i32 2, i32 21, i32 3, i32 23, i32 4, i32 25, i32 5, i32 27, i32 6, i32 29, i32 7, i32 31>
@@ -1158,7 +1158,7 @@ define <16 x i8> @shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz(
define <16 x i8> @shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
@@ -1190,21 +1190,21 @@ define <16 x i8> @shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[2],zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,10,2,7],zero,xmm0[14,7,2],zero,xmm0[3,1,14],zero,xmm0[9,11,0]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[2],zero,zero,zero
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,10,2,7],zero,xmm0[14,7,2],zero,xmm0[3,1,14],zero,xmm0[9,11,0]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[2],zero,zero,zero
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,10,2,7],zero,xmm0[14,7,2],zero,xmm0[3,1,14],zero,xmm0[9,11,0]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1217,14 +1217,14 @@ entry:
define <16 x i8> @shuffe_v16i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffe_v16i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffe_v16i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
@@ -1251,21 +1251,21 @@ entry:
define void @constant_gets_selected(<4 x i32>* %ptr1, <4 x i32>* %ptr2) {
; SSE-LABEL: constant_gets_selected:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, (%rdi)
; SSE-NEXT: movaps %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: constant_gets_selected:
-; AVX1OR2: # BB#0: # %entry
+; AVX1OR2: # %bb.0: # %entry
; AVX1OR2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX1OR2-NEXT: vmovaps %xmm0, (%rdi)
; AVX1OR2-NEXT: vmovaps %xmm0, (%rsi)
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: constant_gets_selected:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512VL-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
@@ -1285,12 +1285,12 @@ entry:
define <16 x i8> @shuffle_v16i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllw $8, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllw $8, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 16, i32 0, i32 16, i32 2, i32 16, i32 4, i32 16, i32 6, i32 16, i32 8, i32 16, i32 10, i32 16, i32 12, i32 16, i32 14>
@@ -1299,12 +1299,12 @@ define <16 x i8> @shuffle_v16i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14(
define <16 x i8> @shuffle_v16i8_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $24, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $24, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 16, i32 16, i32 16, i32 0, i32 16, i32 16, i32 16, i32 4, i32 16, i32 16, i32 16, i32 8, i32 16, i32 16, i32 16, i32 12>
@@ -1313,12 +1313,12 @@ define <16 x i8> @shuffle_v16i8_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12(
define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_00_zz_zz_zz_zz_zz_zz_zz_08(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_00_zz_zz_zz_zz_zz_zz_zz_08:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllq $56, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_00_zz_zz_zz_zz_zz_zz_zz_08:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq $56, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 8>
@@ -1327,12 +1327,12 @@ define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_00_zz_zz_zz_zz_zz_zz_zz_08(
define <16 x i8> @shuffle_v16i8_zz_00_uu_02_03_uu_05_06_zz_08_09_uu_11_12_13_14(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_zz_00_uu_02_03_uu_05_06_zz_08_09_uu_11_12_13_14:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllq $8, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_00_uu_02_03_uu_05_06_zz_08_09_uu_11_12_13_14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq $8, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 16, i32 0, i32 undef, i32 2, i32 3, i32 undef, i32 5, i32 6, i32 16, i32 8, i32 9, i32 undef, i32 11, i32 12, i32 13, i32 14>
@@ -1341,12 +1341,12 @@ define <16 x i8> @shuffle_v16i8_zz_00_uu_02_03_uu_05_06_zz_08_09_uu_11_12_13_14(
define <16 x i8> @shuffle_v16i8_01_uu_uu_uu_uu_zz_uu_zz_uu_zz_11_zz_13_zz_15_zz(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_01_uu_uu_uu_uu_zz_uu_zz_uu_zz_11_zz_13_zz_15_zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_01_uu_uu_uu_uu_zz_uu_zz_uu_zz_11_zz_13_zz_15_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 undef, i32 16, i32 undef, i32 16, i32 11, i32 16, i32 13, i32 16, i32 15, i32 16>
@@ -1355,12 +1355,12 @@ define <16 x i8> @shuffle_v16i8_01_uu_uu_uu_uu_zz_uu_zz_uu_zz_11_zz_13_zz_15_zz(
define <16 x i8> @shuffle_v16i8_02_03_zz_zz_06_07_uu_uu_uu_uu_uu_uu_14_15_zz_zz(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_02_03_zz_zz_06_07_uu_uu_uu_uu_uu_uu_14_15_zz_zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_02_03_zz_zz_06_07_uu_uu_uu_uu_uu_uu_14_15_zz_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 2, i32 3, i32 16, i32 16, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 14, i32 15, i32 16, i32 16>
@@ -1369,12 +1369,12 @@ define <16 x i8> @shuffle_v16i8_02_03_zz_zz_06_07_uu_uu_uu_uu_uu_uu_14_15_zz_zz(
define <16 x i8> @shuffle_v16i8_07_zz_zz_zz_zz_zz_uu_uu_15_uu_uu_uu_uu_uu_zz_zz(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_07_zz_zz_zz_zz_zz_uu_uu_15_uu_uu_uu_uu_uu_zz_zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $56, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_07_zz_zz_zz_zz_zz_uu_uu_15_uu_uu_uu_uu_uu_zz_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $56, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 7, i32 16, i32 16, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 16>
@@ -1383,7 +1383,7 @@ define <16 x i8> @shuffle_v16i8_07_zz_zz_zz_zz_zz_uu_uu_15_uu_uu_uu_uu_uu_zz_zz(
define <16 x i8> @PR12412(<16 x i8> %inval1, <16 x i8> %inval2) {
; SSE2-LABEL: PR12412:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -1391,7 +1391,7 @@ define <16 x i8> @PR12412(<16 x i8> %inval1, <16 x i8> %inval2) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR12412:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
@@ -1399,7 +1399,7 @@ define <16 x i8> @PR12412(<16 x i8> %inval1, <16 x i8> %inval2) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR12412:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
@@ -1407,7 +1407,7 @@ define <16 x i8> @PR12412(<16 x i8> %inval1, <16 x i8> %inval2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: PR12412:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
@@ -1420,12 +1420,12 @@ entry:
define <16 x i8> @shuffle_v16i8_uu_02_03_zz_uu_06_07_zz_uu_10_11_zz_uu_14_15_zz(<16 x i8> %a) {
; SSE-LABEL: shuffle_v16i8_uu_02_03_zz_uu_06_07_zz_uu_10_11_zz_uu_14_15_zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $8, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_uu_02_03_zz_uu_06_07_zz_uu_10_11_zz_uu_14_15_zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $8, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 undef, i32 2, i32 3, i32 16, i32 undef, i32 6, i32 7, i32 16, i32 undef, i32 10, i32 11, i32 16, i32 undef, i32 14, i32 15, i32 16>
@@ -1434,12 +1434,12 @@ define <16 x i8> @shuffle_v16i8_uu_02_03_zz_uu_06_07_zz_uu_10_11_zz_uu_14_15_zz(
define <16 x i8> @shuffle_v16i8_bitcast_unpack(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: shuffle_v16i8_bitcast_unpack:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_bitcast_unpack:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT: retq
%shuffle8 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 7, i32 23, i32 6, i32 22, i32 5, i32 21, i32 4, i32 20, i32 3, i32 19, i32 2, i32 18, i32 1, i32 17, i32 0, i32 16>
@@ -1453,7 +1453,7 @@ define <16 x i8> @shuffle_v16i8_bitcast_unpack(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @insert_dup_mem_v16i8_i32(i32* %ptr) {
; SSE2-LABEL: insert_dup_mem_v16i8_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -1461,28 +1461,28 @@ define <16 x i8> @insert_dup_mem_v16i8_i32(i32* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_mem_v16i8_i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: pshufb %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_mem_v16i8_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pshufb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_mem_v16i8_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_mem_v16i8_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb (%rdi), %xmm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -1494,7 +1494,7 @@ define <16 x i8> @insert_dup_mem_v16i8_i32(i32* %ptr) {
define <16 x i8> @insert_dup_mem_v16i8_sext_i8(i8* %ptr) {
; SSE2-LABEL: insert_dup_mem_v16i8_sext_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsbl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1503,7 +1503,7 @@ define <16 x i8> @insert_dup_mem_v16i8_sext_i8(i8* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_mem_v16i8_sext_i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsbl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pxor %xmm1, %xmm1
@@ -1511,7 +1511,7 @@ define <16 x i8> @insert_dup_mem_v16i8_sext_i8(i8* %ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_mem_v16i8_sext_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movsbl (%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
@@ -1519,7 +1519,7 @@ define <16 x i8> @insert_dup_mem_v16i8_sext_i8(i8* %ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_mem_v16i8_sext_i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movsbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1527,7 +1527,7 @@ define <16 x i8> @insert_dup_mem_v16i8_sext_i8(i8* %ptr) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_mem_v16i8_sext_i8:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb (%rdi), %xmm0
; AVX2OR512VL-NEXT: retq
%tmp = load i8, i8* %ptr, align 1
@@ -1540,7 +1540,7 @@ define <16 x i8> @insert_dup_mem_v16i8_sext_i8(i8* %ptr) {
define <16 x i8> @insert_dup_elt1_mem_v16i8_i32(i32* %ptr) {
; SSE2-LABEL: insert_dup_elt1_mem_v16i8_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
@@ -1548,25 +1548,25 @@ define <16 x i8> @insert_dup_elt1_mem_v16i8_i32(i32* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_elt1_mem_v16i8_i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_elt1_mem_v16i8_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_elt1_mem_v16i8_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_elt1_mem_v16i8_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb 1(%rdi), %xmm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -1578,7 +1578,7 @@ define <16 x i8> @insert_dup_elt1_mem_v16i8_i32(i32* %ptr) {
define <16 x i8> @insert_dup_elt2_mem_v16i8_i32(i32* %ptr) {
; SSE2-LABEL: insert_dup_elt2_mem_v16i8_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
@@ -1586,25 +1586,25 @@ define <16 x i8> @insert_dup_elt2_mem_v16i8_i32(i32* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_elt2_mem_v16i8_i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_elt2_mem_v16i8_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_elt2_mem_v16i8_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_elt2_mem_v16i8_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb 2(%rdi), %xmm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -1616,7 +1616,7 @@ define <16 x i8> @insert_dup_elt2_mem_v16i8_i32(i32* %ptr) {
define <16 x i8> @insert_dup_elt1_mem_v16i8_sext_i8(i8* %ptr) {
; SSE2-LABEL: insert_dup_elt1_mem_v16i8_sext_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsbl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1625,28 +1625,28 @@ define <16 x i8> @insert_dup_elt1_mem_v16i8_sext_i8(i8* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_elt1_mem_v16i8_sext_i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsbl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_elt1_mem_v16i8_sext_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movsbl (%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_elt1_mem_v16i8_sext_i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movsbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_elt1_mem_v16i8_sext_i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movsbl (%rdi), %eax
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
@@ -1654,7 +1654,7 @@ define <16 x i8> @insert_dup_elt1_mem_v16i8_sext_i8(i8* %ptr) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_dup_elt1_mem_v16i8_sext_i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movsbl (%rdi), %eax
; AVX512VL-NEXT: shrl $8, %eax
; AVX512VL-NEXT: vpbroadcastb %eax, %xmm0
@@ -1669,7 +1669,7 @@ define <16 x i8> @insert_dup_elt1_mem_v16i8_sext_i8(i8* %ptr) {
define <16 x i8> @insert_dup_elt2_mem_v16i8_sext_i8(i8* %ptr) {
; SSE2-LABEL: insert_dup_elt2_mem_v16i8_sext_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsbl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1678,28 +1678,28 @@ define <16 x i8> @insert_dup_elt2_mem_v16i8_sext_i8(i8* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_elt2_mem_v16i8_sext_i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsbl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_elt2_mem_v16i8_sext_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movsbl (%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_elt2_mem_v16i8_sext_i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movsbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_elt2_mem_v16i8_sext_i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movsbl (%rdi), %eax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
@@ -1707,7 +1707,7 @@ define <16 x i8> @insert_dup_elt2_mem_v16i8_sext_i8(i8* %ptr) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_dup_elt2_mem_v16i8_sext_i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movsbl (%rdi), %eax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: vpbroadcastb %eax, %xmm0
@@ -1722,7 +1722,7 @@ define <16 x i8> @insert_dup_elt2_mem_v16i8_sext_i8(i8* %ptr) {
define <16 x i8> @PR31364(i8* nocapture readonly %a, i8* nocapture readonly %b) {
; SSE2-LABEL: PR31364:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzbl (%rdi), %eax
; SSE2-NEXT: movzbl (%rsi), %ecx
; SSE2-NEXT: shll $8, %ecx
@@ -1741,7 +1741,7 @@ define <16 x i8> @PR31364(i8* nocapture readonly %a, i8* nocapture readonly %b)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR31364:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movzbl (%rdi), %eax
; SSSE3-NEXT: movzbl (%rsi), %ecx
; SSSE3-NEXT: shll $8, %ecx
@@ -1752,7 +1752,7 @@ define <16 x i8> @PR31364(i8* nocapture readonly %a, i8* nocapture readonly %b)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR31364:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrb $0, (%rdi), %xmm0
; SSE41-NEXT: pinsrb $1, (%rsi), %xmm0
@@ -1760,7 +1760,7 @@ define <16 x i8> @PR31364(i8* nocapture readonly %a, i8* nocapture readonly %b)
; SSE41-NEXT: retq
;
; AVX-LABEL: PR31364:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm0
@@ -1776,7 +1776,7 @@ define <16 x i8> @PR31364(i8* nocapture readonly %a, i8* nocapture readonly %b)
define <16 x i8> @PR31301(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; SSE2-LABEL: PR31301:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movzbl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1791,7 +1791,7 @@ define <16 x i8> @PR31301(i8* nocapture readonly %x, i8* nocapture readonly %y)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR31301:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movzbl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pxor %xmm1, %xmm1
@@ -1803,7 +1803,7 @@ define <16 x i8> @PR31301(i8* nocapture readonly %x, i8* nocapture readonly %y)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR31301:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movzbl (%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
@@ -1815,7 +1815,7 @@ define <16 x i8> @PR31301(i8* nocapture readonly %x, i8* nocapture readonly %y)
; SSE41-NEXT: retq
;
; AVX1-LABEL: PR31301:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: movzbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1827,7 +1827,7 @@ define <16 x i8> @PR31301(i8* nocapture readonly %x, i8* nocapture readonly %y)
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: PR31301:
-; AVX2OR512VL: # BB#0: # %entry
+; AVX2OR512VL: # %bb.0: # %entry
; AVX2OR512VL-NEXT: vpbroadcastb (%rdi), %xmm0
; AVX2OR512VL-NEXT: vpbroadcastb (%rsi), %xmm1
; AVX2OR512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll
index 991b3633cae..a31ba2afcf7 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -9,22 +9,22 @@
define <2 x i64> @shuffle_v2i64_00(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_00:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 0>
@@ -32,12 +32,12 @@ define <2 x i64> @shuffle_v2i64_00(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_10(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_10:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_10:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 0>
@@ -45,12 +45,12 @@ define <2 x i64> @shuffle_v2i64_10(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_11(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_11:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_11:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 1>
@@ -58,22 +58,22 @@ define <2 x i64> @shuffle_v2i64_11(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_22(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_22:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_22:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_22:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_22:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm0
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 2>
@@ -81,12 +81,12 @@ define <2 x i64> @shuffle_v2i64_22(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_32(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[2,3,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 2>
@@ -94,12 +94,12 @@ define <2 x i64> @shuffle_v2i64_32(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_33(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_33:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_33:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[2,3,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 3>
@@ -108,27 +108,27 @@ define <2 x i64> @shuffle_v2i64_33(<2 x i64> %a, <2 x i64> %b) {
define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: shuffle_v2f64_00:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_00:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_00:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_00:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_00:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
@@ -136,12 +136,12 @@ define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
}
define <2 x double> @shuffle_v2f64_10(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: shuffle_v2f64_10:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_10:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 0>
@@ -150,12 +150,12 @@ define <2 x double> @shuffle_v2f64_10(<2 x double> %a, <2 x double> %b) {
}
define <2 x double> @shuffle_v2f64_11(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: shuffle_v2f64_11:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_11:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 1>
@@ -163,28 +163,28 @@ define <2 x double> @shuffle_v2f64_11(<2 x double> %a, <2 x double> %b) {
}
define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: shuffle_v2f64_22:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0,0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_22:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_22:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_22:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_22:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 2>
@@ -192,13 +192,13 @@ define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
}
define <2 x double> @shuffle_v2f64_32(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: shuffle_v2f64_32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 2>
@@ -207,13 +207,13 @@ define <2 x double> @shuffle_v2f64_32(<2 x double> %a, <2 x double> %b) {
}
define <2 x double> @shuffle_v2f64_33(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: shuffle_v2f64_33:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_33:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,1]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 3>
@@ -221,40 +221,40 @@ define <2 x double> @shuffle_v2f64_33(<2 x double> %a, <2 x double> %b) {
}
define <2 x double> @shuffle_v2f64_03(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: shuffle_v2f64_03:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_03:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_03:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_03:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_03:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_03:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_03:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 3>
@@ -262,37 +262,37 @@ define <2 x double> @shuffle_v2f64_03(<2 x double> %a, <2 x double> %b) {
}
define <2 x double> @shuffle_v2f64_21(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: shuffle_v2f64_21:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_21:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_21:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_21:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_21:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_21:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_21:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 1>
@@ -302,12 +302,12 @@ define <2 x double> @shuffle_v2f64_21(<2 x double> %a, <2 x double> %b) {
define <2 x i64> @shuffle_v2i64_02(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_02:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_02:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
@@ -315,13 +315,13 @@ define <2 x i64> @shuffle_v2i64_02(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_02_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_02_copy:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_02_copy:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm2[0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
@@ -329,40 +329,40 @@ define <2 x i64> @shuffle_v2i64_02_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
}
define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_03:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_03:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_03:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_03:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_03:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_03:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_03:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
@@ -370,41 +370,41 @@ define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_03_copy:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSE2-NEXT: movapd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_03_copy:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSE3-NEXT: movapd %xmm2, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_03_copy:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSSE3-NEXT: movapd %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_03_copy:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_03_copy:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_03_copy:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm2[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_03_copy:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm2[2,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
@@ -412,29 +412,29 @@ define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
}
define <2 x i64> @shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_12:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_12:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_12:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_12:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
@@ -442,31 +442,31 @@ define <2 x i64> @shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_12_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_12_copy:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm2[0]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_12_copy:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm2[0]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_12_copy:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm2 = xmm1[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_12_copy:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm2 = xmm1[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_12_copy:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
@@ -474,12 +474,12 @@ define <2 x i64> @shuffle_v2i64_12_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
}
define <2 x i64> @shuffle_v2i64_13(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_13:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_13:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
@@ -487,13 +487,13 @@ define <2 x i64> @shuffle_v2i64_13(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_13_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_13_copy:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_13_copy:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm2[1]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
@@ -501,13 +501,13 @@ define <2 x i64> @shuffle_v2i64_13_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
}
define <2 x i64> @shuffle_v2i64_20(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_20:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_20:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
@@ -515,13 +515,13 @@ define <2 x i64> @shuffle_v2i64_20(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_20_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_20_copy:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_20_copy:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm2[0],xmm1[0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
@@ -529,37 +529,37 @@ define <2 x i64> @shuffle_v2i64_20_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
}
define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_21:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_21:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_21:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_21:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_21:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_21:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_21:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
@@ -567,41 +567,41 @@ define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_21_copy:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_21_copy:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_21_copy:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_21_copy:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_21_copy:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_21_copy:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm1[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_21_copy:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm1[2,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
@@ -609,29 +609,29 @@ define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
}
define <2 x i64> @shuffle_v2i64_30(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_30:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_30:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_30:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_30:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_30:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 0>
@@ -639,31 +639,31 @@ define <2 x i64> @shuffle_v2i64_30(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_30_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_30_copy:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE2-NEXT: movapd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_30_copy:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE3-NEXT: movapd %xmm2, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_30_copy:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm2[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_30_copy:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm2[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_30_copy:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm2[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 0>
@@ -671,13 +671,13 @@ define <2 x i64> @shuffle_v2i64_30_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
}
define <2 x i64> @shuffle_v2i64_31(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_31:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_31:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
@@ -685,13 +685,13 @@ define <2 x i64> @shuffle_v2i64_31(<2 x i64> %a, <2 x i64> %b) {
}
define <2 x i64> @shuffle_v2i64_31_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: shuffle_v2i64_31_copy:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_31_copy:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm1[1]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
@@ -700,12 +700,12 @@ define <2 x i64> @shuffle_v2i64_31_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
define <2 x i64> @shuffle_v2i64_0z(<2 x i64> %a) {
; SSE-LABEL: shuffle_v2i64_0z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_0z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 3>
@@ -714,12 +714,12 @@ define <2 x i64> @shuffle_v2i64_0z(<2 x i64> %a) {
define <2 x i64> @shuffle_v2i64_1z(<2 x i64> %a) {
; SSE-LABEL: shuffle_v2i64_1z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_1z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 1, i32 3>
@@ -728,12 +728,12 @@ define <2 x i64> @shuffle_v2i64_1z(<2 x i64> %a) {
define <2 x i64> @shuffle_v2i64_z0(<2 x i64> %a) {
; SSE-LABEL: shuffle_v2i64_z0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_z0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 2, i32 0>
@@ -742,43 +742,43 @@ define <2 x i64> @shuffle_v2i64_z0(<2 x i64> %a) {
define <2 x i64> @shuffle_v2i64_z1(<2 x i64> %a) {
; SSE2-LABEL: shuffle_v2i64_z1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_z1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorpd %xmm1, %xmm1
; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_z1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorpd %xmm1, %xmm1
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_z1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_z1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_z1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_z1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX512VL-NEXT: retq
@@ -788,12 +788,12 @@ define <2 x i64> @shuffle_v2i64_z1(<2 x i64> %a) {
define <2 x double> @shuffle_v2f64_0z(<2 x double> %a) {
; SSE-LABEL: shuffle_v2f64_0z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_0z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 0, i32 3>
@@ -802,25 +802,25 @@ define <2 x double> @shuffle_v2f64_0z(<2 x double> %a) {
define <2 x double> @shuffle_v2f64_1z(<2 x double> %a) {
; SSE-LABEL: shuffle_v2f64_1z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_1z:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_1z:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_1z:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX512VL-NEXT: retq
@@ -830,26 +830,26 @@ define <2 x double> @shuffle_v2f64_1z(<2 x double> %a) {
define <2 x double> @shuffle_v2f64_z0(<2 x double> %a) {
; SSE-LABEL: shuffle_v2f64_z0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_z0:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_z0:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_z0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512VL-NEXT: retq
@@ -859,43 +859,43 @@ define <2 x double> @shuffle_v2f64_z0(<2 x double> %a) {
define <2 x double> @shuffle_v2f64_z1(<2 x double> %a) {
; SSE2-LABEL: shuffle_v2f64_z1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_z1:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorpd %xmm1, %xmm1
; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_z1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorpd %xmm1, %xmm1
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_z1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorpd %xmm1, %xmm1
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_z1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_z1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_z1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512VL-NEXT: retq
@@ -905,25 +905,25 @@ define <2 x double> @shuffle_v2f64_z1(<2 x double> %a) {
define <2 x double> @shuffle_v2f64_bitcast_1z(<2 x double> %a) {
; SSE-LABEL: shuffle_v2f64_bitcast_1z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_bitcast_1z:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_bitcast_1z:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_bitcast_1z:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX512VL-NEXT: retq
@@ -936,40 +936,40 @@ define <2 x double> @shuffle_v2f64_bitcast_1z(<2 x double> %a) {
define <2 x i64> @shuffle_v2i64_bitcast_z123(<2 x i64> %x) {
; SSE2-LABEL: shuffle_v2i64_bitcast_z123:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_bitcast_z123:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_bitcast_z123:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_bitcast_z123:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_bitcast_z123:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_bitcast_z123:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_bitcast_z123:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512VL-NEXT: retq
@@ -982,12 +982,12 @@ define <2 x i64> @shuffle_v2i64_bitcast_z123(<2 x i64> %x) {
define <2 x i64> @insert_reg_and_zero_v2i64(i64 %a) {
; SSE-LABEL: insert_reg_and_zero_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_and_zero_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq %rdi, %xmm0
; AVX-NEXT: retq
%v = insertelement <2 x i64> undef, i64 %a, i32 0
@@ -997,12 +997,12 @@ define <2 x i64> @insert_reg_and_zero_v2i64(i64 %a) {
define <2 x i64> @insert_mem_and_zero_v2i64(i64* %ptr) {
; SSE-LABEL: insert_mem_and_zero_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_and_zero_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
%a = load i64, i64* %ptr
@@ -1013,12 +1013,12 @@ define <2 x i64> @insert_mem_and_zero_v2i64(i64* %ptr) {
define <2 x double> @insert_reg_and_zero_v2f64(double %a) {
; SSE-LABEL: insert_reg_and_zero_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_and_zero_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%v = insertelement <2 x double> undef, double %a, i32 0
@@ -1028,12 +1028,12 @@ define <2 x double> @insert_reg_and_zero_v2f64(double %a) {
define <2 x double> @insert_mem_and_zero_v2f64(double* %ptr) {
; SSE-LABEL: insert_mem_and_zero_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_and_zero_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
%a = load double, double* %ptr
@@ -1044,43 +1044,43 @@ define <2 x double> @insert_mem_and_zero_v2f64(double* %ptr) {
define <2 x i64> @insert_reg_lo_v2i64(i64 %a, <2 x i64> %b) {
; SSE2-LABEL: insert_reg_lo_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_lo_v2i64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movq %rdi, %xmm1
; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_lo_v2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %rdi, %xmm1
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_reg_lo_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movq %rdi, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_reg_lo_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_reg_lo_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_lo_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovq %rdi, %xmm1
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX512VL-NEXT: retq
@@ -1091,40 +1091,40 @@ define <2 x i64> @insert_reg_lo_v2i64(i64 %a, <2 x i64> %b) {
define <2 x i64> @insert_mem_lo_v2i64(i64* %ptr, <2 x i64> %b) {
; SSE2-LABEL: insert_mem_lo_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_mem_lo_v2i64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_mem_lo_v2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_mem_lo_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_mem_lo_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_mem_lo_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_mem_lo_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX512VL-NEXT: retq
@@ -1136,13 +1136,13 @@ define <2 x i64> @insert_mem_lo_v2i64(i64* %ptr, <2 x i64> %b) {
define <2 x i64> @insert_reg_hi_v2i64(i64 %a, <2 x i64> %b) {
; SSE-LABEL: insert_reg_hi_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_hi_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq %rdi, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
@@ -1153,13 +1153,13 @@ define <2 x i64> @insert_reg_hi_v2i64(i64 %a, <2 x i64> %b) {
define <2 x i64> @insert_mem_hi_v2i64(i64* %ptr, <2 x i64> %b) {
; SSE-LABEL: insert_mem_hi_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_hi_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
@@ -1171,40 +1171,40 @@ define <2 x i64> @insert_mem_hi_v2i64(i64* %ptr, <2 x i64> %b) {
define <2 x double> @insert_reg_lo_v2f64(double %a, <2 x double> %b) {
; SSE2-LABEL: insert_reg_lo_v2f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_lo_v2f64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_lo_v2f64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_reg_lo_v2f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_reg_lo_v2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_reg_lo_v2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_lo_v2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
%v = insertelement <2 x double> undef, double %a, i32 0
@@ -1214,12 +1214,12 @@ define <2 x double> @insert_reg_lo_v2f64(double %a, <2 x double> %b) {
define <2 x double> @insert_mem_lo_v2f64(double* %ptr, <2 x double> %b) {
; SSE-LABEL: insert_mem_lo_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_lo_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; AVX-NEXT: retq
%a = load double, double* %ptr
@@ -1230,13 +1230,13 @@ define <2 x double> @insert_mem_lo_v2f64(double* %ptr, <2 x double> %b) {
define <2 x double> @insert_reg_hi_v2f64(double %a, <2 x double> %b) {
; SSE-LABEL: insert_reg_hi_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_hi_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
%v = insertelement <2 x double> undef, double %a, i32 0
@@ -1246,12 +1246,12 @@ define <2 x double> @insert_reg_hi_v2f64(double %a, <2 x double> %b) {
define <2 x double> @insert_mem_hi_v2f64(double* %ptr, <2 x double> %b) {
; SSE-LABEL: insert_mem_hi_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_hi_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: retq
%a = load double, double* %ptr
@@ -1262,27 +1262,27 @@ define <2 x double> @insert_mem_hi_v2f64(double* %ptr, <2 x double> %b) {
define <2 x double> @insert_dup_reg_v2f64(double %a) {
; SSE2-LABEL: insert_dup_reg_v2f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_dup_reg_v2f64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_dup_reg_v2f64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_reg_v2f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_dup_reg_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%v = insertelement <2 x double> undef, double %a, i32 0
@@ -1292,28 +1292,28 @@ define <2 x double> @insert_dup_reg_v2f64(double %a) {
define <2 x double> @insert_dup_mem_v2f64(double* %ptr) {
; SSE2-LABEL: insert_dup_mem_v2f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_dup_mem_v2f64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_dup_mem_v2f64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_mem_v2f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_dup_mem_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%a = load double, double* %ptr
@@ -1324,28 +1324,28 @@ define <2 x double> @insert_dup_mem_v2f64(double* %ptr) {
define <2 x double> @insert_dup_mem128_v2f64(<2 x double>* %ptr) nounwind {
; SSE2-LABEL: insert_dup_mem128_v2f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_dup_mem128_v2f64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_dup_mem128_v2f64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_mem128_v2f64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_dup_mem128_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%v = load <2 x double>, <2 x double>* %ptr
@@ -1356,24 +1356,24 @@ define <2 x double> @insert_dup_mem128_v2f64(<2 x double>* %ptr) nounwind {
define <2 x i64> @insert_dup_mem_v2i64(i64* %ptr) {
; SSE-LABEL: insert_dup_mem_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: insert_dup_mem_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_mem_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_dup_mem_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastq (%rdi), %xmm0
; AVX512VL-NEXT: retq
%tmp = load i64, i64* %ptr, align 1
@@ -1384,13 +1384,13 @@ define <2 x i64> @insert_dup_mem_v2i64(i64* %ptr) {
define <2 x double> @shuffle_mem_v2f64_10(<2 x double>* %ptr) {
; SSE-LABEL: shuffle_mem_v2f64_10:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm0
; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_mem_v2f64_10:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = mem[1,0]
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-128-v4.ll b/test/CodeGen/X86/vector-shuffle-128-v4.ll
index c30c601c777..ba478e9e28d 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -9,12 +9,12 @@
define <4 x i32> @shuffle_v4i32_0001(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_0001:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0001:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
@@ -22,12 +22,12 @@ define <4 x i32> @shuffle_v4i32_0001(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_0020(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_0020:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0020:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
@@ -35,12 +35,12 @@ define <4 x i32> @shuffle_v4i32_0020(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_0112(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_0112:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0112:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,2]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 1, i32 2>
@@ -48,12 +48,12 @@ define <4 x i32> @shuffle_v4i32_0112(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_0300(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_0300:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0300:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,3,0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
@@ -61,12 +61,12 @@ define <4 x i32> @shuffle_v4i32_0300(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_1000(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_1000:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_1000:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,0,0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
@@ -74,12 +74,12 @@ define <4 x i32> @shuffle_v4i32_1000(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_2200(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_2200:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_2200:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
@@ -87,12 +87,12 @@ define <4 x i32> @shuffle_v4i32_2200(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_3330(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_3330:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_3330:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
@@ -100,12 +100,12 @@ define <4 x i32> @shuffle_v4i32_3330(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_3210(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_3210:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_3210:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -114,12 +114,12 @@ define <4 x i32> @shuffle_v4i32_3210(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_2121(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_2121:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_2121:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,2,1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 1, i32 2, i32 1>
@@ -128,12 +128,12 @@ define <4 x i32> @shuffle_v4i32_2121(<4 x i32> %a, <4 x i32> %b) {
define <4 x float> @shuffle_v4f32_0001(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_0001:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0001:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
@@ -141,12 +141,12 @@ define <4 x float> @shuffle_v4f32_0001(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_0020(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_0020:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,2,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0020:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
@@ -154,12 +154,12 @@ define <4 x float> @shuffle_v4f32_0020(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_0300(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_0300:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0300:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,3,0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
@@ -167,12 +167,12 @@ define <4 x float> @shuffle_v4f32_0300(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_1000(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_1000:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_1000:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,0,0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
@@ -180,12 +180,12 @@ define <4 x float> @shuffle_v4f32_1000(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_2200(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_2200:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,2,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_2200:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
@@ -193,12 +193,12 @@ define <4 x float> @shuffle_v4f32_2200(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_3330(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_3330:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_3330:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
@@ -206,12 +206,12 @@ define <4 x float> @shuffle_v4f32_3330(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_3210(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_3210:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_3210:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -219,12 +219,12 @@ define <4 x float> @shuffle_v4f32_3210(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_0011(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_0011:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0011:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
@@ -232,12 +232,12 @@ define <4 x float> @shuffle_v4f32_0011(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_2233(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_2233:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_2233:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
@@ -245,27 +245,27 @@ define <4 x float> @shuffle_v4f32_2233(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_0022(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: shuffle_v4f32_0022:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_0022:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_0022:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_0022:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0022:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -273,27 +273,27 @@ define <4 x float> @shuffle_v4f32_0022(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @shuffle_v4f32_1133(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: shuffle_v4f32_1133:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_1133:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_1133:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_1133:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_1133:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -302,12 +302,12 @@ define <4 x float> @shuffle_v4f32_1133(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0145(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_0145:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0145:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -316,12 +316,12 @@ define <4 x float> @shuffle_v4f32_0145(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_6723(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_6723:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_6723:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
@@ -330,37 +330,37 @@ define <4 x float> @shuffle_v4f32_6723(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @shuffle_v4i32_0124(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_0124:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_0124:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_0124:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_0124:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_0124:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_0124:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vbroadcastss %xmm1, %xmm1
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2OR512VL-NEXT: retq
@@ -369,39 +369,39 @@ define <4 x i32> @shuffle_v4i32_0124(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_0142(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_0142:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_0142:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_0142:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_0142:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_0142:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_0142:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
@@ -411,42 +411,42 @@ define <4 x i32> @shuffle_v4i32_0142(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_0412(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_0412:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,2]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_0412:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,2]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_0412:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,2]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_0412:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_0412:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_0412:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vbroadcastss %xmm1, %xmm1
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,2]
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
@@ -456,40 +456,40 @@ define <4 x i32> @shuffle_v4i32_0412(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_4012(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_4012:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_4012:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_4012:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_4012:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_4012:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_4012:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,2]
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2OR512VL-NEXT: retq
@@ -498,12 +498,12 @@ define <4 x i32> @shuffle_v4i32_4012(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_0145(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_0145:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0145:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -511,39 +511,39 @@ define <4 x i32> @shuffle_v4i32_0145(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_0451(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_0451:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_0451:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_0451:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_0451:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_0451:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_0451:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX2OR512VL-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
@@ -553,13 +553,13 @@ define <4 x i32> @shuffle_v4i32_0451(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_4501(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_4501:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_4501:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
@@ -567,39 +567,39 @@ define <4 x i32> @shuffle_v4i32_4501(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x i32> @shuffle_v4i32_4015(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_4015:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_4015:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_4015:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_4015:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_4015:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_4015:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
@@ -610,40 +610,40 @@ define <4 x i32> @shuffle_v4i32_4015(<4 x i32> %a, <4 x i32> %b) {
define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_4zzz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_4zzz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_4zzz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_4zzz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v4f32_4zzz:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f32_4zzz:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512VL-NEXT: retq
@@ -653,33 +653,33 @@ define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_z4zz(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_z4zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_z4zz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_z4zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_z4zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_z4zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 2, i32 4, i32 3, i32 0>
@@ -688,7 +688,7 @@ define <4 x float> @shuffle_v4f32_z4zz(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_zz4z:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -696,7 +696,7 @@ define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_zz4z:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -704,7 +704,7 @@ define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_zz4z:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -712,12 +712,12 @@ define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_zz4z:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = zero,zero,xmm0[0],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_zz4z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[0],zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 0, i32 0, i32 4, i32 0>
@@ -726,33 +726,33 @@ define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_zuu4(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_zuu4:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_zuu4:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_zuu4:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_zuu4:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = zero,zero,zero,xmm0[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_zuu4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,zero,xmm0[0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 4>
@@ -761,7 +761,7 @@ define <4 x float> @shuffle_v4f32_zuu4(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_zzz7(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_zzz7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
@@ -769,7 +769,7 @@ define <4 x float> @shuffle_v4f32_zzz7(<4 x float> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_zzz7:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
@@ -777,7 +777,7 @@ define <4 x float> @shuffle_v4f32_zzz7(<4 x float> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_zzz7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
@@ -785,13 +785,13 @@ define <4 x float> @shuffle_v4f32_zzz7(<4 x float> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_zzz7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_zzz7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX-NEXT: retq
@@ -801,33 +801,33 @@ define <4 x float> @shuffle_v4f32_zzz7(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_z6zz(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_z6zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_z6zz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_z6zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_z6zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[2],zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_z6zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm0[2],zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
@@ -836,7 +836,7 @@ define <4 x float> @shuffle_v4f32_z6zz(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_0z23(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_0z23:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
@@ -844,7 +844,7 @@ define <4 x float> @shuffle_v4f32_0z23(<4 x float> %a) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_0z23:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
@@ -852,7 +852,7 @@ define <4 x float> @shuffle_v4f32_0z23(<4 x float> %a) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_0z23:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
@@ -860,13 +860,13 @@ define <4 x float> @shuffle_v4f32_0z23(<4 x float> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_0z23:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z23:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX-NEXT: retq
@@ -876,34 +876,34 @@ define <4 x float> @shuffle_v4f32_0z23(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_01z3(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_01z3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_01z3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_01z3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_01z3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_01z3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX-NEXT: retq
@@ -913,34 +913,34 @@ define <4 x float> @shuffle_v4f32_01z3(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_012z(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_012z:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_012z:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_012z:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_012z:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_012z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX-NEXT: retq
@@ -950,34 +950,34 @@ define <4 x float> @shuffle_v4f32_012z(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_0zz3(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_0zz3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[1,2]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_0zz3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[1,2]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_0zz3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[1,2]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_0zz3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0zz3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; AVX-NEXT: retq
@@ -987,34 +987,34 @@ define <4 x float> @shuffle_v4f32_0zz3(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_0z2z(<4 x float> %v) {
; SSE2-LABEL: shuffle_v4f32_0z2z:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_0z2z:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_0z2z:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_0z2z:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z2z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
@@ -1024,13 +1024,13 @@ define <4 x float> @shuffle_v4f32_0z2z(<4 x float> %v) {
define <4 x float> @shuffle_v4f32_u051(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: shuffle_v4f32_u051:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_u051:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 undef, i32 0, i32 5, i32 1>
@@ -1039,7 +1039,7 @@ define <4 x float> @shuffle_v4f32_u051(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: shuffle_v4f32_0zz4:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm2[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
@@ -1048,7 +1048,7 @@ define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %a, <4 x float> %b) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_0zz4:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm2, %xmm2
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm2[2,0]
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
@@ -1057,7 +1057,7 @@ define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %a, <4 x float> %b) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_0zz4:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm2[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
@@ -1066,12 +1066,12 @@ define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %a, <4 x float> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_0zz4:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0zz4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %b, <4 x float> zeroinitializer, <4 x i32> <i32 undef, i32 5, i32 6, i32 0>
@@ -1081,7 +1081,7 @@ define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0zz6(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: shuffle_v4f32_0zz6:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,2]
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm0[0,3]
@@ -1090,7 +1090,7 @@ define <4 x float> @shuffle_v4f32_0zz6(<4 x float> %a, <4 x float> %b) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_0zz6:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,2]
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm0[0,3]
@@ -1099,7 +1099,7 @@ define <4 x float> @shuffle_v4f32_0zz6(<4 x float> %a, <4 x float> %b) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_0zz6:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,2]
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm0[0,3]
@@ -1108,12 +1108,12 @@ define <4 x float> @shuffle_v4f32_0zz6(<4 x float> %a, <4 x float> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_0zz6:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[2]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0zz6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[2]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 undef, i32 undef, i32 6>
@@ -1123,7 +1123,7 @@ define <4 x float> @shuffle_v4f32_0zz6(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0z24(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: shuffle_v4f32_0z24:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSE2-NEXT: xorps %xmm1, %xmm1
@@ -1133,7 +1133,7 @@ define <4 x float> @shuffle_v4f32_0z24(<4 x float> %a, <4 x float> %b) {
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_0z24:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSE3-NEXT: xorps %xmm1, %xmm1
@@ -1143,7 +1143,7 @@ define <4 x float> @shuffle_v4f32_0z24(<4 x float> %a, <4 x float> %b) {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_0z24:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSSE3-NEXT: xorps %xmm1, %xmm1
@@ -1153,12 +1153,12 @@ define <4 x float> @shuffle_v4f32_0z24(<4 x float> %a, <4 x float> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_0z24:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_0z24:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 undef, i32 2, i32 4>
@@ -1168,40 +1168,40 @@ define <4 x float> @shuffle_v4f32_0z24(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @shuffle_v4i32_4zzz(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_4zzz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_4zzz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_4zzz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_4zzz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v4i32_4zzz:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i32_4zzz:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512VL-NEXT: retq
@@ -1211,42 +1211,42 @@ define <4 x i32> @shuffle_v4i32_4zzz(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_z4zz(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_z4zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_z4zz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_z4zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_z4zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v4i32_z4zz:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1OR2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i32_z4zz:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,0,1,1]
@@ -1257,42 +1257,42 @@ define <4 x i32> @shuffle_v4i32_z4zz(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_zz4z(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_zz4z:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_zz4z:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_zz4z:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_zz4z:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v4i32_zz4z:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1OR2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i32_zz4z:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,0,1]
@@ -1303,12 +1303,12 @@ define <4 x i32> @shuffle_v4i32_zz4z(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_zuu4(<4 x i32> %a) {
; SSE-LABEL: shuffle_v4i32_zuu4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_zuu4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 4>
@@ -1317,42 +1317,42 @@ define <4 x i32> @shuffle_v4i32_zuu4(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_z6zz(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_z6zz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_z6zz:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_z6zz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_z6zz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_z6zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_z6zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2OR512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
@@ -1363,31 +1363,31 @@ define <4 x i32> @shuffle_v4i32_z6zz(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_7012(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_7012:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_7012:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_7012:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_7012:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_7012:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 7, i32 0, i32 1, i32 2>
@@ -1396,29 +1396,29 @@ define <4 x i32> @shuffle_v4i32_7012(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_6701(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_6701:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_6701:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_6701:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_6701:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_6701:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
@@ -1427,31 +1427,31 @@ define <4 x i32> @shuffle_v4i32_6701(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_5670(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_5670:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm0[2,0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_5670:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm0[2,0]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_5670:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_5670:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_5670:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 5, i32 6, i32 7, i32 0>
@@ -1460,31 +1460,31 @@ define <4 x i32> @shuffle_v4i32_5670(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_1234:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_1234:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_1234:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_1234:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_1234:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
@@ -1493,29 +1493,29 @@ define <4 x i32> @shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_2345:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_2345:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_2345:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_2345:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_2345:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -1524,13 +1524,13 @@ define <4 x i32> @shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_40u1(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_40u1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_40u1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 undef, i32 1>
@@ -1539,31 +1539,31 @@ define <4 x i32> @shuffle_v4i32_40u1(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_3456(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_3456:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,2]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_3456:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[0,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,2]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_3456:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_3456:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_3456:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
@@ -1572,27 +1572,27 @@ define <4 x i32> @shuffle_v4i32_3456(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_0u1u(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_0u1u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_0u1u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_0u1u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_0u1u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0u1u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
@@ -1601,30 +1601,30 @@ define <4 x i32> @shuffle_v4i32_0u1u(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @shuffle_v4i32_0z1z(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_0z1z:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_0z1z:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_0z1z:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_0z1z:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0z1z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -1633,12 +1633,12 @@ define <4 x i32> @shuffle_v4i32_0z1z(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_01zu(<4 x i32> %a) {
; SSE-LABEL: shuffle_v4i32_01zu:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_01zu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 7, i32 undef>
@@ -1647,34 +1647,34 @@ define <4 x i32> @shuffle_v4i32_01zu(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_0z23(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_0z23:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_0z23:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_0z23:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_0z23:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_0z23:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_0z23:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2OR512VL-NEXT: retq
@@ -1684,34 +1684,34 @@ define <4 x i32> @shuffle_v4i32_0z23(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_01z3(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_01z3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_01z3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_01z3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_01z3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_01z3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_01z3:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX2OR512VL-NEXT: retq
@@ -1721,34 +1721,34 @@ define <4 x i32> @shuffle_v4i32_01z3(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_012z(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_012z:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_012z:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_012z:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_012z:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_012z:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_012z:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2OR512VL-NEXT: retq
@@ -1758,34 +1758,34 @@ define <4 x i32> @shuffle_v4i32_012z(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_0zz3(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_0zz3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_0zz3:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_0zz3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_0zz3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v4i32_0zz3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v4i32_0zz3:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; AVX2OR512VL-NEXT: retq
@@ -1795,12 +1795,12 @@ define <4 x i32> @shuffle_v4i32_0zz3(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_bitcast_0415(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4i32_bitcast_0415:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_bitcast_0415:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT: retq
%shuffle32 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 0, i32 4>
@@ -1812,18 +1812,18 @@ define <4 x i32> @shuffle_v4i32_bitcast_0415(<4 x i32> %a, <4 x i32> %b) {
define <4 x float> @shuffle_v4f32_bitcast_4401(<4 x float> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4f32_bitcast_4401:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v4f32_bitcast_4401:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,0],xmm0[0,1]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f32_bitcast_4401:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512VL-NEXT: retq
@@ -1837,12 +1837,12 @@ define <4 x float> @shuffle_v4f32_bitcast_4401(<4 x float> %a, <4 x i32> %b) {
define <4 x float> @shuffle_v4f32_bitcast_0045(<4 x float> %a, <4 x i32> %b) {
; SSE-LABEL: shuffle_v4f32_bitcast_0045:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_bitcast_0045:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
@@ -1853,33 +1853,33 @@ define <4 x float> @shuffle_v4f32_bitcast_0045(<4 x float> %a, <4 x i32> %b) {
define <4 x float> @mask_v4f32_4127(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: mask_v4f32_4127:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[1,2]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: mask_v4f32_4127:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[1,2]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: mask_v4f32_4127:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[1,2]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mask_v4f32_4127:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: mask_v4f32_4127:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX-NEXT: retq
%1 = bitcast <4 x float> %a to <4 x i32>
@@ -1893,33 +1893,33 @@ define <4 x float> @mask_v4f32_4127(<4 x float> %a, <4 x float> %b) {
define <4 x float> @mask_v4f32_0127(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: mask_v4f32_0127:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: mask_v4f32_0127:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: mask_v4f32_0127:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mask_v4f32_0127:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: mask_v4f32_0127:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX-NEXT: retq
%1 = bitcast <4 x float> %a to <2 x i64>
@@ -1933,38 +1933,38 @@ define <4 x float> @mask_v4f32_0127(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @mask_v4i32_0127(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: mask_v4i32_0127:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: mask_v4i32_0127:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: mask_v4i32_0127:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mask_v4i32_0127:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: mask_v4i32_0127:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: mask_v4i32_0127:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX2OR512VL-NEXT: retq
%1 = bitcast <4 x i32> %a to <2 x i64>
@@ -1978,28 +1978,28 @@ define <4 x i32> @mask_v4i32_0127(<4 x i32> %a, <4 x i32> %b) {
define <4 x float> @broadcast_v4f32_0101_from_v2f32(<2 x float>* %x) {
; SSE2-LABEL: broadcast_v4f32_0101_from_v2f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: broadcast_v4f32_0101_from_v2f32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: broadcast_v4f32_0101_from_v2f32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: broadcast_v4f32_0101_from_v2f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: broadcast_v4f32_0101_from_v2f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%1 = load <2 x float>, <2 x float>* %x, align 1
@@ -2009,12 +2009,12 @@ define <4 x float> @broadcast_v4f32_0101_from_v2f32(<2 x float>* %x) {
define <4 x i32> @insert_reg_and_zero_v4i32(i32 %a) {
; SSE-LABEL: insert_reg_and_zero_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_and_zero_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd %edi, %xmm0
; AVX-NEXT: retq
%v = insertelement <4 x i32> undef, i32 %a, i32 0
@@ -2024,12 +2024,12 @@ define <4 x i32> @insert_reg_and_zero_v4i32(i32 %a) {
define <4 x i32> @insert_mem_and_zero_v4i32(i32* %ptr) {
; SSE-LABEL: insert_mem_and_zero_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_and_zero_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
%a = load i32, i32* %ptr
@@ -2040,40 +2040,40 @@ define <4 x i32> @insert_mem_and_zero_v4i32(i32* %ptr) {
define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
; SSE2-LABEL: insert_reg_and_zero_v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_and_zero_v4f32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_and_zero_v4f32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_reg_and_zero_v4f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: insert_reg_and_zero_v4f32:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_and_zero_v4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512VL-NEXT: retq
@@ -2084,12 +2084,12 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
; SSE-LABEL: insert_mem_and_zero_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_and_zero_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
%a = load float, float* %ptr
@@ -2100,37 +2100,37 @@ define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
define <4 x i32> @insert_reg_lo_v4i32(i64 %a, <4 x i32> %b) {
; SSE2-LABEL: insert_reg_lo_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_lo_v4i32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movq %rdi, %xmm1
; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_lo_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %rdi, %xmm1
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_reg_lo_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movq %rdi, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_reg_lo_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_reg_lo_v4i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovq %rdi, %xmm1
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2OR512VL-NEXT: retq
@@ -2142,34 +2142,34 @@ define <4 x i32> @insert_reg_lo_v4i32(i64 %a, <4 x i32> %b) {
define <4 x i32> @insert_mem_lo_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
; SSE2-LABEL: insert_mem_lo_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_mem_lo_v4i32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_mem_lo_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_mem_lo_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_mem_lo_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_mem_lo_v4i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2OR512VL-NEXT: retq
@@ -2181,13 +2181,13 @@ define <4 x i32> @insert_mem_lo_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
define <4 x i32> @insert_reg_hi_v4i32(i64 %a, <4 x i32> %b) {
; SSE-LABEL: insert_reg_hi_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_hi_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq %rdi, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
@@ -2199,13 +2199,13 @@ define <4 x i32> @insert_reg_hi_v4i32(i64 %a, <4 x i32> %b) {
define <4 x i32> @insert_mem_hi_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
; SSE-LABEL: insert_mem_hi_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_hi_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
@@ -2217,35 +2217,35 @@ define <4 x i32> @insert_mem_hi_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
define <4 x float> @insert_reg_lo_v4f32(double %a, <4 x float> %b) {
; SSE2-LABEL: insert_reg_lo_v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_lo_v4f32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_lo_v4f32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_reg_lo_v4f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: insert_reg_lo_v4f32:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_lo_v4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
%a.cast = bitcast double %a to <2 x float>
@@ -2256,12 +2256,12 @@ define <4 x float> @insert_reg_lo_v4f32(double %a, <4 x float> %b) {
define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE-LABEL: insert_mem_lo_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_lo_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; AVX-NEXT: retq
%a = load <2 x float>, <2 x float>* %ptr
@@ -2272,13 +2272,13 @@ define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
define <4 x float> @insert_reg_hi_v4f32(double %a, <4 x float> %b) {
; SSE-LABEL: insert_reg_hi_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_hi_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
%a.cast = bitcast double %a to <2 x float>
@@ -2289,12 +2289,12 @@ define <4 x float> @insert_reg_hi_v4f32(double %a, <4 x float> %b) {
define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE-LABEL: insert_mem_hi_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_hi_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: retq
%a = load <2 x float>, <2 x float>* %ptr
@@ -2305,13 +2305,13 @@ define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
define <4 x float> @shuffle_mem_v4f32_3210(<4 x float>* %ptr) {
; SSE-LABEL: shuffle_mem_v4f32_3210:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_mem_v4f32_3210:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,2,1,0]
; AVX-NEXT: retq
%a = load <4 x float>, <4 x float>* %ptr
@@ -2321,13 +2321,13 @@ define <4 x float> @shuffle_mem_v4f32_3210(<4 x float>* %ptr) {
define <4 x i32> @insert_dup_mem_v4i32(i32* %ptr) {
; SSE-LABEL: insert_dup_mem_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_dup_mem_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %xmm0
; AVX-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -2342,12 +2342,12 @@ define <4 x i32> @insert_dup_mem_v4i32(i32* %ptr) {
define <4 x i32> @shuffle_v4i32_z0zX(<4 x i32> %a) {
; SSE-LABEL: shuffle_v4i32_z0zX:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllq $32, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_z0zX:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 4, i32 undef>
@@ -2356,12 +2356,12 @@ define <4 x i32> @shuffle_v4i32_z0zX(<4 x i32> %a) {
define <4 x i32> @shuffle_v4i32_1z3z(<4 x i32> %a) {
; SSE-LABEL: shuffle_v4i32_1z3z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_1z3z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
diff --git a/test/CodeGen/X86/vector-shuffle-128-v8.ll b/test/CodeGen/X86/vector-shuffle-128-v8.ll
index c6926055233..60bc36948d2 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -8,12 +8,12 @@
define <8 x i16> @shuffle_v8i16_01012323(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_01012323:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_01012323:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3>
@@ -21,12 +21,12 @@ define <8 x i16> @shuffle_v8i16_01012323(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_67452301(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_67452301:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_67452301:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
@@ -34,24 +34,24 @@ define <8 x i16> @shuffle_v8i16_67452301(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_456789AB(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_456789AB:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_456789AB:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_456789AB:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_456789AB:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
@@ -60,19 +60,19 @@ define <8 x i16> @shuffle_v8i16_456789AB(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_00000000(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_00000000:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_00000000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i16_00000000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -80,13 +80,13 @@ define <8 x i16> @shuffle_v8i16_00000000(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_00004444(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_00004444:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_00004444:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; AVX-NEXT: retq
@@ -95,12 +95,12 @@ define <8 x i16> @shuffle_v8i16_00004444(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_u0u1u2u3(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_u0u1u2u3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_u0u1u2u3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef, i32 3>
@@ -108,12 +108,12 @@ define <8 x i16> @shuffle_v8i16_u0u1u2u3(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_u4u5u6u7(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_u4u5u6u7:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_u4u5u6u7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 4, i32 undef, i32 5, i32 undef, i32 6, i32 undef, i32 7>
@@ -121,13 +121,13 @@ define <8 x i16> @shuffle_v8i16_u4u5u6u7(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_31206745(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_31206745:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_31206745:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
; AVX-NEXT: retq
@@ -136,24 +136,24 @@ define <8 x i16> @shuffle_v8i16_31206745(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_44440000(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_44440000:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_44440000:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,0,1,0,1,0,1,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_44440000:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,0,1,0,1,0,1,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_44440000:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,0,1,0,1,0,1,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0>
@@ -161,12 +161,12 @@ define <8 x i16> @shuffle_v8i16_44440000(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_23016745(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_23016745:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,3,2]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_23016745:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,0,3,2]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
@@ -174,13 +174,13 @@ define <8 x i16> @shuffle_v8i16_23016745(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_23026745(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_23026745:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_23026745:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
; AVX-NEXT: retq
@@ -189,13 +189,13 @@ define <8 x i16> @shuffle_v8i16_23026745(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_23016747(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_23016747:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_23016747:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,7]
; AVX-NEXT: retq
@@ -204,24 +204,24 @@ define <8 x i16> @shuffle_v8i16_23016747(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_75643120(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_75643120:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_75643120:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[14,15,10,11,12,13,8,9,6,7,2,3,4,5,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_75643120:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[14,15,10,11,12,13,8,9,6,7,2,3,4,5,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_75643120:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[14,15,10,11,12,13,8,9,6,7,2,3,4,5,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 7, i32 5, i32 6, i32 4, i32 3, i32 1, i32 2, i32 0>
@@ -230,24 +230,24 @@ define <8 x i16> @shuffle_v8i16_75643120(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_10545410(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_10545410:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_10545410:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,10,11,8,9,10,11,8,9,2,3,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_10545410:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,10,11,8,9,10,11,8,9,2,3,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_10545410:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,10,11,8,9,10,11,8,9,2,3,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 5, i32 4, i32 5, i32 4, i32 1, i32 0>
@@ -255,24 +255,24 @@ define <8 x i16> @shuffle_v8i16_10545410(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_54105410(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_54105410:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_54105410:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[10,11,8,9,2,3,0,1,10,11,8,9,2,3,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_54105410:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[10,11,8,9,2,3,0,1,10,11,8,9,2,3,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_54105410:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,8,9,2,3,0,1,10,11,8,9,2,3,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 4, i32 1, i32 0, i32 5, i32 4, i32 1, i32 0>
@@ -280,24 +280,24 @@ define <8 x i16> @shuffle_v8i16_54105410(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_54101054(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_54101054:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_54101054:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[10,11,8,9,2,3,0,1,2,3,0,1,10,11,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_54101054:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[10,11,8,9,2,3,0,1,2,3,0,1,10,11,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_54101054:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,8,9,2,3,0,1,2,3,0,1,10,11,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 4, i32 1, i32 0, i32 1, i32 0, i32 5, i32 4>
@@ -305,24 +305,24 @@ define <8 x i16> @shuffle_v8i16_54101054(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_04400440(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_04400440:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,4,6]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_04400440:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,0,1,8,9,8,9,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_04400440:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,0,1,8,9,8,9,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_04400440:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,0,1,8,9,8,9,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 0, i32 4, i32 4, i32 0>
@@ -330,24 +330,24 @@ define <8 x i16> @shuffle_v8i16_04400440(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_40044004(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_40044004:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,0,0,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_40044004:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,0,1,0,1,8,9,8,9,0,1,0,1,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_40044004:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,0,1,0,1,8,9,8,9,0,1,0,1,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_40044004:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,0,1,0,1,8,9,8,9,0,1,0,1,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 0, i32 0, i32 4, i32 4, i32 0, i32 0, i32 4>
@@ -356,7 +356,7 @@ define <8 x i16> @shuffle_v8i16_40044004(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_26405173(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_26405173:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,4]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
@@ -365,17 +365,17 @@ define <8 x i16> @shuffle_v8i16_26405173(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_26405173:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,12,13,8,9,0,1,10,11,2,3,14,15,6,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_26405173:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,12,13,8,9,0,1,10,11,2,3,14,15,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_26405173:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,12,13,8,9,0,1,10,11,2,3,14,15,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 6, i32 4, i32 0, i32 5, i32 1, i32 7, i32 3>
@@ -383,7 +383,7 @@ define <8 x i16> @shuffle_v8i16_26405173(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_20645173(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_20645173:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,4]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
@@ -392,17 +392,17 @@ define <8 x i16> @shuffle_v8i16_20645173(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_20645173:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,0,1,12,13,8,9,10,11,2,3,14,15,6,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_20645173:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,0,1,12,13,8,9,10,11,2,3,14,15,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_20645173:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,0,1,12,13,8,9,10,11,2,3,14,15,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 0, i32 6, i32 4, i32 5, i32 1, i32 7, i32 3>
@@ -410,7 +410,7 @@ define <8 x i16> @shuffle_v8i16_20645173(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_26401375(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_26401375:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,4]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,2]
@@ -418,17 +418,17 @@ define <8 x i16> @shuffle_v8i16_26401375(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_26401375:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,12,13,8,9,0,1,2,3,6,7,14,15,10,11]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_26401375:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,12,13,8,9,0,1,2,3,6,7,14,15,10,11]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_26401375:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,12,13,8,9,0,1,2,3,6,7,14,15,10,11]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 6, i32 4, i32 0, i32 1, i32 3, i32 7, i32 5>
@@ -437,7 +437,7 @@ define <8 x i16> @shuffle_v8i16_26401375(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_66751643(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_66751643:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,0]
@@ -446,17 +446,17 @@ define <8 x i16> @shuffle_v8i16_66751643(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_66751643:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[12,13,12,13,14,15,10,11,2,3,12,13,8,9,6,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_66751643:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[12,13,12,13,14,15,10,11,2,3,12,13,8,9,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_66751643:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,12,13,14,15,10,11,2,3,12,13,8,9,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 6, i32 6, i32 7, i32 5, i32 1, i32 6, i32 4, i32 3>
@@ -465,7 +465,7 @@ define <8 x i16> @shuffle_v8i16_66751643(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_60514754(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_60514754:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
@@ -473,17 +473,17 @@ define <8 x i16> @shuffle_v8i16_60514754(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_60514754:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[12,13,0,1,10,11,2,3,8,9,14,15,10,11,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_60514754:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[12,13,0,1,10,11,2,3,8,9,14,15,10,11,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_60514754:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,0,1,10,11,2,3,8,9,14,15,10,11,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 6, i32 0, i32 5, i32 1, i32 4, i32 7, i32 5, i32 4>
@@ -492,24 +492,24 @@ define <8 x i16> @shuffle_v8i16_60514754(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_00444444(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_00444444:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_00444444:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,8,9,8,9,8,9,8,9,8,9,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_00444444:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,8,9,8,9,8,9,8,9,8,9,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_00444444:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,8,9,8,9,8,9,8,9,8,9,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -517,24 +517,24 @@ define <8 x i16> @shuffle_v8i16_00444444(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_44004444(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_44004444:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,0,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_44004444:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,0,1,0,1,8,9,8,9,8,9,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_44004444:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,0,1,0,1,8,9,8,9,8,9,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_44004444:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,0,1,0,1,8,9,8,9,8,9,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -542,24 +542,24 @@ define <8 x i16> @shuffle_v8i16_44004444(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_04404444(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_04404444:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_04404444:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_04404444:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_04404444:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -567,24 +567,24 @@ define <8 x i16> @shuffle_v8i16_04404444(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_04400000(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_04400000:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_04400000:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,0,1,0,1,0,1,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_04400000:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,0,1,0,1,0,1,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_04400000:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,0,1,0,1,0,1,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -592,13 +592,13 @@ define <8 x i16> @shuffle_v8i16_04400000(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_04404567(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_04404567:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_04404567:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
; AVX-NEXT: retq
@@ -608,24 +608,24 @@ define <8 x i16> @shuffle_v8i16_04404567(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_0X444444(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_0X444444:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0X444444:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,8,9,8,9,8,9,8,9,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0X444444:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,8,9,8,9,8,9,8,9,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0X444444:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,8,9,8,9,8,9,8,9,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 undef, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -633,24 +633,24 @@ define <8 x i16> @shuffle_v8i16_0X444444(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_44X04444(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_44X04444:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_44X04444:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_44X04444:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_44X04444:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 undef, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -658,24 +658,24 @@ define <8 x i16> @shuffle_v8i16_44X04444(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_X4404444(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_X4404444:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_X4404444:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_X4404444:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_X4404444:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -684,29 +684,29 @@ define <8 x i16> @shuffle_v8i16_X4404444(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_0127XXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_0127XXXX:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0127XXXX:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,14,15,4,5,14,15,12,13,14,15]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0127XXXX:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,14,15,4,5,14,15,12,13,14,15]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v8i16_0127XXXX:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,14,15,4,5,14,15,12,13,14,15]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_0127XXXX:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -717,29 +717,29 @@ define <8 x i16> @shuffle_v8i16_0127XXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_XXXX4563(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_XXXX4563:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_XXXX4563:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,4,5,6,7,8,9,10,11,12,13,6,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_XXXX4563:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,4,5,6,7,8,9,10,11,12,13,6,7]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v8i16_XXXX4563:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,4,5,6,7,8,9,10,11,12,13,6,7]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_XXXX4563:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
@@ -750,29 +750,29 @@ define <8 x i16> @shuffle_v8i16_XXXX4563(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_4563XXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_4563XXXX:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_4563XXXX:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,8,9,10,11,0,1,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_4563XXXX:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,8,9,10,11,0,1,2,3]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v8i16_4563XXXX:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,8,9,10,11,0,1,2,3]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_4563XXXX:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
@@ -783,29 +783,29 @@ define <8 x i16> @shuffle_v8i16_4563XXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_01274563(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_01274563:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,2]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_01274563:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,14,15,8,9,10,11,12,13,6,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_01274563:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,14,15,8,9,10,11,12,13,6,7]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v8i16_01274563:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,14,15,8,9,10,11,12,13,6,7]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_01274563:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,2]
@@ -816,29 +816,29 @@ define <8 x i16> @shuffle_v8i16_01274563(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_45630127(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_45630127:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_45630127:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,0,1,2,3,4,5,14,15]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_45630127:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,0,1,2,3,4,5,14,15]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v8i16_45630127:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,0,1,2,3,4,5,14,15]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_45630127:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,3,1]
@@ -849,7 +849,7 @@ define <8 x i16> @shuffle_v8i16_45630127(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_37102735(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_37102735:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,4]
@@ -859,17 +859,17 @@ define <8 x i16> @shuffle_v8i16_37102735(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_37102735:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,7,14,15,2,3,0,1,4,5,14,15,6,7,10,11]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_37102735:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,7,14,15,2,3,0,1,4,5,14,15,6,7,10,11]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_37102735:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,7,14,15,2,3,0,1,4,5,14,15,6,7,10,11]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 7, i32 1, i32 0, i32 2, i32 7, i32 3, i32 5>
@@ -878,12 +878,12 @@ define <8 x i16> @shuffle_v8i16_37102735(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_08192a3b(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_08192a3b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_08192a3b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -892,13 +892,13 @@ define <8 x i16> @shuffle_v8i16_08192a3b(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_0c1d2e3f(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_0c1d2e3f:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0c1d2e3f:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: retq
@@ -908,12 +908,12 @@ define <8 x i16> @shuffle_v8i16_0c1d2e3f(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_4c5d6e7f(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_4c5d6e7f:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_4c5d6e7f:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -922,13 +922,13 @@ define <8 x i16> @shuffle_v8i16_4c5d6e7f(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_48596a7b(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_48596a7b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_48596a7b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: retq
@@ -938,14 +938,14 @@ define <8 x i16> @shuffle_v8i16_48596a7b(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_08196e7f(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_08196e7f:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_08196e7f:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -956,14 +956,14 @@ define <8 x i16> @shuffle_v8i16_08196e7f(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_0c1d6879(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_0c1d6879:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,0,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0c1d6879:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,0,2,3]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -974,14 +974,14 @@ define <8 x i16> @shuffle_v8i16_0c1d6879(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_109832ba(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_109832ba:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_109832ba:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
@@ -992,13 +992,13 @@ define <8 x i16> @shuffle_v8i16_109832ba(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_8091a2b3(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_8091a2b3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_8091a2b3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3>
@@ -1006,13 +1006,13 @@ define <8 x i16> @shuffle_v8i16_8091a2b3(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_c4d5e6f7(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_c4d5e6f7:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_c4d5e6f7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
@@ -1021,7 +1021,7 @@ define <8 x i16> @shuffle_v8i16_c4d5e6f7(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_0213cedf(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_0213cedf:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,1,3,4,5,6,7]
@@ -1029,7 +1029,7 @@ define <8 x i16> @shuffle_v8i16_0213cedf(<8 x i16> %a, <8 x i16> %b) {
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0213cedf:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,1,3,4,5,6,7]
@@ -1041,7 +1041,7 @@ define <8 x i16> @shuffle_v8i16_0213cedf(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_443aXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_443aXXXX:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,65535,65535,65535]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
@@ -1051,21 +1051,21 @@ define <8 x i16> @shuffle_v8i16_443aXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_443aXXXX:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[4,5,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,6,7],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_443aXXXX:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_443aXXXX:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
@@ -1076,7 +1076,7 @@ define <8 x i16> @shuffle_v8i16_443aXXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_032dXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_032dXXXX:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
@@ -1085,26 +1085,26 @@ define <8 x i16> @shuffle_v8i16_032dXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_032dXXXX:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[10,11,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_032dXXXX:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_032dXXXX:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i16_032dXXXX:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
; AVX2OR512VL-NEXT: retq
@@ -1113,12 +1113,12 @@ define <8 x i16> @shuffle_v8i16_032dXXXX(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_XXXdXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_XXXdXXXX:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_XXXdXXXX:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[2,2,3,3]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1127,7 +1127,7 @@ define <8 x i16> @shuffle_v8i16_XXXdXXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_012dXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_012dXXXX:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
@@ -1136,20 +1136,20 @@ define <8 x i16> @shuffle_v8i16_012dXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_012dXXXX:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[10,11,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_012dXXXX:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_012dXXXX:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
; AVX-NEXT: retq
@@ -1159,7 +1159,7 @@ define <8 x i16> @shuffle_v8i16_012dXXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_XXXXcde3(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_XXXXcde3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,65535,65535,0]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -1169,26 +1169,26 @@ define <8 x i16> @shuffle_v8i16_XXXXcde3(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_XXXXcde3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u],zero,zero,zero,zero,zero,zero,xmm0[6,7]
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,8,9,10,11,12,13],zero,zero
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_XXXXcde3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_XXXXcde3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i16_XXXXcde3:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2OR512VL-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
; AVX2OR512VL-NEXT: retq
@@ -1198,7 +1198,7 @@ define <8 x i16> @shuffle_v8i16_XXXXcde3(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_cde3XXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_cde3XXXX:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: pand %xmm2, %xmm1
@@ -1208,20 +1208,20 @@ define <8 x i16> @shuffle_v8i16_cde3XXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_cde3XXXX:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[6,7,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13],zero,zero,xmm1[u,u,u,u,u,u,u,u]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_cde3XXXX:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_cde3XXXX:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
; AVX-NEXT: retq
@@ -1231,7 +1231,7 @@ define <8 x i16> @shuffle_v8i16_cde3XXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_012dcde3(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_012dcde3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
@@ -1243,26 +1243,26 @@ define <8 x i16> @shuffle_v8i16_012dcde3(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_012dcde3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[10,11,8,9,10,11,12,13],zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,xmm0[6,7]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_012dcde3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_012dcde3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i16_012dcde3:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
; AVX2OR512VL-NEXT: retq
@@ -1272,7 +1272,7 @@ define <8 x i16> @shuffle_v8i16_012dcde3(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_0923cde7(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_0923cde7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,65535,0,0,0,65535]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andnps %xmm1, %xmm2
@@ -1280,7 +1280,7 @@ define <8 x i16> @shuffle_v8i16_0923cde7(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0923cde7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,65535,0,0,0,65535]
; SSSE3-NEXT: andps %xmm2, %xmm0
; SSSE3-NEXT: andnps %xmm1, %xmm2
@@ -1288,12 +1288,12 @@ define <8 x i16> @shuffle_v8i16_0923cde7(<8 x i16> %a, <8 x i16> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0923cde7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6],xmm0[7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0923cde7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6],xmm0[7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 3, i32 12, i32 13, i32 14, i32 7>
@@ -1302,7 +1302,7 @@ define <8 x i16> @shuffle_v8i16_0923cde7(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_XXX1X579(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_XXX1X579:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,2,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,65535,65535,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
@@ -1314,14 +1314,14 @@ define <8 x i16> @shuffle_v8i16_XXX1X579(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_XXX1X579:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u],zero,zero,xmm1[u,u],zero,zero,zero,zero,xmm1[2,3]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,2,3,u,u,10,11,14,15],zero,zero
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_XXX1X579:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
; SSE41-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
@@ -1329,7 +1329,7 @@ define <8 x i16> @shuffle_v8i16_XXX1X579(<8 x i16> %a, <8 x i16> %b) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_XXX1X579:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
@@ -1337,7 +1337,7 @@ define <8 x i16> @shuffle_v8i16_XXX1X579(<8 x i16> %a, <8 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i16_XXX1X579:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
@@ -1349,7 +1349,7 @@ define <8 x i16> @shuffle_v8i16_XXX1X579(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_XX4X8acX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_XX4X8acX:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
@@ -1358,28 +1358,28 @@ define <8 x i16> @shuffle_v8i16_XX4X8acX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_XX4X8acX:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,8,9,u,u],zero,zero,zero,zero,zero,zero,xmm0[u,u]
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,xmm1[u,u,0,1,4,5,8,9,u,u]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_XX4X8acX:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_XX4X8acX:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i16_XX4X8acX:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
@@ -1390,13 +1390,13 @@ define <8 x i16> @shuffle_v8i16_XX4X8acX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_8zzzzzzz(i16 %i) {
; SSE-LABEL: shuffle_v8i16_8zzzzzzz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzwl %di, %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_8zzzzzzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movzwl %di, %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: retq
@@ -1407,13 +1407,13 @@ define <8 x i16> @shuffle_v8i16_8zzzzzzz(i16 %i) {
define <8 x i16> @shuffle_v8i16_z8zzzzzz(i16 %i) {
; SSE-LABEL: shuffle_v8i16_z8zzzzzz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: pinsrw $1, %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_z8zzzzzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1424,13 +1424,13 @@ define <8 x i16> @shuffle_v8i16_z8zzzzzz(i16 %i) {
define <8 x i16> @shuffle_v8i16_zzzzz8zz(i16 %i) {
; SSE-LABEL: shuffle_v8i16_zzzzz8zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: pinsrw $5, %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_zzzzz8zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1441,13 +1441,13 @@ define <8 x i16> @shuffle_v8i16_zzzzz8zz(i16 %i) {
define <8 x i16> @shuffle_v8i16_zuuzuuz8(i16 %i) {
; SSE-LABEL: shuffle_v8i16_zuuzuuz8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: pinsrw $7, %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_zuuzuuz8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrw $7, %edi, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1458,13 +1458,13 @@ define <8 x i16> @shuffle_v8i16_zuuzuuz8(i16 %i) {
define <8 x i16> @shuffle_v8i16_zzBzzzzz(i16 %i) {
; SSE-LABEL: shuffle_v8i16_zzBzzzzz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: pinsrw $2, %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_zzBzzzzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrw $2, %edi, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1475,24 +1475,24 @@ define <8 x i16> @shuffle_v8i16_zzBzzzzz(i16 %i) {
define <8 x i16> @shuffle_v8i16_def01234(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_def01234:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_def01234:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_def01234:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_def01234:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
@@ -1501,24 +1501,24 @@ define <8 x i16> @shuffle_v8i16_def01234(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_ueuu123u(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_ueuu123u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_ueuu123u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_ueuu123u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_ueuu123u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 14, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 undef>
@@ -1527,7 +1527,7 @@ define <8 x i16> @shuffle_v8i16_ueuu123u(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_56701234(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_56701234:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
@@ -1535,17 +1535,17 @@ define <8 x i16> @shuffle_v8i16_56701234(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_56701234:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_56701234:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_56701234:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4>
@@ -1554,7 +1554,7 @@ define <8 x i16> @shuffle_v8i16_56701234(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_u6uu123u(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_u6uu123u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
@@ -1562,17 +1562,17 @@ define <8 x i16> @shuffle_v8i16_u6uu123u(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_u6uu123u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_u6uu123u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_u6uu123u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 6, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 undef>
@@ -1581,12 +1581,12 @@ define <8 x i16> @shuffle_v8i16_u6uu123u(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_uuuu123u(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_uuuu123u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_uuuu123u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 undef>
@@ -1595,24 +1595,24 @@ define <8 x i16> @shuffle_v8i16_uuuu123u(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_bcdef012(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_bcdef012:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_bcdef012:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_bcdef012:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_bcdef012:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2>
@@ -1621,24 +1621,24 @@ define <8 x i16> @shuffle_v8i16_bcdef012(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_ucdeuu1u(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_ucdeuu1u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_ucdeuu1u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_ucdeuu1u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm1[6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_ucdeuu1u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 12, i32 13, i32 14, i32 undef, i32 undef, i32 1, i32 undef>
@@ -1647,7 +1647,7 @@ define <8 x i16> @shuffle_v8i16_ucdeuu1u(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_34567012(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_34567012:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
@@ -1655,17 +1655,17 @@ define <8 x i16> @shuffle_v8i16_34567012(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_34567012:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_34567012:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_34567012:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2>
@@ -1674,7 +1674,7 @@ define <8 x i16> @shuffle_v8i16_34567012(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_u456uu1u(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_u456uu1u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
@@ -1682,17 +1682,17 @@ define <8 x i16> @shuffle_v8i16_u456uu1u(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_u456uu1u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_u456uu1u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_u456uu1u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 4, i32 5, i32 6, i32 undef, i32 undef, i32 1, i32 undef>
@@ -1701,12 +1701,12 @@ define <8 x i16> @shuffle_v8i16_u456uu1u(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_u456uuuu(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_u456uuuu:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_u456uuuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 4, i32 5, i32 6, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1715,26 +1715,26 @@ define <8 x i16> @shuffle_v8i16_u456uuuu(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_3456789a(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_3456789a:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_3456789a:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_3456789a:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_3456789a:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
@@ -1743,26 +1743,26 @@ define <8 x i16> @shuffle_v8i16_3456789a(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_u456uu9u(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_u456uu9u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_u456uu9u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_u456uu9u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_u456uu9u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 4, i32 5, i32 6, i32 undef, i32 undef, i32 9, i32 undef>
@@ -1771,26 +1771,26 @@ define <8 x i16> @shuffle_v8i16_u456uu9u(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_56789abc(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_56789abc:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_56789abc:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_56789abc:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_56789abc:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>
@@ -1799,26 +1799,26 @@ define <8 x i16> @shuffle_v8i16_56789abc(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_u6uu9abu(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_u6uu9abu:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_u6uu9abu:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_u6uu9abu:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_u6uu9abu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 6, i32 undef, i32 undef, i32 9, i32 10, i32 11, i32 undef>
@@ -1827,24 +1827,24 @@ define <8 x i16> @shuffle_v8i16_u6uu9abu(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_0uuu1uuu(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_0uuu1uuu:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0uuu1uuu:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0uuu1uuu:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0uuu1uuu:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef>
@@ -1853,26 +1853,26 @@ define <8 x i16> @shuffle_v8i16_0uuu1uuu(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_0zzz1zzz(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_0zzz1zzz:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0zzz1zzz:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0zzz1zzz:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0zzz1zzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
@@ -1881,22 +1881,22 @@ define <8 x i16> @shuffle_v8i16_0zzz1zzz(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_0u1u2u3u(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_0u1u2u3u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0u1u2u3u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0u1u2u3u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0u1u2u3u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef, i32 3, i32 undef>
@@ -1905,24 +1905,24 @@ define <8 x i16> @shuffle_v8i16_0u1u2u3u(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_0z1z2z3z(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_0z1z2z3z:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0z1z2z3z:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0z1z2z3z:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0z1z2z3z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
@@ -1931,24 +1931,24 @@ define <8 x i16> @shuffle_v8i16_0z1z2z3z(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_01100110(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_01100110:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_01100110:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,0,1,2,3,2,3,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_01100110:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,0,1,2,3,2,3,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_01100110:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,0,1,2,3,2,3,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 1, i32 0>
@@ -1957,24 +1957,24 @@ define <8 x i16> @shuffle_v8i16_01100110(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_01u0u110(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_01u0u110:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,4]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_01u0u110:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,0,1,0,1,2,3,2,3,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_01u0u110:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,0,1,0,1,2,3,2,3,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_01u0u110:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,0,1,0,1,2,3,2,3,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 undef, i32 0, i32 undef, i32 1, i32 1, i32 0>
@@ -1983,24 +1983,24 @@ define <8 x i16> @shuffle_v8i16_01u0u110(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_467uu675(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_467uu675:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,7,5]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_467uu675:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,12,13,14,15,14,15,8,9,12,13,14,15,10,11]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_467uu675:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,12,13,14,15,14,15,8,9,12,13,14,15,10,11]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_467uu675:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,12,13,14,15,14,15,8,9,12,13,14,15,10,11]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 4, i32 6, i32 7, i32 undef, i32 undef, i32 6, i32 7, i32 5>
@@ -2012,12 +2012,12 @@ define <8 x i16> @shuffle_v8i16_467uu675(<8 x i16> %a) {
;
define <8 x i16> @shuffle_v8i16_z0z2z4z6(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_z0z2z4z6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_z0z2z4z6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $16, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 8, i32 0, i32 8, i32 2, i32 8, i32 4, i32 8, i32 6>
@@ -2026,12 +2026,12 @@ define <8 x i16> @shuffle_v8i16_z0z2z4z6(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_zzz0zzz4(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_zzz0zzz4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllq $48, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_zzz0zzz4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq $48, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 8, i32 8, i32 8, i32 0, i32 8, i32 8, i32 8, i32 4>
@@ -2040,12 +2040,12 @@ define <8 x i16> @shuffle_v8i16_zzz0zzz4(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_zz01zX4X(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_zz01zX4X:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllq $32, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_zz01zX4X:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 8, i32 8, i32 0, i32 1, i32 8, i32 undef, i32 4, i32 undef>
@@ -2054,12 +2054,12 @@ define <8 x i16> @shuffle_v8i16_zz01zX4X(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_z0X2z456(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_z0X2z456:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psllq $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_z0X2z456:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsllq $16, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 8, i32 0, i32 undef, i32 2, i32 8, i32 4, i32 5, i32 6>
@@ -2068,12 +2068,12 @@ define <8 x i16> @shuffle_v8i16_z0X2z456(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_1z3zXz7z(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_1z3zXz7z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_1z3zXz7z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 1, i32 8, i32 3, i32 8, i32 undef, i32 8, i32 7, i32 8>
@@ -2082,12 +2082,12 @@ define <8 x i16> @shuffle_v8i16_1z3zXz7z(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_1X3z567z(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_1X3z567z:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_1X3z567z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 1, i32 undef, i32 3, i32 8, i32 5, i32 6, i32 7, i32 8>
@@ -2096,12 +2096,12 @@ define <8 x i16> @shuffle_v8i16_1X3z567z(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_23zz67zz(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_23zz67zz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_23zz67zz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 2, i32 3, i32 8, i32 8, i32 6, i32 7, i32 8, i32 8>
@@ -2110,12 +2110,12 @@ define <8 x i16> @shuffle_v8i16_23zz67zz(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_3zXXXzzz(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_3zXXXzzz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_3zXXXzzz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 3, i32 8, i32 undef, i32 undef, i32 undef, i32 8, i32 8, i32 8>
@@ -2124,12 +2124,12 @@ define <8 x i16> @shuffle_v8i16_3zXXXzzz(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_01u3zzuz(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_01u3zzuz:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_01u3zzuz:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 undef, i32 3, i32 8, i32 8, i32 undef, i32 8>
@@ -2138,23 +2138,23 @@ define <8 x i16> @shuffle_v8i16_01u3zzuz(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_0z234567(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_0z234567:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0z234567:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0z234567:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0z234567:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
; AVX-NEXT: retq
@@ -2164,23 +2164,23 @@ define <8 x i16> @shuffle_v8i16_0z234567(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_0zzzz5z7(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_0zzzz5z7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0zzzz5z7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0zzzz5z7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4],xmm0[5],xmm1[6],xmm0[7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0zzzz5z7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4],xmm0[5],xmm1[6],xmm0[7]
; AVX-NEXT: retq
@@ -2190,23 +2190,23 @@ define <8 x i16> @shuffle_v8i16_0zzzz5z7(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_0123456z(<8 x i16> %a) {
; SSE2-LABEL: shuffle_v8i16_0123456z:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_0123456z:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_0123456z:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6],xmm1[7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0123456z:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6],xmm1[7]
; AVX-NEXT: retq
@@ -2216,7 +2216,7 @@ define <8 x i16> @shuffle_v8i16_0123456z(<8 x i16> %a) {
define <8 x i16> @shuffle_v8i16_fu3ucc5u(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_fu3ucc5u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,4]
; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -2224,7 +2224,7 @@ define <8 x i16> @shuffle_v8i16_fu3ucc5u(<8 x i16> %a, <8 x i16> %b) {
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_fu3ucc5u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; AVX-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,4]
; AVX-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -2235,12 +2235,12 @@ define <8 x i16> @shuffle_v8i16_fu3ucc5u(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_8012345u(<8 x i16> %a) {
; SSE-LABEL: shuffle_v8i16_8012345u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_8012345u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 8, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 undef>
@@ -2250,31 +2250,31 @@ define <8 x i16> @shuffle_v8i16_8012345u(<8 x i16> %a) {
define <8 x i16> @mask_v8i16_012345ef(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: mask_v8i16_012345ef:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mask_v8i16_012345ef:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mask_v8i16_012345ef:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: mask_v8i16_012345ef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: mask_v8i16_012345ef:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX2OR512VL-NEXT: retq
%1 = bitcast <8 x i16> %a to <2 x i64>
@@ -2288,21 +2288,21 @@ define <8 x i16> @mask_v8i16_012345ef(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @insert_dup_mem_v8i16_i32(i32* %ptr) {
; SSE-LABEL: insert_dup_mem_v8i16_i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: insert_dup_mem_v8i16_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_mem_v8i16_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw (%rdi), %xmm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -2314,7 +2314,7 @@ define <8 x i16> @insert_dup_mem_v8i16_i32(i32* %ptr) {
define <8 x i16> @insert_dup_mem_v8i16_sext_i16(i16* %ptr) {
; SSE2-LABEL: insert_dup_mem_v8i16_sext_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movswl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -2322,35 +2322,35 @@ define <8 x i16> @insert_dup_mem_v8i16_sext_i16(i16* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_mem_v8i16_sext_i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movswl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_mem_v8i16_sext_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movswl (%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_mem_v8i16_sext_i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movswl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_mem_v8i16_sext_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movswl (%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_dup_mem_v8i16_sext_i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movswl (%rdi), %eax
; AVX512VL-NEXT: vpbroadcastw %eax, %xmm0
; AVX512VL-NEXT: retq
@@ -2364,21 +2364,21 @@ define <8 x i16> @insert_dup_mem_v8i16_sext_i16(i16* %ptr) {
define <8 x i16> @insert_dup_elt1_mem_v8i16_i32(i32* %ptr) {
; SSE-LABEL: insert_dup_elt1_mem_v8i16_i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: insert_dup_elt1_mem_v8i16_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_elt1_mem_v8i16_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw 2(%rdi), %xmm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -2390,7 +2390,7 @@ define <8 x i16> @insert_dup_elt1_mem_v8i16_i32(i32* %ptr) {
define <8 x i16> @insert_dup_elt3_mem_v8i16_i32(i32* %ptr) {
; SSE2-LABEL: insert_dup_elt3_mem_v8i16_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
@@ -2398,25 +2398,25 @@ define <8 x i16> @insert_dup_elt3_mem_v8i16_i32(i32* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_elt3_mem_v8i16_i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_elt3_mem_v8i16_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_elt3_mem_v8i16_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_elt3_mem_v8i16_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw 2(%rdi), %xmm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -2428,7 +2428,7 @@ define <8 x i16> @insert_dup_elt3_mem_v8i16_i32(i32* %ptr) {
define <8 x i16> @insert_dup_elt1_mem_v8i16_sext_i16(i16* %ptr) {
; SSE2-LABEL: insert_dup_elt1_mem_v8i16_sext_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movswl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
@@ -2436,28 +2436,28 @@ define <8 x i16> @insert_dup_elt1_mem_v8i16_sext_i16(i16* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_elt1_mem_v8i16_sext_i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movswl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_elt1_mem_v8i16_sext_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movswl (%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_elt1_mem_v8i16_sext_i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movswl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_elt1_mem_v8i16_sext_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movswl (%rdi), %eax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
@@ -2465,7 +2465,7 @@ define <8 x i16> @insert_dup_elt1_mem_v8i16_sext_i16(i16* %ptr) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_dup_elt1_mem_v8i16_sext_i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movswl (%rdi), %eax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: vpbroadcastw %eax, %xmm0
@@ -2480,7 +2480,7 @@ define <8 x i16> @insert_dup_elt1_mem_v8i16_sext_i16(i16* %ptr) {
define <8 x i16> @insert_dup_elt3_mem_v8i16_sext_i16(i16* %ptr) {
; SSE2-LABEL: insert_dup_elt3_mem_v8i16_sext_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movswl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
@@ -2489,28 +2489,28 @@ define <8 x i16> @insert_dup_elt3_mem_v8i16_sext_i16(i16* %ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: insert_dup_elt3_mem_v8i16_sext_i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movswl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_elt3_mem_v8i16_sext_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movswl (%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_dup_elt3_mem_v8i16_sext_i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movswl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_elt3_mem_v8i16_sext_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movswl (%rdi), %eax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
@@ -2518,7 +2518,7 @@ define <8 x i16> @insert_dup_elt3_mem_v8i16_sext_i16(i16* %ptr) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_dup_elt3_mem_v8i16_sext_i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movswl (%rdi), %eax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: vpbroadcastw %eax, %xmm0
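For orientation before the next file's hunks: every change in this commit is the same mechanical rewrite of the AsmPrinter's verbose basic-block comment from "# BB#0:" to "# %bb.0:" inside FileCheck assertions. A minimal standalone test in the new format — a sketch for illustration only; the function name, RUN line, and file are hypothetical and not part of this commit — would look like:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

; Illustrative sketch, not from this commit: the identity function lowers to a
; bare retq (the argument arrives and is returned in xmm0), and the entry
; block's verbose-asm comment now prints as "# %bb.0:" rather than "# BB#0:".
define <8 x i16> @identity_v8i16(<8 x i16> %a) {
; CHECK-LABEL: identity_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  ret <8 x i16> %a
}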
diff --git a/test/CodeGen/X86/vector-shuffle-256-v16.ll b/test/CodeGen/X86/vector-shuffle-256-v16.ll
index 7ef5bee5420..11f25a2d687 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -5,14 +5,14 @@
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -21,7 +21,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1]
@@ -29,7 +29,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -39,7 +39,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1]
@@ -47,7 +47,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -57,7 +57,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1]
@@ -65,7 +65,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -75,7 +75,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1]
@@ -83,7 +83,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,8,9]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -93,7 +93,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1]
@@ -101,7 +101,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,10,11,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -111,7 +111,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -119,7 +119,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,12,13,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -129,7 +129,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -137,7 +137,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,14,15,0,1,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -147,7 +147,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,0,1,0,1,0,1,0,1,2,3]
@@ -157,7 +157,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm1
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
@@ -167,7 +167,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -177,7 +177,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,0,1,0,1,0,1,6,7,0,1]
@@ -187,7 +187,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,0,255,255,u,u,u,u,u,u,u,u,u,u,u,u,255,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@@ -195,7 +195,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -205,7 +205,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,0,1,0,1,10,11,0,1,0,1]
@@ -215,14 +215,14 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -232,7 +232,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,0,1,14,15,0,1,0,1,0,1]
@@ -242,14 +242,14 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,0,11,0,0,0,0,0,0,0,0,0,0,0]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -259,7 +259,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1]
@@ -269,13 +269,13 @@ define <16 x i16> @shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,12,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -285,7 +285,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1]
@@ -295,13 +295,13 @@ define <16 x i16> @shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,13,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -311,7 +311,7 @@ define <16 x i16> @shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -321,13 +321,13 @@ define <16 x i16> @shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -337,7 +337,7 @@ define <16 x i16> @shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -347,13 +347,13 @@ define <16 x i16> @shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movl $15, %eax
; AVX512VL-NEXT: vmovd %eax, %xmm1
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
@@ -364,7 +364,7 @@ define <16 x i16> @shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -374,7 +374,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2OR512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
; AVX2OR512VL-NEXT: retq
@@ -384,7 +384,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_0
define <16 x i16> @shuffle_v16i16_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,7,7,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -394,7 +394,7 @@ define <16 x i16> @shuffle_v16i16_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_15:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,7,7,7,8,9,10,11,15,15,15,15]
; AVX2OR512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
; AVX2OR512VL-NEXT: retq
@@ -404,7 +404,7 @@ define <16 x i16> @shuffle_v16i16_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_1
define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -414,7 +414,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
; AVX2OR512VL-NEXT: retq
@@ -424,7 +424,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_1
define <16 x i16> @shuffle_v16i16_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -434,7 +434,7 @@ define <16 x i16> @shuffle_v16i16_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_15:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,7,7,7,8,9,10,11,15,15,15,15]
; AVX2OR512VL-NEXT: retq
@@ -444,7 +444,7 @@ define <16 x i16> @shuffle_v16i16_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_1
define <16 x i16> @shuffle_v16i16_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,2,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,6]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -454,7 +454,7 @@ define <16 x i16> @shuffle_v16i16_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,2,2,4,5,6,7,8,8,10,10,12,13,14,15]
; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,6,8,9,10,11,12,12,14,14]
; AVX2OR512VL-NEXT: retq
@@ -464,7 +464,7 @@ define <16 x i16> @shuffle_v16i16_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_1
define <16 x i16> @shuffle_v16i16_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[1,1,3,3,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,7,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -474,7 +474,7 @@ define <16 x i16> @shuffle_v16i16_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15]
; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,5,7,7,8,9,10,11,13,13,15,15]
; AVX2OR512VL-NEXT: retq
@@ -484,13 +484,13 @@ define <16 x i16> @shuffle_v16i16_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_1
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_01_00_00_00_00_00_00_00_01_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_01_00_00_00_00_00_00_00_01_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_01_00_00_00_00_00_00_00_01_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -500,13 +500,13 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_01_00_00_00_00_00_00_00_01_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_02_00_00_00_00_00_00_00_02_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_02_00_00_00_00_00_00_00_02_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_02_00_00_00_00_00_00_00_02_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -516,13 +516,13 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_02_00_00_00_00_00_00_00_02_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_03_00_00_00_00_00_00_00_03_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_03_00_00_00_00_00_00_00_03_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_03_00_00_00_00_00_00_00_03_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -532,13 +532,13 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_03_00_00_00_00_00_00_00_03_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_00_04_00_00_00_00_00_00_00_04_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_04_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_04_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -548,13 +548,13 @@ define <16 x i16> @shuffle_v16i16_00_00_00_04_00_00_00_00_00_00_00_04_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_00_05_00_00_00_00_00_00_00_05_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_05_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_05_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -564,13 +564,13 @@ define <16 x i16> @shuffle_v16i16_00_00_05_00_00_00_00_00_00_00_05_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_06_00_00_00_00_00_00_00_06_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_06_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_06_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -580,13 +580,13 @@ define <16 x i16> @shuffle_v16i16_00_06_00_00_00_00_00_00_00_06_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_07_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_07_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_07_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -596,7 +596,7 @@ define <16 x i16> @shuffle_v16i16_07_00_00_00_00_00_00_00_07_00_00_00_00_00_00_0
define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0]
; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
@@ -604,7 +604,7 @@ define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_3
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
@@ -613,7 +613,7 @@ define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_3
define <16 x i16> @shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0]
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
@@ -621,7 +621,7 @@ define <16 x i16> @shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_15:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7],ymm1[8],ymm0[9],ymm1[10],ymm0[11],ymm1[12],ymm0[13],ymm1[14],ymm0[15]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
@@ -630,7 +630,7 @@ define <16 x i16> @shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_1
define <16 x i16> @shuffle_v16i16_00_01_18_19_04_05_22_23_08_09_26_27_12_13_30_31(<16 x i16> %a, <16 x i16> %b) {
; ALL-LABEL: shuffle_v16i16_00_01_18_19_04_05_22_23_08_09_26_27_12_13_30_31:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 4, i32 5, i32 22, i32 23, i32 8, i32 9, i32 26, i32 27, i32 12, i32 13, i32 30, i32 31>
@@ -639,12 +639,12 @@ define <16 x i16> @shuffle_v16i16_00_01_18_19_04_05_22_23_08_09_26_27_12_13_30_3
define <16 x i16> @shuffle_v16i16_16_17_18_19_04_05_06_07_24_25_26_27_12_13_14_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_16_17_18_19_04_05_06_07_24_25_26_27_12_13_14_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_16_17_18_19_04_05_06_07_24_25_26_27_12_13_14_15:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15>
@@ -653,7 +653,7 @@ define <16 x i16> @shuffle_v16i16_16_17_18_19_04_05_06_07_24_25_26_27_12_13_14_1
define <16 x i16> @shuffle_v16i16_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0]
; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
@@ -661,13 +661,13 @@ define <16 x i16> @shuffle_v16i16_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_3
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movw $-32768, %ax # imm = 0x8000
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
@@ -678,7 +678,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_3
define <16 x i16> @shuffle_v16i16_16_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_16_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
@@ -686,13 +686,13 @@ define <16 x i16> @shuffle_v16i16_16_01_02_03_04_05_06_07_08_09_10_11_12_13_14_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_16_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_16_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movw $1, %ax
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
@@ -703,7 +703,7 @@ define <16 x i16> @shuffle_v16i16_16_01_02_03_04_05_06_07_08_09_10_11_12_13_14_1
define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,0,65535,0,65535,0,65535,0,0,65535,0,65535,0,65535,0,65535]
; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
@@ -711,13 +711,13 @@ define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,0,0,255,255,0,0,255,255,0,0,255,255,0,0,0,0,255,255,0,0,255,255,0,0,255,255,0,0,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movw $21930, %ax # imm = 0x55AA
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
@@ -728,7 +728,7 @@ define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_1
define <16 x i16> @shuffle_v16i16_16_01_18_03_20_05_22_07_08_25_10_27_12_29_14_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_08_25_10_27_12_29_14_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [0,65535,0,65535,0,65535,0,65535,65535,0,65535,0,65535,0,65535,0]
; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
@@ -736,13 +736,13 @@ define <16 x i16> @shuffle_v16i16_16_01_18_03_20_05_22_07_08_25_10_27_12_29_14_3
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_08_25_10_27_12_29_14_31:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,255,0,0,255,255,0,0,255,255,0,0,255,255,255,255,0,0,255,255,0,0,255,255,0,0,255,255,0,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_08_25_10_27_12_29_14_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movw $-21931, %ax # imm = 0xAA55
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
@@ -753,7 +753,7 @@ define <16 x i16> @shuffle_v16i16_16_01_18_03_20_05_22_07_08_25_10_27_12_29_14_3
define <16 x i16> @shuffle_v16i16_00_01_18_19_20_21_06_07_08_09_26_27_12_13_30_31(<16 x i16> %a, <16 x i16> %b) {
; ALL-LABEL: shuffle_v16i16_00_01_18_19_20_21_06_07_08_09_26_27_12_13_30_31:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 20, i32 21, i32 6, i32 7, i32 8, i32 9, i32 26, i32 27, i32 12, i32 13, i32 30, i32 31>
@@ -762,14 +762,14 @@ define <16 x i16> @shuffle_v16i16_00_01_18_19_20_21_06_07_08_09_26_27_12_13_30_3
define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_00_16_00_16_00_16_00_16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_00_16_00_16_00_16_00_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_00_16_00_16_00_16_00_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2OR512VL-NEXT: vpbroadcastd %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -779,7 +779,7 @@ define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_00_16_00_16_00_16_00_1
define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_24(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_24:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
@@ -790,7 +790,7 @@ define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_24:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,0,1,1,4,4,5,5]
@@ -798,7 +798,7 @@ define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_24:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,16,0,16,0,16,0,16,8,24,8,24,8,24,8,24]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -808,7 +808,7 @@ define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_2
define <16 x i16> @shuffle_v16i16_16_16_16_16_04_05_06_07_24_24_24_24_12_13_14_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_16_16_16_16_04_05_06_07_24_24_24_24_12_13_14_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
@@ -819,13 +819,13 @@ define <16 x i16> @shuffle_v16i16_16_16_16_16_04_05_06_07_24_24_24_24_12_13_14_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_16_16_16_16_04_05_06_07_24_24_24_24_12_13_14_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_16_16_16_16_04_05_06_07_24_24_24_24_12_13_14_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,20,21,22,23,8,8,8,8,28,29,30,31]
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -836,7 +836,7 @@ define <16 x i16> @shuffle_v16i16_16_16_16_16_04_05_06_07_24_24_24_24_12_13_14_1
define <16 x i16> @shuffle_v16i16_19_18_17_16_07_06_05_04_27_26_25_24_15_14_13_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_19_18_17_16_07_06_05_04_27_26_25_24_15_14_13_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -851,14 +851,14 @@ define <16 x i16> @shuffle_v16i16_19_18_17_16_07_06_05_04_27_26_25_24_15_14_13_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_19_18_17_16_07_06_05_04_27_26_25_24_15_14_13_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_19_18_17_16_07_06_05_04_27_26_25_24_15_14_13_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [3,2,1,0,23,22,21,20,11,10,9,8,31,30,29,28]
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -869,7 +869,7 @@ define <16 x i16> @shuffle_v16i16_19_18_17_16_07_06_05_04_27_26_25_24_15_14_13_1
define <16 x i16> @shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
@@ -881,7 +881,7 @@ define <16 x i16> @shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_08:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12]
@@ -889,7 +889,7 @@ define <16 x i16> @shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [3,2,1,0,19,18,17,16,11,10,9,8,27,26,25,24]
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -900,7 +900,7 @@ define <16 x i16> @shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_01_00_08_08_08_08_08_08_09_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_01_00_08_08_08_08_08_08_09_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -909,7 +909,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_01_00_08_08_08_08_08_08_09_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_01_00_08_08_08_08_08_08_09_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1,16,17,16,17,16,17,16,17,16,17,16,17,18,19,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 9, i32 8>
@@ -918,7 +918,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_01_00_08_08_08_08_08_08_09_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_08_08_08_10_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_08_08_08_10_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -927,7 +927,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_08_08_08_10_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_08_08_08_10_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1,16,17,16,17,16,17,16,17,16,17,20,21,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 10, i32 8, i32 8>
@@ -936,7 +936,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_08_08_08_10_08_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_08_11_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_08_11_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -945,7 +945,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_08_11_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_08_11_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1,16,17,16,17,16,17,16,17,22,23,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 11, i32 8, i32 8, i32 8>
@@ -954,7 +954,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_08_11_08_08_0
define <16 x i16> @shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_12_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_12_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -963,7 +963,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_12_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_12_08_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1,16,17,16,17,16,17,24,25,16,17,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 4, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 12, i32 8, i32 8, i32 8, i32 8>
@@ -972,7 +972,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_12_08_08_08_0
define <16 x i16> @shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_13_08_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_13_08_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -981,7 +981,7 @@ define <16 x i16> @shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_13_08_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_13_08_08_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1,16,17,16,17,26,27,16,17,16,17,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 13, i32 8, i32 8, i32 8, i32 8, i32 8>
@@ -990,7 +990,7 @@ define <16 x i16> @shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_13_08_08_08_08_0
define <16 x i16> @shuffle_v16i16_00_06_00_00_00_00_00_00_08_14_08_08_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_06_00_00_00_00_00_00_08_14_08_08_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -999,7 +999,7 @@ define <16 x i16> @shuffle_v16i16_00_06_00_00_00_00_00_00_08_14_08_08_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_06_00_00_00_00_00_00_08_14_08_08_08_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1,16,17,28,29,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 14, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
@@ -1008,7 +1008,7 @@ define <16 x i16> @shuffle_v16i16_00_06_00_00_00_00_00_00_08_14_08_08_08_08_08_0
define <16 x i16> @shuffle_v16i16_07_00_00_00_00_00_00_00_15_08_08_08_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_07_00_00_00_00_00_00_00_15_08_08_08_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1017,7 +1017,7 @@ define <16 x i16> @shuffle_v16i16_07_00_00_00_00_00_00_00_15_08_08_08_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_07_00_00_00_00_00_00_00_15_08_08_08_08_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1,30,31,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 15, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
@@ -1026,7 +1026,7 @@ define <16 x i16> @shuffle_v16i16_07_00_00_00_00_00_00_00_15_08_08_08_08_08_08_0
define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_08_24_09_25_10_26_11_27(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_08_24_09_25_10_26_11_27:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
@@ -1035,7 +1035,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_08_24_09_25_10_26_11_2
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_08_24_09_25_10_26_11_27:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
@@ -1044,7 +1044,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_08_24_09_25_10_26_11_2
define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_23_12_28_13_29_14_30_15_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_20_05_21_06_22_07_23_12_28_13_29_14_30_15_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -1053,7 +1053,7 @@ define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_23_12_28_13_29_14_30_15_3
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_04_20_05_21_06_22_07_23_12_28_13_29_14_30_15_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -1062,7 +1062,7 @@ define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_23_12_28_13_29_14_30_15_3
define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_12_28_13_29_14_30_15_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_12_28_13_29_14_30_15_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -1071,14 +1071,14 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_12_28_13_29_14_30_15_3
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_12_28_13_29_14_30_15_31:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,0,1,u,u,2,3,u,u,4,5,u,u,6,7,u,u,24,25,u,u,26,27,u,u,28,29,u,u,30,31]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,u,u,2,3,u,u,4,5,u,u,6,7,u,u,24,25,u,u,26,27,u,u,28,29,u,u,30,31,u,u]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_12_28_13_29_14_30_15_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,16,1,17,2,18,3,19,12,28,13,29,14,30,15,31]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1088,7 +1088,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_12_28_13_29_14_30_15_3
define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_23_08_24_09_25_10_26_11_27(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_20_05_21_06_22_07_23_08_24_09_25_10_26_11_27:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
@@ -1097,14 +1097,14 @@ define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_23_08_24_09_25_10_26_11_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_20_05_21_06_22_07_23_08_24_09_25_10_26_11_27:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,8,9,u,u,10,11,u,u,12,13,u,u,14,15,u,u,16,17,u,u,18,19,u,u,20,21,u,u,22,23]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,u,u,10,11,u,u,12,13,u,u,14,15,u,u,16,17,u,u,18,19,u,u,20,21,u,u,22,23,u,u]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_20_05_21_06_22_07_23_08_24_09_25_10_26_11_27:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [4,20,5,21,6,22,7,23,8,24,9,25,10,26,11,27]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1114,7 +1114,7 @@ define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_23_08_24_09_25_10_26_11_2
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_01_00_08_09_08_08_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_01_00_08_09_08_08_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -1122,7 +1122,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_01_00_08_09_08_08_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_01_00_08_09_08_08_08_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1,16,17,18,19,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 8, i32 9, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
@@ -1131,7 +1131,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_01_00_08_09_08_08_08_08_08_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_10_08_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_10_08_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,4,5,0,1,0,1,0,1,0,1,0,1]
@@ -1139,7 +1139,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_10_08_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_10_08_08_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1,16,17,16,17,20,21,16,17,16,17,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 0, i32 8, i32 8, i32 10, i32 8, i32 8, i32 8, i32 8, i32 8>
@@ -1148,7 +1148,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_02_00_00_08_08_10_08_08_08_08_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_11_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_11_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,6,7,0,1,0,1,0,1,0,1]
@@ -1156,7 +1156,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_11_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_11_08_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1,16,17,16,17,16,17,22,23,16,17,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 11, i32 8, i32 8, i32 8, i32 8>
@@ -1165,7 +1165,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_03_00_00_00_08_08_08_11_08_08_08_0
define <16 x i16> @shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_08_12_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_08_12_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,0,1,0,1,0,1]
@@ -1173,7 +1173,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_08_12_08_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_08_12_08_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,24,25,16,17,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 4, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 12, i32 8, i32 8, i32 8>
@@ -1182,7 +1182,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_04_00_00_00_00_08_08_08_08_12_08_08_0
define <16 x i16> @shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_08_08_08_13_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_08_08_08_13_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,10,11,0,1,0,1]
@@ -1190,7 +1190,7 @@ define <16 x i16> @shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_08_08_08_13_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_08_08_08_13_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,26,27,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 13, i32 8, i32 8>
@@ -1199,7 +1199,7 @@ define <16 x i16> @shuffle_v16i16_00_00_05_00_00_00_00_00_08_08_08_08_08_13_08_0
define <16 x i16> @shuffle_v16i16_00_06_00_00_00_00_00_00_08_08_08_08_08_08_14_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_06_00_00_00_00_00_00_08_08_08_08_08_08_14_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,12,13,0,1]
@@ -1207,7 +1207,7 @@ define <16 x i16> @shuffle_v16i16_00_06_00_00_00_00_00_00_08_08_08_08_08_08_14_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_06_00_00_00_00_00_00_08_08_08_08_08_08_14_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,28,29,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 14, i32 8>
@@ -1216,7 +1216,7 @@ define <16 x i16> @shuffle_v16i16_00_06_00_00_00_00_00_00_08_08_08_08_08_08_14_0
define <16 x i16> @shuffle_v16i16_07_00_00_00_00_00_00_00_08_08_08_08_08_08_08_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_07_00_00_00_00_00_00_00_08_08_08_08_08_08_08_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,14,15]
@@ -1224,7 +1224,7 @@ define <16 x i16> @shuffle_v16i16_07_00_00_00_00_00_00_00_08_08_08_08_08_08_08_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_07_00_00_00_00_00_00_00_08_08_08_08_08_08_08_15:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,30,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 15>
@@ -1233,7 +1233,7 @@ define <16 x i16> @shuffle_v16i16_07_00_00_00_00_00_00_00_08_08_08_08_08_08_08_1
define <16 x i16> @shuffle_v16i16_00_00_02_02_04_04_06_06_14_14_12_12_10_10_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_02_02_04_04_06_06_14_14_12_12_10_10_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,2,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,6]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1242,7 +1242,7 @@ define <16 x i16> @shuffle_v16i16_00_00_02_02_04_04_06_06_14_14_12_12_10_10_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_00_02_02_04_04_06_06_14_14_12_12_10_10_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13,28,29,28,29,24,25,24,25,20,21,20,21,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 14, i32 14, i32 12, i32 12, i32 10, i32 10, i32 8, i32 8>
@@ -1251,7 +1251,7 @@ define <16 x i16> @shuffle_v16i16_00_00_02_02_04_04_06_06_14_14_12_12_10_10_08_0
define <16 x i16> @shuffle_v16i16_04_04_04_04_00_00_00_00_08_08_08_08_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_04_04_04_00_00_00_00_08_08_08_08_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[8,9,8,9,8,9,8,9,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -1260,7 +1260,7 @@ define <16 x i16> @shuffle_v16i16_04_04_04_04_00_00_00_00_08_08_08_08_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_04_04_04_04_00_00_00_00_08_08_08_08_12_12_12_12:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,8,9,8,9,8,9,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,24,25,24,25,24,25,24,25]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12>
@@ -1269,7 +1269,7 @@ define <16 x i16> @shuffle_v16i16_04_04_04_04_00_00_00_00_08_08_08_08_12_12_12_1
define <16 x i16> @shuffle_v16i16_00_uu_uu_00_00_00_00_00_08_08_uu_uu_08_08_14_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_uu_uu_00_00_00_00_00_08_08_uu_uu_08_08_14_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1278,7 +1278,7 @@ define <16 x i16> @shuffle_v16i16_00_uu_uu_00_00_00_00_00_08_08_uu_uu_08_08_14_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_uu_uu_00_00_00_00_00_08_08_uu_uu_08_08_14_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,u,u,u,u,0,1,0,1,0,1,0,1,0,1,16,17,16,17,u,u,u,u,16,17,16,17,28,29,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 undef, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 undef, i32 undef, i32 8, i32 8, i32 14, i32 8>
@@ -1287,7 +1287,7 @@ define <16 x i16> @shuffle_v16i16_00_uu_uu_00_00_00_00_00_08_08_uu_uu_08_08_14_0
define <16 x i16> @shuffle_v16i16_07_uu_00_00_00_00_00_00_08_08_uu_uu_08_08_08_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_07_uu_00_00_00_00_00_00_08_08_uu_uu_08_08_08_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[14,15,2,3,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,4,5,6,7,0,1,0,1,0,1,14,15]
@@ -1295,7 +1295,7 @@ define <16 x i16> @shuffle_v16i16_07_uu_00_00_00_00_00_00_08_08_uu_uu_08_08_08_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_07_uu_00_00_00_00_00_00_08_08_uu_uu_08_08_08_15:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,15,u,u,0,1,0,1,0,1,0,1,0,1,0,1,16,17,16,17,u,u,u,u,16,17,16,17,16,17,30,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 7, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 undef, i32 undef, i32 8, i32 8, i32 8, i32 15>
@@ -1304,7 +1304,7 @@ define <16 x i16> @shuffle_v16i16_07_uu_00_00_00_00_00_00_08_08_uu_uu_08_08_08_1
define <16 x i16> @shuffle_v16i16_00_uu_uu_02_04_04_uu_06_14_14_uu_12_10_10_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_uu_uu_02_04_04_uu_06_14_14_uu_12_10_10_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,1,2,2,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,6]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1313,7 +1313,7 @@ define <16 x i16> @shuffle_v16i16_00_uu_uu_02_04_04_uu_06_14_14_uu_12_10_10_08_0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_uu_uu_02_04_04_uu_06_14_14_uu_12_10_10_08_08:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,u,u,u,u,4,5,8,9,8,9,u,u,12,13,28,29,28,29,u,u,24,25,20,21,20,21,16,17,16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 undef, i32 undef, i32 2, i32 4, i32 4, i32 undef, i32 6, i32 14, i32 14, i32 undef, i32 12, i32 10, i32 10, i32 8, i32 8>
@@ -1322,7 +1322,7 @@ define <16 x i16> @shuffle_v16i16_00_uu_uu_02_04_04_uu_06_14_14_uu_12_10_10_08_0
define <16 x i16> @shuffle_v16i16_04_04_04_04_uu_uu_uu_uu_08_08_08_uu_uu_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_04_04_04_uu_uu_uu_uu_08_08_08_uu_uu_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1332,7 +1332,7 @@ define <16 x i16> @shuffle_v16i16_04_04_04_04_uu_uu_uu_uu_08_08_08_uu_uu_12_12_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_04_04_04_04_uu_uu_uu_uu_08_08_08_uu_uu_12_12_12:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,8,9,8,9,8,9,u,u,u,u,u,u,u,u,16,17,16,17,16,17,u,u,u,u,24,25,24,25,24,25]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 undef, i32 undef, i32 undef, i32 undef, i32 8, i32 8, i32 8, i32 undef, i32 undef, i32 12, i32 12, i32 12>
@@ -1341,7 +1341,7 @@ define <16 x i16> @shuffle_v16i16_04_04_04_04_uu_uu_uu_uu_08_08_08_uu_uu_12_12_1
define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_20(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_20:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
@@ -1350,14 +1350,14 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_20:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_20:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,4,4,4,4,16,16,16,16,20,20,20,20]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1367,7 +1367,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_2
define <16 x i16> @shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
@@ -1377,14 +1377,14 @@ define <16 x i16> @shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,12,12,12,12,16,16,16,16,20,20,20,20]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1394,7 +1394,7 @@ define <16 x i16> @shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_2
define <16 x i16> @shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_28(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_28:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
@@ -1405,14 +1405,14 @@ define <16 x i16> @shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_28:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_28:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,12,12,12,12,24,24,24,24,28,28,28,28]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1422,7 +1422,7 @@ define <16 x i16> @shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_2
define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_28(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_28:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
@@ -1432,14 +1432,14 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_28:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_28:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,4,4,4,4,24,24,24,24,28,28,28,28]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1449,21 +1449,21 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_2
define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1473,7 +1473,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_2
define <16 x i16> @shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
@@ -1481,7 +1481,7 @@ define <16 x i16> @shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_2
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 24>
@@ -1490,7 +1490,7 @@ define <16 x i16> @shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_2
define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_zz(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
@@ -1498,7 +1498,7 @@ define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_z
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0>
@@ -1507,7 +1507,7 @@ define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_z
define <16 x i16> @shuffle_v16i16_06_07_01_02_07_00_04_05_14_15_09_10_15_08_12_13(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_06_07_01_02_07_00_04_05_14_15_09_10_15_08_12_13:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [12,13,14,15,2,3,4,5,14,15,0,1,8,9,10,11]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1516,7 +1516,7 @@ define <16 x i16> @shuffle_v16i16_06_07_01_02_07_00_04_05_14_15_09_10_15_08_12_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_06_07_01_02_07_00_04_05_14_15_09_10_15_08_12_13:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[12,13,14,15,2,3,4,5,14,15,0,1,8,9,10,11,28,29,30,31,18,19,20,21,30,31,16,17,24,25,26,27]
; AVX2OR512VL-NEXT: retq
%1 = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> <i32 6, i32 7, i32 1, i32 2, i32 7, i32 0, i32 4, i32 5, i32 14, i32 15, i32 9, i32 10, i32 15, i32 8, i32 12, i32 13>
@@ -1529,7 +1529,7 @@ define <16 x i16> @shuffle_v16i16_06_07_01_02_07_00_04_05_14_15_09_10_15_08_12_1
define <16 x i16> @shuffle_v16i16_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
@@ -1537,7 +1537,7 @@ define <16 x i16> @shuffle_v16i16_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpslld $16, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 16, i32 0, i32 16, i32 2, i32 16, i32 4, i32 16, i32 6, i32 16, i32 8, i32 16, i32 10, i32 16, i32 12, i32 16, i32 14>
@@ -1546,7 +1546,7 @@ define <16 x i16> @shuffle_v16i16_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_1
define <16 x i16> @shuffle_v16i16_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $48, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
@@ -1554,7 +1554,7 @@ define <16 x i16> @shuffle_v16i16_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsllq $48, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 16, i32 0, i32 16, i32 16, i32 16, i32 4, i32 16, i32 16, i32 16, i32 8, i32 16, i32 16, i32 16, i32 12>
@@ -1563,7 +1563,7 @@ define <16 x i16> @shuffle_v16i16_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_1
define <16 x i16> @shuffle_v16i16_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -1571,7 +1571,7 @@ define <16 x i16> @shuffle_v16i16_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_z
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 1, i32 16, i32 3, i32 16, i32 5, i32 16, i32 7, i32 16, i32 9, i32 16, i32 11, i32 16, i32 13, i32 16, i32 15, i32 16>
@@ -1580,14 +1580,14 @@ define <16 x i16> @shuffle_v16i16_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_z
define <16 x i16> @shuffle_v16i16_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 2, i32 3, i32 16, i32 16, i32 6, i32 7, i32 16, i32 16, i32 10, i32 11, i32 16, i32 16, i32 14, i32 15, i32 16, i32 16>
@@ -1596,7 +1596,7 @@ define <16 x i16> @shuffle_v16i16_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_z
define <16 x i16> @shuffle_v16i16_16_zz_zz_zz_17_zz_zz_zz_18_zz_zz_zz_19_zz_zz_zz(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_16_zz_zz_zz_17_zz_zz_zz_18_zz_zz_zz_19_zz_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -1604,7 +1604,7 @@ define <16 x i16> @shuffle_v16i16_16_zz_zz_zz_17_zz_zz_zz_18_zz_zz_zz_19_zz_zz_z
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_16_zz_zz_zz_17_zz_zz_zz_18_zz_zz_zz_19_zz_zz_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 16, i32 0, i32 0, i32 0, i32 17, i32 0, i32 0, i32 0, i32 18, i32 0, i32 0, i32 0, i32 19, i32 0, i32 0, i32 0>
@@ -1613,7 +1613,7 @@ define <16 x i16> @shuffle_v16i16_16_zz_zz_zz_17_zz_zz_zz_18_zz_zz_zz_19_zz_zz_z
define <16 x i16> @shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_22_zz(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_22_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1621,7 +1621,7 @@ define <16 x i16> @shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_22_z
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_22_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 16, i32 0, i32 17, i32 0, i32 18, i32 0, i32 19, i32 0, i32 20, i32 0, i32 21, i32 0, i32 22, i32 0, i32 23, i32 0>
@@ -1630,7 +1630,7 @@ define <16 x i16> @shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_22_z
define <16 x i16> @shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
@@ -1640,7 +1640,7 @@ define <16 x i16> @shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_z
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
@@ -1652,7 +1652,7 @@ define <16 x i16> @shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_z
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [28,1,2,3,29,5,6,7,30,9,10,11,31,13,14,15]
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpermt2w %ymm0, %ymm2, %ymm1
@@ -1664,7 +1664,7 @@ define <16 x i16> @shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_z
define <16 x i16> @shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_14(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_14:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
@@ -1673,7 +1673,7 @@ define <16 x i16> @shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_14:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13],ymm1[30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 23, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 31, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -1682,7 +1682,7 @@ define <16 x i16> @shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_1
define <16 x i16> @shuffle_v16i16_01_02_03_04_05_06_07_16_09_10_11_12_13_14_15_24(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_16_09_10_11_12_13_14_15_24:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1]
@@ -1691,7 +1691,7 @@ define <16 x i16> @shuffle_v16i16_01_02_03_04_05_06_07_16_09_10_11_12_13_14_15_2
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_16_09_10_11_12_13_14_15_24:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1],ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24>
@@ -1700,7 +1700,7 @@ define <16 x i16> @shuffle_v16i16_01_02_03_04_05_06_07_16_09_10_11_12_13_14_15_2
define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_00_25_26_27_28_29_30_31_8(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_00_25_26_27_28_29_30_31_8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1]
@@ -1709,7 +1709,7 @@ define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_00_25_26_27_28_29_30_31_8
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_00_25_26_27_28_29_30_31_8:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 00, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 8>
@@ -1718,7 +1718,7 @@ define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_00_25_26_27_28_29_30_31_8
define <16 x i16> @shuffle_v16i16_07_16_17_18_19_20_21_22_15_24_25_26_27_28_29_30(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_07_16_17_18_19_20_21_22_15_24_25_26_27_28_29_30:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
@@ -1727,7 +1727,7 @@ define <16 x i16> @shuffle_v16i16_07_16_17_18_19_20_21_22_15_24_25_26_27_28_29_3
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_07_16_17_18_19_20_21_22_15_24_25_26_27_28_29_30:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13],ymm0[30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27,28,29]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
@@ -1736,20 +1736,20 @@ define <16 x i16> @shuffle_v16i16_07_16_17_18_19_20_21_22_15_24_25_26_27_28_29_3
define <16 x i16> @shuffle_v16i16_01_02_03_04_05_06_07_00_17_18_19_20_21_22_23_16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_00_17_18_19_20_21_22_23_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_00_17_18_19_20_21_22_23_16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,18,19,20,21,22,23,24,25,26,27,28,29,30,31,16,17]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_00_17_18_19_20_21_22_23_16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,3,4,5,6,7,0,17,18,19,20,21,22,23,16]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1759,20 +1759,20 @@ define <16 x i16> @shuffle_v16i16_01_02_03_04_05_06_07_00_17_18_19_20_21_22_23_1
define <16 x i16> @shuffle_v16i16_07_00_01_02_03_04_05_06_23_16_17_18_19_20_21_22(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_07_00_01_02_03_04_05_06_23_16_17_18_19_20_21_22:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_07_00_01_02_03_04_05_06_23_16_17_18_19_20_21_22:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,30,31,16,17,18,19,20,21,22,23,24,25,26,27,28,29]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_07_00_01_02_03_04_05_06_23_16_17_18_19_20_21_22:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [7,0,1,2,3,4,5,6,23,16,17,18,19,20,21,22]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1782,7 +1782,7 @@ define <16 x i16> @shuffle_v16i16_07_00_01_02_03_04_05_06_23_16_17_18_19_20_21_2
define <16 x i16> @shuffle_v16i16_00_01_00_01_02_03_02_11_08_09_08_09_10_11_10_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_01_00_01_02_03_02_11_08_09_08_09_10_11_10_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,0,2,4,5,6,7]
@@ -1792,7 +1792,7 @@ define <16 x i16> @shuffle_v16i16_00_01_00_01_02_03_02_11_08_09_08_09_10_11_10_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_01_00_01_02_03_02_11_08_09_08_09_10_11_10_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
@@ -1801,7 +1801,7 @@ define <16 x i16> @shuffle_v16i16_00_01_00_01_02_03_02_11_08_09_08_09_10_11_10_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_01_00_01_02_03_02_11_08_09_08_09_10_11_10_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,0,1,2,3,2,11,8,9,8,9,10,11,10,11]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -1811,7 +1811,7 @@ define <16 x i16> @shuffle_v16i16_00_01_00_01_02_03_02_11_08_09_08_09_10_11_10_1
define <16 x i16> @shuffle_v16i16_06_07_04_05_02_03_00_09_14_15_12_13_10_11_08_09(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_06_07_04_05_02_03_00_09_14_15_12_13_10_11_08_09:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
@@ -1820,7 +1820,7 @@ define <16 x i16> @shuffle_v16i16_06_07_04_05_02_03_00_09_14_15_12_13_10_11_08_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_06_07_04_05_02_03_00_09_14_15_12_13_10_11_08_09:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -1828,7 +1828,7 @@ define <16 x i16> @shuffle_v16i16_06_07_04_05_02_03_00_09_14_15_12_13_10_11_08_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_06_07_04_05_02_03_00_09_14_15_12_13_10_11_08_09:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [6,7,4,5,2,3,0,9,14,15,12,13,10,11,8,9]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -1838,7 +1838,7 @@ define <16 x i16> @shuffle_v16i16_06_07_04_05_02_03_00_09_14_15_12_13_10_11_08_0
define <16 x i16> @shuffle_v16i16_04_05_06_07_16_17_18_27_12_13_14_15_24_25_26_27(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_05_06_07_16_17_18_27_12_13_14_15_24_25_26_27:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
@@ -1849,7 +1849,7 @@ define <16 x i16> @shuffle_v16i16_04_05_06_07_16_17_18_27_12_13_14_15_24_25_26_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_05_06_07_16_17_18_27_12_13_14_15_24_25_26_27:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -1858,7 +1858,7 @@ define <16 x i16> @shuffle_v16i16_04_05_06_07_16_17_18_27_12_13_14_15_24_25_26_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_05_06_07_16_17_18_27_12_13_14_15_24_25_26_27:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [4,5,6,7,16,17,18,27,12,13,14,15,24,25,26,27]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -1868,7 +1868,7 @@ define <16 x i16> @shuffle_v16i16_04_05_06_07_16_17_18_27_12_13_14_15_24_25_26_2
define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,2,3]
@@ -1878,7 +1878,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_08:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
@@ -1888,7 +1888,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -1898,7 +1898,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_0
define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_12_08_08_08_08_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_12_08_08_08_08_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -1910,7 +1910,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_12_08_08_08_08_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_12_08_08_08_08_12_12_12_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
@@ -1920,7 +1920,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_12_08_08_08_08_12_12_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_12_08_08_08_08_12_12_12_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,0,0,4,4,4,12,8,8,8,8,12,12,12,12]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -1930,7 +1930,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_12_08_08_08_08_12_12_12_1
define <16 x i16> @shuffle_v16i16_uu_00_uu_01_uu_02_uu_11_uu_08_uu_09_uu_10_uu_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_00_uu_01_uu_02_uu_11_uu_08_uu_09_uu_10_uu_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1940,7 +1940,7 @@ define <16 x i16> @shuffle_v16i16_uu_00_uu_01_uu_02_uu_11_uu_08_uu_09_uu_10_uu_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_uu_00_uu_01_uu_02_uu_11_uu_08_uu_09_uu_10_uu_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <255,255,255,255,255,255,0,0,u,u,u,u,u,u,u,u,255,255,255,255,255,255,255,255,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -1948,7 +1948,7 @@ define <16 x i16> @shuffle_v16i16_uu_00_uu_01_uu_02_uu_11_uu_08_uu_09_uu_10_uu_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_uu_00_uu_01_uu_02_uu_11_uu_08_uu_09_uu_10_uu_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = <u,0,u,1,u,2,u,11,u,8,u,9,u,10,u,11>
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -1958,7 +1958,7 @@ define <16 x i16> @shuffle_v16i16_uu_00_uu_01_uu_02_uu_11_uu_08_uu_09_uu_10_uu_1
define <16 x i16> @shuffle_v16i16_uu_04_uu_05_uu_06_uu_15_uu_12_uu_13_uu_14_uu_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_04_uu_05_uu_06_uu_15_uu_12_uu_13_uu_14_uu_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1968,14 +1968,14 @@ define <16 x i16> @shuffle_v16i16_uu_04_uu_05_uu_06_uu_15_uu_12_uu_13_uu_14_uu_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_uu_04_uu_05_uu_06_uu_15_uu_12_uu_13_uu_14_uu_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_uu_04_uu_05_uu_06_uu_15_uu_12_uu_13_uu_14_uu_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = <u,4,u,5,u,6,u,15,u,12,u,13,u,14,u,15>
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -1985,7 +1985,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_uu_05_uu_06_uu_15_uu_12_uu_13_uu_14_uu_1
define <16 x i16> @shuffle_v16i16_03_01_02_00_06_07_04_13_11_09_10_08_14_15_12_13(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_03_01_02_00_06_07_04_13_11_09_10_08_14_15_12_13:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
@@ -1996,7 +1996,7 @@ define <16 x i16> @shuffle_v16i16_03_01_02_00_06_07_04_13_11_09_10_08_14_15_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_03_01_02_00_06_07_04_13_11_09_10_08_14_15_12_13:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -2005,7 +2005,7 @@ define <16 x i16> @shuffle_v16i16_03_01_02_00_06_07_04_13_11_09_10_08_14_15_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_03_01_02_00_06_07_04_13_11_09_10_08_14_15_12_13:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [3,1,2,0,6,7,4,13,11,9,10,8,14,15,12,13]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2015,7 +2015,7 @@ define <16 x i16> @shuffle_v16i16_03_01_02_00_06_07_04_13_11_09_10_08_14_15_12_1
define <16 x i16> @shuffle_v16i16_04_04_04_04_00_00_00_08_12_12_12_12_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_04_04_04_00_00_00_08_12_12_12_12_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,0,1,0,1,0,1,14,15]
@@ -2025,7 +2025,7 @@ define <16 x i16> @shuffle_v16i16_04_04_04_04_00_00_00_08_12_12_12_12_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_04_04_04_00_00_00_08_12_12_12_12_08_08_08_08:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,8,9,8,9,8,9,0,1,0,1,0,1,0,1,24,25,24,25,24,25,24,25,16,17,16,17,16,17,16,17]
@@ -2034,7 +2034,7 @@ define <16 x i16> @shuffle_v16i16_04_04_04_04_00_00_00_08_12_12_12_12_08_08_08_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_04_04_04_00_00_00_08_12_12_12_12_08_08_08_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,0,0,0,8,12,12,12,12,8,8,8,8]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2044,7 +2044,7 @@ define <16 x i16> @shuffle_v16i16_04_04_04_04_00_00_00_08_12_12_12_12_08_08_08_0
define <16 x i16> @shuffle_v16i16_02_03_00_01_06_07_04_13_10_11_08_09_14_15_12_13(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_02_03_00_01_06_07_04_13_10_11_08_09_14_15_12_13:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2]
@@ -2053,7 +2053,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_01_06_07_04_13_10_11_08_09_14_15_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_02_03_00_01_06_07_04_13_10_11_08_09_14_15_12_13:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -2061,7 +2061,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_01_06_07_04_13_10_11_08_09_14_15_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_02_03_00_01_06_07_04_13_10_11_08_09_14_15_12_13:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,0,1,6,7,4,13,10,11,8,9,14,15,12,13]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2071,7 +2071,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_01_06_07_04_13_10_11_08_09_14_15_12_1
define <16 x i16> @shuffle_v16i16_02_03_00_02_06_07_04_13_10_11_08_10_14_15_12_13(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_02_03_00_02_06_07_04_13_10_11_08_10_14_15_12_13:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
@@ -2082,7 +2082,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_02_06_07_04_13_10_11_08_10_14_15_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_02_03_00_02_06_07_04_13_10_11_08_10_14_15_12_13:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <255,255,u,u,255,255,255,255,255,255,0,0,255,255,255,255,255,255,u,u,255,255,255,255,255,255,255,255,255,255,255,255>
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -2091,7 +2091,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_02_06_07_04_13_10_11_08_10_14_15_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_02_03_00_02_06_07_04_13_10_11_08_10_14_15_12_13:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,0,2,6,7,4,13,10,11,8,10,14,15,12,13]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2101,7 +2101,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_02_06_07_04_13_10_11_08_10_14_15_12_1
define <16 x i16> @shuffle_v16i16_02_03_00_01_06_07_04_15_10_11_08_09_14_15_12_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_02_03_00_01_06_07_04_15_10_11_08_09_14_15_12_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
@@ -2111,7 +2111,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_01_06_07_04_15_10_11_08_09_14_15_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_02_03_00_01_06_07_04_15_10_11_08_09_14_15_12_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,2,3,5,4,6,7]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15]
@@ -2120,7 +2120,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_01_06_07_04_15_10_11_08_09_14_15_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_02_03_00_01_06_07_04_15_10_11_08_09_14_15_12_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,0,1,6,7,4,15,10,11,8,9,14,15,12,15]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2130,7 +2130,7 @@ define <16 x i16> @shuffle_v16i16_02_03_00_01_06_07_04_15_10_11_08_09_14_15_12_1
define <16 x i16> @shuffle_v16i16_07_05_06_04_03_01_02_08_15_13_14_12_11_09_10_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_07_05_06_04_03_01_02_08_15_13_14_12_11_09_10_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [14,15,10,11,12,13,8,9,6,7,2,3,4,5,0,1]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
@@ -2140,7 +2140,7 @@ define <16 x i16> @shuffle_v16i16_07_05_06_04_03_01_02_08_15_13_14_12_11_09_10_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_07_05_06_04_03_01_02_08_15_13_14_12_11_09_10_08:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -2148,7 +2148,7 @@ define <16 x i16> @shuffle_v16i16_07_05_06_04_03_01_02_08_15_13_14_12_11_09_10_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_07_05_06_04_03_01_02_08_15_13_14_12_11_09_10_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [7,5,6,4,3,1,2,8,15,13,14,12,11,9,10,8]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2158,7 +2158,7 @@ define <16 x i16> @shuffle_v16i16_07_05_06_04_03_01_02_08_15_13_14_12_11_09_10_0
define <16 x i16> @shuffle_v16i16_01_00_05_04_05_04_01_08_09_08_13_12_13_12_09_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_01_00_05_04_05_04_01_08_09_08_13_12_13_12_09_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,10,11,8,9,10,11,8,9,2,3,2,3]
@@ -2168,7 +2168,7 @@ define <16 x i16> @shuffle_v16i16_01_00_05_04_05_04_01_08_09_08_13_12_13_12_09_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_01_00_05_04_05_04_01_08_09_08_13_12_13_12_09_08:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,10,11,8,9,10,11,8,9,2,3,0,1,18,19,16,17,26,27,24,25,26,27,24,25,18,19,16,17]
@@ -2177,7 +2177,7 @@ define <16 x i16> @shuffle_v16i16_01_00_05_04_05_04_01_08_09_08_13_12_13_12_09_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_01_00_05_04_05_04_01_08_09_08_13_12_13_12_09_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,5,4,5,4,1,8,9,8,13,12,13,12,9,8]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2187,7 +2187,7 @@ define <16 x i16> @shuffle_v16i16_01_00_05_04_05_04_01_08_09_08_13_12_13_12_09_0
define <16 x i16> @shuffle_v16i16_05_04_01_00_05_04_01_08_13_12_09_08_13_12_09_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_05_04_01_00_05_04_01_08_13_12_09_08_13_12_09_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,8,9,2,3,0,1,10,11,8,9,2,3,2,3]
@@ -2197,7 +2197,7 @@ define <16 x i16> @shuffle_v16i16_05_04_01_00_05_04_01_08_13_12_09_08_13_12_09_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_05_04_01_00_05_04_01_08_13_12_09_08_13_12_09_08:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[10,11,8,9,2,3,0,1,10,11,8,9,2,3,0,1,26,27,24,25,18,19,16,17,26,27,24,25,18,19,16,17]
@@ -2206,7 +2206,7 @@ define <16 x i16> @shuffle_v16i16_05_04_01_00_05_04_01_08_13_12_09_08_13_12_09_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_05_04_01_00_05_04_01_08_13_12_09_08_13_12_09_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [5,4,1,0,5,4,1,8,13,12,9,8,13,12,9,8]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2216,7 +2216,7 @@ define <16 x i16> @shuffle_v16i16_05_04_01_00_05_04_01_08_13_12_09_08_13_12_09_0
define <16 x i16> @shuffle_v16i16_05_04_01_00_01_00_05_12_13_12_09_08_09_08_13_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_05_04_01_00_01_00_05_12_13_12_09_08_09_08_13_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,8,9,2,3,0,1,2,3,0,1,10,11,2,3]
@@ -2226,7 +2226,7 @@ define <16 x i16> @shuffle_v16i16_05_04_01_00_01_00_05_12_13_12_09_08_09_08_13_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_05_04_01_00_01_00_05_12_13_12_09_08_09_08_13_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[10,11,8,9,2,3,0,1,2,3,0,1,10,11,8,9,26,27,24,25,18,19,16,17,18,19,16,17,26,27,24,25]
@@ -2235,7 +2235,7 @@ define <16 x i16> @shuffle_v16i16_05_04_01_00_01_00_05_12_13_12_09_08_09_08_13_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_05_04_01_00_01_00_05_12_13_12_09_08_09_08_13_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [5,4,1,0,1,0,5,12,13,12,9,8,9,8,13,12]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2245,7 +2245,7 @@ define <16 x i16> @shuffle_v16i16_05_04_01_00_01_00_05_12_13_12_09_08_09_08_13_1
define <16 x i16> @shuffle_v16i16_00_04_04_00_00_04_04_08_08_12_12_08_08_12_12_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_04_04_00_00_04_04_08_08_12_12_08_08_12_12_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,0,1,8,9,8,9,2,3]
@@ -2255,7 +2255,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_00_04_04_08_08_12_12_08_08_12_12_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_04_04_00_00_04_04_08_08_12_12_08_08_12_12_08:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,8,9,8,9,0,1,0,1,8,9,8,9,0,1,16,17,24,25,24,25,16,17,16,17,24,25,24,25,16,17]
@@ -2264,7 +2264,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_00_04_04_08_08_12_12_08_08_12_12_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_04_04_00_00_04_04_08_08_12_12_08_08_12_12_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4,4,0,0,4,4,8,8,12,12,8,8,12,12,8]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2274,7 +2274,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_00_04_04_08_08_12_12_08_08_12_12_0
define <16 x i16> @shuffle_v16i16_04_00_00_04_04_00_00_12_12_08_08_12_12_08_08_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_00_00_04_04_00_00_12_12_08_08_12_12_08_08_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,0,1,0,1,8,9,8,9,0,1,0,1,2,3]
@@ -2284,7 +2284,7 @@ define <16 x i16> @shuffle_v16i16_04_00_00_04_04_00_00_12_12_08_08_12_12_08_08_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_00_00_04_04_00_00_12_12_08_08_12_12_08_08_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,0,1,0,1,8,9,8,9,0,1,0,1,8,9,24,25,16,17,16,17,24,25,24,25,16,17,16,17,24,25]
@@ -2293,7 +2293,7 @@ define <16 x i16> @shuffle_v16i16_04_00_00_04_04_00_00_12_12_08_08_12_12_08_08_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_00_00_04_04_00_00_12_12_08_08_12_12_08_08_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [4,0,0,4,4,0,0,12,12,8,8,12,12,8,8,12]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2303,7 +2303,7 @@ define <16 x i16> @shuffle_v16i16_04_00_00_04_04_00_00_12_12_08_08_12_12_08_08_1
define <16 x i16> @shuffle_v16i16_02_06_04_00_05_01_07_11_10_14_12_08_13_09_15_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_02_06_04_00_05_01_07_11_10_14_12_08_13_09_15_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [4,5,12,13,8,9,0,1,10,11,2,3,14,15,6,7]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
@@ -2313,7 +2313,7 @@ define <16 x i16> @shuffle_v16i16_02_06_04_00_05_01_07_11_10_14_12_08_13_09_15_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_02_06_04_00_05_01_07_11_10_14_12_08_13_09_15_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -2321,7 +2321,7 @@ define <16 x i16> @shuffle_v16i16_02_06_04_00_05_01_07_11_10_14_12_08_13_09_15_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_02_06_04_00_05_01_07_11_10_14_12_08_13_09_15_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [2,6,4,0,5,1,7,11,10,14,12,8,13,9,15,11]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2331,7 +2331,7 @@ define <16 x i16> @shuffle_v16i16_02_06_04_00_05_01_07_11_10_14_12_08_13_09_15_1
define <16 x i16> @shuffle_v16i16_02_00_06_04_05_01_07_11_10_08_14_12_13_09_15_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_02_00_06_04_05_01_07_11_10_08_14_12_13_09_15_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [4,5,0,1,12,13,8,9,10,11,2,3,14,15,6,7]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
@@ -2341,7 +2341,7 @@ define <16 x i16> @shuffle_v16i16_02_00_06_04_05_01_07_11_10_08_14_12_13_09_15_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_02_00_06_04_05_01_07_11_10_08_14_12_13_09_15_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -2349,7 +2349,7 @@ define <16 x i16> @shuffle_v16i16_02_00_06_04_05_01_07_11_10_08_14_12_13_09_15_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_02_00_06_04_05_01_07_11_10_08_14_12_13_09_15_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [2,0,6,4,5,1,7,11,10,8,14,12,13,9,15,11]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2359,7 +2359,7 @@ define <16 x i16> @shuffle_v16i16_02_00_06_04_05_01_07_11_10_08_14_12_13_09_15_1
define <16 x i16> @shuffle_v16i16_02_06_04_00_01_03_07_13_10_14_12_08_09_11_15_13(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_02_06_04_00_01_03_07_13_10_14_12_08_09_11_15_13:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [4,5,12,13,8,9,0,1,2,3,6,7,14,15,10,11]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
@@ -2369,7 +2369,7 @@ define <16 x i16> @shuffle_v16i16_02_06_04_00_01_03_07_13_10_14_12_08_09_11_15_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_02_06_04_00_01_03_07_13_10_14_12_08_09_11_15_13:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -2377,7 +2377,7 @@ define <16 x i16> @shuffle_v16i16_02_06_04_00_01_03_07_13_10_14_12_08_09_11_15_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_02_06_04_00_01_03_07_13_10_14_12_08_09_11_15_13:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [2,6,4,0,1,3,7,13,10,14,12,8,9,11,15,13]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2387,7 +2387,7 @@ define <16 x i16> @shuffle_v16i16_02_06_04_00_01_03_07_13_10_14_12_08_09_11_15_1
define <16 x i16> @shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [12,13,12,13,14,15,10,11,2,3,12,13,8,9,6,7]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
@@ -2397,14 +2397,14 @@ define <16 x i16> @shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[12,13,12,13,14,15,10,11,2,3,12,13,8,9,6,7,28,29,28,29,30,31,26,27,18,19,28,29,24,25,22,23]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [6,6,7,5,1,6,4,11,14,14,15,13,9,14,12,11]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2414,7 +2414,7 @@ define <16 x i16> @shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_1
define <16 x i16> @shuffle_v16i16_00_00_04_04_04_04_04_12_08_08_12_12_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_00_04_04_04_04_04_12_08_08_12_12_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,8,9,8,9,8,9,8,9,8,9,14,15]
@@ -2424,7 +2424,7 @@ define <16 x i16> @shuffle_v16i16_00_00_04_04_04_04_04_12_08_08_12_12_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_00_04_04_04_04_04_12_08_08_12_12_12_12_12_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,8,9,8,9,8,9,8,9,8,9,8,9,16,17,16,17,24,25,24,25,24,25,24,25,24,25,24,25]
@@ -2433,7 +2433,7 @@ define <16 x i16> @shuffle_v16i16_00_00_04_04_04_04_04_12_08_08_12_12_12_12_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_00_04_04_04_04_04_12_08_08_12_12_12_12_12_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,4,4,4,4,4,12,8,8,12,12,12,12,12,12]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2443,7 +2443,7 @@ define <16 x i16> @shuffle_v16i16_00_00_04_04_04_04_04_12_08_08_12_12_12_12_12_1
define <16 x i16> @shuffle_v16i16_04_04_00_00_04_04_04_12_12_12_08_08_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_04_00_00_04_04_04_12_12_12_08_08_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,0,1,0,1,8,9,8,9,8,9,14,15]
@@ -2453,7 +2453,7 @@ define <16 x i16> @shuffle_v16i16_04_04_00_00_04_04_04_12_12_12_08_08_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_04_00_00_04_04_04_12_12_12_08_08_12_12_12_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,8,9,0,1,0,1,8,9,8,9,8,9,8,9,24,25,24,25,16,17,16,17,24,25,24,25,24,25,24,25]
@@ -2462,7 +2462,7 @@ define <16 x i16> @shuffle_v16i16_04_04_00_00_04_04_04_12_12_12_08_08_12_12_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_04_00_00_04_04_04_12_12_12_08_08_12_12_12_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,0,0,4,4,4,12,12,12,8,8,12,12,12,12]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2472,7 +2472,7 @@ define <16 x i16> @shuffle_v16i16_04_04_00_00_04_04_04_12_12_12_08_08_12_12_12_1
define <16 x i16> @shuffle_v16i16_00_04_04_00_04_04_04_12_08_12_12_08_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_04_04_00_04_04_04_12_08_12_12_08_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,14,15]
@@ -2482,7 +2482,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_04_04_04_12_08_12_12_08_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_04_04_00_04_04_04_12_08_12_12_08_12_12_12_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,8,9,16,17,24,25,24,25,16,17,24,25,24,25,24,25,24,25]
@@ -2491,7 +2491,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_04_04_04_12_08_12_12_08_12_12_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_04_04_00_04_04_04_12_08_12_12_08_12_12_12_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4,4,0,4,4,4,12,8,12,12,8,12,12,12,12]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2501,7 +2501,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_04_04_04_12_08_12_12_08_12_12_12_1
define <16 x i16> @shuffle_v16i16_00_04_04_00_00_00_00_08_08_12_12_08_08_08_08_08(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_04_04_00_00_00_00_08_08_12_12_08_08_08_08_08:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,0,1,0,1,0,1,14,15]
@@ -2511,7 +2511,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_00_00_00_08_08_12_12_08_08_08_08_0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_04_04_00_00_00_00_08_08_12_12_08_08_08_08_08:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,8,9,8,9,0,1,0,1,0,1,0,1,0,1,16,17,24,25,24,25,16,17,16,17,16,17,16,17,16,17]
@@ -2520,7 +2520,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_00_00_00_08_08_12_12_08_08_08_08_0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_04_04_00_00_00_00_08_08_12_12_08_08_08_08_08:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4,4,0,0,0,0,8,8,12,12,8,8,8,8,8]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2530,7 +2530,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_00_00_00_08_08_12_12_08_08_08_08_0
define <16 x i16> @shuffle_v16i16_00_04_04_00_04_05_06_15_08_12_12_08_12_13_14_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_04_04_00_04_05_06_15_08_12_12_08_12_13_14_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
@@ -2541,7 +2541,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_04_05_06_15_08_12_12_08_12_13_14_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_04_04_00_04_05_06_15_08_12_12_08_12_13_14_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,0,4,5,6,7,8,10,10,8,12,13,14,15]
@@ -2550,7 +2550,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_04_05_06_15_08_12_12_08_12_13_14_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_04_04_00_04_05_06_15_08_12_12_08_12_13_14_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4,4,0,4,5,6,15,8,12,12,8,12,13,14,15]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2560,7 +2560,7 @@ define <16 x i16> @shuffle_v16i16_00_04_04_00_04_05_06_15_08_12_12_08_12_13_14_1
define <16 x i16> @shuffle_v16i16_00_uu_04_04_04_04_04_12_08_uu_12_12_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_uu_04_04_04_04_04_12_08_uu_12_12_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,8,9,8,9,8,9,8,9,14,15]
@@ -2570,7 +2570,7 @@ define <16 x i16> @shuffle_v16i16_00_uu_04_04_04_04_04_12_08_uu_12_12_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_uu_04_04_04_04_04_12_08_uu_12_12_12_12_12_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,8,9,8,9,8,9,8,9,8,9,8,9,16,17,18,19,24,25,24,25,24,25,24,25,24,25,24,25]
@@ -2579,7 +2579,7 @@ define <16 x i16> @shuffle_v16i16_00_uu_04_04_04_04_04_12_08_uu_12_12_12_12_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_uu_04_04_04_04_04_12_08_uu_12_12_12_12_12_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = <0,u,4,4,4,4,4,12,8,u,12,12,12,12,12,12>
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2589,7 +2589,7 @@ define <16 x i16> @shuffle_v16i16_00_uu_04_04_04_04_04_12_08_uu_12_12_12_12_12_1
define <16 x i16> @shuffle_v16i16_04_04_uu_00_04_04_04_12_12_12_uu_08_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_04_uu_00_04_04_04_12_12_12_uu_08_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,0,1,8,9,8,9,8,9,14,15]
@@ -2599,7 +2599,7 @@ define <16 x i16> @shuffle_v16i16_04_04_uu_00_04_04_04_12_12_12_uu_08_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_04_uu_00_04_04_04_12_12_12_uu_08_12_12_12_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,8,9,8,9,0,1,8,9,8,9,8,9,8,9,24,25,24,25,24,25,16,17,24,25,24,25,24,25,24,25]
@@ -2608,7 +2608,7 @@ define <16 x i16> @shuffle_v16i16_04_04_uu_00_04_04_04_12_12_12_uu_08_12_12_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_04_uu_00_04_04_04_12_12_12_uu_08_12_12_12_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = <4,4,u,0,4,4,4,12,12,12,u,8,12,12,12,12>
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2618,7 +2618,7 @@ define <16 x i16> @shuffle_v16i16_04_04_uu_00_04_04_04_12_12_12_uu_08_12_12_12_1
define <16 x i16> @shuffle_v16i16_uu_04_04_00_04_04_04_12_uu_12_12_08_12_12_12_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_04_04_00_04_04_04_12_uu_12_12_08_12_12_12_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllq $48, %xmm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,14,15]
@@ -2628,7 +2628,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_04_00_04_04_04_12_uu_12_12_08_12_12_12_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_uu_04_04_00_04_04_04_12_uu_12_12_08_12_12_12_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,8,9,8,9,0,1,8,9,8,9,8,9,8,9,16,17,24,25,24,25,16,17,24,25,24,25,24,25,24,25]
@@ -2637,7 +2637,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_04_00_04_04_04_12_uu_12_12_08_12_12_12_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_uu_04_04_00_04_04_04_12_uu_12_12_08_12_12_12_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = <u,4,4,0,4,4,4,12,u,12,12,8,12,12,12,12>
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2647,7 +2647,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_04_00_04_04_04_12_uu_12_12_08_12_12_12_1
define <16 x i16> @shuffle_v16i16_00_01_02_07_uu_uu_uu_uu_08_09_10_15_uu_uu_uu_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_01_02_07_uu_uu_uu_uu_08_09_10_15_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,14,15,4,5,14,15,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -2656,12 +2656,12 @@ define <16 x i16> @shuffle_v16i16_00_01_02_07_uu_uu_uu_uu_08_09_10_15_uu_uu_uu_u
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_01_02_07_uu_uu_uu_uu_08_09_10_15_uu_uu_uu_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,14,15,4,5,14,15,12,13,14,15,16,17,18,19,20,21,30,31,20,21,30,31,28,29,30,31]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_01_02_07_uu_uu_uu_uu_08_09_10_15_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,7,6,7,8,9,10,11,12,15,14,15]
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -2672,7 +2672,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_07_uu_uu_uu_uu_08_09_10_15_uu_uu_uu_u
define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_04_05_06_11_uu_uu_uu_uu_12_13_14_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_uu_uu_uu_04_05_06_11_uu_uu_uu_uu_12_13_14_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,1,0,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
@@ -2681,13 +2681,13 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_04_05_06_11_uu_uu_uu_uu_12_13_14_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_uu_uu_uu_uu_04_05_06_11_uu_uu_uu_uu_12_13_14_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[12,13,6,7,4,5,6,7,8,9,10,11,12,13,6,7,28,29,22,23,20,21,22,23,24,25,26,27,28,29,22,23]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_uu_uu_uu_uu_04_05_06_11_uu_uu_uu_uu_12_13_14_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = <u,u,u,u,4,5,6,11,u,u,u,u,12,13,14,11>
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2697,7 +2697,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_04_05_06_11_uu_uu_uu_uu_12_13_14_1
define <16 x i16> @shuffle_v16i16_04_05_06_03_uu_uu_uu_uu_12_13_14_11_uu_uu_uu_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_05_06_03_uu_uu_uu_uu_12_13_14_11_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [8,9,10,11,12,13,6,7,8,9,10,11,0,1,2,3]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -2706,12 +2706,12 @@ define <16 x i16> @shuffle_v16i16_04_05_06_03_uu_uu_uu_uu_12_13_14_11_uu_uu_uu_u
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_05_06_03_uu_uu_uu_uu_12_13_14_11_uu_uu_uu_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,6,7,8,9,10,11,0,1,2,3,24,25,26,27,28,29,22,23,24,25,26,27,16,17,18,19]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_05_06_03_uu_uu_uu_uu_12_13_14_11_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,2,0,7,5,6,4]
; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,3,2,3,4,5,6,7,8,11,10,11,12,13,14,15]
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
@@ -2722,7 +2722,7 @@ define <16 x i16> @shuffle_v16i16_04_05_06_03_uu_uu_uu_uu_12_13_14_11_uu_uu_uu_u
define <16 x i16> @shuffle_v16i16_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[1,1,2,3,4,5,6,7]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4,5,6,7]
@@ -2730,7 +2730,7 @@ define <16 x i16> @shuffle_v16i16_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3],zero,zero,ymm0[4,5],zero,zero,ymm0[8,9,u,u,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 1, i32 16, i32 2, i32 16, i32 4, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -2739,7 +2739,7 @@ define <16 x i16> @shuffle_v16i16_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_1
define <16 x i16> @shuffle_v16i16_00_01_02_07_04_05_06_11_08_09_10_15_12_13_14_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_01_02_07_04_05_06_11_08_09_10_15_12_13_14_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,14,15,8,9,10,11,12,13,6,7]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
@@ -2749,7 +2749,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_07_04_05_06_11_08_09_10_15_12_13_14_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_01_02_07_04_05_06_11_08_09_10_15_12_13_14_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -2757,7 +2757,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_07_04_05_06_11_08_09_10_15_12_13_14_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_01_02_07_04_05_06_11_08_09_10_15_12_13_14_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,7,4,5,6,11,8,9,10,15,12,13,14,11]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2767,7 +2767,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_07_04_05_06_11_08_09_10_15_12_13_14_1
define <16 x i16> @shuffle_v16i16_04_05_06_03_00_01_02_15_12_13_14_11_08_09_10_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_05_06_03_00_01_02_15_12_13_14_11_08_09_10_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,0,1,2,3,4,5,2,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
@@ -2776,7 +2776,7 @@ define <16 x i16> @shuffle_v16i16_04_05_06_03_00_01_02_15_12_13_14_11_08_09_10_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_05_06_03_00_01_02_15_12_13_14_11_08_09_10_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[8,9,10,11,12,13,6,7,0,1,2,3,4,5,14,15,24,25,26,27,28,29,22,23,16,17,18,19,20,21,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -2784,7 +2784,7 @@ define <16 x i16> @shuffle_v16i16_04_05_06_03_00_01_02_15_12_13_14_11_08_09_10_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_05_06_03_00_01_02_15_12_13_14_11_08_09_10_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,6,3,0,1,2,15,12,13,14,11,8,9,10,15]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2794,7 +2794,7 @@ define <16 x i16> @shuffle_v16i16_04_05_06_03_00_01_02_15_12_13_14_11_08_09_10_1
define <16 x i16> @shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_13(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_13:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [6,7,14,15,2,3,0,1,4,5,14,15,6,7,10,11]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
@@ -2804,14 +2804,14 @@ define <16 x i16> @shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_13:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[6,7,14,15,2,3,0,1,4,5,14,15,6,7,10,11,22,23,30,31,18,19,16,17,20,21,30,31,22,23,26,27]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_13:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [3,7,1,0,2,7,3,13,11,15,9,8,10,15,11,13]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -2821,7 +2821,7 @@ define <16 x i16> @shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_1
define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_27_08_24_09_25_10_26_11_27(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_27_08_24_09_25_10_26_11_27:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
@@ -2832,7 +2832,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_27_08_24_09_25_10_26_11_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_27_08_24_09_25_10_26_11_27:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = <255,255,255,255,255,255,0,0,u,u,u,u,u,u,u,u,255,255,255,255,255,255,255,255,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
@@ -2840,7 +2840,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_27_08_24_09_25_10_26_11_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_27_08_24_09_25_10_26_11_27:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,16,1,17,2,18,3,27,8,24,9,25,10,26,11,27]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -2850,7 +2850,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_27_08_24_09_25_10_26_11_2
define <16 x i16> @shuffle_v16i16_00_20_01_21_02_22_03_31_08_28_09_29_10_30_11_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_20_01_21_02_22_03_31_08_28_09_29_10_30_11_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,0,1]
@@ -2862,7 +2862,7 @@ define <16 x i16> @shuffle_v16i16_00_20_01_21_02_22_03_31_08_28_09_29_10_30_11_3
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_20_01_21_02_22_03_31_08_28_09_29_10_30_11_31:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,2,3,10,11,4,5,12,13,6,7,14,15,16,17,24,25,18,19,26,27,20,21,28,29,22,23,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
@@ -2871,7 +2871,7 @@ define <16 x i16> @shuffle_v16i16_00_20_01_21_02_22_03_31_08_28_09_29_10_30_11_3
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_20_01_21_02_22_03_31_08_28_09_29_10_30_11_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,20,1,21,2,22,3,31,8,28,9,29,10,30,11,31]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -2881,7 +2881,7 @@ define <16 x i16> @shuffle_v16i16_00_20_01_21_02_22_03_31_08_28_09_29_10_30_11_3
define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_31_12_28_13_29_14_30_15_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_20_05_21_06_22_07_31_12_28_13_29_14_30_15_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -2892,7 +2892,7 @@ define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_31_12_28_13_29_14_30_15_3
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_20_05_21_06_22_07_31_12_28_13_29_14_30_15_31:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
@@ -2901,7 +2901,7 @@ define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_31_12_28_13_29_14_30_15_3
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_20_05_21_06_22_07_31_12_28_13_29_14_30_15_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [4,20,5,21,6,22,7,31,12,28,13,29,14,30,15,31]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -2911,7 +2911,7 @@ define <16 x i16> @shuffle_v16i16_04_20_05_21_06_22_07_31_12_28_13_29_14_30_15_3
define <16 x i16> @shuffle_v16i16_04_16_05_17_06_18_07_27_12_24_13_25_14_26_15_27(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_16_05_17_06_18_07_27_12_24_13_25_14_26_15_27:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
@@ -2923,7 +2923,7 @@ define <16 x i16> @shuffle_v16i16_04_16_05_17_06_18_07_27_12_24_13_25_14_26_15_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_16_05_17_06_18_07_27_12_24_13_25_14_26_15_27:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -2932,7 +2932,7 @@ define <16 x i16> @shuffle_v16i16_04_16_05_17_06_18_07_27_12_24_13_25_14_26_15_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_16_05_17_06_18_07_27_12_24_13_25_14_26_15_27:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [4,16,5,17,6,18,7,27,12,24,13,25,14,26,15,27]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -2942,7 +2942,7 @@ define <16 x i16> @shuffle_v16i16_04_16_05_17_06_18_07_27_12_24_13_25_14_26_15_2
define <16 x i16> @shuffle_v16i16_00_16_01_17_06_22_07_31_08_24_09_25_14_30_15_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_01_17_06_22_07_31_08_24_09_25_14_30_15_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,3,2,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
@@ -2958,7 +2958,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_06_22_07_31_08_24_09_25_14_30_15_3
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_16_01_17_06_22_07_31_08_24_09_25_14_30_15_31:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
; AVX2-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
@@ -2969,7 +2969,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_06_22_07_31_08_24_09_25_14_30_15_3
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_16_01_17_06_22_07_31_08_24_09_25_14_30_15_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,16,1,17,6,22,7,31,8,24,9,25,14,30,15,31]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -2979,7 +2979,7 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_06_22_07_31_08_24_09_25_14_30_15_3
define <16 x i16> @shuffle_v16i16_00_20_01_21_06_16_07_25_08_28_09_29_14_24_15_25(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_20_01_21_06_16_07_25_08_28_09_29_14_24_15_25:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,0,2,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
@@ -2993,7 +2993,7 @@ define <16 x i16> @shuffle_v16i16_00_20_01_21_06_16_07_25_08_28_09_29_14_24_15_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_20_01_21_06_16_07_25_08_28_09_29_14_24_15_25:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = <255,255,0,0,u,u,u,u,255,255,255,255,u,u,u,u,255,255,255,255,u,u,u,u,255,255,255,255,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
@@ -3004,7 +3004,7 @@ define <16 x i16> @shuffle_v16i16_00_20_01_21_06_16_07_25_08_28_09_29_14_24_15_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_20_01_21_06_16_07_25_08_28_09_29_14_24_15_25:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,20,1,21,6,16,7,25,8,28,9,29,14,24,15,25]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3014,7 +3014,7 @@ define <16 x i16> @shuffle_v16i16_00_20_01_21_06_16_07_25_08_28_09_29_14_24_15_2
define <16 x i16> @shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_26(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_26:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,0,1,12,13,10,11,8,9,10,11,12,13,10,11]
@@ -3028,7 +3028,7 @@ define <16 x i16> @shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_26:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = <255,255,255,255,0,0,255,255,u,u,u,u,u,u,u,u,255,255,255,255,255,255,255,255,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
@@ -3038,7 +3038,7 @@ define <16 x i16> @shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_26:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,0,17,16,3,2,19,26,9,8,25,24,11,10,27,26]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3048,7 +3048,7 @@ define <16 x i16> @shuffle_v16i16_01_00_17_16_03_02_19_26_09_08_25_24_11_10_27_2
define <16 x i16> @shuffle_v16i16_16_00_17_01_18_02_19_11_24_08_25_09_26_10_27_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_16_00_17_01_18_02_19_11_24_08_25_09_26_10_27_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
@@ -3059,7 +3059,7 @@ define <16 x i16> @shuffle_v16i16_16_00_17_01_18_02_19_11_24_08_25_09_26_10_27_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_16_00_17_01_18_02_19_11_24_08_25_09_26_10_27_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = <255,255,255,255,255,255,0,0,u,u,u,u,u,u,u,u,255,255,255,255,255,255,255,255,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
@@ -3067,7 +3067,7 @@ define <16 x i16> @shuffle_v16i16_16_00_17_01_18_02_19_11_24_08_25_09_26_10_27_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_16_00_17_01_18_02_19_11_24_08_25_09_26_10_27_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,16,1,17,2,18,3,27,8,24,9,25,10,26,11,27]
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -3078,7 +3078,7 @@ define <16 x i16> @shuffle_v16i16_16_00_17_01_18_02_19_11_24_08_25_09_26_10_27_1
define <16 x i16> @shuffle_v16i16_20_04_21_05_22_06_23_15_28_12_29_13_30_14_31_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_20_04_21_05_22_06_23_15_28_12_29_13_30_14_31_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -3089,7 +3089,7 @@ define <16 x i16> @shuffle_v16i16_20_04_21_05_22_06_23_15_28_12_29_13_30_14_31_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_20_04_21_05_22_06_23_15_28_12_29_13_30_14_31_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
@@ -3098,7 +3098,7 @@ define <16 x i16> @shuffle_v16i16_20_04_21_05_22_06_23_15_28_12_29_13_30_14_31_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_20_04_21_05_22_06_23_15_28_12_29_13_30_14_31_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [4,20,5,21,6,22,7,31,12,28,13,29,14,30,15,31]
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -3109,7 +3109,7 @@ define <16 x i16> @shuffle_v16i16_20_04_21_05_22_06_23_15_28_12_29_13_30_14_31_1
define <16 x i16> @shuffle_v16i16_00_02_01_03_20_22_21_31_08_10_09_11_28_30_29_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_02_01_03_20_22_21_31_08_10_09_11_28_30_29_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,1,3,4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
@@ -3124,7 +3124,7 @@ define <16 x i16> @shuffle_v16i16_00_02_01_03_20_22_21_31_08_10_09_11_28_30_29_3
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_02_01_03_20_22_21_31_08_10_09_11_28_30_29_31:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,2,1,3,4,5,6,7,8,10,9,11,12,13,14,15]
@@ -3134,7 +3134,7 @@ define <16 x i16> @shuffle_v16i16_00_02_01_03_20_22_21_31_08_10_09_11_28_30_29_3
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_02_01_03_20_22_21_31_08_10_09_11_28_30_29_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,1,3,20,22,21,31,8,10,9,11,28,30,29,31]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3144,7 +3144,7 @@ define <16 x i16> @shuffle_v16i16_00_02_01_03_20_22_21_31_08_10_09_11_28_30_29_3
define <16 x i16> @shuffle_v16i16_04_04_03_18_uu_uu_uu_uu_12_12_11_26_uu_uu_uu_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_04_04_03_18_uu_uu_uu_uu_12_12_11_26_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,0,3,2,4,5,6,7]
@@ -3157,14 +3157,14 @@ define <16 x i16> @shuffle_v16i16_04_04_03_18_uu_uu_uu_uu_12_12_11_26_uu_uu_uu_u
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_04_04_03_18_uu_uu_uu_uu_12_12_11_26_uu_uu_uu_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7,8,9],ymm1[10],ymm0[11,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,1,2,3,6,5,6,7]
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,3,2,4,5,6,7,8,8,11,10,12,13,14,15]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_04_04_03_18_uu_uu_uu_uu_12_12_11_26_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <4,4,3,18,u,u,u,u,12,12,11,26,u,u,u,u>
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3174,7 +3174,7 @@ define <16 x i16> @shuffle_v16i16_04_04_03_18_uu_uu_uu_uu_12_12_11_26_uu_uu_uu_u
define <16 x i16> @shuffle_v16i16_00_03_02_21_uu_uu_uu_uu_08_11_10_29_uu_uu_uu_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_03_02_21_uu_uu_uu_uu_08_11_10_29_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
@@ -3186,13 +3186,13 @@ define <16 x i16> @shuffle_v16i16_00_03_02_21_uu_uu_uu_uu_08_11_10_29_uu_uu_uu_u
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_03_02_21_uu_uu_uu_uu_08_11_10_29_uu_uu_uu_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3,16,17,22,23,20,21,26,27,16,17,26,27,16,17,18,19]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_03_02_21_uu_uu_uu_uu_08_11_10_29_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,3,2,21,u,u,u,u,8,11,10,29,u,u,u,u>
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3202,7 +3202,7 @@ define <16 x i16> @shuffle_v16i16_00_03_02_21_uu_uu_uu_uu_08_11_10_29_uu_uu_uu_u
define <16 x i16> @shuffle_v16i16_uu_uu_uu_21_uu_uu_uu_uu_uu_uu_uu_29_uu_uu_uu_uu(<16 x i16> %a, <16 x i16> %b) {
; ALL-LABEL: shuffle_v16i16_uu_uu_uu_21_uu_uu_uu_uu_uu_uu_uu_29_uu_uu_uu_uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm1[0,2,2,3,4,6,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 21, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 29, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -3211,7 +3211,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_21_uu_uu_uu_uu_uu_uu_uu_29_uu_uu_uu_u
define <16 x i16> @shuffle_v16i16_00_01_02_21_uu_uu_uu_uu_08_09_10_29_uu_uu_uu_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_01_02_21_uu_uu_uu_uu_08_09_10_29_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
@@ -3222,13 +3222,13 @@ define <16 x i16> @shuffle_v16i16_00_01_02_21_uu_uu_uu_uu_08_09_10_29_uu_uu_uu_u
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_01_02_21_uu_uu_uu_uu_08_09_10_29_uu_uu_uu_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7,8,9,10],ymm1[11],ymm0[12,13,14,15]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_01_02_21_uu_uu_uu_uu_08_09_10_29_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,1,2,21,u,u,u,u,8,9,10,29,u,u,u,u>
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3238,7 +3238,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_21_uu_uu_uu_uu_08_09_10_29_uu_uu_uu_u
define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_20_21_22_11_uu_uu_uu_uu_28_29_30_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_uu_uu_uu_20_21_22_11_uu_uu_uu_uu_28_29_30_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -3248,13 +3248,13 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_20_21_22_11_uu_uu_uu_uu_28_29_30_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_uu_uu_uu_uu_20_21_22_11_uu_uu_uu_uu_28_29_30_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,2]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5,6],ymm0[7],ymm1[8,9,10,11,12,13,14],ymm0[15]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_uu_uu_uu_uu_20_21_22_11_uu_uu_uu_uu_28_29_30_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <u,u,u,u,4,5,6,27,u,u,u,u,12,13,14,27>
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -3265,7 +3265,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_20_21_22_11_uu_uu_uu_uu_28_29_30_1
define <16 x i16> @shuffle_v16i16_20_21_22_03_uu_uu_uu_uu_28_29_30_11_uu_uu_uu_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_20_21_22_03_uu_uu_uu_uu_28_29_30_11_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
@@ -3276,13 +3276,13 @@ define <16 x i16> @shuffle_v16i16_20_21_22_03_uu_uu_uu_uu_28_29_30_11_uu_uu_uu_u
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_20_21_22_03_uu_uu_uu_uu_28_29_30_11_uu_uu_uu_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7,8,9,10],ymm0[11],ymm1[12,13,14,15]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_20_21_22_03_uu_uu_uu_uu_28_29_30_11_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <4,5,6,19,u,u,u,u,12,13,14,27,u,u,u,u>
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -3293,7 +3293,7 @@ define <16 x i16> @shuffle_v16i16_20_21_22_03_uu_uu_uu_uu_28_29_30_11_uu_uu_uu_u
define <16 x i16> @shuffle_v16i16_00_01_02_21_20_21_22_11_08_09_10_29_28_29_30_11(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_01_02_21_20_21_22_11_08_09_10_29_28_29_30_11:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[0,2,2,3]
@@ -3305,7 +3305,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_21_20_21_22_11_08_09_10_29_28_29_30_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_01_02_21_20_21_22_11_08_09_10_29_28_29_30_11:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <255,255,255,255,255,255,0,0,255,255,255,255,255,255,u,u,255,255,255,255,255,255,255,255,255,255,255,255,255,255,u,u>
@@ -3314,7 +3314,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_21_20_21_22_11_08_09_10_29_28_29_30_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_01_02_21_20_21_22_11_08_09_10_29_28_29_30_11:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,21,20,21,22,11,8,9,10,29,28,29,30,11]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3324,7 +3324,7 @@ define <16 x i16> @shuffle_v16i16_00_01_02_21_20_21_22_11_08_09_10_29_28_29_30_1
define <16 x i16> @shuffle_v16i16_00_17_02_03_20_21_22_15_08_25_10_11_28_29_30_15(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_17_02_03_20_21_22_15_08_25_10_11_28_29_30_15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4,5,6],xmm3[7]
@@ -3334,13 +3334,13 @@ define <16 x i16> @shuffle_v16i16_00_17_02_03_20_21_22_15_08_25_10_11_28_29_30_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_17_02_03_20_21_22_15_08_25_10_11_28_29_30_15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4,5,6],ymm0[7,8],ymm1[9],ymm0[10,11],ymm1[12,13,14],ymm0[15]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_00_17_02_03_20_21_22_15_08_25_10_11_28_29_30_15:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,17,2,3,20,21,22,15,8,25,10,11,28,29,30,15]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3350,7 +3350,7 @@ define <16 x i16> @shuffle_v16i16_00_17_02_03_20_21_22_15_08_25_10_11_28_29_30_1
define <16 x i16> @shuffle_v16i16_uu_uu_uu_01_uu_05_07_25_uu_uu_uu_09_uu_13_15_25(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_uu_uu_01_uu_05_07_25_uu_uu_uu_09_uu_13_15_25:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -3364,7 +3364,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_01_uu_05_07_25_uu_uu_uu_09_uu_13_15_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_uu_uu_uu_01_uu_05_07_25_uu_uu_uu_09_uu_13_15_25:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX2-NEXT: vpbroadcastd %xmm1, %ymm1
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,1,2,1,4,5,6,7,8,9,10,9,12,13,14,15]
@@ -3373,7 +3373,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_01_uu_05_07_25_uu_uu_uu_09_uu_13_15_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_uu_uu_uu_01_uu_05_07_25_uu_uu_uu_09_uu_13_15_25:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <u,u,u,1,u,5,7,25,u,u,u,9,u,13,15,25>
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3383,7 +3383,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_01_uu_05_07_25_uu_uu_uu_09_uu_13_15_2
define <16 x i16> @shuffle_v16i16_uu_uu_04_uu_16_18_20_uu_uu_uu_12_uu_24_26_28_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_uu_04_uu_16_18_20_uu_uu_uu_12_uu_24_26_28_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -3397,14 +3397,14 @@ define <16 x i16> @shuffle_v16i16_uu_uu_04_uu_16_18_20_uu_uu_uu_12_uu_24_26_28_u
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_uu_uu_04_uu_16_18_20_uu_uu_uu_12_uu_24_26_28_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5,16,17,20,21,20,21,22,23,16,17,20,21,24,25,20,21]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_uu_uu_04_uu_16_18_20_uu_uu_uu_12_uu_24_26_28_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <u,u,20,u,0,2,4,u,u,u,28,u,8,10,12,u>
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -3415,7 +3415,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_04_uu_16_18_20_uu_uu_uu_12_uu_24_26_28_u
define <16 x i16> @shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9]
@@ -3425,7 +3425,7 @@ define <16 x i16> @shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -3434,7 +3434,7 @@ define <16 x i16> @shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [21,22,23,0,1,2,3,12,29,30,31,8,9,10,11,12]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3444,7 +3444,7 @@ define <16 x i16> @shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_1
define <16 x i16> @shuffle_v16i16_uu_22_uu_uu_01_02_03_uu_uu_30_uu_uu_09_10_11_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_22_uu_uu_01_02_03_uu_uu_30_uu_uu_09_10_11_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9]
@@ -3453,7 +3453,7 @@ define <16 x i16> @shuffle_v16i16_uu_22_uu_uu_01_02_03_uu_uu_30_uu_uu_09_10_11_u
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_uu_22_uu_uu_01_02_03_uu_uu_30_uu_uu_09_10_11_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 22, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 30, i32 undef, i32 undef, i32 9, i32 10, i32 11, i32 undef>
@@ -3462,7 +3462,7 @@ define <16 x i16> @shuffle_v16i16_uu_22_uu_uu_01_02_03_uu_uu_30_uu_uu_09_10_11_u
define <16 x i16> @shuffle_v16i16_05_06_07_00_01_02_03_12_13_14_15_08_09_10_11_12(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_05_06_07_00_01_02_03_12_13_14_15_08_09_10_11_12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
@@ -3471,7 +3471,7 @@ define <16 x i16> @shuffle_v16i16_05_06_07_00_01_02_03_12_13_14_15_08_09_10_11_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_05_06_07_00_01_02_03_12_13_14_15_08_09_10_11_12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -3479,7 +3479,7 @@ define <16 x i16> @shuffle_v16i16_05_06_07_00_01_02_03_12_13_14_15_08_09_10_11_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_05_06_07_00_01_02_03_12_13_14_15_08_09_10_11_12:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [5,6,7,0,1,2,3,12,13,14,15,8,9,10,11,12]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -3489,7 +3489,7 @@ define <16 x i16> @shuffle_v16i16_05_06_07_00_01_02_03_12_13_14_15_08_09_10_11_1
define <16 x i16> @shuffle_v16i16_uu_06_uu_uu_01_02_03_uu_uu_14_uu_uu_09_10_11_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_06_uu_uu_01_02_03_uu_uu_14_uu_uu_09_10_11_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
@@ -3497,7 +3497,7 @@ define <16 x i16> @shuffle_v16i16_uu_06_uu_uu_01_02_03_uu_uu_14_uu_uu_09_10_11_u
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_uu_06_uu_uu_01_02_03_uu_uu_14_uu_uu_09_10_11_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 6, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 14, i32 undef, i32 undef, i32 9, i32 10, i32 11, i32 undef>
@@ -3506,7 +3506,7 @@ define <16 x i16> @shuffle_v16i16_uu_06_uu_uu_01_02_03_uu_uu_14_uu_uu_09_10_11_u
define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_01_02_03_uu_uu_uu_uu_uu_09_10_11_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_uu_uu_uu_01_02_03_uu_uu_uu_uu_uu_09_10_11_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
@@ -3514,7 +3514,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_01_02_03_uu_uu_uu_uu_uu_09_10_11_u
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_uu_uu_uu_uu_01_02_03_uu_uu_uu_uu_uu_09_10_11_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9],zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 9, i32 10, i32 11, i32 undef>
@@ -3523,7 +3523,7 @@ define <16 x i16> @shuffle_v16i16_uu_uu_uu_uu_01_02_03_uu_uu_uu_uu_uu_09_10_11_u
define <16 x i16> @shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_10(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_10:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5]
@@ -3535,7 +3535,7 @@ define <16 x i16> @shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_10:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -3544,7 +3544,7 @@ define <16 x i16> @shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_10:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [3,4,5,6,7,16,17,26,11,12,13,14,15,24,25,26]
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -3555,7 +3555,7 @@ define <16 x i16> @shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_1
define <16 x i16> @shuffle_v16i16_uu_20_21_22_uu_uu_01_uu_uu_28_29_30_uu_uu_09_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_20_21_22_uu_uu_01_uu_uu_28_29_30_uu_uu_09_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5]
@@ -3564,7 +3564,7 @@ define <16 x i16> @shuffle_v16i16_uu_20_21_22_uu_uu_01_uu_uu_28_29_30_uu_uu_09_u
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_uu_20_21_22_uu_uu_01_uu_uu_28_29_30_uu_uu_09_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5],ymm1[22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 20, i32 21, i32 22, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 28, i32 29, i32 30, i32 undef, i32 undef, i32 9, i32 undef>
@@ -3573,7 +3573,7 @@ define <16 x i16> @shuffle_v16i16_uu_20_21_22_uu_uu_01_uu_uu_28_29_30_uu_uu_09_u
define <16 x i16> @shuffle_v16i16_03_04_05_06_07_00_01_10_11_12_13_14_15_08_09_10(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_03_04_05_06_07_00_01_10_11_12_13_14_15_08_09_10:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
@@ -3582,7 +3582,7 @@ define <16 x i16> @shuffle_v16i16_03_04_05_06_07_00_01_10_11_12_13_14_15_08_09_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_03_04_05_06_07_00_01_10_11_12_13_14_15_08_09_10:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -3590,7 +3590,7 @@ define <16 x i16> @shuffle_v16i16_03_04_05_06_07_00_01_10_11_12_13_14_15_08_09_1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_03_04_05_06_07_00_01_10_11_12_13_14_15_08_09_10:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [3,4,5,6,7,0,1,10,11,12,13,14,15,8,9,10]
; AVX512VL-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
@@ -3600,7 +3600,7 @@ define <16 x i16> @shuffle_v16i16_03_04_05_06_07_00_01_10_11_12_13_14_15_08_09_1
define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_01_uu_uu_12_13_14_uu_uu_09_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_04_05_06_uu_uu_01_uu_uu_12_13_14_uu_uu_09_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
@@ -3608,7 +3608,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_01_uu_uu_12_13_14_uu_uu_09_u
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_uu_04_05_06_uu_uu_01_uu_uu_12_13_14_uu_uu_09_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 4, i32 5, i32 6, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 undef, i32 undef, i32 9, i32 undef>
@@ -3617,7 +3617,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_01_uu_uu_12_13_14_uu_uu_09_u
define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_uu_uu_uu_12_13_14_uu_uu_uu_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_04_05_06_uu_uu_uu_uu_uu_12_13_14_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
@@ -3625,7 +3625,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_uu_uu_uu_12_13_14_uu_uu_uu_u
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_uu_04_05_06_uu_uu_uu_uu_uu_12_13_14_uu_uu_uu_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 4, i32 5, i32 6, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -3634,7 +3634,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_uu_uu_uu_12_13_14_uu_uu_uu_u
define <16 x i16> @shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_26(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_26:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5]
@@ -3646,7 +3646,7 @@ define <16 x i16> @shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_26:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -3655,7 +3655,7 @@ define <16 x i16> @shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_26:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [3,4,5,6,7,16,17,26,11,12,13,14,15,24,25,26]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3665,7 +3665,7 @@ define <16 x i16> @shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_2
define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_17_uu_uu_12_13_14_uu_uu_25_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_04_05_06_uu_uu_17_uu_uu_12_13_14_uu_uu_25_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5]
@@ -3674,7 +3674,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_17_uu_uu_12_13_14_uu_uu_25_u
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_uu_04_05_06_uu_uu_17_uu_uu_12_13_14_uu_uu_25_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5],ymm0[22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 4, i32 5, i32 6, i32 undef, i32 undef, i32 17, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 undef, i32 undef, i32 25, i32 undef>
@@ -3683,7 +3683,7 @@ define <16 x i16> @shuffle_v16i16_uu_04_05_06_uu_uu_17_uu_uu_12_13_14_uu_uu_25_u
define <16 x i16> @shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_28(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_28:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9]
@@ -3693,7 +3693,7 @@ define <16 x i16> @shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_28:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -3702,7 +3702,7 @@ define <16 x i16> @shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_28:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [21,22,23,0,1,2,3,12,29,30,31,8,9,10,11,12]
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -3713,7 +3713,7 @@ define <16 x i16> @shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_2
define <16 x i16> @shuffle_v16i16_uu_06_uu_uu_17_18_19_uu_uu_14_uu_uu_25_26_27_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_uu_06_uu_uu_17_18_19_uu_uu_14_uu_uu_25_26_27_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9]
@@ -3722,7 +3722,7 @@ define <16 x i16> @shuffle_v16i16_uu_06_uu_uu_17_18_19_uu_uu_14_uu_uu_25_26_27_u
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_uu_06_uu_uu_17_18_19_uu_uu_14_uu_uu_25_26_27_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9],ymm0[26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 undef, i32 6, i32 undef, i32 undef, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 14, i32 undef, i32 undef, i32 25, i32 26, i32 27, i32 undef>
@@ -3731,7 +3731,7 @@ define <16 x i16> @shuffle_v16i16_uu_06_uu_uu_17_18_19_uu_uu_14_uu_uu_25_26_27_u
define <16 x i16> @shuffle_v16i16_23_uu_03_uu_20_20_05_uu_31_uu_11_uu_28_28_13_uu(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_23_uu_03_uu_20_20_05_uu_31_uu_11_uu_28_28_13_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
@@ -3744,13 +3744,13 @@ define <16 x i16> @shuffle_v16i16_23_uu_03_uu_20_20_05_uu_31_uu_11_uu_28_28_13_u
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_23_uu_03_uu_20_20_05_uu_31_uu_11_uu_28_28_13_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8,9,10,11],ymm1[12],ymm0[13,14],ymm1[15]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,15,14,15,6,7,6,7,8,9,8,9,10,11,14,15,30,31,30,31,22,23,22,23,24,25,24,25,26,27,30,31]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_23_uu_03_uu_20_20_05_uu_31_uu_11_uu_28_28_13_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <7,u,19,u,4,4,21,u,15,u,27,u,12,12,29,u>
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -3761,13 +3761,13 @@ define <16 x i16> @shuffle_v16i16_23_uu_03_uu_20_20_05_uu_31_uu_11_uu_28_28_13_u
define <16 x i16> @shuffle_v16i16_u_u_u_u_u_u_u_u_0_16_1_17_2_18_3_19(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_u_u_u_u_u_u_u_u_0_16_1_17_2_18_3_19:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_u_u_u_u_u_u_u_u_0_16_1_17_2_18_3_19:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2OR512VL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -3777,14 +3777,14 @@ define <16 x i16> @shuffle_v16i16_u_u_u_u_u_u_u_u_0_16_1_17_2_18_3_19(<16 x i16>
define <16 x i16> @shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2OR512VL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -3795,7 +3795,7 @@ define <16 x i16> @shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3(<16 x i16> %a,
define <16 x i16> @shuffle_v16i16_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -3803,7 +3803,7 @@ define <16 x i16> @shuffle_v16i16_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8(<16 x i16> %a,
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2OR512VL-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -3813,7 +3813,7 @@ define <16 x i16> @shuffle_v16i16_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8_8(<16 x i16> %a,
define <16 x i16> @shuffle_v16i16_4_20_5_21_6_22_7_23_u_u_u_u_u_u_u_u(<16 x i16> %a, <16 x i16> %b) {
; ALL-LABEL: shuffle_v16i16_4_20_5_21_6_22_7_23_u_u_u_u_u_u_u_u:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -3822,7 +3822,7 @@ define <16 x i16> @shuffle_v16i16_4_20_5_21_6_22_7_23_u_u_u_u_u_u_u_u(<16 x i16>
define <16 x i16> @shuffle_v16i16_3_3_3_3_3_3_3_3_u_u_u_u_u_u_u_u(<16 x i16> %a, <16 x i16> %b) {
; ALL-LABEL: shuffle_v16i16_3_3_3_3_3_3_3_3_u_u_u_u_u_u_u_u:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; ALL-NEXT: retq
@@ -3832,14 +3832,14 @@ define <16 x i16> @shuffle_v16i16_3_3_3_3_3_3_3_3_u_u_u_u_u_u_u_u(<16 x i16> %a,
define <16 x i16> @shuffle_v16i16_9_9_9_9_9_9_9_9_u_u_u_u_u_u_u_u(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_9_9_9_9_9_9_9_9_u_u_u_u_u_u_u_u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_9_9_9_9_9_9_9_9_u_u_u_u_u_u_u_u:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -3850,7 +3850,7 @@ define <16 x i16> @shuffle_v16i16_9_9_9_9_9_9_9_9_u_u_u_u_u_u_u_u(<16 x i16> %a,
define <16 x i16> @shuffle_v16i16_02_18_03_19_00_16_01_17_10_26_11_27_08_24_09_25(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-LABEL: shuffle_v16i16_02_18_03_19_00_16_01_17_10_26_11_27_08_24_09_25:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
@@ -3861,14 +3861,14 @@ define <16 x i16> @shuffle_v16i16_02_18_03_19_00_16_01_17_10_26_11_27_08_24_09_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_02_18_03_19_00_16_01_17_10_26_11_27_08_24_09_25:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,4,5,6,7,0,1,0,1,12,13,2,3,16,17,20,21,20,21,22,23,16,17,16,17,28,29,18,19]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,6,7,6,7,0,1,2,3,2,3,14,15,20,21,18,19,22,23,22,23,16,17,18,19,18,19,30,31]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_02_18_03_19_00_16_01_17_10_26_11_27_08_24_09_25:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [2,18,3,19,0,16,1,17,10,26,11,27,8,24,9,25]
; AVX512VL-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -3878,7 +3878,7 @@ define <16 x i16> @shuffle_v16i16_02_18_03_19_00_16_01_17_10_26_11_27_08_24_09_2
define <16 x i16> @shuffle_v16i16_02_18_03_19_10_26_11_27_00_16_01_17_08_24_09_25(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-LABEL: shuffle_v16i16_02_18_03_19_10_26_11_27_00_16_01_17_08_24_09_25:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,1,0,1]
@@ -3897,7 +3897,7 @@ define <16 x i16> @shuffle_v16i16_02_18_03_19_10_26_11_27_00_16_01_17_08_24_09_2
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_02_18_03_19_10_26_11_27_00_16_01_17_08_24_09_25:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,4,5,6,7,0,1,0,1,12,13,2,3,16,17,20,21,20,21,22,23,16,17,16,17,28,29,18,19]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,6,7,6,7,0,1,2,3,2,3,14,15,20,21,18,19,22,23,22,23,16,17,18,19,18,19,30,31]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
@@ -3905,7 +3905,7 @@ define <16 x i16> @shuffle_v16i16_02_18_03_19_10_26_11_27_00_16_01_17_08_24_09_2
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_02_18_03_19_10_26_11_27_00_16_01_17_08_24_09_25:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [2,18,3,19,0,16,1,17,10,26,11,27,8,24,9,25]
; AVX512VL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm2[0,2,1,3]
@@ -3919,7 +3919,7 @@ define <16 x i16> @shuffle_v16i16_02_18_03_19_10_26_11_27_00_16_01_17_08_24_09_2
define <16 x i16> @shuffle_v16i16_04_06_07_uu_uu_06_07_05_12_14_15_uu_uu_14_15_13(<16 x i16> %a) {
; AVX1-LABEL: shuffle_v16i16_04_06_07_uu_uu_06_07_05_12_14_15_uu_uu_14_15_13:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [8,9,12,13,14,15,14,15,8,9,12,13,14,15,10,11]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -3928,7 +3928,7 @@ define <16 x i16> @shuffle_v16i16_04_06_07_uu_uu_06_07_05_12_14_15_uu_uu_14_15_1
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_04_06_07_uu_uu_06_07_05_12_14_15_uu_uu_14_15_13:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,12,13,14,15,14,15,8,9,12,13,14,15,10,11,24,25,28,29,30,31,30,31,24,25,28,29,30,31,26,27]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 4, i32 6, i32 7, i32 undef, i32 undef, i32 6, i32 7, i32 5, i32 12, i32 14, i32 15, i32 undef, i32 undef, i32 14, i32 15, i32 13>
@@ -3937,7 +3937,7 @@ define <16 x i16> @shuffle_v16i16_04_06_07_uu_uu_06_07_05_12_14_15_uu_uu_14_15_1
define <16 x i16> @insert_v16i16_0elt_into_zero_vector(i16* %ptr) {
; ALL-LABEL: insert_v16i16_0elt_into_zero_vector:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movzwl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: retq
@@ -3948,12 +3948,12 @@ define <16 x i16> @insert_v16i16_0elt_into_zero_vector(i16* %ptr) {
define <16 x i16> @concat_v16i16_0_1_2_3_4_5_6_7_24_25_26_27_28_29_30_31(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: concat_v16i16_0_1_2_3_4_5_6_7_24_25_26_27_28_29_30_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: concat_v16i16_0_1_2_3_4_5_6_7_24_25_26_27_28_29_30_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2OR512VL-NEXT: retq
%alo = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -3964,17 +3964,17 @@ define <16 x i16> @concat_v16i16_0_1_2_3_4_5_6_7_24_25_26_27_28_29_30_31(<16 x i
define <16 x i16> @concat_v16i16_8_9_10_11_12_13_14_15_24_25_26_27_28_29_30_31_bc(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: concat_v16i16_8_9_10_11_12_13_14_15_24_25_26_27_28_29_30_31_bc:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: concat_v16i16_8_9_10_11_12_13_14_15_24_25_26_27_28_29_30_31_bc:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: concat_v16i16_8_9_10_11_12_13_14_15_24_25_26_27_28_29_30_31_bc:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX512VL-NEXT: retq
%ahi = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -3988,7 +3988,7 @@ define <16 x i16> @concat_v16i16_8_9_10_11_12_13_14_15_24_25_26_27_28_29_30_31_b
define <16 x i16> @PR24935(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: PR24935:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
@@ -4007,7 +4007,7 @@ define <16 x i16> @PR24935(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR24935:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[8,9,10,11,4,5,8,9,0,1,14,15,12,13,0,1,24,25,26,27,20,21,24,25,16,17,30,31,28,29,16,17]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,4,5,0,1,10,11,4,5,10,11,4,5,6,7,22,23,20,21,16,17,26,27,20,21,26,27,20,21,22,23]
@@ -4023,7 +4023,7 @@ define <16 x i16> @PR24935(<16 x i16> %a, <16 x i16> %b) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: PR24935:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [11,10,17,13,10,7,27,0,17,25,0,12,29,20,16,8]
; AVX512VL-NEXT: vpermi2w %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -4034,7 +4034,7 @@ define <16 x i16> @PR24935(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @PR34369(<16 x i16> %vec, <16 x i16> %mask) {
; AVX1-LABEL: PR34369:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[8,9,10,11,4,5,10,11,8,9,10,11,4,5,4,5]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,7,0,1,0,1,6,7,10,11,4,5,4,5,6,7]
@@ -4050,7 +4050,7 @@ define <16 x i16> @PR34369(<16 x i16> %vec, <16 x i16> %mask) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR34369:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,10,11,4,5,10,11,8,9,10,11,4,5,4,5]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[6,7,0,1,0,1,u,u,10,11,4,5,4,5,u,u,30,31,16,17,28,29,16,17,18,19,20,21,24,25,24,25]
@@ -4062,7 +4062,7 @@ define <16 x i16> @PR34369(<16 x i16> %vec, <16 x i16> %mask) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: PR34369:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [3,0,0,13,5,2,2,10,15,8,14,8,9,10,12,12]
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpeqw %ymm3, %ymm1, %k1
@@ -4076,14 +4076,14 @@ define <16 x i16> @PR34369(<16 x i16> %vec, <16 x i16> %mask) {
define <16 x i16> @insert_dup_mem_v16i16_i32(i32* %ptr) {
; AVX1-LABEL: insert_dup_mem_v16i16_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_mem_v16i16_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -4095,7 +4095,7 @@ define <16 x i16> @insert_dup_mem_v16i16_i32(i32* %ptr) {
define <16 x i16> @insert_dup_mem_v16i16_sext_i16(i16* %ptr) {
; AVX1-LABEL: insert_dup_mem_v16i16_sext_i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movswl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
@@ -4103,14 +4103,14 @@ define <16 x i16> @insert_dup_mem_v16i16_sext_i16(i16* %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_mem_v16i16_sext_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movswl (%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_dup_mem_v16i16_sext_i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movswl (%rdi), %eax
; AVX512VL-NEXT: vpbroadcastw %eax, %ymm0
; AVX512VL-NEXT: retq
@@ -4124,14 +4124,14 @@ define <16 x i16> @insert_dup_mem_v16i16_sext_i16(i16* %ptr) {
define <16 x i16> @insert_dup_elt1_mem_v16i16_i32(i32* %ptr) #0 {
; AVX1-LABEL: insert_dup_elt1_mem_v16i16_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_elt1_mem_v16i16_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw 2(%rdi), %ymm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -4143,14 +4143,14 @@ define <16 x i16> @insert_dup_elt1_mem_v16i16_i32(i32* %ptr) #0 {
define <16 x i16> @insert_dup_elt3_mem_v16i16_i32(i32* %ptr) #0 {
; AVX1-LABEL: insert_dup_elt3_mem_v16i16_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_elt3_mem_v16i16_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastw 2(%rdi), %ymm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
diff --git a/test/CodeGen/X86/vector-shuffle-256-v32.ll b/test/CodeGen/X86/vector-shuffle-256-v32.ll
index 3c69f6160dd..01c7fc466eb 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -5,14 +5,14 @@
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -21,7 +21,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
@@ -29,7 +29,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -39,7 +39,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
@@ -47,7 +47,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -57,7 +57,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0]
@@ -65,7 +65,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_03_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -75,7 +75,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0]
@@ -83,7 +83,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_04_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -93,7 +93,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0]
@@ -101,7 +101,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_05_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -111,7 +111,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0]
@@ -119,7 +119,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_06_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -129,7 +129,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
@@ -137,7 +137,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -147,7 +147,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
@@ -155,7 +155,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -165,7 +165,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0]
@@ -173,7 +173,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -183,7 +183,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0]
@@ -191,7 +191,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,10,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -201,7 +201,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,11,0,0,0,0,0,0,0,0,0,0,0]
@@ -209,7 +209,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,11,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -219,7 +219,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,12,0,0,0,0,0,0,0,0,0,0,0,0]
@@ -227,7 +227,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,12,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -237,7 +237,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,13,0,0,0,0,0,0,0,0,0,0,0,0,0]
@@ -245,7 +245,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,13,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -255,7 +255,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
@@ -263,7 +263,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,14,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -273,7 +273,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $15, %eax
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
@@ -283,7 +283,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,15,0,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -293,7 +293,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -303,7 +303,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
@@ -313,7 +313,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpshufb %ymm2, %ymm1, %ymm1
@@ -329,7 +329,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -339,7 +339,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@@ -347,7 +347,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: movl $1, %eax
; AVX512VL-NEXT: kmovd %eax, %k1
@@ -360,7 +360,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -370,7 +370,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,0,255,255,u,u,u,u,u,u,u,u,u,u,u,u,255,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@@ -378,7 +378,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: movw $1, %ax
; AVX512VL-NEXT: kmovd %eax, %k1
@@ -391,7 +391,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -401,7 +401,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,0,255,255,u,u,u,u,u,u,u,u,u,u,u,u,255,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@@ -409,7 +409,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: movw $1, %ax
; AVX512VL-NEXT: kmovd %eax, %k1
@@ -422,7 +422,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -432,7 +432,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -443,7 +443,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -453,7 +453,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -464,7 +464,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -474,7 +474,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -485,7 +485,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -495,7 +495,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -506,7 +506,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_24_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_24_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -517,13 +517,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_24_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_24_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_24_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -534,7 +534,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_24_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_25_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_25_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -545,13 +545,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_25_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_25_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_25_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -562,7 +562,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_25_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_26_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_26_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -573,13 +573,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_26_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_26_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_26_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -590,7 +590,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_26_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_27_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_27_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -601,13 +601,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_27_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_27_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,11,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_27_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,11,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -618,7 +618,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_27_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_28_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_28_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -629,13 +629,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_28_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_28_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,12,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_28_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,12,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -646,7 +646,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_28_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_29_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_29_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -657,13 +657,13 @@ define <32 x i8> @shuffle_v32i8_00_00_29_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_29_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_00_29_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -674,7 +674,7 @@ define <32 x i8> @shuffle_v32i8_00_00_29_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_30_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_30_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -685,13 +685,13 @@ define <32 x i8> @shuffle_v32i8_00_30_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_30_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_30_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -702,7 +702,7 @@ define <32 x i8> @shuffle_v32i8_00_30_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: movl $128, %eax
@@ -715,7 +715,7 @@ define <32 x i8> @shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,1]
; AVX2-NEXT: movl $15, %eax
; AVX2-NEXT: vmovd %eax, %xmm1
@@ -723,7 +723,7 @@ define <32 x i8> @shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512VL-NEXT: movl $15, %eax
@@ -736,7 +736,7 @@ define <32 x i8> @shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -745,7 +745,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2OR512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -755,7 +755,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_31_31_31_31_31_31_31_31_31_31_31_31_31_31_31_31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_31_31_31_31_31_31_31_31_31_31_31_31_31_31_31_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -764,7 +764,7 @@ define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_31_31_31_31_31_31_31_31_31_31_31_31_31_31_31_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
@@ -773,7 +773,7 @@ define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -782,7 +782,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
@@ -791,7 +791,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_
define <32 x i8> @shuffle_v32i8_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_15_23_23_23_23_23_23_23_23_31_31_31_31_31_31_31_31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_15_23_23_23_23_23_23_23_23_31_31_31_31_31_31_31_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -800,7 +800,7 @@ define <32 x i8> @shuffle_v32i8_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_15_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_15_23_23_23_23_23_23_23_23_31_31_31_31_31_31_31_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,7,7,7,7,7,7,7,15,15,15,15,15,15,15,15,23,23,23,23,23,23,23,23,31,31,31,31,31,31,31,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
@@ -809,7 +809,7 @@ define <32 x i8> @shuffle_v32i8_07_07_07_07_07_07_07_07_15_15_15_15_15_15_15_15_
define <32 x i8> @shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20_24_24_24_24_28_28_28_28(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20_24_24_24_24_28_28_28_28:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -818,7 +818,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20_24_24_24_24_28_28_28_28:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12,16,16,16,16,20,20,20,20,24,24,24,24,28,28,28,28]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12, i32 16, i32 16, i32 16, i32 16, i32 20, i32 20, i32 20, i32 20, i32 24, i32 24, i32 24, i32 24, i32 28, i32 28, i32 28, i32 28>
@@ -827,7 +827,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_
define <32 x i8> @shuffle_v32i8_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_15_19_19_19_19_23_23_23_23_27_27_27_27_31_31_31_31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_15_19_19_19_19_23_23_23_23_27_27_27_27_31_31_31_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [3,3,3,3,7,7,7,7,11,11,11,11,15,15,15,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -836,7 +836,7 @@ define <32 x i8> @shuffle_v32i8_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_15_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_15_19_19_19_19_23_23_23_23_27_27_27_27_31_31_31_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7,11,11,11,11,15,15,15,15,19,19,19,19,23,23,23,23,27,27,27,27,31,31,31,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 3, i32 3, i32 3, i32 3, i32 7, i32 7, i32 7, i32 7, i32 11, i32 11, i32 11, i32 11, i32 15, i32 15, i32 15, i32 15, i32 19, i32 19, i32 19, i32 19, i32 23, i32 23, i32 23, i32 23, i32 27, i32 27, i32 27, i32 27, i32 31, i32 31, i32 31, i32 31>
@@ -845,7 +845,7 @@ define <32 x i8> @shuffle_v32i8_03_03_03_03_07_07_07_07_11_11_11_11_15_15_15_15_
define <32 x i8> @shuffle_v32i8_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14_16_16_18_18_20_20_22_22_24_24_26_26_28_28_30_30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14_16_16_18_18_20_20_22_22_24_24_26_26_28_28_30_30:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -854,7 +854,7 @@ define <32 x i8> @shuffle_v32i8_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14_16_16_18_18_20_20_22_22_24_24_26_26_28_28_30_30:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14,16,16,18,18,20,20,22,22,24,24,26,26,28,28,30,30]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14, i32 16, i32 16, i32 18, i32 18, i32 20, i32 20, i32 22, i32 22, i32 24, i32 24, i32 26, i32 26, i32 28, i32 28, i32 30, i32 30>
@@ -863,7 +863,7 @@ define <32 x i8> @shuffle_v32i8_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14_
define <32 x i8> @shuffle_v32i8_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15_17_17_19_19_21_21_23_23_25_25_27_27_29_29_31_31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15_17_17_19_19_21_21_23_23_25_25_27_27_29_29_31_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -872,7 +872,7 @@ define <32 x i8> @shuffle_v32i8_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15_17_17_19_19_21_21_23_23_25_25_27_27_29_29_31_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15,17,17,19,19,21,21,23,23,25,25,27,27,29,29,31,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15, i32 17, i32 17, i32 19, i32 19, i32 21, i32 21, i32 23, i32 23, i32 25, i32 25, i32 27, i32 27, i32 29, i32 29, i32 31, i32 31>
@@ -881,13 +881,13 @@ define <32 x i8> @shuffle_v32i8_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -897,13 +897,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -913,13 +913,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -929,13 +929,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -945,13 +945,13 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -961,7 +961,7 @@ define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $15, %eax
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -969,7 +969,7 @@ define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: movl $15, %eax
; AVX2OR512VL-NEXT: vmovd %eax, %xmm1
; AVX2OR512VL-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -981,7 +981,7 @@ define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_16_49_18_51_20_53_22_55_24_57_26_59_28_61_30_63(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_16_49_18_51_20_53_22_55_24_57_26_59_28_61_30_63:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
@@ -989,13 +989,13 @@ define <32 x i8> @shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_16_49_18_51_20_53_22_55_24_57_26_59_28_61_30_63:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_16_49_18_51_20_53_22_55_24_57_26_59_28_61_30_63:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
@@ -1006,7 +1006,7 @@ define <32 x i8> @shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_
define <32 x i8> @shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_48_17_50_19_52_21_54_23_56_25_58_27_60_29_62_31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_48_17_50_19_52_21_54_23_56_25_58_27_60_29_62_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
@@ -1014,13 +1014,13 @@ define <32 x i8> @shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_48_17_50_19_52_21_54_23_56_25_58_27_60_29_62_31:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_48_17_50_19_52_21_54_23_56_25_58_27_60_29_62_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
@@ -1031,12 +1031,12 @@ define <32 x i8> @shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_
define <32 x i8> @shuffle_v32i8_zz_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31(<32 x i8> %a) {
; AVX1OR2-LABEL: shuffle_v32i8_zz_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_zz_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; AVX512VL-NEXT: kmovd %eax, %k1
; AVX512VL-NEXT: vmovdqu8 %ymm0, %ymm0 {%k1} {z}
@@ -1047,13 +1047,13 @@ define <32 x i8> @shuffle_v32i8_zz_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_
define <32 x i8> @shuffle_v32i8_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15_u6_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15_u6_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1],zero,xmm0[2],zero,xmm0[4,u,6,7,8,9,10,11,12,13,14,15]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15_u6_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1],zero,ymm0[2],zero,ymm0[4,u,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 1, i32 32, i32 2, i32 32, i32 4, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -1062,7 +1062,7 @@ define <32 x i8> @shuffle_v32i8_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15_
define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -1070,7 +1070,7 @@ define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2OR512VL-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1080,7 +1080,7 @@ define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_
define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
@@ -1093,7 +1093,7 @@ define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
@@ -1103,7 +1103,7 @@ define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -1117,7 +1117,7 @@ define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_
define <32 x i8> @shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_48_48_48_48_48_48_48_48_24_25_26_27_28_29_30_31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_48_48_48_48_48_48_48_48_24_25_26_27_28_29_30_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1130,7 +1130,7 @@ define <32 x i8> @shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_48_48_48_48_48_48_48_48_24_25_26_27_28_29_30_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2OR512VL-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
@@ -1141,7 +1141,7 @@ define <32 x i8> @shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_
define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_15_14_13_12_11_10_09_08_55_54_53_52_51_50_49_48_31_30_29_28_27_26_25_24(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_15_14_13_12_11_10_09_08_55_54_53_52_51_50_49_48_31_30_29_28_27_26_25_24:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <15,14,13,12,11,10,9,8,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -1156,7 +1156,7 @@ define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_15_14_13_12_11_10_09_08_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_15_14_13_12_11_10_09_08_55_54_53_52_51_50_49_48_31_30_29_28_27_26_25_24:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; AVX2OR512VL-NEXT: retq
@@ -1166,7 +1166,7 @@ define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_15_14_13_12_11_10_09_08_
define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_55_54_53_52_51_50_49_48_23_22_21_20_19_18_17_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_55_54_53_52_51_50_49_48_23_22_21_20_19_18_17_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -1178,7 +1178,7 @@ define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_55_54_53_52_51_50_49_48_23_22_21_20_19_18_17_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16,u,u,u,u,u,u,u,u]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
@@ -1189,7 +1189,7 @@ define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_17_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_17_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1198,7 +1198,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_17_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,17,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 17, i32 16>
@@ -1207,7 +1207,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_18_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_18_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1216,7 +1216,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_18_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,18,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 18, i32 16, i32 16>
@@ -1225,7 +1225,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_23_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_23_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1234,7 +1234,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_23_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,23,16,16,16,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 23, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -1243,7 +1243,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_24_16_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_24_16_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1252,7 +1252,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_24_16_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,24,16,16,16,16,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 24, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -1261,7 +1261,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_30_16_16_16_16_16_16_16_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_30_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1270,7 +1270,7 @@ define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_30_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,30,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 14, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 30, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -1279,7 +1279,7 @@ define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_31_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_31_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $15, %eax
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1289,7 +1289,7 @@ define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_31_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,31,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 31, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -1298,7 +1298,7 @@ define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_16_48_17_49_18_50_19_51_20_52_21_53_22_54_23_55(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_16_48_17_49_18_50_19_51_20_52_21_53_22_54_23_55:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -1307,7 +1307,7 @@ define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_16_48_17_49_18_50_19_51_20_52_21_53_22_54_23_55:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
@@ -1316,7 +1316,7 @@ define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_
define <32 x i8> @shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_24_56_25_57_26_58_27_59_28_60_29_61_30_62_31_63(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_24_56_25_57_26_58_27_59_28_60_29_61_30_62_31_63:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
@@ -1325,7 +1325,7 @@ define <32 x i8> @shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_24_56_25_57_26_58_27_59_28_60_29_61_30_62_31_63:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
@@ -1334,7 +1334,7 @@ define <32 x i8> @shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_
define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_24_56_25_57_26_58_27_59_28_60_29_61_30_62_31_63(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_24_56_25_57_26_58_27_59_28_60_29_61_30_62_31_63:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
@@ -1343,7 +1343,7 @@ define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_24_56_25_57_26_58_27_59_28_60_29_61_30_62_31_63:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,u,1,u,2,u,3,u,4,u,5,u,6,u,7,u,24,u,25,u,26,u,27,u,28,u,29,u,30,u,31,u]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,0,u,1,u,2,u,3,u,4,u,5,u,6,u,7,u,24,u,25,u,26,u,27,u,28,u,29,u,30,u,31]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
@@ -1351,7 +1351,7 @@ define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_24_56_25_57_26_58_27_59_28_60_29_61_30_62_31_63:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,u,1,u,2,u,3,u,4,u,5,u,6,u,7,u,24,u,25,u,26,u,27,u,28,u,29,u,30,u,31,u]
; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; AVX512VL-NEXT: kmovd %eax, %k1
@@ -1363,7 +1363,7 @@ define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_
define <32 x i8> @shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_16_48_17_49_18_50_19_51_20_52_21_53_22_54_23_55(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_16_48_17_49_18_50_19_51_20_52_21_53_22_54_23_55:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -1372,7 +1372,7 @@ define <32 x i8> @shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_16_48_17_49_18_50_19_51_20_52_21_53_22_54_23_55:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,u,9,u,10,u,11,u,12,u,13,u,14,u,15,u,16,u,17,u,18,u,19,u,20,u,21,u,22,u,23,u]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,8,u,9,u,10,u,11,u,12,u,13,u,14,u,15,u,16,u,17,u,18,u,19,u,20,u,21,u,22,u,23]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
@@ -1380,7 +1380,7 @@ define <32 x i8> @shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_16_48_17_49_18_50_19_51_20_52_21_53_22_54_23_55:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,u,9,u,10,u,11,u,12,u,13,u,14,u,15,u,16,u,17,u,18,u,19,u,20,u,21,u,22,u,23,u]
; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; AVX512VL-NEXT: kmovd %eax, %k1
@@ -1392,7 +1392,7 @@ define <32 x i8> @shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_16_17_16_16_16_16_16_16_16_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_16_17_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
@@ -1400,7 +1400,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_16_17_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,16,17,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 16, i32 17, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -1409,7 +1409,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_01_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_16_16_18_16_16_16_16_16_16_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_16_16_18_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0]
@@ -1417,7 +1417,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_16_16_18_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,16,16,18,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 0, i32 16, i32 16, i32 18, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -1426,7 +1426,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_02_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_16_16_16_16_16_16_16_23_16_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_16_16_16_16_16_16_16_23_16_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,0]
@@ -1434,7 +1434,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_16_16_16_16_16_16_16_23_16_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,23,16,16,16,16,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 23, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -1443,7 +1443,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_07_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_24_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_24_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0]
@@ -1451,7 +1451,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_24_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,24,16,16,16,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 24, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -1460,7 +1460,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_30_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_30_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,14,0]
@@ -1468,7 +1468,7 @@ define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_30_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,30,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 14, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 30, i32 16>
@@ -1477,7 +1477,7 @@ define <32 x i8> @shuffle_v32i8_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_31:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $15, %eax
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
@@ -1487,7 +1487,7 @@ define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_31:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,31]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 31>
@@ -1496,7 +1496,7 @@ define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_28_28_28_28_24_24_24_24_20_20_20_20_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_28_28_28_28_24_24_24_24_20_20_20_20_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,12,12,12,8,8,8,8,4,4,4,4,0,0,0,0]
@@ -1504,7 +1504,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_28_28_28_28_24_24_24_24_20_20_20_20_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12,28,28,28,28,24,24,24,24,20,20,20,20,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12, i32 28, i32 28, i32 28, i32 28, i32 24, i32 24, i32 24, i32 24, i32 20, i32 20, i32 20, i32 20, i32 16, i32 16, i32 16, i32 16>
@@ -1513,7 +1513,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12_
define <32 x i8> @shuffle_v32i8_08_08_08_08_08_08_08_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_08_08_08_08_08_08_08_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[8,8,8,8,8,8,8,8,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
@@ -1521,7 +1521,7 @@ define <32 x i8> @shuffle_v32i8_08_08_08_08_08_08_08_08_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_08_08_08_08_08_08_08_08_00_00_00_00_00_00_00_00_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,8,8,8,8,8,8,8,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
@@ -1530,14 +1530,14 @@ define <32 x i8> @shuffle_v32i8_08_08_08_08_08_08_08_08_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_16_16_16_16_uu_uu_uu_uu_uu_16_16_16_16_16_30_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_16_16_16_16_uu_uu_uu_uu_uu_16_16_16_16_16_30_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,0,0,0,u,u,u,u,u,0,0,0,0,0,14,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_16_16_16_16_uu_uu_uu_uu_uu_16_16_16_16_16_30_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,16,16,16,u,u,u,u,u,16,16,16,16,16,30,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 16, i32 16, i32 16, i32 16, i32 30, i32 16>
@@ -1546,7 +1546,7 @@ define <32 x i8> @shuffle_v32i8_00_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_
define <32 x i8> @shuffle_v32i8_uu_14_uu_uu_00_00_00_00_00_00_00_00_00_00_00_00_16_16_uu_16_uu_uu_uu_uu_16_16_16_16_16_16_30_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_uu_14_uu_uu_00_00_00_00_00_00_00_00_00_00_00_00_16_16_uu_16_uu_uu_uu_uu_16_16_16_16_16_16_30_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[14,14,1,1,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,u,0,u,u,u,u,0,0,0,0,0,0,14,0]
@@ -1554,7 +1554,7 @@ define <32 x i8> @shuffle_v32i8_uu_14_uu_uu_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_uu_14_uu_uu_00_00_00_00_00_00_00_00_00_00_00_00_16_16_uu_16_uu_uu_uu_uu_16_16_16_16_16_16_30_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,14,u,u,0,0,0,0,0,0,0,0,0,0,0,0,16,16,u,16,u,u,u,u,16,16,16,16,16,16,30,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 undef, i32 14, i32 undef, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 16, i32 undef, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 30, i32 16>
@@ -1563,7 +1563,7 @@ define <32 x i8> @shuffle_v32i8_uu_14_uu_uu_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_00_00_uu_uu_uu_04_uu_08_08_08_08_uu_uu_12_uu_28_28_28_28_uu_uu_uu_24_20_20_20_20_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_uu_uu_uu_04_uu_08_08_08_08_uu_uu_12_uu_28_28_28_28_uu_uu_uu_24_20_20_20_20_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,12,12,12,8,8,8,8,4,4,4,4,0,0,0,0]
@@ -1571,7 +1571,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_uu_uu_uu_04_uu_08_08_08_08_uu_uu_12_uu_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_uu_uu_uu_04_uu_08_08_08_08_uu_uu_12_uu_28_28_28_28_uu_uu_uu_24_20_20_20_20_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,u,u,u,4,u,8,8,8,8,u,u,12,u,28,28,28,28,u,u,u,24,20,20,20,20,16,16,16,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 undef, i32 undef, i32 undef, i32 4, i32 undef, i32 8, i32 8, i32 8, i32 8, i32 undef, i32 undef, i32 12, i32 undef, i32 28, i32 28, i32 28, i32 28, i32 undef, i32 undef, i32 undef, i32 24, i32 20, i32 20, i32 20, i32 20, i32 16, i32 16, i32 16, i32 16>
@@ -1580,7 +1580,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_uu_uu_uu_04_uu_08_08_08_08_uu_uu_12_uu_
define <32 x i8> @shuffle_v32i8_08_08_08_08_08_08_08_08_uu_uu_uu_uu_uu_uu_uu_uu_16_16_16_uu_uu_uu_uu_uu_uu_uu_24_24_24_24_24_24(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_08_08_08_08_08_08_08_08_uu_uu_uu_uu_uu_uu_uu_uu_16_16_16_uu_uu_uu_uu_uu_uu_uu_24_24_24_24_24_24:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1589,7 +1589,7 @@ define <32 x i8> @shuffle_v32i8_08_08_08_08_08_08_08_08_uu_uu_uu_uu_uu_uu_uu_uu_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_08_08_08_08_08_08_08_08_uu_uu_uu_uu_uu_uu_uu_uu_16_16_16_uu_uu_uu_uu_uu_uu_uu_24_24_24_24_24_24:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,8,8,8,8,8,8,8,u,u,u,u,u,u,u,u,16,16,16,u,u,u,u,u,u,u,24,24,24,24,24,24]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
@@ -1598,7 +1598,7 @@ define <32 x i8> @shuffle_v32i8_08_08_08_08_08_08_08_08_uu_uu_uu_uu_uu_uu_uu_uu_
define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[u,u,4,u,1,6],zero,zero,xmm2[0],zero,xmm2[11,u],zero,zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm1[u,u],zero,xmm1[u],zero,zero,xmm1[5,0],zero,xmm1[10],zero,xmm1[u,4,2,4,7]
@@ -1620,7 +1620,7 @@ define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[10,13,u,u,3,3,u,8,u,u,u,12,1,u,u,u,u,u,20,u,17,22,u,u,16,u,27,u,u,u,u,u]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,12,u,u,u,u,u,u,u,0,3,u,u,u,u,u,u,21,16,u,26,u,u,20,18,20,23]
@@ -1635,7 +1635,7 @@ define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[10,13,u,u,3,3,u,8,u,u,u,12,1,u,u,u,u,u,20,u,17,22,u,u,16,u,27,u,u,u,u,u]
; AVX512VL-NEXT: movl $-222248896, %eax # imm = 0xF2C0C040
@@ -1656,7 +1656,7 @@ define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
@@ -1664,7 +1664,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
; AVX2OR512VL-NEXT: retq
@@ -1674,7 +1674,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_
define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1683,7 +1683,7 @@ define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
; AVX2OR512VL-NEXT: retq
@@ -1693,7 +1693,7 @@ define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_
define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1703,7 +1703,7 @@ define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
; AVX2OR512VL-NEXT: retq
@@ -1713,7 +1713,7 @@ define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_
define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1722,7 +1722,7 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
; AVX2OR512VL-NEXT: retq
@@ -1732,14 +1732,14 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_
define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX2OR512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2OR512VL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
@@ -1750,7 +1750,7 @@ define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_
define <32 x i8> @shuffle_v32i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_32_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_48(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_32_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_48:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
@@ -1758,7 +1758,7 @@ define <32 x i8> @shuffle_v32i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_32_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_32_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_48:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 32, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 48>
@@ -1767,7 +1767,7 @@ define <32 x i8> @shuffle_v32i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_32_
define <32 x i8> @shuffle_v32i8_47_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_63_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_47_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_63_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -1775,7 +1775,7 @@ define <32 x i8> @shuffle_v32i8_47_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_47_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_63_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 47, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 63, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -1788,7 +1788,7 @@ define <32 x i8> @shuffle_v32i8_47_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_
define <32 x i8> @shuffle_v32i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14_zz_16_zz_18_zz_20_zz_22_zz_24_zz_26_zz_28_zz_30(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14_zz_16_zz_18_zz_20_zz_22_zz_24_zz_26_zz_28_zz_30:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllw $8, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0
@@ -1796,7 +1796,7 @@ define <32 x i8> @shuffle_v32i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14_zz_16_zz_18_zz_20_zz_22_zz_24_zz_26_zz_28_zz_30:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsllw $8, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 0, i32 32, i32 2, i32 32, i32 4, i32 32, i32 6, i32 32, i32 8, i32 32, i32 10, i32 32, i32 12, i32 32, i32 14, i32 32, i32 16, i32 32, i32 18, i32 32, i32 20, i32 32, i32 22, i32 32, i32 24, i32 32, i32 26, i32 32, i32 28, i32 32, i32 30>
@@ -1805,7 +1805,7 @@ define <32 x i8> @shuffle_v32i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14_
define <32 x i8> @shuffle_v32i8_zz_zz_00_01_zz_zz_04_05_zz_zz_08_09_zz_zz_12_13_zz_zz_16_17_zz_zz_20_21_zz_zz_24_25_zz_zz_28_29(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_zz_zz_00_01_zz_zz_04_05_zz_zz_08_09_zz_zz_12_13_zz_zz_16_17_zz_zz_20_21_zz_zz_24_25_zz_zz_28_29:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
@@ -1813,7 +1813,7 @@ define <32 x i8> @shuffle_v32i8_zz_zz_00_01_zz_zz_04_05_zz_zz_08_09_zz_zz_12_13_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_zz_zz_00_01_zz_zz_04_05_zz_zz_08_09_zz_zz_12_13_zz_zz_16_17_zz_zz_20_21_zz_zz_24_25_zz_zz_28_29:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpslld $16, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 32, i32 0, i32 1, i32 32, i32 32, i32 4, i32 5, i32 32, i32 32, i32 8, i32 9, i32 32, i32 32, i32 12, i32 13, i32 32, i32 32, i32 16, i32 17, i32 32, i32 32, i32 20, i32 21, i32 32, i32 32, i32 24, i32 25, i32 32, i32 32, i32 28, i32 29>
@@ -1822,7 +1822,7 @@ define <32 x i8> @shuffle_v32i8_zz_zz_00_01_zz_zz_04_05_zz_zz_08_09_zz_zz_12_13_
define <32 x i8> @shuffle_v32i8_zz_zz_zz_zz_zz_zz_00_01_zz_zz_zz_zz_zz_zz_08_09_zz_zz_zz_zz_zz_zz_16_17_zz_zz_zz_zz_zz_zz_24_25(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_zz_zz_zz_zz_zz_zz_00_01_zz_zz_zz_zz_zz_zz_08_09_zz_zz_zz_zz_zz_zz_16_17_zz_zz_zz_zz_zz_zz_24_25:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $48, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
@@ -1830,7 +1830,7 @@ define <32 x i8> @shuffle_v32i8_zz_zz_zz_zz_zz_zz_00_01_zz_zz_zz_zz_zz_zz_08_09_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_zz_zz_zz_zz_zz_zz_00_01_zz_zz_zz_zz_zz_zz_08_09_zz_zz_zz_zz_zz_zz_16_17_zz_zz_zz_zz_zz_zz_24_25:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsllq $48, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 0, i32 1, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 8, i32 9, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 16, i32 17, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 24, i32 25>
@@ -1839,7 +1839,7 @@ define <32 x i8> @shuffle_v32i8_zz_zz_zz_zz_zz_zz_00_01_zz_zz_zz_zz_zz_zz_08_09_
define <32 x i8> @shuffle_v32i8_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
@@ -1847,7 +1847,7 @@ define <32 x i8> @shuffle_v32i8_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 1, i32 32, i32 3, i32 32, i32 5, i32 32, i32 7, i32 32, i32 9, i32 32, i32 11, i32 32, i32 13, i32 32, i32 15, i32 32, i32 17, i32 32, i32 19, i32 32, i32 21, i32 32, i32 23, i32 32, i32 25, i32 32, i32 27, i32 32, i32 29, i32 32, i32 31, i32 32>
@@ -1856,7 +1856,7 @@ define <32 x i8> @shuffle_v32i8_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_
define <32 x i8> @shuffle_v32i8_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz_18_19_zz_zz_22_23_zz_zz_26_27_zz_zz_30_31_zz_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz_18_19_zz_zz_22_23_zz_zz_26_27_zz_zz_30_31_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -1864,7 +1864,7 @@ define <32 x i8> @shuffle_v32i8_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz_18_19_zz_zz_22_23_zz_zz_26_27_zz_zz_30_31_zz_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 2, i32 3, i32 32, i32 32, i32 6, i32 7, i32 32, i32 32, i32 10, i32 11, i32 32, i32 32, i32 14, i32 15, i32 32, i32 32, i32 18, i32 19, i32 32, i32 32, i32 22, i32 23, i32 32, i32 32, i32 26, i32 27, i32 32, i32 32, i32 30, i32 31, i32 32, i32 32>
@@ -1873,7 +1873,7 @@ define <32 x i8> @shuffle_v32i8_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz_
define <32 x i8> @shuffle_v32i8_07_zz_zz_zz_zz_zz_zz_zz_15_zz_zz_zz_zz_z_zz_zz_23_zz_zz_zz_zz_zz_zz_zz_31_zz_zz_zz_zz_zz_zz_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_07_zz_zz_zz_zz_zz_zz_zz_15_zz_zz_zz_zz_z_zz_zz_23_zz_zz_zz_zz_zz_zz_zz_31_zz_zz_zz_zz_zz_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq $56, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrlq $56, %xmm0, %xmm0
@@ -1881,7 +1881,7 @@ define <32 x i8> @shuffle_v32i8_07_zz_zz_zz_zz_zz_zz_zz_15_zz_zz_zz_zz_z_zz_zz_2
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_07_zz_zz_zz_zz_zz_zz_zz_15_zz_zz_zz_zz_z_zz_zz_23_zz_zz_zz_zz_zz_zz_zz_31_zz_zz_zz_zz_zz_zz_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrlq $56, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 7, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 15, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 23, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 31, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
@@ -1890,7 +1890,7 @@ define <32 x i8> @shuffle_v32i8_07_zz_zz_zz_zz_zz_zz_zz_15_zz_zz_zz_zz_z_zz_zz_2
define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_34_zz_zz_zz_zz_zz_zz_zz_35_zz_zz_zz_zz_zz_zz_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_34_zz_zz_zz_zz_zz_zz_zz_35_zz_zz_zz_zz_zz_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -1898,7 +1898,7 @@ define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_34_zz_zz_zz_zz_zz_zz_zz_35_zz_zz_zz_zz_zz_zz_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 32, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 33, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 34, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 35, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -1907,7 +1907,7 @@ define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_
define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -1915,7 +1915,7 @@ define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 32, i32 0, i32 0, i32 0, i32 33, i32 0, i32 0, i32 0, i32 34, i32 0, i32 0, i32 0, i32 35, i32 0, i32 0, i32 0, i32 36, i32 0, i32 0, i32 0, i32 37, i32 0, i32 0, i32 0, i32 38, i32 0, i32 0, i32 0, i32 39, i32 0, i32 0, i32 0>
@@ -1924,7 +1924,7 @@ define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_
define <32 x i8> @shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -1932,7 +1932,7 @@ define <32 x i8> @shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 32, i32 0, i32 33, i32 0, i32 34, i32 0, i32 35, i32 0, i32 36, i32 0, i32 37, i32 0, i32 38, i32 0, i32 39, i32 0, i32 40, i32 0, i32 41, i32 0, i32 42, i32 0, i32 43, i32 0, i32 44, i32 0, i32 45, i32 0, i32 46, i32 0, i32 47, i32 0>
@@ -1941,7 +1941,7 @@ define <32 x i8> @shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_
define <32 x i8> @shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz__zz_59_zz_zz_zz_60_zz_zz_zz_61_zz_zz_zz_62_zz_zz_zz_63_zz_zz_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz__zz_59_zz_zz_zz_60_zz_zz_zz_61_zz_zz_zz_62_zz_zz_zz_63_zz_zz_zz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
@@ -1951,7 +1951,7 @@ define <32 x i8> @shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz__zz_59_zz_zz
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz__zz_59_zz_zz_zz_60_zz_zz_zz_61_zz_zz_zz_62_zz_zz_zz_63_zz_zz_zz:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
@@ -1962,7 +1962,7 @@ define <32 x i8> @shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz__zz_59_zz_zz
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz__zz_59_zz_zz_zz_60_zz_zz_zz_61_zz_zz_zz_62_zz_zz_zz_63_zz_zz_zz:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
@@ -1979,7 +1979,7 @@ define <32 x i8> @shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz__zz_59_zz_zz
define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
@@ -1988,7 +1988,7 @@ define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 47, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 63, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
@@ -1997,7 +1997,7 @@ define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
define <32 x i8> @shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
@@ -2006,7 +2006,7 @@ define <32 x i8> @shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 63, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
@@ -2015,7 +2015,7 @@ define <32 x i8> @shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_uu_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_uu_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
@@ -2023,7 +2023,7 @@ define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_uu_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 47, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 undef, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
@@ -2032,7 +2032,7 @@ define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
define <32 x i8> @shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -2040,7 +2040,7 @@ define <32 x i8> @shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -2049,7 +2049,7 @@ define <32 x i8> @shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
define <32 x i8> @shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
@@ -2057,7 +2057,7 @@ define <32 x i8> @shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 63, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
@@ -2066,7 +2066,7 @@ define <32 x i8> @shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_
define <32 x i8> @shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_32_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_48(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_32_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_48:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0]
@@ -2075,7 +2075,7 @@ define <32 x i8> @shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_32_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_32_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_48:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0],ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
@@ -2084,7 +2084,7 @@ define <32 x i8> @shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_32_
define <32 x i8> @shuffle_v32i8_33_34_35_36_37_38_39_40_41_42_43_44_45_46_47_00_49_50_51_52_53_54_55_56_57_58_59_60_61_62_63_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_33_34_35_36_37_38_39_40_41_42_43_44_45_46_47_00_49_50_51_52_53_54_55_56_57_58_59_60_61_62_63_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0]
@@ -2093,7 +2093,7 @@ define <32 x i8> @shuffle_v32i8_33_34_35_36_37_38_39_40_41_42_43_44_45_46_47_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_33_34_35_36_37_38_39_40_41_42_43_44_45_46_47_00_49_50_51_52_53_54_55_56_57_58_59_60_61_62_63_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 00, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 16>
@@ -2102,7 +2102,7 @@ define <32 x i8> @shuffle_v32i8_33_34_35_36_37_38_39_40_41_42_43_44_45_46_47_00_
define <32 x i8> @shuffle_v32i8_15_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_31_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_15_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_31_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
@@ -2111,7 +2111,7 @@ define <32 x i8> @shuffle_v32i8_15_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_15_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_31_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm0[31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62>
@@ -2120,7 +2120,7 @@ define <32 x i8> @shuffle_v32i8_15_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_
define <32 x i8> @shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
@@ -2128,7 +2128,7 @@ define <32 x i8> @shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,16]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16>
@@ -2137,7 +2137,7 @@ define <32 x i8> @shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_
define <32 x i8> @shuffle_v32i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
@@ -2145,7 +2145,7 @@ define <32 x i8> @shuffle_v32i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,31,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
@@ -2154,19 +2154,19 @@ define <32 x i8> @shuffle_v32i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
define <32 x i8> @shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_10_10_10_10_10_10_10_10_10_10_10_10_10_10_10_10(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_10_10_10_10_10_10_10_10_10_10_10_10_10_10_10_10:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_10_10_10_10_10_10_10_10_10_10_10_10_10_10_10_10:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_10_10_10_10_10_10_10_10_10_10_10_10_10_10_10_10:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2178,7 +2178,7 @@ define <32 x i8> @shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_
define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -2186,7 +2186,7 @@ define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2OR512VL-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -2196,7 +2196,7 @@ define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_16_
define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,15,15,15,15,15,15,15,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
@@ -2204,14 +2204,14 @@ define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,15,15,15,15,15,15,15,12,12,13,13,14,14,15,15]
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
@@ -2224,12 +2224,12 @@ define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_
define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<32 x i8> %a, <32 x i8> %b) {
; AVX1OR2-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -2240,19 +2240,19 @@ define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_15_
define <32 x i8> @shuffle_v32i8_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
@@ -2264,7 +2264,7 @@ define <32 x i8> @shuffle_v32i8_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_22_
define <32 x i8> @shuffe_v32i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-LABEL: shuffe_v32i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
@@ -2277,7 +2277,7 @@ define <32 x i8> @shuffe_v32i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_2
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffe_v32i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2OR512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2OR512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
@@ -2293,7 +2293,7 @@ define <32 x i8> @shuffe_v32i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_2
define <4 x i64> @PR28136(<32 x i8> %a0, <32 x i8> %a1) {
; AVX1-LABEL: PR28136:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,10,10,12,12,14,14,9,9,11,11,13,13,15,15]
@@ -2311,7 +2311,7 @@ define <4 x i64> @PR28136(<32 x i8> %a0, <32 x i8> %a1) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: PR28136:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2OR512VL-NEXT: retq
@@ -2323,7 +2323,7 @@ define <4 x i64> @PR28136(<32 x i8> %a0, <32 x i8> %a1) {
define <32 x i8> @insert_dup_mem_v32i8_i32(i32* %ptr) {
; AVX1-LABEL: insert_dup_mem_v32i8_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
@@ -2331,7 +2331,7 @@ define <32 x i8> @insert_dup_mem_v32i8_i32(i32* %ptr) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_mem_v32i8_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -2343,7 +2343,7 @@ define <32 x i8> @insert_dup_mem_v32i8_i32(i32* %ptr) {
define <32 x i8> @insert_dup_mem_v32i8_sext_i8(i8* %ptr) {
; AVX1-LABEL: insert_dup_mem_v32i8_sext_i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movsbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -2352,7 +2352,7 @@ define <32 x i8> @insert_dup_mem_v32i8_sext_i8(i8* %ptr) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_mem_v32i8_sext_i8:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX2OR512VL-NEXT: retq
%tmp = load i8, i8* %ptr, align 1
@@ -2365,14 +2365,14 @@ define <32 x i8> @insert_dup_mem_v32i8_sext_i8(i8* %ptr) {
define <32 x i8> @insert_dup_elt1_mem_v32i8_i32(i32* %ptr) {
; AVX1-LABEL: insert_dup_elt1_mem_v32i8_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_elt1_mem_v32i8_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb 1(%rdi), %ymm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -2384,14 +2384,14 @@ define <32 x i8> @insert_dup_elt1_mem_v32i8_i32(i32* %ptr) {
define <32 x i8> @insert_dup_elt3_mem_v32i8_i32(i32* %ptr) {
; AVX1-LABEL: insert_dup_elt3_mem_v32i8_i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: insert_dup_elt3_mem_v32i8_i32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpbroadcastb 3(%rdi), %ymm0
; AVX2OR512VL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -2403,7 +2403,7 @@ define <32 x i8> @insert_dup_elt3_mem_v32i8_i32(i32* %ptr) {
define <32 x i8> @insert_dup_elt1_mem_v32i8_sext_i8(i8* %ptr) {
; AVX1-LABEL: insert_dup_elt1_mem_v32i8_sext_i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movsbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
@@ -2411,7 +2411,7 @@ define <32 x i8> @insert_dup_elt1_mem_v32i8_sext_i8(i8* %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_elt1_mem_v32i8_sext_i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movsbl (%rdi), %eax
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
@@ -2419,7 +2419,7 @@ define <32 x i8> @insert_dup_elt1_mem_v32i8_sext_i8(i8* %ptr) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_dup_elt1_mem_v32i8_sext_i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movsbl (%rdi), %eax
; AVX512VL-NEXT: shrl $8, %eax
; AVX512VL-NEXT: vpbroadcastb %eax, %ymm0
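
Every hunk in this test file is the same mechanical substitution: the basic-block comment in the CHECK lines changes from "# BB#0:" to the MIR-style "# %bb.0:". As a minimal illustration of the two comment formats only — a hypothetical sketch, not LLVM's actual AsmPrinter code, and printBlockComment/UseMIRSyntax are invented names — the substitution amounts to:

    // Hypothetical sketch: formats the assembly comment emitted at the
    // start of machine basic block N in the old and new styles seen in
    // the CHECK-line updates above.
    #include <cstdio>

    static void printBlockComment(unsigned BlockNum, bool UseMIRSyntax) {
      if (UseMIRSyntax)
        std::printf("# %%bb.%u:\n", BlockNum); // new style: # %bb.0:
      else
        std::printf("# BB#%u:\n", BlockNum);   // old style: # BB#0:
    }

    int main() {
      printBlockComment(0, /*UseMIRSyntax=*/false);
      printBlockComment(0, /*UseMIRSyntax=*/true);
      return 0;
    }

The remaining files in this commit, such as vector-shuffle-256-v4.ll below, receive the identical CHECK-line update.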
diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 1cc2a8385f0..3c8377d364d 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -5,18 +5,18 @@
define <4 x double> @shuffle_v4f64_0000(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_0000:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_0000:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -25,18 +25,18 @@ define <4 x double> @shuffle_v4f64_0000(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0001(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0001:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_0001:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_0001:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
@@ -45,7 +45,7 @@ define <4 x double> @shuffle_v4f64_0001(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0020(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0020:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
@@ -53,12 +53,12 @@ define <4 x double> @shuffle_v4f64_0020(<4 x double> %a, <4 x double> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_0020:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_0020:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
@@ -67,19 +67,19 @@ define <4 x double> @shuffle_v4f64_0020(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0300(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0300:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,1,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_0300:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_0300:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
@@ -88,19 +88,19 @@ define <4 x double> @shuffle_v4f64_0300(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_1000(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_1000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_1000:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_1000:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
@@ -109,18 +109,18 @@ define <4 x double> @shuffle_v4f64_1000(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_2200(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_2200:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_2200:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_2200:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
@@ -129,18 +129,18 @@ define <4 x double> @shuffle_v4f64_2200(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_2222(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_2222:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_2222:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_2222:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
@@ -149,18 +149,18 @@ define <4 x double> @shuffle_v4f64_2222(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_2222_bc(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4f64_2222_bc:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_2222_bc:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_2222_bc:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX512VL-NEXT: retq
%tmp0 = bitcast <4 x i64> %a to <4 x double>
@@ -171,19 +171,19 @@ define <4 x double> @shuffle_v4f64_2222_bc(<4 x i64> %a, <4 x i64> %b) {
define <4 x double> @shuffle_v4f64_3330(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_3330:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_3330:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_3330:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
@@ -192,18 +192,18 @@ define <4 x double> @shuffle_v4f64_3330(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_3210(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_3210:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_3210:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,1,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_3210:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,1,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -212,7 +212,7 @@ define <4 x double> @shuffle_v4f64_3210(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0023(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0023:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,3]
; ALL-NEXT: retq
@@ -222,7 +222,7 @@ define <4 x double> @shuffle_v4f64_0023(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0022(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0022:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -231,7 +231,7 @@ define <4 x double> @shuffle_v4f64_0022(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64mem_0022(<4 x double>* %ptr, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64mem_0022:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; ALL-NEXT: retq
%a = load <4 x double>, <4 x double>* %ptr
@@ -241,7 +241,7 @@ define <4 x double> @shuffle_v4f64mem_0022(<4 x double>* %ptr, <4 x double> %b)
define <4 x double> @shuffle_v4f64_1032(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_1032:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -250,7 +250,7 @@ define <4 x double> @shuffle_v4f64_1032(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_1133(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_1133:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -259,7 +259,7 @@ define <4 x double> @shuffle_v4f64_1133(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_1023(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_1023:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
@@ -268,7 +268,7 @@ define <4 x double> @shuffle_v4f64_1023(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_1022(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_1022:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,2]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 2, i32 2>
@@ -277,19 +277,19 @@ define <4 x double> @shuffle_v4f64_1022(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0213(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0213:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_0213:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_0213:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -298,7 +298,7 @@ define <4 x double> @shuffle_v4f64_0213(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0423(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0423:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
; ALL-NEXT: retq
@@ -308,7 +308,7 @@ define <4 x double> @shuffle_v4f64_0423(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0462(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0462:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm1[0,0,2,2]
; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
@@ -319,7 +319,7 @@ define <4 x double> @shuffle_v4f64_0462(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0426(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0426:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -328,7 +328,7 @@ define <4 x double> @shuffle_v4f64_0426(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_1537(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_1537:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -337,7 +337,7 @@ define <4 x double> @shuffle_v4f64_1537(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_4062(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_4062:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 4, i32 0, i32 6, i32 2>
@@ -346,7 +346,7 @@ define <4 x double> @shuffle_v4f64_4062(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_5173(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_5173:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 5, i32 1, i32 7, i32 3>
@@ -355,7 +355,7 @@ define <4 x double> @shuffle_v4f64_5173(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_5163(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_5163:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[2],ymm0[3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 5, i32 1, i32 6, i32 3>
@@ -364,7 +364,7 @@ define <4 x double> @shuffle_v4f64_5163(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0527(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0527:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -373,7 +373,7 @@ define <4 x double> @shuffle_v4f64_0527(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_4163(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_4163:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -382,7 +382,7 @@ define <4 x double> @shuffle_v4f64_4163(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0145(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0145:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -391,7 +391,7 @@ define <4 x double> @shuffle_v4f64_0145(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_4501(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_4501:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
@@ -400,7 +400,7 @@ define <4 x double> @shuffle_v4f64_4501(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0167(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0167:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
@@ -409,7 +409,7 @@ define <4 x double> @shuffle_v4f64_0167(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_1054(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_1054:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; ALL-NEXT: retq
@@ -419,7 +419,7 @@ define <4 x double> @shuffle_v4f64_1054(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_3254(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_3254:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; ALL-NEXT: retq
@@ -429,7 +429,7 @@ define <4 x double> @shuffle_v4f64_3254(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_3276(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_3276:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; ALL-NEXT: retq
@@ -439,7 +439,7 @@ define <4 x double> @shuffle_v4f64_3276(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_1076(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_1076:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; ALL-NEXT: retq
@@ -449,21 +449,21 @@ define <4 x double> @shuffle_v4f64_1076(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0415(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0415:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_0415:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_0415:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovapd {{.*#+}} ymm2 = [0,4,1,5]
; AVX512VL-NEXT: vpermt2pd %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -473,7 +473,7 @@ define <4 x double> @shuffle_v4f64_0415(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_u062(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_u062:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 undef, i32 0, i32 6, i32 2>
@@ -482,7 +482,7 @@ define <4 x double> @shuffle_v4f64_u062(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_15uu(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_15uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 5, i32 undef, i32 undef>
@@ -491,7 +491,7 @@ define <4 x double> @shuffle_v4f64_15uu(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_11uu(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_11uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 1, i32 undef, i32 undef>
@@ -500,18 +500,18 @@ define <4 x double> @shuffle_v4f64_11uu(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_22uu(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_22uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_22uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_22uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 2, i32 2, i32 undef, i32 undef>
@@ -520,18 +520,18 @@ define <4 x double> @shuffle_v4f64_22uu(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_3333(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_3333:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_3333:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_3333:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
@@ -540,21 +540,21 @@ define <4 x double> @shuffle_v4f64_3333(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0z3z(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0z3z:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_0z3z:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_0z3z:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
@@ -565,7 +565,7 @@ define <4 x double> @shuffle_v4f64_0z3z(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_1z2z(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_1z2z:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -575,14 +575,14 @@ define <4 x double> @shuffle_v4f64_1z2z(<4 x double> %a, <4 x double> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_1z2z:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_1z2z:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
@@ -593,18 +593,18 @@ define <4 x double> @shuffle_v4f64_1z2z(<4 x double> %a, <4 x double> %b) {
define <4 x i64> @shuffle_v4i64_0000(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0000:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0000:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -613,18 +613,18 @@ define <4 x i64> @shuffle_v4i64_0000(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0001(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0001:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0001:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0001:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
@@ -633,7 +633,7 @@ define <4 x i64> @shuffle_v4i64_0001(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0020:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -641,12 +641,12 @@ define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0020:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0020:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
@@ -655,19 +655,19 @@ define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0112(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0112:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0112:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,2]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0112:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,2]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 1, i32 2>
@@ -676,19 +676,19 @@ define <4 x i64> @shuffle_v4i64_0112(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0300(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0300:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,1,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0300:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0300:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
@@ -697,19 +697,19 @@ define <4 x i64> @shuffle_v4i64_0300(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_1000(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_1000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1000:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1000:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
@@ -718,18 +718,18 @@ define <4 x i64> @shuffle_v4i64_1000(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_2200(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_2200:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_2200:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_2200:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
@@ -738,19 +738,19 @@ define <4 x i64> @shuffle_v4i64_2200(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_3330(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_3330:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_3330:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_3330:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
@@ -759,18 +759,18 @@ define <4 x i64> @shuffle_v4i64_3330(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_3210(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_3210:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_3210:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,1,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_3210:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,1,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -779,19 +779,19 @@ define <4 x i64> @shuffle_v4i64_3210(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0213(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0213:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0213:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0213:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -800,20 +800,20 @@ define <4 x i64> @shuffle_v4i64_0213(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0124(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0124:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0124:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm1, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0124:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vbroadcastsd %xmm1, %ymm1
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX512VL-NEXT: retq
@@ -823,21 +823,21 @@ define <4 x i64> @shuffle_v4i64_0124(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0142:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,1,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0142:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,2]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0142:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,2]
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
@@ -848,7 +848,7 @@ define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0412:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
@@ -857,14 +857,14 @@ define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0412:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,2]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0412:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,2]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
@@ -875,7 +875,7 @@ define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_4012(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_4012:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -884,13 +884,13 @@ define <4 x i64> @shuffle_v4i64_4012(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_4012:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,2]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_4012:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,2]
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX512VL-NEXT: retq
@@ -900,7 +900,7 @@ define <4 x i64> @shuffle_v4i64_4012(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0145(<4 x i64> %a, <4 x i64> %b) {
; ALL-LABEL: shuffle_v4i64_0145:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -909,21 +909,21 @@ define <4 x i64> @shuffle_v4i64_0145(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0451(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0451:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0451:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0451:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,3]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
@@ -934,7 +934,7 @@ define <4 x i64> @shuffle_v4i64_0451(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_4501(<4 x i64> %a, <4 x i64> %b) {
; ALL-LABEL: shuffle_v4i64_4501:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; ALL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
@@ -943,21 +943,21 @@ define <4 x i64> @shuffle_v4i64_4501(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_4015(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_4015:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_4015:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_4015:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3]
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
@@ -968,20 +968,20 @@ define <4 x i64> @shuffle_v4i64_4015(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_2u35(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_2u35:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_2u35:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_2u35:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,1]
; AVX512VL-NEXT: retq
@@ -991,7 +991,7 @@ define <4 x i64> @shuffle_v4i64_2u35(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_1251(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_1251:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm2[0],ymm0[2],ymm2[3]
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
@@ -1000,14 +1000,14 @@ define <4 x i64> @shuffle_v4i64_1251(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1251:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1251:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,2,1]
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
@@ -1018,19 +1018,19 @@ define <4 x i64> @shuffle_v4i64_1251(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_1054(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_1054:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1054:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1054:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX512VL-NEXT: retq
@@ -1040,19 +1040,19 @@ define <4 x i64> @shuffle_v4i64_1054(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_3254(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_3254:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_3254:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_3254:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX512VL-NEXT: retq
@@ -1062,19 +1062,19 @@ define <4 x i64> @shuffle_v4i64_3254(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_3276(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_3276:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_3276:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_3276:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX512VL-NEXT: retq
@@ -1084,19 +1084,19 @@ define <4 x i64> @shuffle_v4i64_3276(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_1076(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_1076:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1076:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1076:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX512VL-NEXT: retq
@@ -1106,21 +1106,21 @@ define <4 x i64> @shuffle_v4i64_1076(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0415(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0415:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0415:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0415:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
@@ -1131,18 +1131,18 @@ define <4 x i64> @shuffle_v4i64_0415(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_z4z6(<4 x i64> %a) {
; AVX1-LABEL: shuffle_v4i64_z4z6:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_z4z6:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_z4z6:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> zeroinitializer, <4 x i64> %a, <4 x i32> <i32 0, i32 4, i32 0, i32 6>
@@ -1151,18 +1151,18 @@ define <4 x i64> @shuffle_v4i64_z4z6(<4 x i64> %a) {
define <4 x i64> @shuffle_v4i64_5zuz(<4 x i64> %a) {
; AVX1-LABEL: shuffle_v4i64_5zuz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_5zuz:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_5zuz:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> zeroinitializer, <4 x i64> %a, <4 x i32> <i32 5, i32 0, i32 undef, i32 0>
@@ -1171,7 +1171,7 @@ define <4 x i64> @shuffle_v4i64_5zuz(<4 x i64> %a) {
define <4 x i64> @shuffle_v4i64_40u2(<4 x i64> %a, <4 x i64> %b) {
; ALL-LABEL: shuffle_v4i64_40u2:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 0, i32 undef, i32 2>
@@ -1180,7 +1180,7 @@ define <4 x i64> @shuffle_v4i64_40u2(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_15uu(<4 x i64> %a, <4 x i64> %b) {
; ALL-LABEL: shuffle_v4i64_15uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 5, i32 undef, i32 undef>
@@ -1189,7 +1189,7 @@ define <4 x i64> @shuffle_v4i64_15uu(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_11uu(<4 x i64> %a, <4 x i64> %b) {
; ALL-LABEL: shuffle_v4i64_11uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 1, i32 undef, i32 undef>
@@ -1198,18 +1198,18 @@ define <4 x i64> @shuffle_v4i64_11uu(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_22uu(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_22uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_22uu:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_22uu:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 2, i32 undef, i32 undef>
@@ -1218,18 +1218,18 @@ define <4 x i64> @shuffle_v4i64_22uu(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_3333(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_3333:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_3333:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_3333:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
@@ -1238,7 +1238,7 @@ define <4 x i64> @shuffle_v4i64_3333(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_1z3z(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_1z3z:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -1248,12 +1248,12 @@ define <4 x i64> @shuffle_v4i64_1z3z(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1z3z:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1z3z:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> <i64 0, i64 undef, i64 undef, i64 undef>, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
@@ -1273,7 +1273,7 @@ define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
; ALL-LABEL: insert_reg_and_zero_v4i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovq %rdi, %xmm0
; ALL-NEXT: retq
%v = insertelement <4 x i64> undef, i64 %a, i64 0
@@ -1283,7 +1283,7 @@ define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
; ALL-LABEL: insert_mem_and_zero_v4i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
%a = load i64, i64* %ptr
@@ -1294,21 +1294,21 @@ define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; AVX1-LABEL: insert_reg_and_zero_v4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_reg_and_zero_v4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_and_zero_v4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
@@ -1320,7 +1320,7 @@ define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
define <4 x double> @insert_mem_and_zero_v4f64(double* %ptr) {
; ALL-LABEL: insert_mem_and_zero_v4f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
%a = load double, double* %ptr
@@ -1331,7 +1331,7 @@ define <4 x double> @insert_mem_and_zero_v4f64(double* %ptr) {
define <4 x double> @splat_mem_v4f64(double* %ptr) {
; ALL-LABEL: splat_mem_v4f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd (%rdi), %ymm0
; ALL-NEXT: retq
%a = load double, double* %ptr
@@ -1342,7 +1342,7 @@ define <4 x double> @splat_mem_v4f64(double* %ptr) {
define <4 x i64> @splat_mem_v4i64(i64* %ptr) {
; ALL-LABEL: splat_mem_v4i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd (%rdi), %ymm0
; ALL-NEXT: retq
%a = load i64, i64* %ptr
@@ -1353,7 +1353,7 @@ define <4 x i64> @splat_mem_v4i64(i64* %ptr) {
define <4 x double> @splat_mem_v4f64_2(double* %p) {
; ALL-LABEL: splat_mem_v4f64_2:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd (%rdi), %ymm0
; ALL-NEXT: retq
%1 = load double, double* %p
@@ -1364,18 +1364,18 @@ define <4 x double> @splat_mem_v4f64_2(double* %p) {
define <4 x double> @splat_v4f64(<2 x double> %r) {
; AVX1-LABEL: splat_v4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat_v4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: splat_v4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX512VL-NEXT: retq
%1 = shufflevector <2 x double> %r, <2 x double> undef, <4 x i32> zeroinitializer
@@ -1384,7 +1384,7 @@ define <4 x double> @splat_v4f64(<2 x double> %r) {
define <4 x i64> @splat_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
; ALL-LABEL: splat_mem_v4i64_from_v2i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd (%rdi), %ymm0
; ALL-NEXT: retq
%v = load <2 x i64>, <2 x i64>* %ptr
@@ -1394,7 +1394,7 @@ define <4 x i64> @splat_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
define <4 x double> @splat_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
; ALL-LABEL: splat_mem_v4f64_from_v2f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd (%rdi), %ymm0
; ALL-NEXT: retq
%v = load <2 x double>, <2 x double>* %ptr
@@ -1404,17 +1404,17 @@ define <4 x double> @splat_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
; AVX1-LABEL: splat128_mem_v4i64_from_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat128_mem_v4i64_from_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: splat128_mem_v4i64_from_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX512VL-NEXT: retq
%v = load <2 x i64>, <2 x i64>* %ptr
@@ -1424,7 +1424,7 @@ define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
define <4 x double> @splat128_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
; ALL-LABEL: splat128_mem_v4f64_from_v2f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; ALL-NEXT: retq
%v = load <2 x double>, <2 x double>* %ptr
@@ -1434,18 +1434,18 @@ define <4 x double> @splat128_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
define <4 x double> @broadcast_v4f64_0000_from_v2i64(<2 x i64> %a0) {
; AVX1-LABEL: broadcast_v4f64_0000_from_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: broadcast_v4f64_0000_from_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: broadcast_v4f64_0000_from_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX512VL-NEXT: retq
%1 = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1456,7 +1456,7 @@ define <4 x double> @broadcast_v4f64_0000_from_v2i64(<2 x i64> %a0) {
define <4 x double> @bitcast_v4f64_0426(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: bitcast_v4f64_0426:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; ALL-NEXT: retq
%shuffle64 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 4, i32 0, i32 6, i32 2>
@@ -1470,17 +1470,17 @@ define <4 x double> @bitcast_v4f64_0426(<4 x double> %a, <4 x double> %b) {
define <4 x i64> @concat_v4i64_0167(<4 x i64> %a0, <4 x i64> %a1) {
; AVX1-LABEL: concat_v4i64_0167:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: concat_v4i64_0167:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: concat_v4i64_0167:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512VL-NEXT: retq
%a0lo = shufflevector <4 x i64> %a0, <4 x i64> %a1, <2 x i32> <i32 0, i32 1>
@@ -1491,7 +1491,7 @@ define <4 x i64> @concat_v4i64_0167(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @concat_v4i64_0145_bc(<4 x i64> %a0, <4 x i64> %a1) {
; ALL-LABEL: concat_v4i64_0145_bc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: retq
%a0lo = shufflevector <4 x i64> %a0, <4 x i64> %a1, <2 x i32> <i32 0, i32 1>
@@ -1505,7 +1505,7 @@ define <4 x i64> @concat_v4i64_0145_bc(<4 x i64> %a0, <4 x i64> %a1) {
define <4 x i64> @insert_dup_mem_v4i64(i64* %ptr) {
; ALL-LABEL: insert_dup_mem_v4i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastsd (%rdi), %ymm0
; ALL-NEXT: retq
%tmp = load i64, i64* %ptr, align 1
@@ -1516,20 +1516,20 @@ define <4 x i64> @insert_dup_mem_v4i64(i64* %ptr) {
define <4 x i64> @shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_1234:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[3],ymm1[2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1234:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,3,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1234:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: valignq {{.*#+}} ymm0 = ymm0[1,2,3],ymm1[0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
@@ -1538,18 +1538,18 @@ define <4 x i64> @shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_1230(<4 x i64> %a) {
; AVX1-LABEL: shuffle_v4i64_1230:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[3],ymm1[2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1230:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,3,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1230:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,3,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
@@ -1558,21 +1558,21 @@ define <4 x i64> @shuffle_v4i64_1230(<4 x i64> %a) {
define <4 x i64> @shuffle_v4i64_z0z3(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_z0z3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,3]
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_z0z3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,3]
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_z0z3:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,3]
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
@@ -1583,7 +1583,7 @@ define <4 x i64> @shuffle_v4i64_z0z3(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_1z2z(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_1z2z:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -1593,14 +1593,14 @@ define <4 x i64> @shuffle_v4i64_1z2z(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1z2z:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1z2z:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,0]
diff --git a/test/CodeGen/X86/vector-shuffle-256-v8.ll b/test/CodeGen/X86/vector-shuffle-256-v8.ll
index e4234c05845..44d0217f529 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -5,13 +5,13 @@
define <8 x float> @shuffle_v8f32_00000000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00000000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_00000000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -20,14 +20,14 @@ define <8 x float> @shuffle_v8f32_00000000(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00000010(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00000010:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_00000010:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,0]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -37,14 +37,14 @@ define <8 x float> @shuffle_v8f32_00000010(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00000200(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00000200:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_00000200:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,2]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -54,14 +54,14 @@ define <8 x float> @shuffle_v8f32_00000200(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00003000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00003000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_00003000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,3,0]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -71,7 +71,7 @@ define <8 x float> @shuffle_v8f32_00003000(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00040000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00040000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
@@ -79,7 +79,7 @@ define <8 x float> @shuffle_v8f32_00040000(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_00040000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,0,4,0,0,0,0]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -89,14 +89,14 @@ define <8 x float> @shuffle_v8f32_00040000(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00500000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00500000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,1,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_00500000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,5,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -106,14 +106,14 @@ define <8 x float> @shuffle_v8f32_00500000(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_06000000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_06000000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_06000000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,6,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -123,14 +123,14 @@ define <8 x float> @shuffle_v8f32_06000000(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_70000000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_70000000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,0,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_70000000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: movl $7, %eax
; AVX2OR512VL-NEXT: vmovd %eax, %xmm1
; AVX2OR512VL-NEXT: vpermd %ymm0, %ymm1, %ymm0
@@ -141,7 +141,7 @@ define <8 x float> @shuffle_v8f32_70000000(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_01014545(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_01014545:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
@@ -150,14 +150,14 @@ define <8 x float> @shuffle_v8f32_01014545(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00112233(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00112233:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,1,1]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_00112233:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,1,1,2,2,3,3]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -167,14 +167,14 @@ define <8 x float> @shuffle_v8f32_00112233(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00001111(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00001111:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_00001111:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,1]
; AVX2OR512VL-NEXT: retq
@@ -184,7 +184,7 @@ define <8 x float> @shuffle_v8f32_00001111(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_81a3c5e7(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_81a3c5e7:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
@@ -193,14 +193,14 @@ define <8 x float> @shuffle_v8f32_81a3c5e7(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_08080808(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_08080808:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_08080808:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; AVX2OR512VL-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -210,7 +210,7 @@ define <8 x float> @shuffle_v8f32_08080808(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_08084c4c(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_08084c4c:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4]
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; ALL-NEXT: retq
@@ -220,7 +220,7 @@ define <8 x float> @shuffle_v8f32_08084c4c(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_8823cc67(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_8823cc67:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,0],ymm0[2,3],ymm1[4,4],ymm0[6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 8, i32 2, i32 3, i32 12, i32 12, i32 6, i32 7>
@@ -229,7 +229,7 @@ define <8 x float> @shuffle_v8f32_8823cc67(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_9832dc76(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_9832dc76:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,0],ymm0[3,2],ymm1[5,4],ymm0[7,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 9, i32 8, i32 3, i32 2, i32 13, i32 12, i32 7, i32 6>
@@ -238,7 +238,7 @@ define <8 x float> @shuffle_v8f32_9832dc76(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_9810dc54(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_9810dc54:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 9, i32 8, i32 1, i32 0, i32 13, i32 12, i32 5, i32 4>
@@ -247,7 +247,7 @@ define <8 x float> @shuffle_v8f32_9810dc54(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_08194c5d(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_08194c5d:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -256,7 +256,7 @@ define <8 x float> @shuffle_v8f32_08194c5d(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_2a3b6e7f(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_2a3b6e7f:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -265,14 +265,14 @@ define <8 x float> @shuffle_v8f32_2a3b6e7f(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_08192a3b(<8 x float> %a, <8 x float> %b) {
; AVX1OR2-LABEL: shuffle_v8f32_08192a3b:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1OR2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_08192a3b:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovaps {{.*#+}} ymm2 = [0,8,1,9,2,10,3,11]
; AVX512VL-NEXT: vpermt2ps %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -282,7 +282,7 @@ define <8 x float> @shuffle_v8f32_08192a3b(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_08991abb(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_08991abb:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[0,0],xmm1[0,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[1,1]
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
@@ -291,7 +291,7 @@ define <8 x float> @shuffle_v8f32_08991abb(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_08991abb:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <u,0,1,1,u,2,3,3>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
@@ -300,7 +300,7 @@ define <8 x float> @shuffle_v8f32_08991abb(<8 x float> %a, <8 x float> %b) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_08991abb:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,1,3]
; AVX512VL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,1,1,10,2,3,3]
; AVX512VL-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0
@@ -311,7 +311,7 @@ define <8 x float> @shuffle_v8f32_08991abb(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_091b2d3f(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_091b2d3f:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,1,3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
@@ -319,14 +319,14 @@ define <8 x float> @shuffle_v8f32_091b2d3f(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_091b2d3f:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,u,1,u,2,u,3,u>
; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_091b2d3f:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovaps {{.*#+}} ymm2 = [0,9,1,11,2,13,3,15]
; AVX512VL-NEXT: vpermt2ps %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -336,14 +336,14 @@ define <8 x float> @shuffle_v8f32_091b2d3f(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_09ab1def(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_09ab1def:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_09ab1def:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
@@ -354,7 +354,7 @@ define <8 x float> @shuffle_v8f32_09ab1def(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00014445(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00014445:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,1,4,4,4,5]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 4, i32 4, i32 4, i32 5>
@@ -363,7 +363,7 @@ define <8 x float> @shuffle_v8f32_00014445(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00204464(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00204464:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,0,4,4,6,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 4, i32 6, i32 4>
@@ -372,7 +372,7 @@ define <8 x float> @shuffle_v8f32_00204464(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_03004744(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_03004744:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,3,0,0,4,7,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 7, i32 4, i32 4>
@@ -381,7 +381,7 @@ define <8 x float> @shuffle_v8f32_03004744(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_10005444(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_10005444:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,0,0,5,4,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 5, i32 4, i32 4, i32 4>
@@ -390,7 +390,7 @@ define <8 x float> @shuffle_v8f32_10005444(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_22006644(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_22006644:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,2,0,0,6,6,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 6, i32 4, i32 4>
@@ -399,7 +399,7 @@ define <8 x float> @shuffle_v8f32_22006644(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_33307774(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_33307774:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,3,3,0,7,7,7,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 7, i32 7, i32 4>
@@ -408,7 +408,7 @@ define <8 x float> @shuffle_v8f32_33307774(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_32107654(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_32107654:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -417,7 +417,7 @@ define <8 x float> @shuffle_v8f32_32107654(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00234467(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00234467:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,4,4,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 4, i32 4, i32 6, i32 7>
@@ -426,7 +426,7 @@ define <8 x float> @shuffle_v8f32_00234467(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00224466(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00224466:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -435,7 +435,7 @@ define <8 x float> @shuffle_v8f32_00224466(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_10325476(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_10325476:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -444,7 +444,7 @@ define <8 x float> @shuffle_v8f32_10325476(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_11335577(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_11335577:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -453,7 +453,7 @@ define <8 x float> @shuffle_v8f32_11335577(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_10235467(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_10235467:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,2,3,5,4,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
@@ -462,7 +462,7 @@ define <8 x float> @shuffle_v8f32_10235467(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_10225466(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_10225466:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,2,2,5,4,6,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 2, i32 5, i32 4, i32 6, i32 6>
@@ -471,7 +471,7 @@ define <8 x float> @shuffle_v8f32_10225466(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00015444(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00015444:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,1,5,4,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 5, i32 4, i32 4, i32 4>
@@ -480,7 +480,7 @@ define <8 x float> @shuffle_v8f32_00015444(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00204644(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00204644:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,0,4,6,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 6, i32 4, i32 4>
@@ -489,7 +489,7 @@ define <8 x float> @shuffle_v8f32_00204644(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_03004474(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_03004474:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,3,0,0,4,4,7,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 4, i32 7, i32 4>
@@ -498,7 +498,7 @@ define <8 x float> @shuffle_v8f32_03004474(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_10004444(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_10004444:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,0,0,4,4,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -507,7 +507,7 @@ define <8 x float> @shuffle_v8f32_10004444(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_22006446(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_22006446:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,2,0,0,6,4,4,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 4, i32 4, i32 6>
@@ -516,7 +516,7 @@ define <8 x float> @shuffle_v8f32_22006446(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_33307474(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_33307474:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,3,3,0,7,4,7,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 4, i32 7, i32 4>
@@ -525,7 +525,7 @@ define <8 x float> @shuffle_v8f32_33307474(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_32104567(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_32104567:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
@@ -534,7 +534,7 @@ define <8 x float> @shuffle_v8f32_32104567(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00236744(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00236744:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,6,7,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 6, i32 7, i32 4, i32 4>
@@ -543,7 +543,7 @@ define <8 x float> @shuffle_v8f32_00236744(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00226644(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00226644:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,2,6,6,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 6, i32 6, i32 4, i32 4>
@@ -552,7 +552,7 @@ define <8 x float> @shuffle_v8f32_00226644(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_10324567(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_10324567:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7>
@@ -561,7 +561,7 @@ define <8 x float> @shuffle_v8f32_10324567(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_11334567(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_11334567:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,3,3,4,5,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -570,7 +570,7 @@ define <8 x float> @shuffle_v8f32_11334567(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_01235467(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_01235467:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
@@ -579,7 +579,7 @@ define <8 x float> @shuffle_v8f32_01235467(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_01235466(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_01235466:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,6,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 6, i32 6>
@@ -588,7 +588,7 @@ define <8 x float> @shuffle_v8f32_01235466(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_002u6u44(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_002u6u44:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,u,6,u,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 undef, i32 6, i32 undef, i32 4, i32 4>
@@ -597,7 +597,7 @@ define <8 x float> @shuffle_v8f32_002u6u44(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00uu66uu(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00uu66uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,u,u,6,6,u,u]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 undef, i32 undef, i32 6, i32 6, i32 undef, i32 undef>
@@ -606,7 +606,7 @@ define <8 x float> @shuffle_v8f32_00uu66uu(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_103245uu(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_103245uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,u,u]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 undef, i32 undef>
@@ -615,7 +615,7 @@ define <8 x float> @shuffle_v8f32_103245uu(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_1133uu67(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_1133uu67:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,3,3,u,u,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 undef, i32 6, i32 7>
@@ -624,7 +624,7 @@ define <8 x float> @shuffle_v8f32_1133uu67(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_0uu354uu(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_0uu354uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,u,u,3,5,4,u,u]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 undef, i32 undef, i32 3, i32 5, i32 4, i32 undef, i32 undef>
@@ -633,7 +633,7 @@ define <8 x float> @shuffle_v8f32_0uu354uu(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_uuu3uu66(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_uuu3uu66:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[u,u,u,3,u,u,6,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 6, i32 6>
@@ -642,7 +642,7 @@ define <8 x float> @shuffle_v8f32_uuu3uu66(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_c348cda0:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm2[0,0],ymm0[4,7],ymm2[4,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
@@ -652,7 +652,7 @@ define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_c348cda0:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <4,u,u,0,4,5,2,u>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,3,2,0,4,7,6,4]
@@ -661,7 +661,7 @@ define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_c348cda0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovaps {{.*#+}} ymm2 = [4,11,12,0,4,5,2,8]
; AVX512VL-NEXT: vpermi2ps %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovaps %ymm2, %ymm0
@@ -672,7 +672,7 @@ define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_f511235a(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_f511235a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,1,2,2,7,5,6,6]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
@@ -682,7 +682,7 @@ define <8 x float> @shuffle_v8f32_f511235a(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_f511235a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,2,3,7,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,0]
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,5,5,6,7]
@@ -691,7 +691,7 @@ define <8 x float> @shuffle_v8f32_f511235a(<8 x float> %a, <8 x float> %b) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_f511235a:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovaps {{.*#+}} ymm2 = [15,5,1,1,2,3,5,10]
; AVX512VL-NEXT: vpermt2ps %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
@@ -701,13 +701,13 @@ define <8 x float> @shuffle_v8f32_f511235a(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_32103210(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_32103210:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_32103210:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -717,13 +717,13 @@ define <8 x float> @shuffle_v8f32_32103210(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_76547654(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_76547654:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_76547654:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2OR512VL-NEXT: retq
@@ -733,13 +733,13 @@ define <8 x float> @shuffle_v8f32_76547654(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_76543210(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_76543210:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_76543210:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2OR512VL-NEXT: retq
@@ -749,7 +749,7 @@ define <8 x float> @shuffle_v8f32_76543210(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_3210ba98(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_3210ba98:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
@@ -759,7 +759,7 @@ define <8 x float> @shuffle_v8f32_3210ba98(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_3210fedc(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_3210fedc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
@@ -769,7 +769,7 @@ define <8 x float> @shuffle_v8f32_3210fedc(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_7654fedc(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_7654fedc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
@@ -779,7 +779,7 @@ define <8 x float> @shuffle_v8f32_7654fedc(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_fedc7654(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_fedc7654:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
@@ -789,7 +789,7 @@ define <8 x float> @shuffle_v8f32_fedc7654(<8 x float> %a, <8 x float> %b) {
define <8 x float> @PR21138(<8 x float> %truc, <8 x float> %tchose) {
; AVX1-LABEL: PR21138:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -799,7 +799,7 @@ define <8 x float> @PR21138(<8 x float> %truc, <8 x float> %tchose) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: PR21138:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2OR512VL-NEXT: retq
@@ -809,7 +809,7 @@ define <8 x float> @PR21138(<8 x float> %truc, <8 x float> %tchose) {
define <8 x float> @shuffle_v8f32_ba987654(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_ba987654:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
@@ -819,7 +819,7 @@ define <8 x float> @shuffle_v8f32_ba987654(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_ba983210(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_ba983210:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
@@ -829,7 +829,7 @@ define <8 x float> @shuffle_v8f32_ba983210(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_80u1c4u5(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_80u1c4u5:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 0, i32 undef, i32 1, i32 12, i32 4, i32 undef, i32 5>
@@ -838,7 +838,7 @@ define <8 x float> @shuffle_v8f32_80u1c4u5(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_a2u3e6f7(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_a2u3e6f7:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 10, i32 2, i32 undef, i32 3, i32 14, i32 6, i32 15, i32 7>
@@ -847,7 +847,7 @@ define <8 x float> @shuffle_v8f32_a2u3e6f7(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_uuuu1111(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_uuuu1111:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: retq
@@ -857,13 +857,13 @@ define <8 x float> @shuffle_v8f32_uuuu1111(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_44444444(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_44444444:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_44444444:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2OR512VL-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -873,7 +873,7 @@ define <8 x float> @shuffle_v8f32_44444444(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_1188uuuu(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_1188uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,0]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 1, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -882,7 +882,7 @@ define <8 x float> @shuffle_v8f32_1188uuuu(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_uuuu3210(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_uuuu3210:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: retq
@@ -892,7 +892,7 @@ define <8 x float> @shuffle_v8f32_uuuu3210(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_uuuu1188(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_uuuu1188:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,0]
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: retq
@@ -902,7 +902,7 @@ define <8 x float> @shuffle_v8f32_uuuu1188(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_1111uuuu(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_1111uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -911,7 +911,7 @@ define <8 x float> @shuffle_v8f32_1111uuuu(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_5555uuuu(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_5555uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; ALL-NEXT: retq
@@ -921,13 +921,13 @@ define <8 x float> @shuffle_v8f32_5555uuuu(<8 x float> %a, <8 x float> %b) {
define <8 x i32> @shuffle_v8i32_00000000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00000000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00000000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -936,14 +936,14 @@ define <8 x i32> @shuffle_v8i32_00000000(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00000010(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00000010:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00000010:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,0]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,0,1]
; AVX2OR512VL-NEXT: retq
@@ -953,14 +953,14 @@ define <8 x i32> @shuffle_v8i32_00000010(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00000200(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00000200:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00000200:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,2]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -970,14 +970,14 @@ define <8 x i32> @shuffle_v8i32_00000200(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00003000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00003000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00003000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,3,0]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,0]
; AVX2OR512VL-NEXT: retq
@@ -987,7 +987,7 @@ define <8 x i32> @shuffle_v8i32_00003000(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00040000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00040000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
@@ -995,7 +995,7 @@ define <8 x i32> @shuffle_v8i32_00040000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00040000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,0,4,0,0,0,0]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1005,14 +1005,14 @@ define <8 x i32> @shuffle_v8i32_00040000(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00500000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00500000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,1,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00500000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,5,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1022,14 +1022,14 @@ define <8 x i32> @shuffle_v8i32_00500000(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_06000000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_06000000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_06000000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,6,0,0,0,0,0,0]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1039,14 +1039,14 @@ define <8 x i32> @shuffle_v8i32_06000000(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_70000000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_70000000:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,0,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_70000000:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: movl $7, %eax
; AVX2OR512VL-NEXT: vmovd %eax, %xmm1
; AVX2OR512VL-NEXT: vpermd %ymm0, %ymm1, %ymm0
@@ -1057,12 +1057,12 @@ define <8 x i32> @shuffle_v8i32_70000000(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_01014545(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_01014545:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_01014545:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
@@ -1071,14 +1071,14 @@ define <8 x i32> @shuffle_v8i32_01014545(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00112233(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00112233:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,1,1]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00112233:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,1,1,2,2,3,3]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1088,14 +1088,14 @@ define <8 x i32> @shuffle_v8i32_00112233(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00001111(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00001111:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00001111:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,1]
; AVX2OR512VL-NEXT: retq
@@ -1105,7 +1105,7 @@ define <8 x i32> @shuffle_v8i32_00001111(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_81a3c5e7(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_81a3c5e7:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
@@ -1114,14 +1114,14 @@ define <8 x i32> @shuffle_v8i32_81a3c5e7(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_08080808(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_08080808:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_08080808:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2OR512VL-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1131,13 +1131,13 @@ define <8 x i32> @shuffle_v8i32_08080808(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_08084c4c(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_08084c4c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_08084c4c:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,2,0,4,4,6,4]
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
@@ -1148,7 +1148,7 @@ define <8 x i32> @shuffle_v8i32_08084c4c(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_8823cc67(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_8823cc67:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,0],ymm0[2,3],ymm1[4,4],ymm0[6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 8, i32 8, i32 2, i32 3, i32 12, i32 12, i32 6, i32 7>
@@ -1157,7 +1157,7 @@ define <8 x i32> @shuffle_v8i32_8823cc67(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_9832dc76(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_9832dc76:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,0],ymm0[3,2],ymm1[5,4],ymm0[7,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 9, i32 8, i32 3, i32 2, i32 13, i32 12, i32 7, i32 6>
@@ -1166,7 +1166,7 @@ define <8 x i32> @shuffle_v8i32_9832dc76(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_9810dc54(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_9810dc54:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 9, i32 8, i32 1, i32 0, i32 13, i32 12, i32 5, i32 4>
@@ -1175,7 +1175,7 @@ define <8 x i32> @shuffle_v8i32_9810dc54(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_08194c5d(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_08194c5d:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -1184,7 +1184,7 @@ define <8 x i32> @shuffle_v8i32_08194c5d(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_2a3b6e7f(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_2a3b6e7f:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -1193,14 +1193,14 @@ define <8 x i32> @shuffle_v8i32_2a3b6e7f(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_08192a3b(<8 x i32> %a, <8 x i32> %b) {
; AVX1OR2-LABEL: shuffle_v8i32_08192a3b:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1OR2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_08192a3b:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm0 = [0,8,2,9,4,10,6,11]
; AVX512VL-NEXT: vpermi2d %ymm1, %ymm2, %ymm0
@@ -1211,7 +1211,7 @@ define <8 x i32> @shuffle_v8i32_08192a3b(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_08991abb(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_08991abb:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[0,0],xmm1[0,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[1,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -1220,7 +1220,7 @@ define <8 x i32> @shuffle_v8i32_08991abb(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_08991abb:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <u,0,1,1,u,2,3,3>
; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1229,7 +1229,7 @@ define <8 x i32> @shuffle_v8i32_08991abb(<8 x i32> %a, <8 x i32> %b) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_08991abb:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm0 = [8,0,1,1,10,2,3,3]
; AVX512VL-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
@@ -1240,7 +1240,7 @@ define <8 x i32> @shuffle_v8i32_08991abb(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_091b2d3f(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_091b2d3f:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,1,3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
@@ -1248,7 +1248,7 @@ define <8 x i32> @shuffle_v8i32_091b2d3f(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_091b2d3f:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2OR512VL-NEXT: retq
@@ -1258,14 +1258,14 @@ define <8 x i32> @shuffle_v8i32_091b2d3f(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_09ab1def(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_09ab1def:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_09ab1def:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
@@ -1276,7 +1276,7 @@ define <8 x i32> @shuffle_v8i32_09ab1def(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00014445(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_00014445:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,1,4,4,4,5]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 4, i32 4, i32 4, i32 5>
@@ -1285,7 +1285,7 @@ define <8 x i32> @shuffle_v8i32_00014445(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00204464(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_00204464:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,0,4,4,6,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 4, i32 6, i32 4>
@@ -1294,7 +1294,7 @@ define <8 x i32> @shuffle_v8i32_00204464(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_03004744(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_03004744:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,3,0,0,4,7,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 7, i32 4, i32 4>
@@ -1303,7 +1303,7 @@ define <8 x i32> @shuffle_v8i32_03004744(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_10005444(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_10005444:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,0,0,5,4,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 5, i32 4, i32 4, i32 4>
@@ -1312,7 +1312,7 @@ define <8 x i32> @shuffle_v8i32_10005444(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_22006644(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_22006644:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,2,0,0,6,6,4,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 6, i32 4, i32 4>
@@ -1321,7 +1321,7 @@ define <8 x i32> @shuffle_v8i32_22006644(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_33307774(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_33307774:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,3,3,0,7,7,7,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 7, i32 7, i32 4>
@@ -1330,7 +1330,7 @@ define <8 x i32> @shuffle_v8i32_33307774(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_32107654(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_32107654:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -1339,7 +1339,7 @@ define <8 x i32> @shuffle_v8i32_32107654(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00234467(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_00234467:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,4,4,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 4, i32 4, i32 6, i32 7>
@@ -1348,12 +1348,12 @@ define <8 x i32> @shuffle_v8i32_00234467(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00224466(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00224466:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00224466:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -1362,7 +1362,7 @@ define <8 x i32> @shuffle_v8i32_00224466(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_10325476(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_10325476:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -1371,12 +1371,12 @@ define <8 x i32> @shuffle_v8i32_10325476(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_11335577(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_11335577:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_11335577:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -1385,7 +1385,7 @@ define <8 x i32> @shuffle_v8i32_11335577(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_10235467(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_10235467:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,2,3,5,4,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
@@ -1394,7 +1394,7 @@ define <8 x i32> @shuffle_v8i32_10235467(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_10225466(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_10225466:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,2,2,5,4,6,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 2, i32 5, i32 4, i32 6, i32 6>
@@ -1403,12 +1403,12 @@ define <8 x i32> @shuffle_v8i32_10225466(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00015444(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00015444:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,1,5,4,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00015444:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,0,1,5,4,4,4]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1418,12 +1418,12 @@ define <8 x i32> @shuffle_v8i32_00015444(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00204644(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00204644:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,0,4,6,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00204644:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,2,0,4,6,4,4]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1433,12 +1433,12 @@ define <8 x i32> @shuffle_v8i32_00204644(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_03004474(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_03004474:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,3,0,0,4,4,7,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_03004474:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,3,0,0,4,4,7,4]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1448,12 +1448,12 @@ define <8 x i32> @shuffle_v8i32_03004474(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_10004444(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_10004444:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_10004444:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [1,0,0,0,4,4,4,4]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1463,12 +1463,12 @@ define <8 x i32> @shuffle_v8i32_10004444(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_22006446(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_22006446:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,2,0,0,6,4,4,6]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_22006446:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [2,2,0,0,6,4,4,6]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1478,12 +1478,12 @@ define <8 x i32> @shuffle_v8i32_22006446(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_33307474(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_33307474:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,3,3,0,7,4,7,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_33307474:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [3,3,3,0,7,4,7,4]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1493,12 +1493,12 @@ define <8 x i32> @shuffle_v8i32_33307474(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_32104567(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_32104567:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_32104567:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [3,2,1,0,4,5,6,7]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1508,12 +1508,12 @@ define <8 x i32> @shuffle_v8i32_32104567(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00236744(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00236744:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,6,7,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00236744:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,2,3,6,7,4,4]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1523,12 +1523,12 @@ define <8 x i32> @shuffle_v8i32_00236744(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00226644(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00226644:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,2,6,6,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00226644:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,2,2,6,6,4,4]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1538,12 +1538,12 @@ define <8 x i32> @shuffle_v8i32_00226644(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_10324567(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_10324567:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_10324567:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [1,0,3,2,4,5,6,7]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1553,12 +1553,12 @@ define <8 x i32> @shuffle_v8i32_10324567(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_11334567(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_11334567:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,3,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_11334567:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [1,1,3,3,4,5,6,7]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1568,12 +1568,12 @@ define <8 x i32> @shuffle_v8i32_11334567(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_01235467(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_01235467:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_01235467:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,1,2,3,5,4,6,7]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1583,12 +1583,12 @@ define <8 x i32> @shuffle_v8i32_01235467(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_01235466(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_01235466:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,6,6]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_01235466:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = [0,1,2,3,5,4,6,6]
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1598,12 +1598,12 @@ define <8 x i32> @shuffle_v8i32_01235466(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_002u6u44(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_002u6u44:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,u,6,u,4,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_002u6u44:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = <0,0,2,u,6,u,4,4>
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1613,12 +1613,12 @@ define <8 x i32> @shuffle_v8i32_002u6u44(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00uu66uu(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00uu66uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,u,u,6,6,u,u]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_00uu66uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = <0,0,u,u,6,6,u,u>
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1628,12 +1628,12 @@ define <8 x i32> @shuffle_v8i32_00uu66uu(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_103245uu(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_103245uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,u,u]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_103245uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = <1,0,3,2,4,5,u,u>
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1643,12 +1643,12 @@ define <8 x i32> @shuffle_v8i32_103245uu(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_1133uu67(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_1133uu67:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,3,3,u,u,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_1133uu67:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = <1,1,3,3,u,u,6,7>
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1658,12 +1658,12 @@ define <8 x i32> @shuffle_v8i32_1133uu67(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_0uu354uu(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_0uu354uu:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,u,u,3,5,4,u,u]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_0uu354uu:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = <0,u,u,3,5,4,u,u>
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1673,12 +1673,12 @@ define <8 x i32> @shuffle_v8i32_0uu354uu(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_uuu3uu66(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_uuu3uu66:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[u,u,u,3,u,u,6,6]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_uuu3uu66:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vmovaps {{.*#+}} ymm1 = <u,u,u,3,u,u,6,6>
; AVX2OR512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1688,7 +1688,7 @@ define <8 x i32> @shuffle_v8i32_uuu3uu66(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_6caa87e5(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_6caa87e5:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
@@ -1697,7 +1697,7 @@ define <8 x i32> @shuffle_v8i32_6caa87e5(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_6caa87e5:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,1,3,2]
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,2,2,4,4,6,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,0,3]
@@ -1705,7 +1705,7 @@ define <8 x i32> @shuffle_v8i32_6caa87e5(<8 x i32> %a, <8 x i32> %b) {
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_6caa87e5:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [14,4,2,2,0,15,6,13]
; AVX512VL-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
@@ -1716,13 +1716,13 @@ define <8 x i32> @shuffle_v8i32_6caa87e5(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_32103210(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_32103210:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_32103210:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2OR512VL-NEXT: retq
@@ -1732,13 +1732,13 @@ define <8 x i32> @shuffle_v8i32_32103210(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_76547654(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_76547654:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_76547654:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2OR512VL-NEXT: retq
@@ -1748,13 +1748,13 @@ define <8 x i32> @shuffle_v8i32_76547654(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_76543210(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_76543210:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_76543210:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2OR512VL-NEXT: retq
@@ -1764,7 +1764,7 @@ define <8 x i32> @shuffle_v8i32_76543210(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_3210ba98(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_3210ba98:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
@@ -1774,13 +1774,13 @@ define <8 x i32> @shuffle_v8i32_3210ba98(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_3210fedc(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_3210fedc:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_3210fedc:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: retq
@@ -1790,13 +1790,13 @@ define <8 x i32> @shuffle_v8i32_3210fedc(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_7654fedc(<8 x i32> %a, <8 x i32> %b) {
; AVX1OR2-LABEL: shuffle_v8i32_7654fedc:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1OR2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_7654fedc:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX512VL-NEXT: retq
@@ -1806,13 +1806,13 @@ define <8 x i32> @shuffle_v8i32_7654fedc(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_fedc7654(<8 x i32> %a, <8 x i32> %b) {
; AVX1OR2-LABEL: shuffle_v8i32_fedc7654:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX1OR2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_fedc7654:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX512VL-NEXT: retq
@@ -1822,13 +1822,13 @@ define <8 x i32> @shuffle_v8i32_fedc7654(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_ba987654(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_ba987654:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_ba987654:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: retq
@@ -1838,13 +1838,13 @@ define <8 x i32> @shuffle_v8i32_ba987654(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_ba983210(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_ba983210:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_ba983210:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: retq
@@ -1854,13 +1854,13 @@ define <8 x i32> @shuffle_v8i32_ba983210(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_zuu8zuuc(<8 x i32> %a) {
; AVX1-LABEL: shuffle_v8i32_zuu8zuuc:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_zuu8zuuc:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> zeroinitializer, <8 x i32> %a, <8 x i32> <i32 0, i32 undef, i32 undef, i32 8, i32 0, i32 undef, i32 undef, i32 12>
@@ -1869,14 +1869,14 @@ define <8 x i32> @shuffle_v8i32_zuu8zuuc(<8 x i32> %a) {
define <8 x i32> @shuffle_v8i32_9ubzdefz(<8 x i32> %a) {
; AVX1-LABEL: shuffle_v8i32_9ubzdefz:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,2],ymm1[2,0],ymm0[5,6],ymm1[6,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_9ubzdefz:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,ymm0[20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> zeroinitializer, <8 x i32> %a, <8 x i32> <i32 9, i32 undef, i32 11, i32 0, i32 13, i32 14, i32 15, i32 0>
@@ -1885,7 +1885,7 @@ define <8 x i32> @shuffle_v8i32_9ubzdefz(<8 x i32> %a) {
define <8 x i32> @shuffle_v8i32_80u1b4uu(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_80u1b4uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 8, i32 0, i32 undef, i32 1, i32 12, i32 4, i32 undef, i32 undef>
@@ -1894,7 +1894,7 @@ define <8 x i32> @shuffle_v8i32_80u1b4uu(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_uuuu1111(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_uuuu1111:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: retq
@@ -1904,7 +1904,7 @@ define <8 x i32> @shuffle_v8i32_uuuu1111(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_2222uuuu(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_2222uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,2,2]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1913,7 +1913,7 @@ define <8 x i32> @shuffle_v8i32_2222uuuu(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_2A3Buuuu(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_2A3Buuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1922,13 +1922,13 @@ define <8 x i32> @shuffle_v8i32_2A3Buuuu(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_44444444(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_44444444:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_44444444:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2OR512VL-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1938,13 +1938,13 @@ define <8 x i32> @shuffle_v8i32_44444444(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_44444444_bc(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8i32_44444444_bc:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_44444444_bc:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2OR512VL-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
@@ -1956,7 +1956,7 @@ define <8 x i32> @shuffle_v8i32_44444444_bc(<8 x float> %a, <8 x float> %b) {
define <8 x i32> @shuffle_v8i32_5555uuuu(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_5555uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; ALL-NEXT: retq
@@ -1967,12 +1967,12 @@ define <8 x i32> @shuffle_v8i32_5555uuuu(<8 x i32> %a, <8 x i32> %b) {
; PR32453
define <8 x i32> @shuffle_v8i32_uuuuuu7u(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: shuffle_v8i32_uuuuuu7u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_uuuuuu7u:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 7, i32 undef>
@@ -1981,7 +1981,7 @@ define <8 x i32> @shuffle_v8i32_uuuuuu7u(<8 x i32> %a, <8 x i32> %b) nounwind {
define <8 x float> @splat_mem_v8f32_2(float* %p) {
; ALL-LABEL: splat_mem_v8f32_2:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss (%rdi), %ymm0
; ALL-NEXT: retq
%1 = load float, float* %p
@@ -1992,13 +1992,13 @@ define <8 x float> @splat_mem_v8f32_2(float* %p) {
define <8 x float> @splat_v8f32(<4 x float> %r) {
; AVX1-LABEL: splat_v8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: splat_v8f32:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2OR512VL-NEXT: retq
%1 = shufflevector <4 x float> %r, <4 x float> undef, <8 x i32> zeroinitializer
@@ -2011,14 +2011,14 @@ define <8 x float> @splat_v8f32(<4 x float> %r) {
define <8 x i32> @shuffle_v8i32_z0U2zUz6(<8 x i32> %a) {
; AVX1-LABEL: shuffle_v8i32_z0U2zUz6:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_z0U2zUz6:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <8 x i32> <i32 8, i32 0, i32 undef, i32 2, i32 8, i32 undef, i32 8, i32 6>
@@ -2027,14 +2027,14 @@ define <8 x i32> @shuffle_v8i32_z0U2zUz6(<8 x i32> %a) {
define <8 x i32> @shuffle_v8i32_1U3z5zUU(<8 x i32> %a) {
; AVX1-LABEL: shuffle_v8i32_1U3z5zUU:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_1U3z5zUU:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 undef, i32 3, i32 8, i32 5, i32 8, i32 undef, i32 undef>
@@ -2043,13 +2043,13 @@ define <8 x i32> @shuffle_v8i32_1U3z5zUU(<8 x i32> %a) {
define <8 x i32> @shuffle_v8i32_B012F456(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_B012F456:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[1,2],ymm1[4,6],ymm0[5,6]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_B012F456:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11],ymm1[28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 11, i32 0, i32 1, i32 2, i32 15, i32 4, i32 5, i32 6>
@@ -2058,13 +2058,13 @@ define <8 x i32> @shuffle_v8i32_B012F456(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_1238567C(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_1238567C:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm0[3,0],ymm1[4,4],ymm0[7,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,2],ymm1[2,0],ymm0[5,6],ymm1[6,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_1238567C:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3],ymm0[20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 8, i32 5, i32 6, i32 7, i32 12>
@@ -2073,13 +2073,13 @@ define <8 x i32> @shuffle_v8i32_1238567C(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_9AB0DEF4(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_9AB0DEF4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[3,0],ymm0[4,4],ymm1[7,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,2],ymm0[2,0],ymm1[5,6],ymm0[6,4]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_9AB0DEF4:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 9, i32 10, i32 11, i32 0, i32 13, i32 14, i32 15, i32 4>
@@ -2088,13 +2088,13 @@ define <8 x i32> @shuffle_v8i32_9AB0DEF4(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_389A7CDE(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_389A7CDE:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm1[0,0],ymm0[7,4],ymm1[4,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[1,2],ymm0[4,6],ymm1[5,6]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_389A7CDE:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11],ymm0[28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 8, i32 9, i32 10, i32 7, i32 12, i32 13, i32 14>
@@ -2103,7 +2103,7 @@ define <8 x i32> @shuffle_v8i32_389A7CDE(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_30127456(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_30127456:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,0,1,2,7,4,5,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
@@ -2112,7 +2112,7 @@ define <8 x i32> @shuffle_v8i32_30127456(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_12305674(<8 x i32> %a, <8 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_12305674:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,2,3,0,5,6,7,4]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
@@ -2121,7 +2121,7 @@ define <8 x i32> @shuffle_v8i32_12305674(<8 x i32> %a, <8 x i32> %b) {
define <8x float> @concat_v2f32_1(<2 x float>* %tmp64, <2 x float>* %tmp65) {
; ALL-LABEL: concat_v2f32_1:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: retq
@@ -2136,7 +2136,7 @@ entry:
define <8x float> @concat_v2f32_2(<2 x float>* %tmp64, <2 x float>* %tmp65) {
; ALL-LABEL: concat_v2f32_2:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: retq
@@ -2149,7 +2149,7 @@ entry:
define <8x float> @concat_v2f32_3(<2 x float>* %tmp64, <2 x float>* %tmp65) {
; ALL-LABEL: concat_v2f32_3:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: retq
@@ -2163,7 +2163,7 @@ entry:
define <8 x i32> @insert_mem_and_zero_v8i32(i32* %ptr) {
; ALL-LABEL: insert_mem_and_zero_v8i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: retq
%a = load i32, i32* %ptr
@@ -2174,12 +2174,12 @@ define <8 x i32> @insert_mem_and_zero_v8i32(i32* %ptr) {
define <8 x i32> @concat_v8i32_0123CDEF(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: concat_v8i32_0123CDEF:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: concat_v8i32_0123CDEF:
-; AVX2OR512VL: # BB#0:
+; AVX2OR512VL: # %bb.0:
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2OR512VL-NEXT: retq
%alo = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2190,12 +2190,12 @@ define <8 x i32> @concat_v8i32_0123CDEF(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @concat_v8i32_4567CDEF_bc(<8 x i32> %a0, <8 x i32> %a1) {
; AVX1OR2-LABEL: concat_v8i32_4567CDEF_bc:
-; AVX1OR2: # BB#0:
+; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: concat_v8i32_4567CDEF_bc:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX512VL-NEXT: retq
%a0hi = shufflevector <8 x i32> %a0, <8 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -2209,7 +2209,7 @@ define <8 x i32> @concat_v8i32_4567CDEF_bc(<8 x i32> %a0, <8 x i32> %a1) {
define <8 x float> @concat_v8f32_4567CDEF_bc(<8 x float> %f0, <8 x float> %f1) {
; ALL-LABEL: concat_v8f32_4567CDEF_bc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: retq
%a0 = bitcast <8 x float> %f0 to <4 x i64>
@@ -2225,7 +2225,7 @@ define <8 x float> @concat_v8f32_4567CDEF_bc(<8 x float> %f0, <8 x float> %f1) {
define <8 x i32> @insert_dup_mem_v8i32(i32* %ptr) {
; ALL-LABEL: insert_dup_mem_v8i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss (%rdi), %ymm0
; ALL-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -2236,7 +2236,7 @@ define <8 x i32> @insert_dup_mem_v8i32(i32* %ptr) {
define <8 x i32> @shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_12345678:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm0[3,0],ymm1[4,4],ymm0[7,4]
@@ -2244,14 +2244,14 @@ define <8 x i32> @shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_12345678:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; AVX2-NEXT: vmovaps {{.*#+}} ymm1 = [1,2,3,4,5,6,7,0]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_12345678:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: valignd {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7],ymm1[0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
@@ -2260,20 +2260,20 @@ define <8 x i32> @shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_12345670(<8 x i32> %a) {
; AVX1-LABEL: shuffle_v8i32_12345670:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm0[3,0],ymm1[4,4],ymm0[7,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,2],ymm1[2,0],ymm0[5,6],ymm1[6,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_12345670:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm1 = [1,2,3,4,5,6,7,0]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_12345670:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: valignd {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
diff --git a/test/CodeGen/X86/vector-shuffle-512-v16.ll b/test/CodeGen/X86/vector-shuffle-512-v16.ll
index d1b6d6c2c64..12d3fb33be8 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-unknown"
define <16 x float> @shuffle_v16f32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32><i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -15,7 +15,7 @@ define <16 x float> @shuffle_v16f32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00
define <16 x float> @shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
@@ -25,7 +25,7 @@ define <16 x float> @shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08
define <16 x float> @shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_bc(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_bc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
@@ -37,7 +37,7 @@ define <16 x float> @shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08
define <16 x float> @shuffle_v16f32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32><i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -46,7 +46,7 @@ define <16 x float> @shuffle_v16f32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d
define <16 x float> @shuffle_v16f32_00_zz_01_zz_04_zz_05_zz_08_zz_09_zz_0c_zz_0d_zz(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_00_zz_01_zz_04_zz_05_zz_08_zz_09_zz_0c_zz_0d_zz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; ALL-NEXT: retq
@@ -56,7 +56,7 @@ define <16 x float> @shuffle_v16f32_00_zz_01_zz_04_zz_05_zz_08_zz_09_zz_0c_zz_0d
define <16 x float> @shuffle_v16f32_vunpcklps_swap(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_vunpcklps_swap:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklps {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 16, i32 0, i32 17, i32 1, i32 20, i32 4, i32 21, i32 5, i32 24, i32 8, i32 25, i32 9, i32 28, i32 12, i32 29, i32 13>
@@ -66,7 +66,7 @@ define <16 x float> @shuffle_v16f32_vunpcklps_swap(<16 x float> %a, <16 x float>
; PR34382
define <16 x float> @shuffle_v16f32_01_01_03_00_06_04_05_07_08_08_09_09_15_14_14_12(<16 x float> %a0) {
; ALL-LABEL: shuffle_v16f32_01_01_03_00_06_04_05_07_08_08_09_09_15_14_14_12:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,1,3,0,6,4,5,7,8,8,9,9,15,14,14,12]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 0, i32 6, i32 4, i32 5, i32 7, i32 8, i32 8, i32 9, i32 9, i32 15, i32 14, i32 14, i32 12>
@@ -75,7 +75,7 @@ define <16 x float> @shuffle_v16f32_01_01_03_00_06_04_05_07_08_08_09_09_15_14_14
define <16 x i32> @shuffle_v16i32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -84,7 +84,7 @@ define <16 x i32> @shuffle_v16i32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1
define <16 x i32> @shuffle_v16i32_zz_10_zz_11_zz_14_zz_15_zz_18_zz_19_zz_1c_zz_1d(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_zz_10_zz_11_zz_14_zz_15_zz_18_zz_19_zz_1c_zz_1d:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; ALL-NEXT: retq
@@ -94,7 +94,7 @@ define <16 x i32> @shuffle_v16i32_zz_10_zz_11_zz_14_zz_15_zz_18_zz_19_zz_1c_zz_1
define <16 x float> @shuffle_v16f32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1f(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1f:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32><i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -103,7 +103,7 @@ define <16 x float> @shuffle_v16f32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f
define <16 x float> @shuffle_v16f32_zz_12_zz_13_zz_16_zz_17_zz_1a_zz_1b_zz_1e_zz_1f(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_zz_12_zz_13_zz_16_zz_17_zz_1a_zz_1b_zz_1e_zz_1f:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; ALL-NEXT: retq
@@ -113,7 +113,7 @@ define <16 x float> @shuffle_v16f32_zz_12_zz_13_zz_16_zz_17_zz_1a_zz_1b_zz_1e_zz
define <16 x float> @shuffle_v16f32_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14_14:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32><i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
@@ -122,7 +122,7 @@ define <16 x float> @shuffle_v16f32_00_00_02_02_04_04_06_06_08_08_10_10_12_12_14
define <16 x float> @shuffle_v16f32_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15_15:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32><i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
@@ -131,7 +131,7 @@ define <16 x float> @shuffle_v16f32_01_01_03_03_05_05_07_07_09_09_11_11_13_13_15
define <16 x float> @shuffle_v16f32_00_01_00_01_06_07_06_07_08_09_10_11_12_13_12_13(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_00_01_00_01_06_07_06_07_08_09_10_11_12_13_12_13:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,3,3,4,5,6,6]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 6, i32 7, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13>
@@ -140,7 +140,7 @@ define <16 x float> @shuffle_v16f32_00_01_00_01_06_07_06_07_08_09_10_11_12_13_12
define <16 x float> @shuffle_v16f32_00_00_02_00_04_04_06_04_08_08_10_08_12_12_14_12(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_00_00_02_00_04_04_06_04_08_08_10_08_12_12_14_12:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[0,0,2,0,4,4,6,4,8,8,10,8,12,12,14,12]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 4, i32 6, i32 4, i32 8, i32 8, i32 10, i32 8, i32 12, i32 12, i32 14, i32 12>
@@ -149,7 +149,7 @@ define <16 x float> @shuffle_v16f32_00_00_02_00_04_04_06_04_08_08_10_08_12_12_14
define <16 x float> @shuffle_v16f32_03_uu_uu_uu_uu_04_uu_uu_uu_uu_11_uu_uu_uu_uu_12(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_03_uu_uu_uu_uu_04_uu_uu_uu_uu_11_uu_uu_uu_uu_12:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,0,3,0,7,4,7,4,11,8,11,8,15,12,15,12]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 undef, i32 undef, i32 undef, i32 undef, i32 11, i32 undef, i32 undef, i32 undef, i32 undef, i32 12>
@@ -158,7 +158,7 @@ define <16 x float> @shuffle_v16f32_03_uu_uu_uu_uu_04_uu_uu_uu_uu_11_uu_uu_uu_uu
define <16 x i32> @shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -167,7 +167,7 @@ define <16 x i32> @shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_0
define <16 x i32> @shuffle_v16i32_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
@@ -177,7 +177,7 @@ define <16 x i32> @shuffle_v16i32_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04_0
define <16 x i32> @shuffle_v16i32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1f(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1f:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -186,7 +186,7 @@ define <16 x i32> @shuffle_v16i32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1
define <16 x i32> @shuffle_v16i32_02_zz_03_zz_06_zz_07_zz_0a_zz_0b_zz_0e_zz_0f_zz(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_02_zz_03_zz_06_zz_07_zz_0a_zz_0b_zz_0e_zz_0f_zz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; ALL-NEXT: retq
@@ -196,13 +196,13 @@ define <16 x i32> @shuffle_v16i32_02_zz_03_zz_06_zz_07_zz_0a_zz_0b_zz_0e_zz_0f_z
define <16 x i32> @shuffle_v16i32_01_02_03_16_05_06_07_20_09_10_11_24_13_14_15_28(<16 x i32> %a, <16 x i32> %b) {
; AVX512F-LABEL: shuffle_v16i32_01_02_03_16_05_06_07_20_09_10_11_24_13_14_15_28:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm2 = [1,2,3,16,5,6,7,20,9,10,11,24,13,14,15,28]
; AVX512F-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i32_01_02_03_16_05_06_07_20_09_10_11_24_13_14_15_28:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1,2,3],zmm0[20,21,22,23,24,25,26,27,28,29,30,31],zmm1[16,17,18,19],zmm0[36,37,38,39,40,41,42,43,44,45,46,47],zmm1[32,33,34,35],zmm0[52,53,54,55,56,57,58,59,60,61,62,63],zmm1[48,49,50,51]
; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 1, i32 2, i32 3, i32 16, i32 5, i32 6, i32 7, i32 20, i32 9, i32 10, i32 11, i32 24, i32 13, i32 14, i32 15, i32 28>
@@ -211,7 +211,7 @@ define <16 x i32> @shuffle_v16i32_01_02_03_16_05_06_07_20_09_10_11_24_13_14_15_2
define <16 x float> @shuffle_v16f32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01(<16 x float> %a) {
; ALL-LABEL: shuffle_v16f32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps {{.*#+}} zmm1 = <2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,1>
; ALL-NEXT: vpermps %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
@@ -221,7 +221,7 @@ define <16 x float> @shuffle_v16f32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01(<
define <16 x i32> @shuffle_v16i32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01(<16 x i32> %a) {
; ALL-LABEL: shuffle_v16i32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps {{.*#+}} zmm1 = <2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,1>
; ALL-NEXT: vpermps %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
@@ -231,7 +231,7 @@ define <16 x i32> @shuffle_v16i32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01(<16
define <16 x i32> @shuffle_v16i32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24]
; ALL-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
; ALL-NEXT: retq
@@ -241,7 +241,7 @@ define <16 x i32> @shuffle_v16i32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_1
define <16 x float> @shuffle_v16f32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps {{.*#+}} zmm2 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24]
; ALL-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
; ALL-NEXT: retq
@@ -251,7 +251,7 @@ define <16 x float> @shuffle_v16f32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08
define <16 x float> @shuffle_v16f32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18(<16 x float> %a, <16 x float>* %b) {
; ALL-LABEL: shuffle_v16f32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovaps {{.*#+}} zmm1 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24]
; ALL-NEXT: vpermt2ps (%rdi), %zmm1, %zmm0
; ALL-NEXT: retq
@@ -262,7 +262,7 @@ define <16 x float> @shuffle_v16f32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_
define <16 x i32> @shuffle_v16i32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18(<16 x i32> %a, <16 x i32>* %b) {
; ALL-LABEL: shuffle_v16i32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm1 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24]
; ALL-NEXT: vpermt2d (%rdi), %zmm1, %zmm0
; ALL-NEXT: retq
@@ -273,7 +273,7 @@ define <16 x i32> @shuffle_v16i32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19
define <16 x i32> @shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; ALL-NEXT: retq
%c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -283,7 +283,7 @@ define <16 x i32> @shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u(<16 x i32> %a
;FIXME: can do better with vpcompress
define <8 x i32> @test_v16i32_1_3_5_7_9_11_13_15(<16 x i32> %v) {
; ALL-LABEL: test_v16i32_1_3_5_7_9_11_13_15:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
@@ -295,7 +295,7 @@ define <8 x i32> @test_v16i32_1_3_5_7_9_11_13_15(<16 x i32> %v) {
;FIXME: can do better with vpcompress
define <4 x i32> @test_v16i32_0_1_2_12 (<16 x i32> %v) {
; ALL-LABEL: test_v16i32_0_1_2_12:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; ALL-NEXT: vextractf128 $1, %ymm1, %xmm1
; ALL-NEXT: vbroadcastss %xmm1, %xmm1
@@ -308,7 +308,7 @@ define <4 x i32> @test_v16i32_0_1_2_12 (<16 x i32> %v) {
define <8 x float> @shuffle_v16f32_extract_256(float* %RET, float* %a) {
; ALL-LABEL: shuffle_v16f32_extract_256:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups 32(%rsi), %ymm0
; ALL-NEXT: retq
%ptr_a = bitcast float* %a to <16 x float>*
@@ -320,7 +320,7 @@ define <8 x float> @shuffle_v16f32_extract_256(float* %RET, float* %a) {
;FIXME: can do better with vcompressp
define <8 x float> @test_v16f32_0_1_2_3_4_6_7_10 (<16 x float> %v) {
; ALL-LABEL: test_v16f32_0_1_2_3_4_6_7_10:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; ALL-NEXT: vmovsldup {{.*#+}} xmm1 = xmm1[0,0,2,2]
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -334,7 +334,7 @@ define <8 x float> @test_v16f32_0_1_2_3_4_6_7_10 (<16 x float> %v) {
;FIXME: can do better with vcompressp
define <4 x float> @test_v16f32_0_1_3_6 (<16 x float> %v) {
; ALL-LABEL: test_v16f32_0_1_3_6:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,3,3]
; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -347,7 +347,7 @@ define <4 x float> @test_v16f32_0_1_3_6 (<16 x float> %v) {
define <16 x i32> @shuffle_v16i16_1_0_0_0_5_4_4_4_9_8_8_8_13_12_12_12(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i16_1_0_0_0_5_4_4_4_9_8_8_8_13_12_12_12:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
; ALL-NEXT: retq
%c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 1, i32 0, i32 0, i32 0, i32 5, i32 4, i32 4, i32 4, i32 9, i32 8, i32 8, i32 8, i32 13, i32 12, i32 12, i32 12>
@@ -356,7 +356,7 @@ define <16 x i32> @shuffle_v16i16_1_0_0_0_5_4_4_4_9_8_8_8_13_12_12_12(<16 x i32>
define <16 x i32> @shuffle_v16i16_3_3_0_0_7_7_4_4_11_11_8_8_15_15_12_12(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i16_3_3_0_0_7_7_4_4_11_11_8_8_15_15_12_12:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; ALL-NEXT: retq
%c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5, i32 10, i32 11, i32 8, i32 9, i32 14, i32 15, i32 12, i32 13>
@@ -365,7 +365,7 @@ define <16 x i32> @shuffle_v16i16_3_3_0_0_7_7_4_4_11_11_8_8_15_15_12_12(<16 x i3
define <16 x float> @shuffle_v16f32_00_01_10_10_04_05_14_14_08_09_18_18_0c_0d_1c_1c(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_00_01_10_10_04_05_14_14_08_09_18_18_0c_0d_1c_1c:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[0,1],zmm1[0,0],zmm0[4,5],zmm1[4,4],zmm0[8,9],zmm1[8,8],zmm0[12,13],zmm1[12,12]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 0, i32 1, i32 16, i32 16, i32 4, i32 5, i32 20, i32 20, i32 8, i32 9, i32 24, i32 24, i32 12, i32 13, i32 28, i32 28>
@@ -374,7 +374,7 @@ define <16 x float> @shuffle_v16f32_00_01_10_10_04_05_14_14_08_09_18_18_0c_0d_1c
define <16 x i32> @insert_mem_and_zero_v16i32(i32* %ptr) {
; ALL-LABEL: insert_mem_and_zero_v16i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: retq
%a = load i32, i32* %ptr
@@ -386,7 +386,7 @@ define <16 x i32> @insert_mem_and_zero_v16i32(i32* %ptr) {
define <16 x i32> @shuffle_v16i32_0zzzzzzzzzzzzzzz(<16 x i32> %a) {
; ALL-LABEL: shuffle_v16i32_0zzzzzzzzzzzzzzz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; ALL-NEXT: retq
@@ -396,7 +396,7 @@ define <16 x i32> @shuffle_v16i32_0zzzzzzzzzzzzzzz(<16 x i32> %a) {
define <16 x float> @shuffle_v16f32_0zzzzzzzzzzzzzzz(<16 x float> %a) {
; ALL-LABEL: shuffle_v16f32_0zzzzzzzzzzzzzzz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; ALL-NEXT: retq
@@ -406,7 +406,7 @@ define <16 x float> @shuffle_v16f32_0zzzzzzzzzzzzzzz(<16 x float> %a) {
define <16 x i32> @shuffle_v16i32_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_23_zz(<16 x i32> %a) {
; ALL-LABEL: shuffle_v16i32_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_23_zz:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> zeroinitializer, <16 x i32> %a, <16 x i32> <i32 16, i32 0, i32 17, i32 0, i32 18, i32 0, i32 19, i32 0, i32 20, i32 0, i32 21, i32 0, i32 22, i32 0, i32 23, i32 0>
@@ -415,7 +415,7 @@ define <16 x i32> @shuffle_v16i32_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_23_z
define <16 x i32> @shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: valignd {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
@@ -424,7 +424,7 @@ define <16 x i32> @shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_1
define <16 x i32> @shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00(<16 x i32> %a) {
; ALL-LABEL: shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: valignd {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0>
@@ -433,7 +433,7 @@ define <16 x i32> @shuffle_v16i32_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_0
define <16 x i32> @shuffle_v16i32_00_03_16_19_04_07_20_23_08_11_24_27_12_15_28_31(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_00_03_16_19_04_07_20_23_08_11_24_27_12_15_28_31:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[0,3],zmm1[0,3],zmm0[4,7],zmm1[4,7],zmm0[8,11],zmm1[8,11],zmm0[12,15],zmm1[12,15]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 3, i32 16, i32 19, i32 4, i32 7, i32 20, i32 23, i32 8, i32 11, i32 24, i32 27, i32 12, i32 15, i32 28, i32 31>
@@ -442,7 +442,7 @@ define <16 x i32> @shuffle_v16i32_00_03_16_19_04_07_20_23_08_11_24_27_12_15_28_3
define <16 x i32> @shuffle_v16i32_16_16_02_03_20_20_06_07_24_24_10_11_28_28_uu_uu(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_16_16_02_03_20_20_06_07_24_24_10_11_28_28_uu_uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} zmm0 = zmm1[0,0],zmm0[2,3],zmm1[4,4],zmm0[6,7],zmm1[8,8],zmm0[10,11],zmm1[12,12],zmm0[14,15]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 16, i32 16, i32 02, i32 03, i32 20, i32 20, i32 06, i32 07, i32 24, i32 24, i32 10, i32 11, i32 28, i32 28, i32 undef, i32 undef>
@@ -451,7 +451,7 @@ define <16 x i32> @shuffle_v16i32_16_16_02_03_20_20_06_07_24_24_10_11_28_28_uu_u
define <16 x i32> @shuffle_v8i32_17_16_01_00_21_20_05_04_25_24_09_08_29_28_13_12(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v8i32_17_16_01_00_21_20_05_04_25_24_09_08_29_28_13_12:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} zmm0 = zmm1[1,0],zmm0[1,0],zmm1[5,4],zmm0[5,4],zmm1[9,8],zmm0[9,8],zmm1[13,12],zmm0[13,12]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 17, i32 16, i32 01, i32 00, i32 21, i32 20, i32 05, i32 04, i32 25, i32 24, i32 09, i32 08, i32 29, i32 28, i32 13, i32 12>
@@ -460,7 +460,7 @@ define <16 x i32> @shuffle_v8i32_17_16_01_00_21_20_05_04_25_24_09_08_29_28_13_12
define <16 x float> @shuffle_v8f32_v16f32_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04(<8 x float> %a) {
; ALL-LABEL: shuffle_v8f32_v16f32_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
@@ -470,14 +470,14 @@ define <16 x float> @shuffle_v8f32_v16f32_04_04_04_04_04_04_04_04_04_04_04_04_04
define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01(<16 x i32> %a, <16 x i32> %passthru, i16 %mask) {
; AVX512F-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignd {{.*#+}} zmm1 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: valignd {{.*#+}} zmm1 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -490,14 +490,14 @@ define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15
define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
; AVX512F-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignd {{.*#+}} zmm2 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: valignd {{.*#+}} zmm2 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -510,13 +510,13 @@ define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15
define <16 x i32> @maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01(<16 x i32> %a, i16 %mask) {
; AVX512F-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
; AVX512BW-NEXT: retq
@@ -528,13 +528,13 @@ define <16 x i32> @maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_1
define <16 x i32> @maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; AVX512F-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
; AVX512BW-NEXT: retq
@@ -546,7 +546,7 @@ define <16 x i32> @maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_1
define <16 x float> @test_vshuff32x4_512(<16 x float> %x, <16 x float> %x1) nounwind {
; ALL-LABEL: test_vshuff32x4_512:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[2,3,0,1]
; ALL-NEXT: retq
%res = shufflevector <16 x float> %x, <16 x float> %x1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
@@ -555,7 +555,7 @@ define <16 x float> @test_vshuff32x4_512(<16 x float> %x, <16 x float> %x1) noun
define <16 x i32> @test_vshufi32x4_512(<16 x i32> %x, <16 x i32> %x1) nounwind {
; ALL-LABEL: test_vshufi32x4_512:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[2,3,0,1]
; ALL-NEXT: retq
%res = shufflevector <16 x i32> %x, <16 x i32> %x1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
@@ -564,7 +564,7 @@ define <16 x i32> @test_vshufi32x4_512(<16 x i32> %x, <16 x i32> %x1) nounwind {
define <16 x float> @test_vshuff32x4_512_mask(<16 x float> %x, <16 x float> %x1, <16 x float> %y, <16 x i1> %mask) nounwind {
; AVX512F-LABEL: test_vshuff32x4_512_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm3
; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3
; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k1
@@ -573,7 +573,7 @@ define <16 x float> @test_vshuff32x4_512_mask(<16 x float> %x, <16 x float> %x1,
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_vshuff32x4_512_mask:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $7, %xmm3, %xmm3
; AVX512BW-NEXT: vpmovb2m %zmm3, %k1
; AVX512BW-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[0,1,2,3,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
@@ -586,7 +586,7 @@ define <16 x float> @test_vshuff32x4_512_mask(<16 x float> %x, <16 x float> %x1,
define <16 x i32> @test_vshufi32x4_512_mask(<16 x i32> %x, <16 x i32> %x1, <16 x i32> %y, <16 x i1> %mask) nounwind {
; AVX512F-LABEL: test_vshufi32x4_512_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm3
; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3
; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k1
@@ -595,7 +595,7 @@ define <16 x i32> @test_vshufi32x4_512_mask(<16 x i32> %x, <16 x i32> %x1, <16 x
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_vshufi32x4_512_mask:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $7, %xmm3, %xmm3
; AVX512BW-NEXT: vpmovb2m %zmm3, %k1
; AVX512BW-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[0,1,2,3,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
@@ -608,14 +608,14 @@ define <16 x i32> @test_vshufi32x4_512_mask(<16 x i32> %x, <16 x i32> %x1, <16 x
define <16 x float> @mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23(<16 x float> %a, <16 x float> %b, <16 x float> %passthru, i16 %mask) {
; AVX512F-LABEL: mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vinsertf32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
; AVX512F-NEXT: vmovaps %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vinsertf32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovaps %zmm2, %zmm0
@@ -628,14 +628,14 @@ define <16 x float> @mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_
define <16 x float> @mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15(<16 x float> %a, <16 x float> %b, <16 x float> %passthru, i16 %mask) {
; AVX512F-LABEL: mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
; AVX512F-NEXT: vmovaps %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovaps %zmm2, %zmm0
@@ -648,14 +648,14 @@ define <16 x float> @mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_
define <16 x i32> @mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
; AVX512F-LABEL: mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -668,14 +668,14 @@ define <16 x i32> @mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21
define <16 x i32> @mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
; AVX512F-LABEL: mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -688,7 +688,7 @@ define <16 x i32> @mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13
define <16 x i32> @mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x i32> %a) {
; ALL-LABEL: mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -699,7 +699,7 @@ define <16 x i32> @mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03
define <16 x float> @mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x float> %a) {
; ALL-LABEL: mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
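A minimal standalone sketch of the pattern the v16 hunks above exercise (the hunks themselves only rename the FileCheck block label from "# BB#0:" to "# %bb.0:"; the checked shuffle lowering is unchanged). The function name below is illustrative, not taken from the patch:

define <16 x i32> @valignd_sketch(<16 x i32> %a, <16 x i32> %b) {
  ; Lanes 1..15 of %a followed by lane 0 of %b (index 16) form a lane rotation
  ; across the concatenated inputs, which AVX-512 selects as a single valignd.
  %s = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
  ret <16 x i32> %s
}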
diff --git a/test/CodeGen/X86/vector-shuffle-512-v32.ll b/test/CodeGen/X86/vector-shuffle-512-v32.ll
index bb7c5e91099..3e49957bf85 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v32.ll
@@ -6,13 +6,13 @@ target triple = "x86_64-unknown-unknown"
define <32 x i16> @shuffle_v32i16(<32 x i16> %a) {
; KNL-LABEL: shuffle_v32i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpbroadcastw %xmm0, %ymm0
; KNL-NEXT: vmovdqa %ymm0, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpbroadcastw %xmm0, %zmm0
; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> undef, <32 x i32> zeroinitializer
@@ -21,14 +21,14 @@ define <32 x i16> @shuffle_v32i16(<32 x i16> %a) {
define <32 x i16> @shuffle_v32i16_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08(<32 x i16> %a) {
; KNL-LABEL: shuffle_v32i16_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpbroadcastw %xmm0, %ymm0
; KNL-NEXT: vmovdqa %ymm0, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
; SKX-NEXT: vpbroadcastw %xmm0, %zmm0
; SKX-NEXT: retq
@@ -38,7 +38,7 @@ define <32 x i16> @shuffle_v32i16_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_0
define <32 x i16> @shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f(<32 x i16> %a) {
; KNL-LABEL: shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshufb {{.*#+}} ymm2 = ymm0[4,5,10,11,4,5,6,7,14,15,2,3,4,5,2,3,20,21,26,27,20,21,22,23,30,31,18,19,20,21,18,19]
; KNL-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,3,0,1]
; KNL-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[0,1,10,11,8,9,8,9,14,15,2,3,4,5,2,3,16,17,26,27,24,25,24,25,30,31,18,19,20,21,18,19]
@@ -52,7 +52,7 @@ define <32 x i16> @shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm1 = <2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,1,2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,31>
; SKX-NEXT: vpermw %zmm0, %zmm1, %zmm0
; SKX-NEXT: retq
@@ -62,7 +62,7 @@ define <32 x i16> @shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_
define <32 x i16> @shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; KNL-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6],ymm2[7],ymm1[8,9,10,11],ymm2[12,13],ymm1[14],ymm2[15]
; KNL-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,14,15,u,u,12,13,u,u,10,11,u,u,8,9,u,u,22,23,u,u,20,21,u,u,18,19,u,u,u,u]
@@ -79,7 +79,7 @@ define <32 x i16> @shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_1
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24,15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,56]
; SKX-NEXT: vpermt2w %zmm1, %zmm2, %zmm0
; SKX-NEXT: retq
@@ -89,12 +89,12 @@ define <32 x i16> @shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_1
define <32 x i16> @shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> %b, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -103,12 +103,12 @@ define <32 x i16> @shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u(<32 x i1
define <32 x i16> @shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> %b, <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -117,13 +117,13 @@ define <32 x i16> @shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u(<32 x
define <32 x i16> @shuffle_v32i16_1_z_3_z_5_z_7_z_9_z_11_z_13_z_15_z_17_z_19_z_21_z_23_z_25_z_27_z_29_z_31_z(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: shuffle_v32i16_1_z_3_z_5_z_7_z_9_z_11_z_13_z_15_z_17_z_19_z_21_z_23_z_25_z_27_z_29_z_31_z:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpsrld $16, %ymm0, %ymm0
; KNL-NEXT: vpsrld $16, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_1_z_3_z_5_z_7_z_9_z_11_z_13_z_15_z_17_z_19_z_21_z_23_z_25_z_27_z_29_z_31_z:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsrld $16, %zmm0, %zmm0
; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 1, i32 34, i32 3, i32 34, i32 5, i32 34, i32 7, i32 34, i32 9, i32 34, i32 11, i32 34, i32 13, i32 34, i32 15, i32 34, i32 17, i32 34, i32 19, i32 34, i32 21, i32 34, i32 23, i32 34, i32 25, i32 34, i32 27, i32 34, i32 29, i32 34, i32 31, i32 34>
@@ -132,13 +132,13 @@ define <32 x i16> @shuffle_v32i16_1_z_3_z_5_z_7_z_9_z_11_z_13_z_15_z_17_z_19_z_2
define <32 x i16> @shuffle_v32i16_z_0_z_2_z_4_z_6_z_8_z_10_z_12_z_14_z_16_z_18_z_20_z_22_z_24_z_26_z_28_z_30(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: shuffle_v32i16_z_0_z_2_z_4_z_6_z_8_z_10_z_12_z_14_z_16_z_18_z_20_z_22_z_24_z_26_z_28_z_30:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpslld $16, %ymm0, %ymm0
; KNL-NEXT: vpslld $16, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_z_0_z_2_z_4_z_6_z_8_z_10_z_12_z_14_z_16_z_18_z_20_z_22_z_24_z_26_z_28_z_30:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpslld $16, %zmm0, %zmm0
; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 34, i32 0, i32 34, i32 2, i32 34, i32 4, i32 34, i32 6, i32 34, i32 8, i32 34, i32 10, i32 34, i32 12, i32 34, i32 14, i32 34, i32 16, i32 34, i32 18, i32 34, i32 20, i32 34, i32 22, i32 34, i32 24, i32 34, i32 26, i32 34, i32 28, i32 34, i32 30>
@@ -147,13 +147,13 @@ define <32 x i16> @shuffle_v32i16_z_0_z_2_z_4_z_6_z_8_z_10_z_12_z_14_z_16_z_18_z
define <32 x i16> @shuffle_v32i16_1_1_0_0_4_5_6_7_9_9_8_8_12_13_14_15_17_17_16_16_20_21_22_23_25_25_24_24_28_29_30_31(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: shuffle_v32i16_1_1_0_0_4_5_6_7_9_9_8_8_12_13_14_15_17_17_16_16_20_21_22_23_25_25_24_24_28_29_30_31:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15]
; KNL-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15]
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_1_1_0_0_4_5_6_7_9_9_8_8_12_13_14_15_17_17_16_16_20_21_22_23_25_25_24_24_28_29_30_31:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15,17,17,16,16,20,21,22,23,25,25,24,24,28,29,30,31]
; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 1, i32 1, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 9, i32 9, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 17, i32 17, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 25, i32 25, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
@@ -162,13 +162,13 @@ define <32 x i16> @shuffle_v32i16_1_1_0_0_4_5_6_7_9_9_8_8_12_13_14_15_17_17_16_1
define <32 x i16> @shuffle_v32i16_0_1_2_3_5_5_4_4_8_9_10_11_13_13_12_12_16_17_18_19_21_21_20_20_24_25_26_27_29_29_28_28(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: shuffle_v32i16_0_1_2_3_5_5_4_4_8_9_10_11_13_13_12_12_16_17_18_19_21_21_20_20_24_25_26_27_29_29_28_28:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12]
; KNL-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12]
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_0_1_2_3_5_5_4_4_8_9_10_11_13_13_12_12_16_17_18_19_21_21_20_20_24_25_26_27_29_29_28_28:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12,16,17,18,19,21,21,20,20,24,25,26,27,29,29,28,28]
; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 5, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 13, i32 13, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 21, i32 21, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 29, i32 29, i32 28, i32 28>
@@ -177,7 +177,7 @@ define <32 x i16> @shuffle_v32i16_0_1_2_3_5_5_4_4_8_9_10_11_13_13_12_12_16_17_18
define <32 x i16> @shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19_19_21_21_20_20_25_25_27_27_29_29_28_28(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19_19_21_21_20_20_25_25_27_27_29_29_28_28:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15]
; KNL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12]
; KNL-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15]
@@ -185,7 +185,7 @@ define <32 x i16> @shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19_19_21_21_20_20_25_25_27_27_29_29_28_28:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15,17,17,16,16,20,21,22,23,25,25,24,24,28,29,30,31]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12,16,17,18,19,21,21,20,20,24,25,26,27,29,29,28,28]
; SKX-NEXT: retq
@@ -195,7 +195,7 @@ define <32 x i16> @shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19
define <32 x i16> @shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<32 x i16> %a) {
; KNL-LABEL: shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl $65535, %eax ## imm = 0xFFFF
; KNL-NEXT: vmovd %eax, %xmm1
; KNL-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -203,7 +203,7 @@ define <32 x i16> @shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<32 x i16> %a
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl $1, %eax
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -214,13 +214,13 @@ define <32 x i16> @shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<32 x i16> %a
define <32 x i16> @insert_dup_mem_v32i16_i32(i32* %ptr) {
; KNL-LABEL: insert_dup_mem_v32i16_i32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpbroadcastw (%rdi), %ymm0
; KNL-NEXT: vmovdqa %ymm0, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: insert_dup_mem_v32i16_i32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movl (%rdi), %eax
; SKX-NEXT: vpbroadcastw %eax, %zmm0
; SKX-NEXT: retq
@@ -233,7 +233,7 @@ define <32 x i16> @insert_dup_mem_v32i16_i32(i32* %ptr) {
define <32 x i16> @insert_dup_mem_v32i16_sext_i16(i16* %ptr) {
; KNL-LABEL: insert_dup_mem_v32i16_sext_i16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movswl (%rdi), %eax
; KNL-NEXT: vmovd %eax, %xmm0
; KNL-NEXT: vpbroadcastw %xmm0, %ymm0
@@ -241,7 +241,7 @@ define <32 x i16> @insert_dup_mem_v32i16_sext_i16(i16* %ptr) {
; KNL-NEXT: retq
;
; SKX-LABEL: insert_dup_mem_v32i16_sext_i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movswl (%rdi), %eax
; SKX-NEXT: vpbroadcastw %eax, %zmm0
; SKX-NEXT: retq
@@ -255,13 +255,13 @@ define <32 x i16> @insert_dup_mem_v32i16_sext_i16(i16* %ptr) {
define <32 x i16> @insert_dup_elt1_mem_v32i16_i32(i32* %ptr) #0 {
; KNL-LABEL: insert_dup_elt1_mem_v32i16_i32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpbroadcastw 2(%rdi), %ymm0
; KNL-NEXT: vmovdqa %ymm0, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: insert_dup_elt1_mem_v32i16_i32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movzwl 2(%rdi), %eax
; SKX-NEXT: vpbroadcastw %eax, %zmm0
; SKX-NEXT: retq
@@ -274,13 +274,13 @@ define <32 x i16> @insert_dup_elt1_mem_v32i16_i32(i32* %ptr) #0 {
define <32 x i16> @insert_dup_elt3_mem_v32i16_i32(i32* %ptr) #0 {
; KNL-LABEL: insert_dup_elt3_mem_v32i16_i32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpbroadcastw 2(%rdi), %ymm0
; KNL-NEXT: vmovdqa %ymm0, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: insert_dup_elt3_mem_v32i16_i32:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: movzwl 2(%rdi), %eax
; SKX-NEXT: vpbroadcastw %eax, %zmm0
; SKX-NEXT: retq
@@ -293,7 +293,7 @@ define <32 x i16> @insert_dup_elt3_mem_v32i16_i32(i32* %ptr) #0 {
define <32 x i16> @shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz(<32 x i16> %a) {
; KNL-LABEL: shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; KNL-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -301,7 +301,7 @@ define <32 x i16> @shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_z
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; SKX-NEXT: retq
%shuffle = shufflevector <32 x i16> zeroinitializer, <32 x i16> %a, <32 x i32> <i32 32, i32 0, i32 0, i32 0, i32 33, i32 0, i32 0, i32 0, i32 34, i32 0, i32 0, i32 0, i32 35, i32 0, i32 0, i32 0, i32 36, i32 0, i32 0, i32 0, i32 37, i32 0, i32 0, i32 0, i32 38, i32 0, i32 0, i32 0, i32 39, i32 0, i32 0, i32 0>
@@ -310,7 +310,7 @@ define <32 x i16> @shuffle_v32i16_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_z
define <32 x i16> @shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz(<32 x i16> %a) {
; KNL-LABEL: shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -318,7 +318,7 @@ define <32 x i16> @shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_z
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; SKX-NEXT: retq
%shuffle = shufflevector <32 x i16> zeroinitializer, <32 x i16> %a, <32 x i32> <i32 32, i32 0, i32 33, i32 0, i32 34, i32 0, i32 35, i32 0, i32 36, i32 0, i32 37, i32 0, i32 38, i32 0, i32 39, i32 0, i32 40, i32 0, i32 41, i32 0, i32 42, i32 0, i32 43, i32 0, i32 44, i32 0, i32 45, i32 0, i32 46, i32 0, i32 47, i32 0>
@@ -327,7 +327,7 @@ define <32 x i16> @shuffle_v32i16_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_z
define <8 x i16> @pr32967(<32 x i16> %v) {
; KNL-LABEL: pr32967:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vextracti128 $1, %ymm1, %xmm2
; KNL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; KNL-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
@@ -344,7 +344,7 @@ define <8 x i16> @pr32967(<32 x i16> %v) {
; KNL-NEXT: retq
;
; SKX-LABEL: pr32967:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; SKX-NEXT: vmovdqa {{.*#+}} ymm1 = <1,5,9,13,17,21,25,29,u,u,u,u,u,u,u,u>
; SKX-NEXT: vpermi2w %ymm2, %ymm0, %ymm1
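A similar sketch, again with an illustrative name, of the splat pattern the v32 tests above check: KNL lacks AVX512BW, so the 512-bit result is modeled as two ymm halves, while SKX broadcasts into a single zmm register.

define <32 x i16> @broadcast_sketch(<32 x i16> %a) {
  ; Splat lane 0 across all 32 lanes; AVX512BW targets select vpbroadcastw.
  %s = shufflevector <32 x i16> %a, <32 x i16> undef, <32 x i32> zeroinitializer
  ret <32 x i16> %s
}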
diff --git a/test/CodeGen/X86/vector-shuffle-512-v64.ll b/test/CodeGen/X86/vector-shuffle-512-v64.ll
index 12bb1370c93..cff6892caee 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v64.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v64.ll
@@ -6,7 +6,7 @@
define <64 x i8> @shuffle_v64i8_02_03_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u(<64 x i8> %a) {
; ALL-LABEL: shuffle_v64i8_02_03_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpsrld $16, %xmm0, %xmm0
; ALL-NEXT: retq
%b = shufflevector <64 x i8> %a, <64 x i8> undef, <64 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -15,24 +15,24 @@ define <64 x i8> @shuffle_v64i8_02_03_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_
define <64 x i8> @shuffle_v64i8_zz_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_zz_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_zz_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_zz_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62(<64 x i8> %a, <64 x i8> %b) {
; AVX512F-LABEL: shuffle_v64i8_zz_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_zz_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_zz_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_zz_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpslldq {{.*#+}} ymm0 = zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX512F-NEXT: vpslldq {{.*#+}} ymm1 = zero,ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,ymm1[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_zz_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_zz_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_zz_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_zz_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpslldq {{.*#+}} zmm0 = zero,zmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,zmm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],zero,zmm0[32,33,34,35,36,37,38,39,40,41,42,43,44,45,46],zero,zmm0[48,49,50,51,52,53,54,55,56,57,58,59,60,61,62]
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_zz_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_zz_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_zz_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_zz_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpslldq {{.*#+}} ymm0 = zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX512DQ-NEXT: vpslldq {{.*#+}} ymm1 = zero,ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,ymm1[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_zz_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_zz_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_zz_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_zz_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpslldq {{.*#+}} zmm0 = zero,zmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,zmm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],zero,zmm0[32,33,34,35,36,37,38,39,40,41,42,43,44,45,46],zero,zmm0[48,49,50,51,52,53,54,55,56,57,58,59,60,61,62]
; AVX512VBMI-NEXT: retq
%shuffle = shufflevector <64 x i8> %a, <64 x i8> zeroinitializer, <64 x i32> <i32 79, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 95, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 111, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 127, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62>
@@ -41,24 +41,24 @@ define <64 x i8> @shuffle_v64i8_zz_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
define <64 x i8> @shuffle_v64i8_02_03_04_05_06_07_08_09_10_11_12_13_14_15_zz_zz_18_19_20_21_22_23_24_25_26_27_28_29_30_31_zz_zz_34_35_36_37_38_39_40_41_42_43_44_45_46_47_zz_zz_50_51_52_53_54_55_56_57_58_59_60_61_62_63_zz_zz(<64 x i8> %a, <64 x i8> %b) {
; AVX512F-LABEL: shuffle_v64i8_02_03_04_05_06_07_08_09_10_11_12_13_14_15_zz_zz_18_19_20_21_22_23_24_25_26_27_28_29_30_31_zz_zz_34_35_36_37_38_39_40_41_42_43_44_45_46_47_zz_zz_50_51_52_53_54_55_56_57_58_59_60_61_62_63_zz_zz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero
; AVX512F-NEXT: vpsrldq {{.*#+}} ymm1 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_02_03_04_05_06_07_08_09_10_11_12_13_14_15_zz_zz_18_19_20_21_22_23_24_25_26_27_28_29_30_31_zz_zz_34_35_36_37_38_39_40_41_42_43_44_45_46_47_zz_zz_50_51_52_53_54_55_56_57_58_59_60_61_62_63_zz_zz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrldq {{.*#+}} zmm0 = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zmm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zmm0[34,35,36,37,38,39,40,41,42,43,44,45,46,47],zero,zero,zmm0[50,51,52,53,54,55,56,57,58,59,60,61,62,63],zero,zero
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_02_03_04_05_06_07_08_09_10_11_12_13_14_15_zz_zz_18_19_20_21_22_23_24_25_26_27_28_29_30_31_zz_zz_34_35_36_37_38_39_40_41_42_43_44_45_46_47_zz_zz_50_51_52_53_54_55_56_57_58_59_60_61_62_63_zz_zz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero
; AVX512DQ-NEXT: vpsrldq {{.*#+}} ymm1 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_02_03_04_05_06_07_08_09_10_11_12_13_14_15_zz_zz_18_19_20_21_22_23_24_25_26_27_28_29_30_31_zz_zz_34_35_36_37_38_39_40_41_42_43_44_45_46_47_zz_zz_50_51_52_53_54_55_56_57_58_59_60_61_62_63_zz_zz:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpsrldq {{.*#+}} zmm0 = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zmm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zmm0[34,35,36,37,38,39,40,41,42,43,44,45,46,47],zero,zero,zmm0[50,51,52,53,54,55,56,57,58,59,60,61,62,63],zero,zero
; AVX512VBMI-NEXT: retq
%shuffle = shufflevector <64 x i8> %a, <64 x i8> zeroinitializer, <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 64, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 64, i32 64, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 64, i32 64, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 64>
@@ -67,24 +67,24 @@ define <64 x i8> @shuffle_v64i8_02_03_04_05_06_07_08_09_10_11_12_13_14_15_zz_zz_
define <64 x i8> @shuffle_v64i8_79_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_95_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_111_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_127_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62(<64 x i8> %a, <64 x i8> %b) {
; AVX512F-LABEL: shuffle_v64i8_79_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_95_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_111_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_127_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpalignr {{.*#+}} ymm0 = ymm2[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm2[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX512F-NEXT: vpalignr {{.*#+}} ymm1 = ymm3[15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm3[31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_79_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_95_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_111_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_127_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpalignr {{.*#+}} zmm0 = zmm1[15],zmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zmm1[31],zmm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],zmm1[47],zmm0[32,33,34,35,36,37,38,39,40,41,42,43,44,45,46],zmm1[63],zmm0[48,49,50,51,52,53,54,55,56,57,58,59,60,61,62]
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_79_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_95_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_111_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_127_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm2[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm2[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm1 = ymm3[15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm3[31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_79_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_95_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30_111_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_127_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpalignr {{.*#+}} zmm0 = zmm1[15],zmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zmm1[31],zmm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],zmm1[47],zmm0[32,33,34,35,36,37,38,39,40,41,42,43,44,45,46],zmm1[63],zmm0[48,49,50,51,52,53,54,55,56,57,58,59,60,61,62]
; AVX512VBMI-NEXT: retq
%shuffle = shufflevector <64 x i8> %a, <64 x i8> %b, <64 x i32> <i32 79, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 95, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 111, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 127, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62>
@@ -94,7 +94,7 @@ define <64 x i8> @shuffle_v64i8_79_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_
define <64 x i8> @shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<64 x i8> %a) {
; AVX512F-LABEL: shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movl $255, %eax
; AVX512F-NEXT: vmovd %eax, %xmm1
; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -102,12 +102,12 @@ define <64 x i8> @shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<64 x i8> %a) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: movl $255, %eax
; AVX512DQ-NEXT: vmovd %eax, %xmm1
; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -115,7 +115,7 @@ define <64 x i8> @shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<64 x i8> %a) {
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI-NEXT: retq
%shuffle = shufflevector <64 x i8> %a, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64>
@@ -124,24 +124,24 @@ define <64 x i8> @shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<64 x i8> %a) {
define <64 x i8> @shuffle_v64i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<64 x i8> %a, <64 x i8> %b) {
; AVX512F-LABEL: shuffle_v64i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpbroadcastb %xmm0, %zmm0
; AVX512VBMI-NEXT: retq
%shuffle = shufflevector <64 x i8> %a, <64 x i8> %b, <64 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -150,7 +150,7 @@ define <64 x i8> @shuffle_v64i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <64 x i8> @shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00(<64 x i8> %a) {
; AVX512F-LABEL: shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; AVX512F-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
@@ -160,13 +160,13 @@ define <64 x i8> @shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48]
; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[6,7,4,5,2,3,0,1]
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
@@ -176,7 +176,7 @@ define <64 x i8> @shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vmovdqa64 {{.*#+}} zmm1 = [63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; AVX512VBMI-NEXT: vpermb %zmm0, %zmm1, %zmm0
; AVX512VBMI-NEXT: retq
@@ -186,24 +186,24 @@ define <64 x i8> @shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_
define <64 x i8> @insert_dup_mem_v64i8_i32(i32* %ptr) {
; AVX512F-LABEL: insert_dup_mem_v64i8_i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: insert_dup_mem_v64i8_i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: insert_dup_mem_v64i8_i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: insert_dup_mem_v64i8_i32:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpbroadcastb (%rdi), %zmm0
; AVX512VBMI-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -215,24 +215,24 @@ define <64 x i8> @insert_dup_mem_v64i8_i32(i32* %ptr) {
define <64 x i8> @insert_dup_mem_v64i8_sext_i8(i8* %ptr) {
; AVX512F-LABEL: insert_dup_mem_v64i8_sext_i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: insert_dup_mem_v64i8_sext_i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: insert_dup_mem_v64i8_sext_i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: insert_dup_mem_v64i8_sext_i8:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpbroadcastb (%rdi), %zmm0
; AVX512VBMI-NEXT: retq
%tmp = load i8, i8* %ptr, align 1
@@ -245,24 +245,24 @@ define <64 x i8> @insert_dup_mem_v64i8_sext_i8(i8* %ptr) {
define <64 x i8> @insert_dup_elt1_mem_v64i8_i32(i32* %ptr) {
; AVX512F-LABEL: insert_dup_elt1_mem_v64i8_i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb 1(%rdi), %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: insert_dup_elt1_mem_v64i8_i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb 1(%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: insert_dup_elt1_mem_v64i8_i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb 1(%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: insert_dup_elt1_mem_v64i8_i32:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpbroadcastb 1(%rdi), %zmm0
; AVX512VBMI-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -274,24 +274,24 @@ define <64 x i8> @insert_dup_elt1_mem_v64i8_i32(i32* %ptr) {
define <64 x i8> @insert_dup_elt3_mem_v64i8_i32(i32* %ptr) {
; AVX512F-LABEL: insert_dup_elt3_mem_v64i8_i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb 3(%rdi), %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: insert_dup_elt3_mem_v64i8_i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb 3(%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: insert_dup_elt3_mem_v64i8_i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastb 3(%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: insert_dup_elt3_mem_v64i8_i32:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpbroadcastb 3(%rdi), %zmm0
; AVX512VBMI-NEXT: retq
%tmp = load i32, i32* %ptr, align 4
@@ -303,7 +303,7 @@ define <64 x i8> @insert_dup_elt3_mem_v64i8_i32(i32* %ptr) {
define <64 x i8> @insert_dup_elt1_mem_v64i8_sext_i8(i8* %ptr) {
; AVX512F-LABEL: insert_dup_elt1_mem_v64i8_sext_i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movsbl (%rdi), %eax
; AVX512F-NEXT: shrl $8, %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
@@ -312,14 +312,14 @@ define <64 x i8> @insert_dup_elt1_mem_v64i8_sext_i8(i8* %ptr) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: insert_dup_elt1_mem_v64i8_sext_i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movsbl (%rdi), %eax
; AVX512BW-NEXT: shrl $8, %eax
; AVX512BW-NEXT: vpbroadcastb %eax, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: insert_dup_elt1_mem_v64i8_sext_i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: movsbl (%rdi), %eax
; AVX512DQ-NEXT: shrl $8, %eax
; AVX512DQ-NEXT: vmovd %eax, %xmm0
@@ -328,7 +328,7 @@ define <64 x i8> @insert_dup_elt1_mem_v64i8_sext_i8(i8* %ptr) {
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: insert_dup_elt1_mem_v64i8_sext_i8:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: movsbl (%rdi), %eax
; AVX512VBMI-NEXT: shrl $8, %eax
; AVX512VBMI-NEXT: vpbroadcastb %eax, %zmm0
@@ -343,7 +343,7 @@ define <64 x i8> @insert_dup_elt1_mem_v64i8_sext_i8(i8* %ptr) {
define <64 x i8> @shuffle_v64i8_64_zz_zz_zz_zz_zz_zz_zz_65_zz_zz_zz_zz_zz_zz_zz_66_zz_zz_zz_zz_zz_zz_zz_67_zz_zz_zz_zz_zz_zz_zz_68_zz_zz_zz_zz_zz_zz_zz_69_zz_zz_zz_zz_zz_zz_zz_70_zz_zz_zz_zz_zz_zz_zz_71_zz_zz_zz_zz_zz_zz_zz(<64 x i8> %a) {
; AVX512F-LABEL: shuffle_v64i8_64_zz_zz_zz_zz_zz_zz_zz_65_zz_zz_zz_zz_zz_zz_zz_66_zz_zz_zz_zz_zz_zz_zz_67_zz_zz_zz_zz_zz_zz_zz_68_zz_zz_zz_zz_zz_zz_zz_69_zz_zz_zz_zz_zz_zz_zz_70_zz_zz_zz_zz_zz_zz_zz_71_zz_zz_zz_zz_zz_zz_zz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxbq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX512F-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
@@ -351,12 +351,12 @@ define <64 x i8> @shuffle_v64i8_64_zz_zz_zz_zz_zz_zz_zz_65_zz_zz_zz_zz_zz_zz_zz_
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_64_zz_zz_zz_zz_zz_zz_zz_65_zz_zz_zz_zz_zz_zz_zz_66_zz_zz_zz_zz_zz_zz_zz_67_zz_zz_zz_zz_zz_zz_zz_68_zz_zz_zz_zz_zz_zz_zz_69_zz_zz_zz_zz_zz_zz_zz_70_zz_zz_zz_zz_zz_zz_zz_71_zz_zz_zz_zz_zz_zz_zz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_64_zz_zz_zz_zz_zz_zz_zz_65_zz_zz_zz_zz_zz_zz_zz_66_zz_zz_zz_zz_zz_zz_zz_67_zz_zz_zz_zz_zz_zz_zz_68_zz_zz_zz_zz_zz_zz_zz_69_zz_zz_zz_zz_zz_zz_zz_70_zz_zz_zz_zz_zz_zz_zz_71_zz_zz_zz_zz_zz_zz_zz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
@@ -364,7 +364,7 @@ define <64 x i8> @shuffle_v64i8_64_zz_zz_zz_zz_zz_zz_zz_65_zz_zz_zz_zz_zz_zz_zz_
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_64_zz_zz_zz_zz_zz_zz_zz_65_zz_zz_zz_zz_zz_zz_zz_66_zz_zz_zz_zz_zz_zz_zz_67_zz_zz_zz_zz_zz_zz_zz_68_zz_zz_zz_zz_zz_zz_zz_69_zz_zz_zz_zz_zz_zz_zz_70_zz_zz_zz_zz_zz_zz_zz_71_zz_zz_zz_zz_zz_zz_zz:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI-NEXT: retq
%shuffle = shufflevector <64 x i8> zeroinitializer, <64 x i8> %a, <64 x i32> <i32 64, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 65, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 66, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 67, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 68, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 69, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 70, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 71, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -373,7 +373,7 @@ define <64 x i8> @shuffle_v64i8_64_zz_zz_zz_zz_zz_zz_zz_65_zz_zz_zz_zz_zz_zz_zz_
define <64 x i8> @shuffle_v64i8_64_zz_zz_zz_65_zz_zz_zz_66_zz_zz_zz_67_zz_zz_zz_68_zz_zz_zz_69_zz_zz_zz_70_zz_zz_zz_71_zz_zz_zz_72_zz_zz_zz_73_zz_zz_zz_74_zz_zz_zz_75_zz_zz_zz_76_zz_zz_zz_77_zz_zz_zz_78_zz_zz_zz_79_zz_zz_zz(<64 x i8> %a) {
; AVX512F-LABEL: shuffle_v64i8_64_zz_zz_zz_65_zz_zz_zz_66_zz_zz_zz_67_zz_zz_zz_68_zz_zz_zz_69_zz_zz_zz_70_zz_zz_zz_71_zz_zz_zz_72_zz_zz_zz_73_zz_zz_zz_74_zz_zz_zz_75_zz_zz_zz_76_zz_zz_zz_77_zz_zz_zz_78_zz_zz_zz_79_zz_zz_zz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512F-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -381,12 +381,12 @@ define <64 x i8> @shuffle_v64i8_64_zz_zz_zz_65_zz_zz_zz_66_zz_zz_zz_67_zz_zz_zz_
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_64_zz_zz_zz_65_zz_zz_zz_66_zz_zz_zz_67_zz_zz_zz_68_zz_zz_zz_69_zz_zz_zz_70_zz_zz_zz_71_zz_zz_zz_72_zz_zz_zz_73_zz_zz_zz_74_zz_zz_zz_75_zz_zz_zz_76_zz_zz_zz_77_zz_zz_zz_78_zz_zz_zz_79_zz_zz_zz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_64_zz_zz_zz_65_zz_zz_zz_66_zz_zz_zz_67_zz_zz_zz_68_zz_zz_zz_69_zz_zz_zz_70_zz_zz_zz_71_zz_zz_zz_72_zz_zz_zz_73_zz_zz_zz_74_zz_zz_zz_75_zz_zz_zz_76_zz_zz_zz_77_zz_zz_zz_78_zz_zz_zz_79_zz_zz_zz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -394,7 +394,7 @@ define <64 x i8> @shuffle_v64i8_64_zz_zz_zz_65_zz_zz_zz_66_zz_zz_zz_67_zz_zz_zz_
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_64_zz_zz_zz_65_zz_zz_zz_66_zz_zz_zz_67_zz_zz_zz_68_zz_zz_zz_69_zz_zz_zz_70_zz_zz_zz_71_zz_zz_zz_72_zz_zz_zz_73_zz_zz_zz_74_zz_zz_zz_75_zz_zz_zz_76_zz_zz_zz_77_zz_zz_zz_78_zz_zz_zz_79_zz_zz_zz:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VBMI-NEXT: retq
%shuffle = shufflevector <64 x i8> zeroinitializer, <64 x i8> %a, <64 x i32> <i32 64, i32 0, i32 0, i32 0, i32 65, i32 0, i32 0, i32 0, i32 66, i32 0, i32 0, i32 0, i32 67, i32 0, i32 0, i32 0, i32 68, i32 0, i32 0, i32 0, i32 69, i32 0, i32 0, i32 0, i32 70, i32 0, i32 0, i32 0, i32 71, i32 0, i32 0, i32 0, i32 72, i32 0, i32 0, i32 0, i32 73, i32 0, i32 0, i32 0, i32 74, i32 0, i32 0, i32 0, i32 75, i32 0, i32 0, i32 0, i32 76, i32 0, i32 0, i32 0, i32 77, i32 0, i32 0, i32 0, i32 78, i32 0, i32 0, i32 0, i32 79, i32 0, i32 0, i32 0>
@@ -403,7 +403,7 @@ define <64 x i8> @shuffle_v64i8_64_zz_zz_zz_65_zz_zz_zz_66_zz_zz_zz_67_zz_zz_zz_
define <64 x i8> @shuffle_v64i8_64_zz_65_zz_66_zz_67_zz_68_zz_69_zz_70_zz_71_zz_72_zz_73_zz_74_zz_75_zz_76_zz_77_zz_78_zz_79_zz_80_zz_81_zz_82_zz_83_zz_84_zz_85_zz_86_zz_87_zz_88_zz_89_zz_90_zz_91_zz_92_zz_93_zz_94_zz_95_zz(<64 x i8> %a) {
; AVX512F-LABEL: shuffle_v64i8_64_zz_65_zz_66_zz_67_zz_68_zz_69_zz_70_zz_71_zz_72_zz_73_zz_74_zz_75_zz_76_zz_77_zz_78_zz_79_zz_80_zz_81_zz_82_zz_83_zz_84_zz_85_zz_86_zz_87_zz_88_zz_89_zz_90_zz_91_zz_92_zz_93_zz_94_zz_95_zz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -411,12 +411,12 @@ define <64 x i8> @shuffle_v64i8_64_zz_65_zz_66_zz_67_zz_68_zz_69_zz_70_zz_71_zz_
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_64_zz_65_zz_66_zz_67_zz_68_zz_69_zz_70_zz_71_zz_72_zz_73_zz_74_zz_75_zz_76_zz_77_zz_78_zz_79_zz_80_zz_81_zz_82_zz_83_zz_84_zz_85_zz_86_zz_87_zz_88_zz_89_zz_90_zz_91_zz_92_zz_93_zz_94_zz_95_zz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_64_zz_65_zz_66_zz_67_zz_68_zz_69_zz_70_zz_71_zz_72_zz_73_zz_74_zz_75_zz_76_zz_77_zz_78_zz_79_zz_80_zz_81_zz_82_zz_83_zz_84_zz_85_zz_86_zz_87_zz_88_zz_89_zz_90_zz_91_zz_92_zz_93_zz_94_zz_95_zz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -424,7 +424,7 @@ define <64 x i8> @shuffle_v64i8_64_zz_65_zz_66_zz_67_zz_68_zz_69_zz_70_zz_71_zz_
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_64_zz_65_zz_66_zz_67_zz_68_zz_69_zz_70_zz_71_zz_72_zz_73_zz_74_zz_75_zz_76_zz_77_zz_78_zz_79_zz_80_zz_81_zz_82_zz_83_zz_84_zz_85_zz_86_zz_87_zz_88_zz_89_zz_90_zz_91_zz_92_zz_93_zz_94_zz_95_zz:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512VBMI-NEXT: retq
%shuffle = shufflevector <64 x i8> zeroinitializer, <64 x i8> %a, <64 x i32> <i32 64, i32 0, i32 65, i32 0, i32 66, i32 0, i32 67, i32 0, i32 68, i32 0, i32 69, i32 0, i32 70, i32 0, i32 71, i32 0, i32 72, i32 0, i32 73, i32 0, i32 74, i32 0, i32 75, i32 0, i32 76, i32 0, i32 77, i32 0, i32 78, i32 0, i32 79, i32 0, i32 80, i32 0, i32 81, i32 0, i32 82, i32 0, i32 83, i32 0, i32 84, i32 0, i32 85, i32 0, i32 86, i32 0, i32 87, i32 0, i32 88, i32 0, i32 89, i32 0, i32 90, i32 0, i32 91, i32 0, i32 92, i32 0, i32 93, i32 0, i32 94, i32 0, i32 95, i32 0>
@@ -433,7 +433,7 @@ define <64 x i8> @shuffle_v64i8_64_zz_65_zz_66_zz_67_zz_68_zz_69_zz_70_zz_71_zz_
define <64 x i8> @shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_47_zz_45_zz_43_zz_41_zz_39_zz_37_zz_35_zz_33_zz_31_zz_29_zz_27_zz_25_zz_23_zz_21_zz_19_zz_17_zz_15_zz_13_zz_11_zz_9_zz_7_zz_5_zz_3_zz_1_zz(<64 x i8> %a) {
; AVX512F-LABEL: shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_47_zz_45_zz_43_zz_41_zz_39_zz_37_zz_35_zz_33_zz_31_zz_29_zz_27_zz_25_zz_23_zz_21_zz_19_zz_17_zz_15_zz_13_zz_11_zz_9_zz_7_zz_5_zz_3_zz_1_zz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = <15,u,13,u,11,u,9,u,7,u,5,u,3,u,1,u,15,u,13,u,11,u,9,u,7,u,5,u,3,u,1,u>
; AVX512F-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
@@ -446,7 +446,7 @@ define <64 x i8> @shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_47_zz_45_zz_43_zz_41_zz_39_zz_37_zz_35_zz_33_zz_31_zz_29_zz_27_zz_25_zz_23_zz_21_zz_19_zz_17_zz_15_zz_13_zz_11_zz_9_zz_7_zz_5_zz_3_zz_1_zz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = <15,u,13,u,11,u,9,u,7,u,5,u,3,u,1,u,15,u,13,u,11,u,9,u,7,u,5,u,3,u,1,u>
; AVX512BW-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
@@ -460,7 +460,7 @@ define <64 x i8> @shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_47_zz_45_zz_43_zz_41_zz_39_zz_37_zz_35_zz_33_zz_31_zz_29_zz_27_zz_25_zz_23_zz_21_zz_19_zz_17_zz_15_zz_13_zz_11_zz_9_zz_7_zz_5_zz_3_zz_1_zz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = <15,u,13,u,11,u,9,u,7,u,5,u,3,u,1,u,15,u,13,u,11,u,9,u,7,u,5,u,3,u,1,u>
; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
@@ -473,7 +473,7 @@ define <64 x i8> @shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_47_zz_45_zz_43_zz_41_zz_39_zz_37_zz_35_zz_33_zz_31_zz_29_zz_27_zz_25_zz_23_zz_21_zz_19_zz_17_zz_15_zz_13_zz_11_zz_9_zz_7_zz_5_zz_3_zz_1_zz:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VBMI-NEXT: vmovdqa64 {{.*#+}} zmm2 = [63,65,61,67,59,69,57,71,55,73,53,75,51,77,49,79,47,81,45,83,43,85,41,87,39,89,37,91,35,93,33,95,31,97,29,99,27,101,25,103,23,105,21,107,19,109,17,111,15,113,13,115,11,117,9,119,7,121,5,123,3,125,1,127]
; AVX512VBMI-NEXT: vpermt2b %zmm1, %zmm2, %zmm0
@@ -484,7 +484,7 @@ define <64 x i8> @shuffle_v64i8_63_zz_61_zz_59_zz_57_zz_55_zz_53_zz_51_zz_49_zz_
define <64 x i8> @shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_47_80_45_82_43_84_41_86_39_88_37_90_35_92_33_94_31_96_29_98_27_100_25_102_23_104_21_106_19_108_17_110_15_112_13_114_11_116_9_118_7_120_5_122_3_124_1_126(<64 x i8> %a, <64 x i8> %b) {
; AVX512F-LABEL: shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_47_80_45_82_43_84_41_86_39_88_37_90_35_92_33_94_31_96_29_98_27_100_25_102_23_104_21_106_19_108_17_110_15_112_13_114_11_116_9_118_7_120_5_122_3_124_1_126:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
@@ -499,7 +499,7 @@ define <64 x i8> @shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_47_80_45_82_43_84_41_86_39_88_37_90_35_92_33_94_31_96_29_98_27_100_25_102_23_104_21_106_19_108_17_110_15_112_13_114_11_116_9_118_7_120_5_122_3_124_1_126:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512BW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm2
@@ -516,7 +516,7 @@ define <64 x i8> @shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_47_80_45_82_43_84_41_86_39_88_37_90_35_92_33_94_31_96_29_98_27_100_25_102_23_104_21_106_19_108_17_110_15_112_13_114_11_116_9_118_7_120_5_122_3_124_1_126:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
@@ -531,7 +531,7 @@ define <64 x i8> @shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_47_80_45_82_43_84_41_86_39_88_37_90_35_92_33_94_31_96_29_98_27_100_25_102_23_104_21_106_19_108_17_110_15_112_13_114_11_116_9_118_7_120_5_122_3_124_1_126:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vmovdqa64 {{.*#+}} zmm2 = [63,64,61,66,59,68,57,70,55,72,53,74,51,76,49,78,47,80,45,82,43,84,41,86,39,88,37,90,35,92,33,94,31,96,29,98,27,100,25,102,23,104,21,106,19,108,17,110,15,112,13,114,11,116,9,118,7,120,5,122,3,124,1,126]
; AVX512VBMI-NEXT: vpermt2b %zmm1, %zmm2, %zmm0
; AVX512VBMI-NEXT: retq
@@ -541,7 +541,7 @@ define <64 x i8> @shuffle_v64i8_63_64_61_66_59_68_57_70_55_72_53_74_51_76_49_78_
define <64 x i8> @shuffle_v64i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62_64_66_68_70_72_74_76_78_80_82_84_86_88_90_92_94_96_98_100_102_104_106_108_110_112_114_116_118_120_122_124_126(<32 x i16> %a0, <32 x i16> %a1) {
; AVX512F-LABEL: shuffle_v64i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62_64_66_68_70_72_74_76_78_80_82_84_86_88_90_92_94_96_98_100_102_104_106_108_110_112_114_116_118_120_122_124_126:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
@@ -553,7 +553,7 @@ define <64 x i8> @shuffle_v64i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62_64_66_68_70_72_74_76_78_80_82_84_86_88_90_92_94_96_98_100_102_104_106_108_110_112_114_116_118_120_122_124_126:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm2
@@ -566,7 +566,7 @@ define <64 x i8> @shuffle_v64i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62_64_66_68_70_72_74_76_78_80_82_84_86_88_90_92_94_96_98_100_102_104_106_108_110_112_114_116_118_120_122_124_126:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
@@ -578,7 +578,7 @@ define <64 x i8> @shuffle_v64i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_shift_00_02_04_06_08_10_12_14_16_18_20_22_24_26_28_30_32_34_36_38_40_42_44_46_48_50_52_54_56_58_60_62_64_66_68_70_72_74_76_78_80_82_84_86_88_90_92_94_96_98_100_102_104_106_108_110_112_114_116_118_120_122_124_126:
-; AVX512VBMI: # BB#0:
+; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63,65,67,69,71,73,75,77,79,81,83,85,87,89,91,93,95,97,99,101,103,105,107,109,111,113,115,117,119,121,123,125,127]
; AVX512VBMI-NEXT: vpermt2b %zmm1, %zmm2, %zmm0
; AVX512VBMI-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index a4f67195f72..fd33c0aeb59 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -4,12 +4,12 @@
define <8 x double> @shuffle_v8f64_00000000(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00000000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00000000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -18,13 +18,13 @@ define <8 x double> @shuffle_v8f64_00000000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_22222222(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_22222222:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_22222222:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -34,13 +34,13 @@ define <8 x double> @shuffle_v8f64_22222222(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_44444444(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_44444444:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_44444444:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -50,13 +50,13 @@ define <8 x double> @shuffle_v8f64_44444444(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_44444444_bc(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8f64_44444444_bc:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_44444444_bc:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -68,13 +68,13 @@ define <8 x double> @shuffle_v8f64_44444444_bc(<8 x i64> %a, <8 x i64> %b) {
define <8 x double> @shuffle_v8f64_00000010(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00000010:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,1,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00000010:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -84,13 +84,13 @@ define <8 x double> @shuffle_v8f64_00000010(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00000200(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00000200:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,2,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00000200:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -100,13 +100,13 @@ define <8 x double> @shuffle_v8f64_00000200(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00003000(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00003000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,3,0,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00003000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -116,13 +116,13 @@ define <8 x double> @shuffle_v8f64_00003000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00040000(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00040000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,4,0,0,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00040000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -132,13 +132,13 @@ define <8 x double> @shuffle_v8f64_00040000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00500000(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00500000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,5,0,0,0,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00500000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -148,13 +148,13 @@ define <8 x double> @shuffle_v8f64_00500000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_06000000(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_06000000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,6,0,0,0,0,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_06000000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -164,14 +164,14 @@ define <8 x double> @shuffle_v8f64_06000000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_70000000(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_70000000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movl $7, %eax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpermq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_70000000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl $7, %eax
; AVX512F-32-NEXT: vmovd %eax, %xmm1
; AVX512F-32-NEXT: vpermq %zmm0, %zmm1, %zmm0
@@ -182,12 +182,12 @@ define <8 x double> @shuffle_v8f64_70000000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01014545(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_01014545:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_01014545:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
@@ -196,13 +196,13 @@ define <8 x double> @shuffle_v8f64_01014545(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00112233(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00112233:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,1,1,2,2,3,3]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00112233:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,1,0,1,0,2,0,2,0,3,0,3,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -212,13 +212,13 @@ define <8 x double> @shuffle_v8f64_00112233(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00001111(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00001111:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,1,1,1,1]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00001111:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -229,12 +229,12 @@ define <8 x double> @shuffle_v8f64_00001111(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_81a3c5e7(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_81a3c5e7:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vshufpd {{.*#+}} zmm0 = zmm1[0],zmm0[1],zmm1[2],zmm0[3],zmm1[4],zmm0[5],zmm1[6],zmm0[7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_81a3c5e7:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vshufpd {{.*#+}} zmm0 = zmm1[0],zmm0[1],zmm1[2],zmm0[3],zmm1[4],zmm0[5],zmm1[6],zmm0[7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
@@ -244,13 +244,13 @@ define <8 x double> @shuffle_v8f64_81a3c5e7(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08080808(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_08080808:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_08080808:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [0,0,8,0,0,0,8,0,0,0,8,0,0,0,8,0]
; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -261,13 +261,13 @@ define <8 x double> @shuffle_v8f64_08080808(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08084c4c(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_08084c4c:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [0,8,0,8,4,12,4,12]
; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_08084c4c:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [0,0,8,0,0,0,8,0,4,0,12,0,4,0,12,0]
; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -278,14 +278,14 @@ define <8 x double> @shuffle_v8f64_08084c4c(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_8823cc67(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_8823cc67:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [0,0,10,11,4,4,14,15]
; AVX512F-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovapd %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_8823cc67:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [0,0,0,0,10,0,11,0,4,0,4,0,14,0,15,0]
; AVX512F-32-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovapd %zmm2, %zmm0
@@ -297,14 +297,14 @@ define <8 x double> @shuffle_v8f64_8823cc67(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_9832dc76(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_9832dc76:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [1,0,11,10,5,4,15,14]
; AVX512F-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovapd %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_9832dc76:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [1,0,0,0,11,0,10,0,5,0,4,0,15,0,14,0]
; AVX512F-32-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovapd %zmm2, %zmm0
@@ -316,14 +316,14 @@ define <8 x double> @shuffle_v8f64_9832dc76(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_9810dc54(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_9810dc54:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [1,0,9,8,5,4,13,12]
; AVX512F-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovapd %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_9810dc54:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [1,0,0,0,9,0,8,0,5,0,4,0,13,0,12,0]
; AVX512F-32-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovapd %zmm2, %zmm0
@@ -335,13 +335,13 @@ define <8 x double> @shuffle_v8f64_9810dc54(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08194c5d(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_08194c5d:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [0,8,1,9,4,12,5,13]
; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_08194c5d:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [0,0,8,0,1,0,9,0,4,0,12,0,5,0,13,0]
; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -352,13 +352,13 @@ define <8 x double> @shuffle_v8f64_08194c5d(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_2a3b6e7f(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_2a3b6e7f:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [2,10,3,11,6,14,7,15]
; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_2a3b6e7f:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [2,0,10,0,3,0,11,0,6,0,14,0,7,0,15,0]
; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -369,13 +369,13 @@ define <8 x double> @shuffle_v8f64_2a3b6e7f(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08192a3b(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_08192a3b:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [0,8,1,9,2,10,3,11]
; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_08192a3b:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [0,0,8,0,1,0,9,0,2,0,10,0,3,0,11,0]
; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -386,14 +386,14 @@ define <8 x double> @shuffle_v8f64_08192a3b(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08991abb(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_08991abb:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [8,0,1,1,9,2,3,3]
; AVX512F-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovapd %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_08991abb:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [8,0,0,0,1,0,1,0,9,0,2,0,3,0,3,0]
; AVX512F-32-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovapd %zmm2, %zmm0
@@ -405,13 +405,13 @@ define <8 x double> @shuffle_v8f64_08991abb(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_091b2d3f(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_091b2d3f:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [0,9,1,11,2,13,3,15]
; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_091b2d3f:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [0,0,9,0,1,0,11,0,2,0,13,0,3,0,15,0]
; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -422,14 +422,14 @@ define <8 x double> @shuffle_v8f64_091b2d3f(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_09ab1def(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_09ab1def:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [8,1,2,3,9,5,6,7]
; AVX512F-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovapd %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_09ab1def:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [8,0,1,0,2,0,3,0,9,0,5,0,6,0,7,0]
; AVX512F-32-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovapd %zmm2, %zmm0
@@ -441,12 +441,12 @@ define <8 x double> @shuffle_v8f64_09ab1def(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00014445(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00014445:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00014445:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 4, i32 4, i32 4, i32 5>
@@ -456,12 +456,12 @@ define <8 x double> @shuffle_v8f64_00014445(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00204464(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00204464:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,2,0,4,4,6,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00204464:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,2,0,4,4,6,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 4, i32 6, i32 4>
@@ -471,12 +471,12 @@ define <8 x double> @shuffle_v8f64_00204464(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_03004744(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_03004744:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,3,0,0,4,7,4,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_03004744:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,3,0,0,4,7,4,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 7, i32 4, i32 4>
@@ -486,12 +486,12 @@ define <8 x double> @shuffle_v8f64_03004744(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10005444(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_10005444:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_10005444:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 5, i32 4, i32 4, i32 4>
@@ -501,12 +501,12 @@ define <8 x double> @shuffle_v8f64_10005444(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_22006644(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_22006644:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[2,2,0,0,6,6,4,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_22006644:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[2,2,0,0,6,6,4,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 6, i32 4, i32 4>
@@ -516,12 +516,12 @@ define <8 x double> @shuffle_v8f64_22006644(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_33307774(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_33307774:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,3,3,0,7,7,7,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_33307774:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,3,3,0,7,7,7,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 7, i32 7, i32 4>
@@ -531,12 +531,12 @@ define <8 x double> @shuffle_v8f64_33307774(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_32107654(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_32107654:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_32107654:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -546,12 +546,12 @@ define <8 x double> @shuffle_v8f64_32107654(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00234467(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00234467:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,2,3,4,4,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00234467:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,2,3,4,4,6,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 4, i32 4, i32 6, i32 7>
@@ -561,12 +561,12 @@ define <8 x double> @shuffle_v8f64_00234467(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00224466(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00224466:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00224466:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -576,12 +576,12 @@ define <8 x double> @shuffle_v8f64_00224466(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10325476(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_10325476:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_10325476:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -591,12 +591,12 @@ define <8 x double> @shuffle_v8f64_10325476(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_11335577(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_11335577:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_11335577:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -606,12 +606,12 @@ define <8 x double> @shuffle_v8f64_11335577(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10235467(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_10235467:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,2,3,5,4,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_10235467:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,2,3,5,4,6,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
@@ -621,12 +621,12 @@ define <8 x double> @shuffle_v8f64_10235467(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10225466(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_10225466:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,2,2,5,4,6,6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_10225466:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,2,2,5,4,6,6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 2, i32 5, i32 4, i32 6, i32 6>
@@ -636,13 +636,13 @@ define <8 x double> @shuffle_v8f64_10225466(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00015444(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00015444:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,1,5,4,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00015444:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,1,0,5,0,4,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -653,13 +653,13 @@ define <8 x double> @shuffle_v8f64_00015444(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00204644(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00204644:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,2,0,4,6,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00204644:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,2,0,0,0,4,0,6,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -670,13 +670,13 @@ define <8 x double> @shuffle_v8f64_00204644(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_03004474(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_03004474:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,3,0,0,4,4,7,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_03004474:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,3,0,0,0,0,0,4,0,4,0,7,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -687,13 +687,13 @@ define <8 x double> @shuffle_v8f64_03004474(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10004444(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_10004444:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [1,0,0,0,4,4,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_10004444:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [1,0,0,0,0,0,0,0,4,0,4,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -704,13 +704,13 @@ define <8 x double> @shuffle_v8f64_10004444(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_22006446(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_22006446:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [2,2,0,0,6,4,4,6]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_22006446:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [2,0,2,0,0,0,0,0,6,0,4,0,4,0,6,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -721,13 +721,13 @@ define <8 x double> @shuffle_v8f64_22006446(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_33307474(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_33307474:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [3,3,3,0,7,4,7,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_33307474:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [3,0,3,0,3,0,0,0,7,0,4,0,7,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -738,13 +738,13 @@ define <8 x double> @shuffle_v8f64_33307474(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_32104567(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_32104567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [3,2,1,0,4,5,6,7]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_32104567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [3,0,2,0,1,0,0,0,4,0,5,0,6,0,7,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -755,13 +755,13 @@ define <8 x double> @shuffle_v8f64_32104567(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00236744(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00236744:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,2,3,6,7,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00236744:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,2,0,3,0,6,0,7,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -772,13 +772,13 @@ define <8 x double> @shuffle_v8f64_00236744(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00226644(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00226644:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,2,2,6,6,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00226644:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,2,0,2,0,6,0,6,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -789,12 +789,12 @@ define <8 x double> @shuffle_v8f64_00226644(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10324567(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_10324567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_10324567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7>
@@ -804,12 +804,12 @@ define <8 x double> @shuffle_v8f64_10324567(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_11334567(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_11334567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,1,3,3,4,5,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_11334567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,1,3,3,4,5,6,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -819,12 +819,12 @@ define <8 x double> @shuffle_v8f64_11334567(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01235467(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_01235467:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_01235467:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,6,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
@@ -834,12 +834,12 @@ define <8 x double> @shuffle_v8f64_01235467(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01235466(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_01235466:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,6,6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_01235466:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,6,6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 6, i32 6>
@@ -849,13 +849,13 @@ define <8 x double> @shuffle_v8f64_01235466(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_002u6u44(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_002u6u44:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,2,u,6,u,4,4>
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_002u6u44:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,0,0,2,0,u,u,6,0,u,u,4,0,4,0>
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -866,13 +866,13 @@ define <8 x double> @shuffle_v8f64_002u6u44(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00uu66uu(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_00uu66uu:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,u,u,6,6,u,u>
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_00uu66uu:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,0,0,u,u,u,u,6,0,6,0,u,u,u,u>
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -883,12 +883,12 @@ define <8 x double> @shuffle_v8f64_00uu66uu(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_103245uu(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_103245uu:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_103245uu:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 undef, i32 undef>
@@ -898,12 +898,12 @@ define <8 x double> @shuffle_v8f64_103245uu(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_1133uu67(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_1133uu67:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,1,3,3,4,4,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_1133uu67:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[1,1,3,3,4,4,6,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 undef, i32 6, i32 7>
@@ -913,12 +913,12 @@ define <8 x double> @shuffle_v8f64_1133uu67(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_0uu354uu(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_0uu354uu:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,2,3,5,4,6,6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_0uu354uu:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,2,3,5,4,6,6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 undef, i32 undef, i32 3, i32 5, i32 4, i32 undef, i32 undef>
@@ -928,12 +928,12 @@ define <8 x double> @shuffle_v8f64_0uu354uu(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_uuu3uu66(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_uuu3uu66:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,2,3,4,4,6,6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_uuu3uu66:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,2,3,4,4,6,6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 6, i32 6>
@@ -943,14 +943,14 @@ define <8 x double> @shuffle_v8f64_uuu3uu66(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_c348cda0(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_c348cda0:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [4,11,12,0,4,5,2,8]
; AVX512F-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovapd %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_c348cda0:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [4,0,11,0,12,0,0,0,4,0,5,0,2,0,8,0]
; AVX512F-32-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovapd %zmm2, %zmm0
@@ -962,13 +962,13 @@ define <8 x double> @shuffle_v8f64_c348cda0(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_f511235a(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_f511235a:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [15,5,1,1,2,3,5,10]
; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_f511235a:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [15,0,5,0,1,0,1,0,2,0,3,0,5,0,10,0]
; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -978,14 +978,14 @@ define <8 x double> @shuffle_v8f64_f511235a(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_1z2z5z6z(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_1z2z5z6z:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [1,8,2,8,5,8,6,8]
; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_1z2z5z6z:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [1,0,8,0,2,0,8,0,5,0,8,0,6,0,8,0]
; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
@@ -997,12 +997,12 @@ define <8 x double> @shuffle_v8f64_1z2z5z6z(<8 x double> %a, <8 x double> %b) {
define <8 x i64> @shuffle_v8i64_00000000(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00000000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00000000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -1011,13 +1011,13 @@ define <8 x i64> @shuffle_v8i64_00000000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_44444444(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_44444444:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_44444444:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1027,13 +1027,13 @@ define <8 x i64> @shuffle_v8i64_44444444(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_66666666(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_66666666:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_66666666:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -1044,13 +1044,13 @@ define <8 x i64> @shuffle_v8i64_66666666(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00000010(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00000010:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,1,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00000010:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1061,13 +1061,13 @@ define <8 x i64> @shuffle_v8i64_00000010(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00000200(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00000200:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,2,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00000200:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1078,13 +1078,13 @@ define <8 x i64> @shuffle_v8i64_00000200(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00003000(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00003000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,3,0,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00003000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1095,13 +1095,13 @@ define <8 x i64> @shuffle_v8i64_00003000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00040000(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00040000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,4,0,0,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00040000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1112,13 +1112,13 @@ define <8 x i64> @shuffle_v8i64_00040000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00500000(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00500000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,5,0,0,0,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00500000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1129,13 +1129,13 @@ define <8 x i64> @shuffle_v8i64_00500000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_06000000(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_06000000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,6,0,0,0,0,0,0]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_06000000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1146,14 +1146,14 @@ define <8 x i64> @shuffle_v8i64_06000000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_70000000(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_70000000:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movl $7, %eax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpermq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_70000000:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl $7, %eax
; AVX512F-32-NEXT: vmovd %eax, %xmm1
; AVX512F-32-NEXT: vpermq %zmm0, %zmm1, %zmm0
@@ -1164,12 +1164,12 @@ define <8 x i64> @shuffle_v8i64_70000000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01014545(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_01014545:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_01014545:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
; AVX512F-32-NEXT: retl
@@ -1179,12 +1179,12 @@ define <8 x i64> @shuffle_v8i64_01014545(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01014545_mem(<8 x i64>* %ptr, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_01014545_mem:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,1,0,1,4,5,4,5]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_01014545_mem:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,1,0,1,4,5,4,5]
; AVX512F-32-NEXT: retl
@@ -1197,13 +1197,13 @@ define <8 x i64> @shuffle_v8i64_01014545_mem(<8 x i64>* %ptr, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00112233(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00112233:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,1,1,2,2,3,3]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00112233:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,1,0,1,0,2,0,2,0,3,0,3,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1214,13 +1214,13 @@ define <8 x i64> @shuffle_v8i64_00112233(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00001111(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00001111:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,1,1,1,1]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00001111:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1231,14 +1231,14 @@ define <8 x i64> @shuffle_v8i64_00001111(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_81a3c5e7(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_81a3c5e7:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movb $-86, %al
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_81a3c5e7:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movb $-86, %al
; AVX512F-32-NEXT: kmovw %eax, %k1
; AVX512F-32-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
@@ -1250,13 +1250,13 @@ define <8 x i64> @shuffle_v8i64_81a3c5e7(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08080808(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_08080808:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
; AVX512F-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_08080808:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,8,0,0,0,8,0,0,0,8,0,0,0,8,0]
; AVX512F-32-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -1267,13 +1267,13 @@ define <8 x i64> @shuffle_v8i64_08080808(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08084c4c(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_08084c4c:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,0,8,4,12,4,12]
; AVX512F-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_08084c4c:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,8,0,0,0,8,0,4,0,12,0,4,0,12,0]
; AVX512F-32-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -1284,14 +1284,14 @@ define <8 x i64> @shuffle_v8i64_08084c4c(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_8823cc67(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_8823cc67:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,10,11,4,4,14,15]
; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_8823cc67:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,0,0,10,0,11,0,4,0,4,0,14,0,15,0]
; AVX512F-32-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1303,14 +1303,14 @@ define <8 x i64> @shuffle_v8i64_8823cc67(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_9832dc76(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_9832dc76:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,0,11,10,5,4,15,14]
; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_9832dc76:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,0,0,0,11,0,10,0,5,0,4,0,15,0,14,0]
; AVX512F-32-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1322,14 +1322,14 @@ define <8 x i64> @shuffle_v8i64_9832dc76(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_9810dc54(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_9810dc54:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,0,9,8,5,4,13,12]
; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_9810dc54:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,0,0,0,9,0,8,0,5,0,4,0,13,0,12,0]
; AVX512F-32-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1341,13 +1341,13 @@ define <8 x i64> @shuffle_v8i64_9810dc54(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08194c5d(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_08194c5d:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,1,9,4,12,5,13]
; AVX512F-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_08194c5d:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,8,0,1,0,9,0,4,0,12,0,5,0,13,0]
; AVX512F-32-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -1358,13 +1358,13 @@ define <8 x i64> @shuffle_v8i64_08194c5d(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_2a3b6e7f(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_2a3b6e7f:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [2,10,3,11,6,14,7,15]
; AVX512F-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_2a3b6e7f:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [2,0,10,0,3,0,11,0,6,0,14,0,7,0,15,0]
; AVX512F-32-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -1375,13 +1375,13 @@ define <8 x i64> @shuffle_v8i64_2a3b6e7f(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08192a3b(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_08192a3b:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,1,9,2,10,3,11]
; AVX512F-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_08192a3b:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,8,0,1,0,9,0,2,0,10,0,3,0,11,0]
; AVX512F-32-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -1392,14 +1392,14 @@ define <8 x i64> @shuffle_v8i64_08192a3b(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08991abb(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_08991abb:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,0,1,1,9,2,3,3]
; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_08991abb:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,0,0,0,1,0,1,0,9,0,2,0,3,0,3,0]
; AVX512F-32-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1411,13 +1411,13 @@ define <8 x i64> @shuffle_v8i64_08991abb(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_091b2d3f(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_091b2d3f:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,9,1,11,2,13,3,15]
; AVX512F-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_091b2d3f:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,9,0,1,0,11,0,2,0,13,0,3,0,15,0]
; AVX512F-32-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
; AVX512F-32-NEXT: retl
@@ -1428,14 +1428,14 @@ define <8 x i64> @shuffle_v8i64_091b2d3f(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_09ab1def(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_09ab1def:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,1,2,3,9,5,6,7]
; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_09ab1def:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,0,1,0,2,0,3,0,9,0,5,0,6,0,7,0]
; AVX512F-32-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1447,12 +1447,12 @@ define <8 x i64> @shuffle_v8i64_09ab1def(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00014445(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00014445:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00014445:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 4, i32 4, i32 4, i32 5>
@@ -1462,12 +1462,12 @@ define <8 x i64> @shuffle_v8i64_00014445(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00204464(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00204464:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,2,0,4,4,6,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00204464:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,2,0,4,4,6,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 4, i32 6, i32 4>
@@ -1477,12 +1477,12 @@ define <8 x i64> @shuffle_v8i64_00204464(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_03004744(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_03004744:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,3,0,0,4,7,4,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_03004744:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,3,0,0,4,7,4,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 7, i32 4, i32 4>
@@ -1492,12 +1492,12 @@ define <8 x i64> @shuffle_v8i64_03004744(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10005444(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_10005444:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_10005444:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 5, i32 4, i32 4, i32 4>
@@ -1507,12 +1507,12 @@ define <8 x i64> @shuffle_v8i64_10005444(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_22006644(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_22006644:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[2,2,0,0,6,6,4,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_22006644:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[2,2,0,0,6,6,4,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 6, i32 4, i32 4>
@@ -1522,12 +1522,12 @@ define <8 x i64> @shuffle_v8i64_22006644(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_33307774(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_33307774:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,3,3,0,7,7,7,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_33307774:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,3,3,0,7,7,7,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 7, i32 7, i32 4>
@@ -1537,12 +1537,12 @@ define <8 x i64> @shuffle_v8i64_33307774(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_32107654(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_32107654:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_32107654:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -1552,12 +1552,12 @@ define <8 x i64> @shuffle_v8i64_32107654(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00234467(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00234467:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,2,3,4,4,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00234467:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[0,0,2,3,4,4,6,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 4, i32 4, i32 6, i32 7>
@@ -1567,12 +1567,12 @@ define <8 x i64> @shuffle_v8i64_00234467(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00224466(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00224466:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00224466:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -1582,12 +1582,12 @@ define <8 x i64> @shuffle_v8i64_00224466(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10325476(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_10325476:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_10325476:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -1597,12 +1597,12 @@ define <8 x i64> @shuffle_v8i64_10325476(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_11335577(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_11335577:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_11335577:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -1612,12 +1612,12 @@ define <8 x i64> @shuffle_v8i64_11335577(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10235467(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_10235467:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,0,2,3,5,4,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_10235467:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,0,2,3,5,4,6,7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
@@ -1627,12 +1627,12 @@ define <8 x i64> @shuffle_v8i64_10235467(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10225466(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_10225466:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,0,2,2,5,4,6,6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_10225466:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[1,0,2,2,5,4,6,6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 2, i32 5, i32 4, i32 6, i32 6>
@@ -1642,13 +1642,13 @@ define <8 x i64> @shuffle_v8i64_10225466(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00015444(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00015444:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,1,5,4,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00015444:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,0,0,1,0,5,0,4,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1659,13 +1659,13 @@ define <8 x i64> @shuffle_v8i64_00015444(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00204644(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00204644:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,2,0,4,6,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00204644:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,2,0,0,0,4,0,6,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1676,13 +1676,13 @@ define <8 x i64> @shuffle_v8i64_00204644(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_03004474(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_03004474:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,3,0,0,4,4,7,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_03004474:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,3,0,0,0,0,0,4,0,4,0,7,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1693,13 +1693,13 @@ define <8 x i64> @shuffle_v8i64_03004474(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10004444(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_10004444:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [1,0,0,0,4,4,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_10004444:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [1,0,0,0,0,0,0,0,4,0,4,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1710,13 +1710,13 @@ define <8 x i64> @shuffle_v8i64_10004444(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_22006446(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_22006446:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [2,2,0,0,6,4,4,6]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_22006446:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [2,0,2,0,0,0,0,0,6,0,4,0,4,0,6,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1727,13 +1727,13 @@ define <8 x i64> @shuffle_v8i64_22006446(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_33307474(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_33307474:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [3,3,3,0,7,4,7,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_33307474:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [3,0,3,0,3,0,0,0,7,0,4,0,7,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1744,13 +1744,13 @@ define <8 x i64> @shuffle_v8i64_33307474(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_32104567(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_32104567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [3,2,1,0,4,5,6,7]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_32104567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [3,0,2,0,1,0,0,0,4,0,5,0,6,0,7,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1761,13 +1761,13 @@ define <8 x i64> @shuffle_v8i64_32104567(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00236744(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00236744:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,2,3,6,7,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00236744:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,2,0,3,0,6,0,7,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1778,13 +1778,13 @@ define <8 x i64> @shuffle_v8i64_00236744(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00226644(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00226644:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,2,2,6,6,4,4]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00226644:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,0,0,2,0,2,0,6,0,6,0,4,0,4,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1795,13 +1795,13 @@ define <8 x i64> @shuffle_v8i64_00226644(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10324567(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_10324567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [1,0,3,2,4,5,6,7]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_10324567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [1,0,0,0,3,0,2,0,4,0,5,0,6,0,7,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1812,13 +1812,13 @@ define <8 x i64> @shuffle_v8i64_10324567(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_11334567(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_11334567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [1,1,3,3,4,5,6,7]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_11334567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [1,0,1,0,3,0,3,0,4,0,5,0,6,0,7,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1829,13 +1829,13 @@ define <8 x i64> @shuffle_v8i64_11334567(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01235467(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_01235467:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,2,3,5,4,6,7]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_01235467:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,1,0,2,0,3,0,5,0,4,0,6,0,7,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1846,13 +1846,13 @@ define <8 x i64> @shuffle_v8i64_01235467(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01235466(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_01235466:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,2,3,5,4,6,6]
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_01235466:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = [0,0,1,0,2,0,3,0,5,0,4,0,6,0,6,0]
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1863,13 +1863,13 @@ define <8 x i64> @shuffle_v8i64_01235466(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_002u6u44(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_002u6u44:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,2,u,6,u,4,4>
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_002u6u44:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,0,0,2,0,u,u,6,0,u,u,4,0,4,0>
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1880,13 +1880,13 @@ define <8 x i64> @shuffle_v8i64_002u6u44(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00uu66uu(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00uu66uu:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,u,u,6,6,u,u>
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_00uu66uu:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,0,0,u,u,u,u,6,0,6,0,u,u,u,u>
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1897,13 +1897,13 @@ define <8 x i64> @shuffle_v8i64_00uu66uu(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_103245uu(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_103245uu:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <1,0,3,2,4,5,u,u>
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_103245uu:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = <1,0,0,0,3,0,2,0,4,0,5,0,u,u,u,u>
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1914,13 +1914,13 @@ define <8 x i64> @shuffle_v8i64_103245uu(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_1133uu67(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_1133uu67:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <1,1,3,3,u,u,6,7>
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_1133uu67:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = <1,0,1,0,3,0,3,0,u,u,u,u,6,0,7,0>
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1931,13 +1931,13 @@ define <8 x i64> @shuffle_v8i64_1133uu67(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_0uu354uu(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_0uu354uu:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <0,u,u,3,5,4,u,u>
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_0uu354uu:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = <0,0,u,u,u,u,3,0,5,0,4,0,u,u,u,u>
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1948,13 +1948,13 @@ define <8 x i64> @shuffle_v8i64_0uu354uu(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_uuu3uu66(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_uuu3uu66:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps {{.*#+}} zmm1 = <u,u,u,3,u,u,6,6>
; AVX512F-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_uuu3uu66:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovaps {{.*#+}} zmm1 = <u,u,u,u,u,u,3,0,u,u,u,u,6,0,6,0>
; AVX512F-32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
@@ -1965,14 +1965,14 @@ define <8 x i64> @shuffle_v8i64_uuu3uu66(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_6caa87e5(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_6caa87e5:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [14,4,2,2,0,15,6,13]
; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_6caa87e5:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [14,0,4,0,2,0,2,0,0,0,15,0,6,0,13,0]
; AVX512F-32-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1984,12 +1984,12 @@ define <8 x i64> @shuffle_v8i64_6caa87e5(<8 x i64> %a, <8 x i64> %b) {
define <8 x double> @shuffle_v8f64_082a4c6e(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_082a4c6e:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_082a4c6e:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32><i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1999,13 +1999,13 @@ define <8 x double> @shuffle_v8f64_082a4c6e(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_0z2z4z6z(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_0z2z4z6z:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_0z2z4z6z:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-32-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512F-32-NEXT: retl
@@ -2016,12 +2016,12 @@ define <8 x double> @shuffle_v8f64_0z2z4z6z(<8 x double> %a, <8 x double> %b) {
define <8 x i64> @shuffle_v8i64_082a4c6e(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_082a4c6e:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_082a4c6e:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32><i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -2031,13 +2031,13 @@ define <8 x i64> @shuffle_v8i64_082a4c6e(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_z8zazcze(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_z8zazcze:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_z8zazcze:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512F-32-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512F-32-NEXT: retl
@@ -2048,12 +2048,12 @@ define <8 x i64> @shuffle_v8i64_z8zazcze(<8 x i64> %a, <8 x i64> %b) {
define <8 x double> @shuffle_v8f64_193b5d7f(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_193b5d7f:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_193b5d7f:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32><i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -2063,13 +2063,13 @@ define <8 x double> @shuffle_v8f64_193b5d7f(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_z9zbzdzf(<8 x double> %a, <8 x double> %b) {
;
; AVX512F-LABEL: shuffle_v8f64_z9zbzdzf:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_z9zbzdzf:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512F-32-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; AVX512F-32-NEXT: retl
@@ -2080,12 +2080,12 @@ define <8 x double> @shuffle_v8f64_z9zbzdzf(<8 x double> %a, <8 x double> %b) {
define <8 x i64> @shuffle_v8i64_193b5d7f(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_193b5d7f:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_193b5d7f:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32><i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -2095,13 +2095,13 @@ define <8 x i64> @shuffle_v8i64_193b5d7f(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_1z3z5z7z(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_1z3z5z7z:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_1z3z5z7z:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-32-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; AVX512F-32-NEXT: retl
@@ -2111,12 +2111,12 @@ define <8 x i64> @shuffle_v8i64_1z3z5z7z(<8 x i64> %a, <8 x i64> %b) {
define <8 x double> @test_vshuff64x2_512(<8 x double> %x, <8 x double> %x1) nounwind {
; AVX512F-LABEL: test_vshuff64x2_512:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5],zmm1[2,3,0,1]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_vshuff64x2_512:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5],zmm1[2,3,0,1]
; AVX512F-32-NEXT: retl
%res = shufflevector <8 x double> %x, <8 x double> %x1, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 10, i32 11, i32 8, i32 9>
@@ -2125,7 +2125,7 @@ define <8 x double> @test_vshuff64x2_512(<8 x double> %x, <8 x double> %x1) noun
define <8 x double> @test_vshuff64x2_512_maskz(<8 x double> %x, <8 x double> %x1, <8 x i1> %mask) nounwind {
; AVX512F-LABEL: test_vshuff64x2_512_maskz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwq %xmm2, %zmm2
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k1
@@ -2133,7 +2133,7 @@ define <8 x double> @test_vshuff64x2_512_maskz(<8 x double> %x, <8 x double> %x1
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_vshuff64x2_512_maskz:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmovsxwq %xmm2, %zmm2
; AVX512F-32-NEXT: vpsllq $63, %zmm2, %zmm2
; AVX512F-32-NEXT: vptestmq %zmm2, %zmm2, %k1
@@ -2146,7 +2146,7 @@ define <8 x double> @test_vshuff64x2_512_maskz(<8 x double> %x, <8 x double> %x1
define <8 x i64> @test_vshufi64x2_512_mask(<8 x i64> %x, <8 x i64> %x1, <8 x i1> %mask) nounwind {
; AVX512F-LABEL: test_vshufi64x2_512_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwq %xmm2, %zmm2
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k1
@@ -2154,7 +2154,7 @@ define <8 x i64> @test_vshufi64x2_512_mask(<8 x i64> %x, <8 x i64> %x1, <8 x i1>
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_vshufi64x2_512_mask:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmovsxwq %xmm2, %zmm2
; AVX512F-32-NEXT: vpsllq $63, %zmm2, %zmm2
; AVX512F-32-NEXT: vptestmq %zmm2, %zmm2, %k1
@@ -2167,12 +2167,12 @@ define <8 x i64> @test_vshufi64x2_512_mask(<8 x i64> %x, <8 x i64> %x1, <8 x i1>
define <8 x double> @test_vshuff64x2_512_mem(<8 x double> %x, <8 x double> *%ptr) nounwind {
; AVX512F-LABEL: test_vshuff64x2_512_mem:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5],mem[2,3,0,1]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_vshuff64x2_512_mem:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5],mem[2,3,0,1]
; AVX512F-32-NEXT: retl
@@ -2183,7 +2183,7 @@ define <8 x double> @test_vshuff64x2_512_mem(<8 x double> %x, <8 x double> *%ptr
define <8 x double> @test_vshuff64x2_512_mem_mask(<8 x double> %x, <8 x double> *%ptr, <8 x i1> %mask) nounwind {
; AVX512F-LABEL: test_vshuff64x2_512_mem_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1
; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1
; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -2191,7 +2191,7 @@ define <8 x double> @test_vshuff64x2_512_mem_mask(<8 x double> %x, <8 x double>
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_vshuff64x2_512_mem_mask:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmovsxwq %xmm1, %zmm1
; AVX512F-32-NEXT: vpsllq $63, %zmm1, %zmm1
; AVX512F-32-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -2206,7 +2206,7 @@ define <8 x double> @test_vshuff64x2_512_mem_mask(<8 x double> %x, <8 x double>
define <8 x double> @test_vshuff64x2_512_mem_maskz(<8 x double> %x, <8 x double> *%ptr, <8 x i1> %mask) nounwind {
; AVX512F-LABEL: test_vshuff64x2_512_mem_maskz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1
; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1
; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -2214,7 +2214,7 @@ define <8 x double> @test_vshuff64x2_512_mem_maskz(<8 x double> %x, <8 x double>
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_vshuff64x2_512_mem_maskz:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpmovsxwq %xmm1, %zmm1
; AVX512F-32-NEXT: vpsllq $63, %zmm1, %zmm1
; AVX512F-32-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -2229,12 +2229,12 @@ define <8 x double> @test_vshuff64x2_512_mem_maskz(<8 x double> %x, <8 x double>
define <8 x double> @shuffle_v8f64_23014567(<8 x double> %a0, <8 x double> %a1) {
; AVX512F-LABEL: shuffle_v8f64_23014567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm1[2,3,0,1,4,5,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_23014567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm1[2,3,0,1,4,5,6,7]
; AVX512F-32-NEXT: retl
%1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7>
@@ -2243,12 +2243,12 @@ define <8 x double> @shuffle_v8f64_23014567(<8 x double> %a0, <8 x double> %a1)
define <8 x double> @shuffle_v8f64_2301uu67(<8 x double> %a0, <8 x double> %a1) {
; AVX512F-LABEL: shuffle_v8f64_2301uu67:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm1[2,3,0,1,0,1,6,7]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_2301uu67:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm1[2,3,0,1,0,1,6,7]
; AVX512F-32-NEXT: retl
%1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 undef, i32 undef, i32 6, i32 7>
@@ -2257,12 +2257,12 @@ define <8 x double> @shuffle_v8f64_2301uu67(<8 x double> %a0, <8 x double> %a1)
define <8 x double> @shuffle_v8f64_2301uuuu(<8 x double> %a0, <8 x double> %a1) {
; AVX512F-LABEL: shuffle_v8f64_2301uuuu:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermpd {{.*#+}} ymm0 = ymm1[2,3,0,1]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_2301uuuu:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpermpd {{.*#+}} ymm0 = ymm1[2,3,0,1]
; AVX512F-32-NEXT: retl
%1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -2271,12 +2271,12 @@ define <8 x double> @shuffle_v8f64_2301uuuu(<8 x double> %a0, <8 x double> %a1)
define <8 x double> @shuffle_v8f64_uuu2301(<8 x double> %a0, <8 x double> %a1) {
; AVX512F-LABEL: shuffle_v8f64_uuu2301:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm1[2,3,0,1]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_uuu2301:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm1[2,3,0,1]
; AVX512F-32-NEXT: retl
%1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 3, i32 0, i32 1>
@@ -2285,13 +2285,13 @@ define <8 x double> @shuffle_v8f64_uuu2301(<8 x double> %a0, <8 x double> %a1) {
define <8 x i64> @shuffle_v8i64_0zzzzzzz(<8 x i64> %a) {
; AVX512F-LABEL: shuffle_v8i64_0zzzzzzz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_0zzzzzzz:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-32-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512F-32-NEXT: retl
@@ -2301,13 +2301,13 @@ define <8 x i64> @shuffle_v8i64_0zzzzzzz(<8 x i64> %a) {
define <8 x double> @shuffle_v8f64_0zzzzzzz(<8 x double> %a) {
; AVX512F-LABEL: shuffle_v8f64_0zzzzzzz:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_0zzzzzzz:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-32-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512F-32-NEXT: retl
@@ -2318,12 +2318,12 @@ define <8 x double> @shuffle_v8f64_0zzzzzzz(<8 x double> %a) {
define <8 x i64> @shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_12345678:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: valignq {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7],zmm1[0]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_12345678:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7],zmm1[0]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
@@ -2333,12 +2333,12 @@ define <8 x i64> @shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_12345670(<8 x i64> %a) {
;
; AVX512F-LABEL: shuffle_v8i64_12345670:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: valignq {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7,0]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_12345670:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7,0]
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
@@ -2348,14 +2348,14 @@ define <8 x i64> @shuffle_v8i64_12345670(<8 x i64> %a) {
define <8 x i64> @mask_shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passthru, i8 %mask) {
;
; AVX512F-LABEL: mask_shuffle_v8i64_12345678:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignq {{.*#+}} zmm2 {%k1} = zmm0[1,2,3,4,5,6,7],zmm1[0]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: mask_shuffle_v8i64_12345678:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovw %eax, %k1
; AVX512F-32-NEXT: valignq {{.*#+}} zmm2 {%k1} = zmm0[1,2,3,4,5,6,7],zmm1[0]
@@ -2370,14 +2370,14 @@ define <8 x i64> @mask_shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b, <8 x i
define <8 x i64> @mask_shuffle_v8i64_12345670(<8 x i64> %a, <8 x i64> %passthru, i8 %mask) {
;
; AVX512F-LABEL: mask_shuffle_v8i64_12345670:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignq {{.*#+}} zmm1 {%k1} = zmm0[1,2,3,4,5,6,7,0]
; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: mask_shuffle_v8i64_12345670:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovw %eax, %k1
; AVX512F-32-NEXT: valignq {{.*#+}} zmm1 {%k1} = zmm0[1,2,3,4,5,6,7,0]
@@ -2392,13 +2392,13 @@ define <8 x i64> @mask_shuffle_v8i64_12345670(<8 x i64> %a, <8 x i64> %passthru,
define <8 x i64> @maskz_shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
;
; AVX512F-LABEL: maskz_shuffle_v8i64_12345678:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: maskz_shuffle_v8i64_12345678:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovw %eax, %k1
; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7],zmm1[0]
@@ -2412,13 +2412,13 @@ define <8 x i64> @maskz_shuffle_v8i64_12345678(<8 x i64> %a, <8 x i64> %b, i8 %m
define <8 x i64> @maskz_shuffle_v8i64_12345670(<8 x i64> %a, i8 %mask) {
;
; AVX512F-LABEL: maskz_shuffle_v8i64_12345670:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7,0]
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: maskz_shuffle_v8i64_12345670:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: kmovw %eax, %k1
; AVX512F-32-NEXT: valignq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,3,4,5,6,7,0]
@@ -2431,12 +2431,12 @@ define <8 x i64> @maskz_shuffle_v8i64_12345670(<8 x i64> %a, i8 %mask) {
define <8 x double> @shuffle_v8f64_012389AB(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_012389AB:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_012389AB:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -2445,12 +2445,12 @@ define <8 x double> @shuffle_v8f64_012389AB(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_89AB0123(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_89AB0123:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_89AB0123:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 2, i32 3>
@@ -2459,12 +2459,12 @@ define <8 x double> @shuffle_v8f64_89AB0123(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01230123(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_01230123:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_01230123:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -2473,12 +2473,12 @@ define <8 x double> @shuffle_v8f64_01230123(<8 x double> %a, <8 x double> %b) {
define <8 x i64> @shuffle_v8i64_012389AB(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_012389AB:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_012389AB:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -2487,12 +2487,12 @@ define <8 x i64> @shuffle_v8i64_012389AB(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_89AB0123(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_89AB0123:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_89AB0123:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 2, i32 3>
@@ -2501,12 +2501,12 @@ define <8 x i64> @shuffle_v8i64_89AB0123(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01230123(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_01230123:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_01230123:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -2515,12 +2515,12 @@ define <8 x i64> @shuffle_v8i64_01230123(<8 x i64> %a, <8 x i64> %b) {
define <8 x double> @shuffle_v8f64_89234567(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_89234567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_89234567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 8, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2529,12 +2529,12 @@ define <8 x double> @shuffle_v8f64_89234567(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01894567(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_01894567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_01894567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
@@ -2543,12 +2543,12 @@ define <8 x double> @shuffle_v8f64_01894567(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01238967(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_01238967:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf32x4 $2, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_01238967:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf32x4 $2, %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 6, i32 7>
@@ -2557,12 +2557,12 @@ define <8 x double> @shuffle_v8f64_01238967(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01234589(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_01234589:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8f64_01234589:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
@@ -2571,12 +2571,12 @@ define <8 x double> @shuffle_v8f64_01234589(<8 x double> %a, <8 x double> %b) {
define <8 x i64> @shuffle_v8i64_89234567(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_89234567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_89234567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 8, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2585,12 +2585,12 @@ define <8 x i64> @shuffle_v8i64_89234567(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01894567(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_01894567:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_01894567:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
@@ -2599,12 +2599,12 @@ define <8 x i64> @shuffle_v8i64_01894567(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01238967(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_01238967:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf32x4 $2, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_01238967:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf32x4 $2, %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 6, i32 7>
@@ -2613,12 +2613,12 @@ define <8 x i64> @shuffle_v8i64_01238967(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01234589(<8 x i64> %a, <8 x i64> %b) {
; AVX512F-LABEL: shuffle_v8i64_01234589:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_01234589:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
@@ -2627,13 +2627,13 @@ define <8 x i64> @shuffle_v8i64_01234589(<8 x i64> %a, <8 x i64> %b) {
define <8 x double> @shuffle_v4f64_v8f64_22222222(<4 x double> %a) {
; AVX512F-LABEL: shuffle_v4f64_v8f64_22222222:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v4f64_v8f64_22222222:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -2643,14 +2643,14 @@ define <8 x double> @shuffle_v4f64_v8f64_22222222(<4 x double> %a) {
define <8 x i64> @shuffle_v2i64_v8i64_01010101(<2 x i64> %a) {
; AVX512F-LABEL: shuffle_v2i64_v8i64_01010101:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v2i64_v8i64_01010101:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -2661,14 +2661,14 @@ define <8 x i64> @shuffle_v2i64_v8i64_01010101(<2 x i64> %a) {
define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
; AVX512F-LABEL: shuffle_v2f64_v8f64_01010101:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v2f64_v8f64_01010101:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -2680,7 +2680,7 @@ define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
;FIXME: compressp
define <4 x double> @test_v8f64_2346 (<8 x double> %v) {
; AVX512F-LABEL: test_v8f64_2346:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512F-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,2]
@@ -2688,7 +2688,7 @@ define <4 x double> @test_v8f64_2346 (<8 x double> %v) {
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_v8f64_2346:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512F-32-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512F-32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,2]
@@ -2701,7 +2701,7 @@ define <4 x double> @test_v8f64_2346 (<8 x double> %v) {
;FIXME: compressp
define <2 x double> @test_v8f64_34 (<8 x double> %v) {
; AVX512F-LABEL: test_v8f64_34:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm1
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
@@ -2709,7 +2709,7 @@ define <2 x double> @test_v8f64_34 (<8 x double> %v) {
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_v8f64_34:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf32x4 $2, %zmm0, %xmm1
; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-32-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
@@ -2722,7 +2722,7 @@ define <2 x double> @test_v8f64_34 (<8 x double> %v) {
; FIXME: vpcompress
define <4 x i64> @test_v8i64_1257 (<8 x i64> %v) {
; AVX512F-LABEL: test_v8i64_1257:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX512F-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,2,3]
@@ -2730,7 +2730,7 @@ define <4 x i64> @test_v8i64_1257 (<8 x i64> %v) {
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_v8i64_1257:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512F-32-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX512F-32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,2,3]
@@ -2742,7 +2742,7 @@ define <4 x i64> @test_v8i64_1257 (<8 x i64> %v) {
define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
; AVX512F-LABEL: test_v8i64_2_5:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
@@ -2750,7 +2750,7 @@ define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_v8i64_2_5:
-; AVX512F-32: # BB#0:
+; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
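The hunks above change only the basic-block comment that FileCheck matches (# BB#0: becomes # %bb.0:); the instructions being checked are untouched. As a hedged illustration of the pattern these tests pin down, here is a standalone sketch, not taken from the tree (the function name and the single CHECK prefix are hypothetical), pairing the shuffle IR shown for shuffle_v8f64_012389AB with its expected asm in the new comment style, assuming llc on an avx512f target:

; Illustrative sketch only: mask <0,1,2,3,8,9,10,11> keeps the low 256 bits
; of %a and takes the low 256 bits of %b as the new high half, so a single
; 256-bit insert suffices (matching the vinsertf64x4 lines in the hunks above).
define <8 x double> @concat_low_halves(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: concat_low_halves:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %shuffle = shufflevector <8 x double> %a, <8 x double> %b,
             <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
  ret <8 x double> %shuffle
}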
diff --git a/test/CodeGen/X86/vector-shuffle-avx512.ll b/test/CodeGen/X86/vector-shuffle-avx512.ll
index 5a7207952be..be4a8b7b669 100644
--- a/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -7,7 +7,7 @@
;expand 128 -> 256 include <4 x float> <2 x double>
define <8 x float> @expand(<4 x float> %a) {
; SKX64-LABEL: expand:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $5, %al
; SKX64-NEXT: kmovd %eax, %k1
@@ -15,14 +15,14 @@ define <8 x float> @expand(<4 x float> %a) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4,5,6,7]
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $5, %al
; SKX32-NEXT: kmovd %eax, %k1
@@ -30,7 +30,7 @@ define <8 x float> @expand(<4 x float> %a) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4,5,6,7]
@@ -41,7 +41,7 @@ define <8 x float> @expand(<4 x float> %a) {
define <8 x float> @expand1(<4 x float> %a ) {
; SKX64-LABEL: expand1:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $-86, %al
; SKX64-NEXT: kmovd %eax, %k1
@@ -49,7 +49,7 @@ define <8 x float> @expand1(<4 x float> %a ) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand1:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL64-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
; KNL64-NEXT: vpermps %ymm0, %ymm1, %ymm0
@@ -58,7 +58,7 @@ define <8 x float> @expand1(<4 x float> %a ) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand1:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $-86, %al
; SKX32-NEXT: kmovd %eax, %k1
@@ -66,7 +66,7 @@ define <8 x float> @expand1(<4 x float> %a ) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand1:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL32-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
; KNL32-NEXT: vpermps %ymm0, %ymm1, %ymm0
@@ -80,7 +80,7 @@ define <8 x float> @expand1(<4 x float> %a ) {
;Expand 128 -> 256 test <2 x double> -> <4 x double>
define <4 x double> @expand2(<2 x double> %a) {
; SKX64-LABEL: expand2:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $9, %al
; SKX64-NEXT: kmovd %eax, %k1
@@ -88,7 +88,7 @@ define <4 x double> @expand2(<2 x double> %a) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand2:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -96,7 +96,7 @@ define <4 x double> @expand2(<2 x double> %a) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand2:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $9, %al
; SKX32-NEXT: kmovd %eax, %k1
@@ -104,7 +104,7 @@ define <4 x double> @expand2(<2 x double> %a) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand2:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -117,7 +117,7 @@ define <4 x double> @expand2(<2 x double> %a) {
;expand 128 -> 256 include case <4 x i32> <8 x i32>
define <8 x i32> @expand3(<4 x i32> %a ) {
; SKX64-LABEL: expand3:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
@@ -125,14 +125,14 @@ define <8 x i32> @expand3(<4 x i32> %a ) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand3:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: vbroadcastsd %xmm0, %ymm0
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6],ymm0[7]
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand3:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
@@ -140,7 +140,7 @@ define <8 x i32> @expand3(<4 x i32> %a ) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand3:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vbroadcastsd %xmm0, %ymm0
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6],ymm0[7]
@@ -152,7 +152,7 @@ define <8 x i32> @expand3(<4 x i32> %a ) {
;expand 128 -> 256 include case <2 x i64> <4 x i64>
define <4 x i64> @expand4(<2 x i64> %a ) {
; SKX64-LABEL: expand4:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $9, %al
; SKX64-NEXT: kmovd %eax, %k1
@@ -160,7 +160,7 @@ define <4 x i64> @expand4(<2 x i64> %a ) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand4:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -168,7 +168,7 @@ define <4 x i64> @expand4(<2 x i64> %a ) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand4:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $9, %al
; SKX32-NEXT: kmovd %eax, %k1
@@ -176,7 +176,7 @@ define <4 x i64> @expand4(<2 x i64> %a ) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand4:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -189,28 +189,28 @@ define <4 x i64> @expand4(<2 x i64> %a ) {
;Negative test for 128-> 256
define <8 x float> @expand5(<4 x float> %a ) {
; SKX64-LABEL: expand5:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: vbroadcastss %xmm0, %ymm0
; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand5:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: vbroadcastss %xmm0, %ymm0
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand5:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: vbroadcastss %xmm0, %ymm0
; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand5:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vbroadcastss %xmm0, %ymm0
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
@@ -222,25 +222,25 @@ define <8 x float> @expand5(<4 x float> %a ) {
;expand 256 -> 512 include <8 x float> <16 x float>
define <8 x float> @expand6(<4 x float> %a ) {
; SKX64-LABEL: expand6:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand6:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand6:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand6:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; KNL32-NEXT: retl
@@ -250,7 +250,7 @@ define <8 x float> @expand6(<4 x float> %a ) {
define <16 x float> @expand7(<8 x float> %a) {
; SKX64-LABEL: expand7:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movw $1285, %ax # imm = 0x505
; SKX64-NEXT: kmovd %eax, %k1
@@ -258,7 +258,7 @@ define <16 x float> @expand7(<8 x float> %a) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand7:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movw $1285, %ax # imm = 0x505
; KNL64-NEXT: kmovw %eax, %k1
@@ -266,7 +266,7 @@ define <16 x float> @expand7(<8 x float> %a) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand7:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movw $1285, %ax # imm = 0x505
; SKX32-NEXT: kmovd %eax, %k1
@@ -274,7 +274,7 @@ define <16 x float> @expand7(<8 x float> %a) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand7:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movw $1285, %ax # imm = 0x505
; KNL32-NEXT: kmovw %eax, %k1
@@ -286,7 +286,7 @@ define <16 x float> @expand7(<8 x float> %a) {
define <16 x float> @expand8(<8 x float> %a ) {
; SKX64-LABEL: expand8:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
@@ -294,7 +294,7 @@ define <16 x float> @expand8(<8 x float> %a ) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand8:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
@@ -302,7 +302,7 @@ define <16 x float> @expand8(<8 x float> %a ) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand8:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
@@ -310,7 +310,7 @@ define <16 x float> @expand8(<8 x float> %a ) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand8:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
@@ -323,7 +323,7 @@ define <16 x float> @expand8(<8 x float> %a ) {
;expand 256 -> 512 include <4 x double> <8 x double>
define <8 x double> @expand9(<4 x double> %a) {
; SKX64-LABEL: expand9:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
@@ -331,7 +331,7 @@ define <8 x double> @expand9(<4 x double> %a) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand9:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
@@ -339,7 +339,7 @@ define <8 x double> @expand9(<4 x double> %a) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand9:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
@@ -347,7 +347,7 @@ define <8 x double> @expand9(<4 x double> %a) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand9:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
@@ -359,7 +359,7 @@ define <8 x double> @expand9(<4 x double> %a) {
define <16 x i32> @expand10(<8 x i32> %a ) {
; SKX64-LABEL: expand10:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
@@ -367,7 +367,7 @@ define <16 x i32> @expand10(<8 x i32> %a ) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand10:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
@@ -375,7 +375,7 @@ define <16 x i32> @expand10(<8 x i32> %a ) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand10:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
@@ -383,7 +383,7 @@ define <16 x i32> @expand10(<8 x i32> %a ) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand10:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
@@ -395,7 +395,7 @@ define <16 x i32> @expand10(<8 x i32> %a ) {
define <8 x i64> @expand11(<4 x i64> %a) {
; SKX64-LABEL: expand11:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
@@ -403,7 +403,7 @@ define <8 x i64> @expand11(<4 x i64> %a) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand11:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
@@ -411,7 +411,7 @@ define <8 x i64> @expand11(<4 x i64> %a) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand11:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
@@ -419,7 +419,7 @@ define <8 x i64> @expand11(<4 x i64> %a) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand11:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
@@ -432,7 +432,7 @@ define <8 x i64> @expand11(<4 x i64> %a) {
;Negative test for 256-> 512
define <16 x float> @expand12(<8 x float> %a) {
; SKX64-LABEL: expand12:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -441,7 +441,7 @@ define <16 x float> @expand12(<8 x float> %a) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand12:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -450,7 +450,7 @@ define <16 x float> @expand12(<8 x float> %a) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand12:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -459,7 +459,7 @@ define <16 x float> @expand12(<8 x float> %a) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand12:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -472,25 +472,25 @@ define <16 x float> @expand12(<8 x float> %a) {
define <16 x float> @expand13(<8 x float> %a ) {
; SKX64-LABEL: expand13:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX64-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand13:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand13:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX32-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand13:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; KNL32-NEXT: retl
@@ -502,7 +502,7 @@ define <16 x float> @expand13(<8 x float> %a ) {
define <8 x float> @expand14(<4 x float> %a) {
; SKX64-LABEL: expand14:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $20, %al
; SKX64-NEXT: kmovd %eax, %k1
@@ -510,7 +510,7 @@ define <8 x float> @expand14(<4 x float> %a) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand14:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,3,0,0]
; KNL64-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
; KNL64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
@@ -519,7 +519,7 @@ define <8 x float> @expand14(<4 x float> %a) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand14:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $20, %al
; SKX32-NEXT: kmovd %eax, %k1
@@ -527,7 +527,7 @@ define <8 x float> @expand14(<4 x float> %a) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand14:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,3,0,0]
; KNL32-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
; KNL32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
@@ -542,7 +542,7 @@ define <8 x float> @expand14(<4 x float> %a) {
;Negative test.
define <8 x float> @expand15(<4 x float> %a) {
; SKX64-LABEL: expand15:
-; SKX64: # BB#0:
+; SKX64: # %bb.0:
; SKX64-NEXT: vpermilps {{.*#+}} xmm1 = mem[0,1,0,0]
; SKX64-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,1,3]
; SKX64-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,8,3,10,3,2,3]
@@ -550,7 +550,7 @@ define <8 x float> @expand15(<4 x float> %a) {
; SKX64-NEXT: retq
;
; KNL64-LABEL: expand15:
-; KNL64: # BB#0:
+; KNL64: # %bb.0:
; KNL64-NEXT: vpermilps {{.*#+}} xmm1 = mem[0,1,0,0]
; KNL64-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
; KNL64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
@@ -559,7 +559,7 @@ define <8 x float> @expand15(<4 x float> %a) {
; KNL64-NEXT: retq
;
; SKX32-LABEL: expand15:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: vpermilps {{.*#+}} xmm1 = mem[0,1,0,0]
; SKX32-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,1,3]
; SKX32-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,8,3,10,3,2,3]
@@ -567,7 +567,7 @@ define <8 x float> @expand15(<4 x float> %a) {
; SKX32-NEXT: retl
;
; KNL32-LABEL: expand15:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vpermilps {{.*#+}} xmm1 = mem[0,1,0,0]
; KNL32-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
; KNL32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
@@ -584,21 +584,21 @@ define <8 x float> @expand15(<4 x float> %a) {
define <64 x i8> @test_mm512_mask_blend_epi8(<64 x i8> %A, <64 x i8> %W){
; SKX64-LABEL: test_mm512_mask_blend_epi8:
-; SKX64: # BB#0: # %entry
+; SKX64: # %bb.0: # %entry
; SKX64-NEXT: movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
; SKX64-NEXT: kmovq %rax, %k1
; SKX64-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
; SKX64-NEXT: retq
;
; KNL64-LABEL: test_mm512_mask_blend_epi8:
-; KNL64: # BB#0: # %entry
+; KNL64: # %bb.0: # %entry
; KNL64-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; KNL64-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; KNL64-NEXT: vpblendvb %ymm4, %ymm3, %ymm1, %ymm1
; KNL64-NEXT: retq
;
; SKX32-LABEL: test_mm512_mask_blend_epi8:
-; SKX32: # BB#0: # %entry
+; SKX32: # %bb.0: # %entry
; SKX32-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; SKX32-NEXT: kmovd %eax, %k0
; SKX32-NEXT: kunpckdq %k0, %k0, %k1
@@ -606,7 +606,7 @@ define <64 x i8> @test_mm512_mask_blend_epi8(<64 x i8> %A, <64 x i8> %W){
; SKX32-NEXT: retl
;
; KNL32-LABEL: test_mm512_mask_blend_epi8:
-; KNL32: # BB#0: # %entry
+; KNL32: # %bb.0: # %entry
; KNL32-NEXT: pushl %ebp
; KNL32-NEXT: .cfi_def_cfa_offset 8
; KNL32-NEXT: .cfi_offset %ebp, -8
@@ -627,27 +627,27 @@ entry:
define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){
; SKX64-LABEL: test_mm512_mask_blend_epi16:
-; SKX64: # BB#0: # %entry
+; SKX64: # %bb.0: # %entry
; SKX64-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; SKX64-NEXT: retq
;
; KNL64-LABEL: test_mm512_mask_blend_epi16:
-; KNL64: # BB#0: # %entry
+; KNL64: # %bb.0: # %entry
; KNL64-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7],ymm2[8],ymm0[9],ymm2[10],ymm0[11],ymm2[12],ymm0[13],ymm2[14],ymm0[15]
; KNL64-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7],ymm3[8],ymm1[9],ymm3[10],ymm1[11],ymm3[12],ymm1[13],ymm3[14],ymm1[15]
; KNL64-NEXT: retq
;
; SKX32-LABEL: test_mm512_mask_blend_epi16:
-; SKX32: # BB#0: # %entry
+; SKX32: # %bb.0: # %entry
; SKX32-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; SKX32-NEXT: retl
;
; KNL32-LABEL: test_mm512_mask_blend_epi16:
-; KNL32: # BB#0: # %entry
+; KNL32: # %bb.0: # %entry
; KNL32-NEXT: pushl %ebp
; KNL32-NEXT: .cfi_def_cfa_offset 8
; KNL32-NEXT: .cfi_offset %ebp, -8
@@ -667,28 +667,28 @@ entry:
define <16 x i32> @test_mm512_mask_blend_epi32(<16 x i32> %A, <16 x i32> %W){
; SKX64-LABEL: test_mm512_mask_blend_epi32:
-; SKX64: # BB#0: # %entry
+; SKX64: # %bb.0: # %entry
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; SKX64-NEXT: retq
;
; KNL64-LABEL: test_mm512_mask_blend_epi32:
-; KNL64: # BB#0: # %entry
+; KNL64: # %bb.0: # %entry
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; KNL64-NEXT: retq
;
; SKX32-LABEL: test_mm512_mask_blend_epi32:
-; SKX32: # BB#0: # %entry
+; SKX32: # %bb.0: # %entry
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; SKX32-NEXT: retl
;
; KNL32-LABEL: test_mm512_mask_blend_epi32:
-; KNL32: # BB#0: # %entry
+; KNL32: # %bb.0: # %entry
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
@@ -700,28 +700,28 @@ entry:
define <8 x i64> @test_mm512_mask_blend_epi64(<8 x i64> %A, <8 x i64> %W){
; SKX64-LABEL: test_mm512_mask_blend_epi64:
-; SKX64: # BB#0: # %entry
+; SKX64: # %bb.0: # %entry
; SKX64-NEXT: movb $-86, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; SKX64-NEXT: retq
;
; KNL64-LABEL: test_mm512_mask_blend_epi64:
-; KNL64: # BB#0: # %entry
+; KNL64: # %bb.0: # %entry
; KNL64-NEXT: movb $-86, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; KNL64-NEXT: retq
;
; SKX32-LABEL: test_mm512_mask_blend_epi64:
-; SKX32: # BB#0: # %entry
+; SKX32: # %bb.0: # %entry
; SKX32-NEXT: movb $-86, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; SKX32-NEXT: retl
;
; KNL32-LABEL: test_mm512_mask_blend_epi64:
-; KNL32: # BB#0: # %entry
+; KNL32: # %bb.0: # %entry
; KNL32-NEXT: movb $-86, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
@@ -733,28 +733,28 @@ entry:
define <16 x float> @test_mm512_mask_blend_ps(<16 x float> %A, <16 x float> %W){
; SKX64-LABEL: test_mm512_mask_blend_ps:
-; SKX64: # BB#0: # %entry
+; SKX64: # %bb.0: # %entry
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; SKX64-NEXT: retq
;
; KNL64-LABEL: test_mm512_mask_blend_ps:
-; KNL64: # BB#0: # %entry
+; KNL64: # %bb.0: # %entry
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; KNL64-NEXT: retq
;
; SKX32-LABEL: test_mm512_mask_blend_ps:
-; SKX32: # BB#0: # %entry
+; SKX32: # %bb.0: # %entry
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; SKX32-NEXT: retl
;
; KNL32-LABEL: test_mm512_mask_blend_ps:
-; KNL32: # BB#0: # %entry
+; KNL32: # %bb.0: # %entry
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
@@ -766,28 +766,28 @@ entry:
define <8 x double> @test_mm512_mask_blend_pd(<8 x double> %A, <8 x double> %W){
; SKX64-LABEL: test_mm512_mask_blend_pd:
-; SKX64: # BB#0: # %entry
+; SKX64: # %bb.0: # %entry
; SKX64-NEXT: movb $-88, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; SKX64-NEXT: retq
;
; KNL64-LABEL: test_mm512_mask_blend_pd:
-; KNL64: # BB#0: # %entry
+; KNL64: # %bb.0: # %entry
; KNL64-NEXT: movb $-88, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; KNL64-NEXT: retq
;
; SKX32-LABEL: test_mm512_mask_blend_pd:
-; SKX32: # BB#0: # %entry
+; SKX32: # %bb.0: # %entry
; SKX32-NEXT: movb $-88, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; SKX32-NEXT: retl
;
; KNL32-LABEL: test_mm512_mask_blend_pd:
-; KNL32: # BB#0: # %entry
+; KNL32: # %bb.0: # %entry
; KNL32-NEXT: movb $-88, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
@@ -800,27 +800,27 @@ entry:
define <32 x i8> @test_mm256_mask_blend_epi8(<32 x i8> %A, <32 x i8> %W){
; SKX64-LABEL: test_mm256_mask_blend_epi8:
-; SKX64: # BB#0: # %entry
+; SKX64: # %bb.0: # %entry
; SKX64-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
; SKX64-NEXT: retq
;
; KNL64-LABEL: test_mm256_mask_blend_epi8:
-; KNL64: # BB#0: # %entry
+; KNL64: # %bb.0: # %entry
; KNL64-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; KNL64-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; KNL64-NEXT: retq
;
; SKX32-LABEL: test_mm256_mask_blend_epi8:
-; SKX32: # BB#0: # %entry
+; SKX32: # %bb.0: # %entry
; SKX32-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
; SKX32-NEXT: retl
;
; KNL32-LABEL: test_mm256_mask_blend_epi8:
-; KNL32: # BB#0: # %entry
+; KNL32: # %bb.0: # %entry
; KNL32-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; KNL32-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; KNL32-NEXT: retl
@@ -831,27 +831,27 @@ entry:
define <16 x i8> @test_mm_mask_blend_epi8(<16 x i8> %A, <16 x i8> %W){
; SKX64-LABEL: test_mm_mask_blend_epi8:
-; SKX64: # BB#0: # %entry
+; SKX64: # %bb.0: # %entry
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
; SKX64-NEXT: retq
;
; KNL64-LABEL: test_mm_mask_blend_epi8:
-; KNL64: # BB#0: # %entry
+; KNL64: # %bb.0: # %entry
; KNL64-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; KNL64-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; KNL64-NEXT: retq
;
; SKX32-LABEL: test_mm_mask_blend_epi8:
-; SKX32: # BB#0: # %entry
+; SKX32: # %bb.0: # %entry
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
; SKX32-NEXT: retl
;
; KNL32-LABEL: test_mm_mask_blend_epi8:
-; KNL32: # BB#0: # %entry
+; KNL32: # %bb.0: # %entry
; KNL32-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; KNL32-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; KNL32-NEXT: retl
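The expand and mask-blend tests in this file receive the same mechanical comment update. As a hedged restatement of the expand13 checks above (the IR body falls outside the hunks, so the shufflevector below is an assumed equivalent and the function name is hypothetical): placing an <8 x float> source into the high half of a zero vector is what lowers to the vxorps plus vinsertf64x4 pair that all four prefixes check.

; Hedged sketch, not from the tree: element indices 8-15 select the
; zeroinitializer operand for the low half and indices 0-7 place %a in the
; high half, matching the vxorps + vinsertf64x4 $1 sequence checked above.
define <16 x float> @expand_high_sketch(<8 x float> %a) {
; CHECK-LABEL: expand_high_sketch:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %widened = shufflevector <8 x float> %a, <8 x float> zeroinitializer,
             <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
                         i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <16 x float> %widened
}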
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index 13432b9ccea..e230aa2bc7e 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -24,11 +24,11 @@ declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>,
define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
; X32-LABEL: combine_vpermilvar_4f32_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
%2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
@@ -37,12 +37,12 @@ define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
define <4 x float> @combine_vpermilvar_4f32_movddup(<4 x float> %a0) {
; X32-LABEL: combine_vpermilvar_4f32_movddup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_movddup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
@@ -50,13 +50,13 @@ define <4 x float> @combine_vpermilvar_4f32_movddup(<4 x float> %a0) {
}
define <4 x float> @combine_vpermilvar_4f32_movddup_load(<4 x float> *%a0) {
; X32-LABEL: combine_vpermilvar_4f32_movddup_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_movddup_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float> *%a0
@@ -66,12 +66,12 @@ define <4 x float> @combine_vpermilvar_4f32_movddup_load(<4 x float> *%a0) {
define <4 x float> @combine_vpermilvar_4f32_movshdup(<4 x float> %a0) {
; X32-LABEL: combine_vpermilvar_4f32_movshdup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_movshdup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT: retq
%1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 undef, i32 1, i32 3, i32 3>)
@@ -80,12 +80,12 @@ define <4 x float> @combine_vpermilvar_4f32_movshdup(<4 x float> %a0) {
define <4 x float> @combine_vpermilvar_4f32_movsldup(<4 x float> %a0) {
; X32-LABEL: combine_vpermilvar_4f32_movsldup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_movsldup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; X64-NEXT: retq
%1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 undef>)
@@ -94,12 +94,12 @@ define <4 x float> @combine_vpermilvar_4f32_movsldup(<4 x float> %a0) {
define <4 x float> @combine_vpermilvar_4f32_unpckh(<4 x float> %a0) {
; X32-LABEL: combine_vpermilvar_4f32_unpckh:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_unpckh:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT: retq
%1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 2, i32 2, i32 3, i32 3>)
@@ -108,12 +108,12 @@ define <4 x float> @combine_vpermilvar_4f32_unpckh(<4 x float> %a0) {
define <4 x float> @combine_vpermilvar_4f32_unpckl(<4 x float> %a0) {
; X32-LABEL: combine_vpermilvar_4f32_unpckl:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_unpckl:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X64-NEXT: retq
%1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 1, i32 1>)
@@ -122,11 +122,11 @@ define <4 x float> @combine_vpermilvar_4f32_unpckl(<4 x float> %a0) {
define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
; X32-LABEL: combine_vpermilvar_8f32_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_8f32_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 undef>)
%2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
@@ -135,12 +135,12 @@ define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
define <8 x float> @combine_vpermilvar_8f32_10326u4u(<8 x float> %a0) {
; X32-LABEL: combine_vpermilvar_8f32_10326u4u:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_8f32_10326u4u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
; X64-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 0, i32 1, i32 2, i32 undef>)
@@ -150,32 +150,32 @@ define <8 x float> @combine_vpermilvar_8f32_10326u4u(<8 x float> %a0) {
define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
; X32-AVX1-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X32-AVX1: # BB#0:
+; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; X32-AVX512-NEXT: retl
;
; X64-AVX1-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X64-AVX1: # BB#0:
+; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; X64-AVX512-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -186,12 +186,12 @@ define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
; X32-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; X64-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -202,13 +202,13 @@ define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) {
; X32-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; X64-NEXT: retq
@@ -220,12 +220,12 @@ define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0
define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
; X32-LABEL: combine_vpermilvar_8f32_movddup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_8f32_movddup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X64-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
@@ -233,13 +233,13 @@ define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
}
define <8 x float> @combine_vpermilvar_8f32_movddup_load(<8 x float> *%a0) {
; X32-LABEL: combine_vpermilvar_8f32_movddup_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_8f32_movddup_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; X64-NEXT: retq
%1 = load <8 x float>, <8 x float> *%a0
@@ -249,12 +249,12 @@ define <8 x float> @combine_vpermilvar_8f32_movddup_load(<8 x float> *%a0) {
define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
; X32-LABEL: combine_vpermilvar_8f32_movshdup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_8f32_movshdup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; X64-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 5, i32 7, i32 7>)
@@ -263,12 +263,12 @@ define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
; X32-LABEL: combine_vpermilvar_8f32_movsldup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_8f32_movsldup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>)
@@ -277,11 +277,11 @@ define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
; X32-LABEL: combine_vpermilvar_2f64_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_2f64_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
%2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> <i64 2, i64 0>)
@@ -290,12 +290,12 @@ define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
define <2 x double> @combine_vpermilvar_2f64_movddup(<2 x double> %a0) {
; X32-LABEL: combine_vpermilvar_2f64_movddup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_2f64_movddup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 0, i64 0>)
@@ -304,11 +304,11 @@ define <2 x double> @combine_vpermilvar_2f64_movddup(<2 x double> %a0) {
define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
; X32-LABEL: combine_vpermilvar_4f64_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f64_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
%2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
@@ -317,12 +317,12 @@ define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
define <4 x double> @combine_vpermilvar_4f64_movddup(<4 x double> %a0) {
; X32-LABEL: combine_vpermilvar_4f64_movddup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f64_movddup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X64-NEXT: retq
%1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 0, i64 0, i64 4, i64 4>)
@@ -331,12 +331,12 @@ define <4 x double> @combine_vpermilvar_4f64_movddup(<4 x double> %a0) {
define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
; X32-LABEL: combine_vpermilvar_4f32_4stage:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_4stage:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
; X64-NEXT: retq
%1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
@@ -348,12 +348,12 @@ define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
; X32-LABEL: combine_vpermilvar_8f32_4stage:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_8f32_4stage:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
; X64-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -365,12 +365,12 @@ define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
; X32-LABEL: combine_vpermilvar_4f32_as_insertps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_as_insertps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
; X64-NEXT: retq
%1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
@@ -380,12 +380,12 @@ define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
define <2 x double> @constant_fold_vpermilvar_pd() {
; X32-LABEL: constant_fold_vpermilvar_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [2.000000e+00,1.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpermilvar_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [2.000000e+00,1.000000e+00]
; X64-NEXT: retq
%1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> <double 1.0, double 2.0>, <2 x i64> <i64 2, i64 0>)
@@ -394,12 +394,12 @@ define <2 x double> @constant_fold_vpermilvar_pd() {
define <4 x double> @constant_fold_vpermilvar_pd_256() {
; X32-LABEL: constant_fold_vpermilvar_pd_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [2.000000e+00,1.000000e+00,3.000000e+00,4.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpermilvar_pd_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [2.000000e+00,1.000000e+00,3.000000e+00,4.000000e+00]
; X64-NEXT: retq
%1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>, <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
@@ -408,12 +408,12 @@ define <4 x double> @constant_fold_vpermilvar_pd_256() {
define <4 x float> @constant_fold_vpermilvar_ps() {
; X32-LABEL: constant_fold_vpermilvar_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [4.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpermilvar_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [4.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00]
; X64-NEXT: retq
%1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x i32> <i32 3, i32 0, i32 2, i32 1>)
@@ -422,12 +422,12 @@ define <4 x float> @constant_fold_vpermilvar_ps() {
define <8 x float> @constant_fold_vpermilvar_ps_256() {
; X32-LABEL: constant_fold_vpermilvar_ps_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [1.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00,5.000000e+00,6.000000e+00,6.000000e+00,6.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpermilvar_ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [1.000000e+00,1.000000e+00,3.000000e+00,2.000000e+00,5.000000e+00,6.000000e+00,6.000000e+00,6.000000e+00]
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 1, i32 0, i32 1, i32 1, i32 1>)
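Among the combine tests above, the identity case is the simplest to restate whole. A self-contained sketch of combine_vpermilvar_4f32_identity (the calls and masks are copied from the hunk; the function name and single CHECK prefix are hypothetical): composing the reversing mask <3,2,1,0> with itself is the identity, so the combined shuffle folds away and only the return is emitted, which is why that check block shrinks to the new # %bb.0: comment plus a return.

; Sketch: two applications of the lane-reversing vpermilvar mask cancel,
; so instruction selection emits no shuffle at all.
declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)

define <4 x float> @double_reverse(<4 x float> %a0) {
; CHECK-LABEL: double_reverse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  ret <4 x float> %2
}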
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 9c6bebdd78b..b0b76d7dc13 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -11,12 +11,12 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)
define <32 x i8> @combine_pshufb_pslldq(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_pslldq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_pslldq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
@@ -26,12 +26,12 @@ define <32 x i8> @combine_pshufb_pslldq(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_psrldq(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_psrldq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_psrldq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
@@ -41,12 +41,12 @@ define <32 x i8> @combine_pshufb_psrldq(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_vpermd(<8 x i32> %a) {
; X32-LABEL: combine_pshufb_vpermd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_vpermd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
; X64-NEXT: retq
%tmp0 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 4>)
@@ -57,12 +57,12 @@ define <32 x i8> @combine_pshufb_vpermd(<8 x i32> %a) {
define <32 x i8> @combine_pshufb_vpermps(<8 x float> %a) {
; X32-LABEL: combine_pshufb_vpermps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_vpermps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
; X64-NEXT: retq
%tmp0 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 4>)
@@ -73,13 +73,13 @@ define <32 x i8> @combine_pshufb_vpermps(<8 x float> %a) {
define <32 x i8> @combine_and_pshufb(<32 x i8> %a0) {
; X32-LABEL: combine_and_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_and_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X64-NEXT: retq
@@ -90,13 +90,13 @@ define <32 x i8> @combine_and_pshufb(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_and(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_and:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_and:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X64-NEXT: retq
@@ -107,13 +107,13 @@ define <32 x i8> @combine_pshufb_and(<32 x i8> %a0) {
define <4 x i64> @combine_permq_pshufb_as_vperm2i128(<4 x i64> %a0) {
; X32-LABEL: combine_permq_pshufb_as_vperm2i128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permq_pshufb_as_vperm2i128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -127,13 +127,13 @@ define <4 x i64> @combine_permq_pshufb_as_vperm2i128(<4 x i64> %a0) {
define <8 x i32> @combine_as_vpermd(<8 x i32> %a0) {
; X32-LABEL: combine_as_vpermd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm1 = [4,5,4,5,6,7,0,7]
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_as_vpermd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm1 = [4,5,4,5,6,7,0,7]
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -145,13 +145,13 @@ define <8 x i32> @combine_as_vpermd(<8 x i32> %a0) {
define <8 x float> @combine_as_vpermps(<8 x float> %a0) {
; X32-LABEL: combine_as_vpermps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm1 = <6,4,7,5,1,u,4,7>
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_as_vpermps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm1 = <6,4,7,5,1,u,4,7>
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -163,13 +163,13 @@ define <8 x float> @combine_as_vpermps(<8 x float> %a0) {
define <32 x i8> @combine_permq_pshufb_as_vpblendd(<4 x i64> %a0) {
; X32-LABEL: combine_permq_pshufb_as_vpblendd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: combine_permq_pshufb_as_vpblendd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; X64-NEXT: retq
@@ -181,12 +181,12 @@ define <32 x i8> @combine_permq_pshufb_as_vpblendd(<4 x i64> %a0) {
define <16 x i8> @combine_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastb128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastb128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> zeroinitializer)
@@ -195,13 +195,13 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastb256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastb256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
@@ -216,12 +216,12 @@ define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
define <16 x i8> @combine_pshufb_as_vpbroadcastw128(<16 x i8> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastw128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastw128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>)
@@ -230,13 +230,13 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastw128(<16 x i8> %a) {
define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastw256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vpbroadcastw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastw256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-NEXT: retq
@@ -251,13 +251,13 @@ define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
define <16 x i8> @combine_pshufb_as_vpbroadcastd128(<16 x i8> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastd128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastd %xmm0, %xmm0
; X32-NEXT: vpaddb {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastd128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastd %xmm0, %xmm0
; X64-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: retq
@@ -268,14 +268,14 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastd128(<16 x i8> %a) {
define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastd256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vpbroadcastd %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastd256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vpbroadcastd %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
@@ -288,12 +288,12 @@ define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
define <16 x i8> @combine_pshufb_as_vpbroadcastq128(<16 x i8> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastq128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastq128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastq %xmm0, %xmm0
; X64-NEXT: retq
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
@@ -302,14 +302,14 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastq128(<16 x i8> %a) {
define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastq256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vpbroadcastq %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastq256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vpbroadcastq %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
@@ -322,12 +322,12 @@ define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
define <4 x float> @combine_pshufb_as_vpbroadcastss128(<4 x float> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastss128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastss128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%1 = bitcast <4 x float> %a to <16 x i8>
@@ -338,13 +338,13 @@ define <4 x float> @combine_pshufb_as_vpbroadcastss128(<4 x float> %a) {
define <8 x float> @combine_permps_as_vpbroadcastss256(<4 x float> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastss256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastss256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
@@ -355,13 +355,13 @@ define <8 x float> @combine_permps_as_vpbroadcastss256(<4 x float> %a) {
define <4 x double> @combine_permps_as_vpbroadcastsd256(<2 x double> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastsd256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastsd256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
@@ -374,12 +374,12 @@ define <4 x double> @combine_permps_as_vpbroadcastsd256(<2 x double> %a) {
define <16 x i8> @combine_vpbroadcast_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-NEXT: retq
%1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -389,12 +389,12 @@ define <16 x i8> @combine_vpbroadcast_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
define <32 x i8> @combine_vpbroadcast_pshufb_as_vpbroadcastb256(<32 x i8> %a) {
; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
@@ -404,12 +404,12 @@ define <32 x i8> @combine_vpbroadcast_pshufb_as_vpbroadcastb256(<32 x i8> %a) {
define <4 x float> @combine_vpbroadcast_pshufb_as_vpbroadcastss128(<4 x float> %a) {
; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastss128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastss128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
@@ -421,13 +421,13 @@ define <4 x float> @combine_vpbroadcast_pshufb_as_vpbroadcastss128(<4 x float> %
define <8 x float> @combine_vpbroadcast_permd_as_vpbroadcastss256(<4 x float> %a) {
; X32-LABEL: combine_vpbroadcast_permd_as_vpbroadcastss256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpbroadcast_permd_as_vpbroadcastss256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
@@ -438,13 +438,13 @@ define <8 x float> @combine_vpbroadcast_permd_as_vpbroadcastss256(<4 x float> %a
define <4 x double> @combine_vpbroadcast_permd_as_vpbroadcastsd256(<2 x double> %a) {
; X32-LABEL: combine_vpbroadcast_permd_as_vpbroadcastsd256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpbroadcast_permd_as_vpbroadcastsd256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
@@ -457,12 +457,12 @@ define <4 x double> @combine_vpbroadcast_permd_as_vpbroadcastsd256(<2 x double>
define <8 x i32> @combine_permd_as_permq(<8 x i32> %a) {
; X32-LABEL: combine_permd_as_permq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_permq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
; X64-NEXT: retq
%1 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 2, i32 3>)
@@ -471,12 +471,12 @@ define <8 x i32> @combine_permd_as_permq(<8 x i32> %a) {
define <8 x float> @combine_permps_as_permpd(<8 x float> %a) {
; X32-LABEL: combine_permps_as_permpd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,0,1]
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_permpd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,0,1]
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>)
@@ -485,12 +485,12 @@ define <8 x float> @combine_permps_as_permpd(<8 x float> %a) {
define <4 x i64> @combine_pshufb_as_zext(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_zext:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_zext:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X64-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -501,14 +501,14 @@ define <4 x i64> @combine_pshufb_as_zext(<32 x i8> %a0) {
define <4 x i64> @combine_pshufb_as_zext128(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_zext128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[15,14],zero,zero,zero,zero,zero,zero,ymm0[13,12],zero,zero,zero,zero,zero,zero,ymm0[31,30],zero,zero,zero,zero,zero,zero,ymm0[29,28],zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_zext128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[15,14],zero,zero,zero,zero,zero,zero,ymm0[13,12],zero,zero,zero,zero,zero,zero,ymm0[31,30],zero,zero,zero,zero,zero,zero,ymm0[29,28],zero,zero,zero,zero,zero,zero
@@ -521,25 +521,25 @@ define <4 x i64> @combine_pshufb_as_zext128(<32 x i8> %a0) {
define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
; X32-AVX2-LABEL: combine_pshufb_as_vzmovl_64:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: combine_pshufb_as_vzmovl_64:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; X32-AVX512-NEXT: retl
;
; X64-AVX2-LABEL: combine_pshufb_as_vzmovl_64:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: combine_pshufb_as_vzmovl_64:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; X64-AVX512-NEXT: retq
@@ -551,25 +551,25 @@ define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
; X32-AVX2-LABEL: combine_pshufb_as_vzmovl_32:
-; X32-AVX2: # BB#0:
+; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: combine_pshufb_as_vzmovl_32:
-; X32-AVX512: # BB#0:
+; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX2-LABEL: combine_pshufb_as_vzmovl_32:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: combine_pshufb_as_vzmovl_32:
-; X64-AVX512: # BB#0:
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-AVX512-NEXT: retq
@@ -581,12 +581,12 @@ define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
define <32 x i8> @combine_pshufb_as_pslldq(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_pslldq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pslldq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21]
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>)
@@ -595,12 +595,12 @@ define <32 x i8> @combine_pshufb_as_pslldq(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_psrldq(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_psrldq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_psrldq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
@@ -609,12 +609,12 @@ define <32 x i8> @combine_pshufb_as_psrldq(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_psrlw(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_psrlw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlw $8, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_psrlw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlw $8, %ymm0, %ymm0
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 1, i8 128, i8 3, i8 128, i8 5, i8 128, i8 7, i8 128, i8 9, i8 128, i8 11, i8 128, i8 13, i8 128, i8 15, i8 128, i8 17, i8 128, i8 19, i8 128, i8 21, i8 128, i8 23, i8 128, i8 25, i8 128, i8 27, i8 128, i8 29, i8 128, i8 31, i8 128>)
@@ -623,12 +623,12 @@ define <32 x i8> @combine_pshufb_as_psrlw(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_pslld(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_pslld:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpslld $24, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pslld:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpslld $24, %ymm0, %ymm0
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 128, i8 128, i8 128, i8 0, i8 128, i8 128, i8 128, i8 4, i8 128, i8 128, i8 128, i8 8, i8 128, i8 128, i8 128, i8 12, i8 128, i8 128, i8 128, i8 16, i8 128, i8 128, i8 128, i8 20, i8 128, i8 128, i8 128, i8 24, i8 128, i8 128, i8 128, i8 28>)
@@ -637,12 +637,12 @@ define <32 x i8> @combine_pshufb_as_pslld(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_psrlq(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_psrlq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlq $40, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_psrlq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $40, %ymm0, %ymm0
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 21, i8 22, i8 23, i8 128, i8 128, i8 128, i8 128, i8 128, i8 29, i8 30, i8 31, i8 128, i8 128, i8 128, i8 128, i8 128>)
@@ -651,12 +651,12 @@ define <32 x i8> @combine_pshufb_as_psrlq(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_pshuflw(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_pshuflw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pshuflw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
@@ -665,12 +665,12 @@ define <32 x i8> @combine_pshufb_as_pshuflw(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_pshufhw(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_pshufhw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pshufhw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14]
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
@@ -679,12 +679,12 @@ define <32 x i8> @combine_pshufb_as_pshufhw(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_not_as_pshufw(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_not_as_pshufw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_not_as_pshufw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29]
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
@@ -694,11 +694,11 @@ define <32 x i8> @combine_pshufb_not_as_pshufw(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_unpacklo_undef(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_unpacklo_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_unpacklo_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 undef, i8 0, i8 undef, i8 1, i8 undef, i8 2, i8 undef, i8 3, i8 undef, i8 4, i8 undef, i8 5, i8 undef, i8 6, i8 undef, i8 7, i8 undef, i8 16, i8 undef, i8 17, i8 undef, i8 18, i8 undef, i8 19, i8 undef, i8 20, i8 undef, i8 21, i8 undef, i8 22, i8 undef, i8 23>)
%2 = shufflevector <32 x i8> %1, <32 x i8> undef, <32 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14, i32 16, i32 16, i32 18, i32 18, i32 20, i32 20, i32 22, i32 22, i32 24, i32 24, i32 26, i32 26, i32 28, i32 28, i32 30, i32 30>
@@ -707,13 +707,13 @@ define <32 x i8> @combine_pshufb_as_unpacklo_undef(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_unpacklo_zero(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_unpacklo_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_unpacklo_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; X64-NEXT: retq
@@ -723,13 +723,13 @@ define <32 x i8> @combine_pshufb_as_unpacklo_zero(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_as_unpackhi_zero(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_unpackhi_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_unpackhi_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; X64-NEXT: retq
@@ -739,12 +739,12 @@ define <32 x i8> @combine_pshufb_as_unpackhi_zero(<32 x i8> %a0) {
define <32 x i8> @combine_psrlw_pshufb(<16 x i16> %a0) {
; X32-LABEL: combine_psrlw_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_psrlw_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = lshr <16 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -755,12 +755,12 @@ define <32 x i8> @combine_psrlw_pshufb(<16 x i16> %a0) {
define <32 x i8> @combine_pslld_pshufb(<8 x i32> %a0) {
; X32-LABEL: combine_pslld_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pslld_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = shl <8 x i32> %a0, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
@@ -771,12 +771,12 @@ define <32 x i8> @combine_pslld_pshufb(<8 x i32> %a0) {
define <32 x i8> @combine_psrlq_pshufb(<4 x i64> %a0) {
; X32-LABEL: combine_psrlq_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[7,6,5,4],zero,zero,zero,zero,ymm0[15,14,13,12],zero,zero,zero,zero,ymm0[23,22,21],zero,zero,zero,zero,ymm0[31,30,29,28],zero
; X32-NEXT: retl
;
; X64-LABEL: combine_psrlq_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[7,6,5,4],zero,zero,zero,zero,ymm0[15,14,13,12],zero,zero,zero,zero,ymm0[23,22,21],zero,zero,zero,zero,ymm0[31,30,29,28],zero
; X64-NEXT: retq
%1 = lshr <4 x i64> %a0, <i64 32, i64 32, i64 32, i64 32>
@@ -787,12 +787,12 @@ define <32 x i8> @combine_psrlq_pshufb(<4 x i64> %a0) {
define <32 x i8> @combine_unpack_unpack_pshufb(<32 x i8> %a0) {
; X32-LABEL: combine_unpack_unpack_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,4,8,1,1,5,9,2,2,6,10,3,3,7,11,16,16,20,24,17,17,21,25,18,18,22,26,19,19,23,27]
; X32-NEXT: retl
;
; X64-LABEL: combine_unpack_unpack_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,4,8,1,1,5,9,2,2,6,10,3,3,7,11,16,16,20,24,17,17,21,25,18,18,22,26,19,19,23,27]
; X64-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19>
@@ -806,13 +806,13 @@ define <32 x i8> @combine_unpack_unpack_pshufb(<32 x i8> %a0) {
define <16 x i16> @shuffle_combine_packssdw_pshufb(<8 x i32> %a0) {
; X32-LABEL: shuffle_combine_packssdw_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrad $31, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[12,13,8,9,4,5,0,1,12,13,8,9,4,5,0,1,16,17,20,21,24,25,28,29,28,29,24,25,20,21,16,17]
; X32-NEXT: retl
;
; X64-LABEL: shuffle_combine_packssdw_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[12,13,8,9,4,5,0,1,12,13,8,9,4,5,0,1,16,17,20,21,24,25,28,29,28,29,24,25,20,21,16,17]
; X64-NEXT: retq
@@ -825,13 +825,13 @@ declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readno
define <32 x i8> @shuffle_combine_packsswb_pshufb(<16 x i16> %a0, <16 x i16> %a1) {
; X32-LABEL: shuffle_combine_packsswb_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsraw $15, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0,30,28,26,24,22,20,18,16,30,28,26,24,22,20,18,16]
; X32-NEXT: retl
;
; X64-LABEL: shuffle_combine_packsswb_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsraw $15, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0,30,28,26,24,22,20,18,16,30,28,26,24,22,20,18,16]
; X64-NEXT: retq
@@ -845,12 +845,12 @@ declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @shuffle_combine_packusdw_pshufb(<8 x i32> %a0, <8 x i32> %a1) {
; X32-LABEL: shuffle_combine_packusdw_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,15,10,11,6,7,2,3,14,15,10,11,6,7,2,3,18,19,22,23,26,27,30,31,30,31,26,27,22,23,18,19]
; X32-NEXT: retl
;
; X64-LABEL: shuffle_combine_packusdw_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,15,10,11,6,7,2,3,14,15,10,11,6,7,2,3,18,19,22,23,26,27,30,31,30,31,26,27,22,23,18,19]
; X64-NEXT: retq
%1 = lshr <8 x i32> %a0, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -862,12 +862,12 @@ declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readno
define <32 x i8> @shuffle_combine_packuswb_pshufb(<16 x i16> %a0, <16 x i16> %a1) {
; X32-LABEL: shuffle_combine_packuswb_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1,31,29,27,25,23,21,19,17,31,29,27,25,23,21,19,17]
; X32-NEXT: retl
;
; X64-LABEL: shuffle_combine_packuswb_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1,31,29,27,25,23,21,19,17,31,29,27,25,23,21,19,17]
; X64-NEXT: retq
%1 = lshr <16 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -880,12 +880,12 @@ declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) {
; X32-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastq {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovq %rdi, %xmm0
; X64-NEXT: vpbroadcastq %xmm0, %xmm0
; X64-NEXT: retq
@@ -897,12 +897,12 @@ define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) {
define <8 x i32> @combine_permd_insertion_as_broadcast_v4i64(i64 %a0) {
; X32-LABEL: combine_permd_insertion_as_broadcast_v4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_insertion_as_broadcast_v4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovq %rdi, %xmm0
; X64-NEXT: vpbroadcastq %xmm0, %ymm0
; X64-NEXT: retq
@@ -914,12 +914,12 @@ define <8 x i32> @combine_permd_insertion_as_broadcast_v4i64(i64 %a0) {
define <8 x i32> @constant_fold_permd() {
; X32-LABEL: constant_fold_permd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [5,7,3,2,8,2,6,1]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_permd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [5,7,3,2,8,2,6,1]
; X64-NEXT: retq
%1 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, <8 x i32> <i32 4, i32 6, i32 2, i32 1, i32 7, i32 1, i32 5, i32 0>)
@@ -928,12 +928,12 @@ define <8 x i32> @constant_fold_permd() {
define <8 x float> @constant_fold_permps() {
; X32-LABEL: constant_fold_permps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [5.000000e+00,7.000000e+00,3.000000e+00,2.000000e+00,8.000000e+00,2.000000e+00,6.000000e+00,1.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_permps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [5.000000e+00,7.000000e+00,3.000000e+00,2.000000e+00,8.000000e+00,2.000000e+00,6.000000e+00,1.000000e+00]
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 6, i32 2, i32 1, i32 7, i32 1, i32 5, i32 0>)
@@ -942,12 +942,12 @@ define <8 x float> @constant_fold_permps() {
define <32 x i8> @constant_fold_pshufb_256() {
; X32-LABEL: constant_fold_pshufb_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9,255,0,0,0,u,u,0,0,241,0,0,0,0,0,249,250>
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_pshufb_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9,255,0,0,0,u,u,0,0,241,0,0,0,0,0,249,250>
; X64-NEXT: retq
%1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15>, <32 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6, i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6>)
@@ -956,13 +956,13 @@ define <32 x i8> @constant_fold_pshufb_256() {
define <32 x i8> @PR27320(<8 x i32> %a0) {
; X32-LABEL: PR27320:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,1,2,3,4,4,5,6,7,7,8,9,10,10,11,28,29,29,30,31,16,16,17,18,19,19,20,21,22,22,23]
; X32-NEXT: retl
;
; X64-LABEL: PR27320:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,1,2,3,4,4,5,6,7,7,8,9,10,10,11,28,29,29,30,31,16,16,17,18,19,19,20,21,22,22,23]
; X64-NEXT: retq
@@ -974,7 +974,7 @@ define <32 x i8> @PR27320(<8 x i32> %a0) {
define internal fastcc <8 x float> @PR34577(<8 x float> %inp0, <8 x float> %inp1, <8 x float> %inp2) {
; X32-AVX2-LABEL: PR34577:
-; X32-AVX2: # BB#0: # %entry
+; X32-AVX2: # %bb.0: # %entry
; X32-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
@@ -984,7 +984,7 @@ define internal fastcc <8 x float> @PR34577(<8 x float> %inp0, <8 x float> %inp1
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: PR34577:
-; X32-AVX512: # BB#0: # %entry
+; X32-AVX512: # %bb.0: # %entry
; X32-AVX512-NEXT: vmovapd {{.*#+}} ymm2 = <1,u,u,u,2,u,5,0>
; X32-AVX512-NEXT: vpermps %ymm0, %ymm2, %ymm0
; X32-AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
@@ -995,7 +995,7 @@ define internal fastcc <8 x float> @PR34577(<8 x float> %inp0, <8 x float> %inp1
; X32-AVX512-NEXT: retl
;
; X64-AVX2-LABEL: PR34577:
-; X64-AVX2: # BB#0: # %entry
+; X64-AVX2: # %bb.0: # %entry
; X64-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
@@ -1005,7 +1005,7 @@ define internal fastcc <8 x float> @PR34577(<8 x float> %inp0, <8 x float> %inp1
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: PR34577:
-; X64-AVX512: # BB#0: # %entry
+; X64-AVX512: # %bb.0: # %entry
; X64-AVX512-NEXT: vmovapd {{.*#+}} ymm2 = <1,u,u,u,2,u,5,0>
; X64-AVX512-NEXT: vpermps %ymm0, %ymm2, %ymm0
; X64-AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
index 898f4117403..474fdabfb46 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
@@ -27,11 +27,11 @@ declare <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16>, <32 x i16
define <8 x double> @combine_permvar_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
; X32-LABEL: combine_permvar_8f64_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8f64_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x1, i8 -1)
%res1 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, i8 -1)
@@ -39,7 +39,7 @@ define <8 x double> @combine_permvar_8f64_identity(<8 x double> %x0, <8 x double
}
define <8 x double> @combine_permvar_8f64_identity_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
; X32-LABEL: combine_permvar_8f64_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vmovapd {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
@@ -50,7 +50,7 @@ define <8 x double> @combine_permvar_8f64_identity_mask(<8 x double> %x0, <8 x d
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8f64_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovapd {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermpd %zmm0, %zmm2, %zmm1 {%k1}
@@ -65,11 +65,11 @@ define <8 x double> @combine_permvar_8f64_identity_mask(<8 x double> %x0, <8 x d
define <8 x i64> @combine_permvar_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_permvar_8i64_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8i64_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x1, i8 -1)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, i8 -1)
@@ -77,7 +77,7 @@ define <8 x i64> @combine_permvar_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
}
define <8 x i64> @combine_permvar_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
; X32-LABEL: combine_permvar_8i64_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
@@ -88,7 +88,7 @@ define <8 x i64> @combine_permvar_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8i64_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermq %zmm0, %zmm2, %zmm1 {%k1}
@@ -103,11 +103,11 @@ define <8 x i64> @combine_permvar_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x
define <8 x double> @combine_vpermt2var_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
; X32-LABEL: combine_vpermt2var_8f64_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8f64_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x0, <8 x double> %x1, i8 -1)
%res1 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, <8 x double> %res0, i8 -1)
@@ -115,7 +115,7 @@ define <8 x double> @combine_vpermt2var_8f64_identity(<8 x double> %x0, <8 x dou
}
define <8 x double> @combine_vpermt2var_8f64_identity_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
; X32-LABEL: combine_vpermt2var_8f64_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vmovapd {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
@@ -125,7 +125,7 @@ define <8 x double> @combine_vpermt2var_8f64_identity_mask(<8 x double> %x0, <8
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8f64_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovapd {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2 {%k1} {z}
@@ -139,12 +139,12 @@ define <8 x double> @combine_vpermt2var_8f64_identity_mask(<8 x double> %x0, <8
define <8 x double> @combine_vpermt2var_8f64_movddup(<8 x double> %x0, <8 x double> %x1) {
; X32-LABEL: combine_vpermt2var_8f64_movddup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8f64_movddup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2, i64 2, i64 4, i64 4, i64 undef, i64 undef>, <8 x double> %x0, <8 x double> %x1, i8 -1)
@@ -152,13 +152,13 @@ define <8 x double> @combine_vpermt2var_8f64_movddup(<8 x double> %x0, <8 x doub
}
define <8 x double> @combine_vpermt2var_8f64_movddup_load(<8 x double> *%p0, <8 x double> %x1) {
; X32-LABEL: combine_vpermt2var_8f64_movddup_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8f64_movddup_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
%x0 = load <8 x double>, <8 x double> *%p0
@@ -167,14 +167,14 @@ define <8 x double> @combine_vpermt2var_8f64_movddup_load(<8 x double> *%p0, <8
}
define <8 x double> @combine_vpermt2var_8f64_movddup_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
; X32-LABEL: combine_vpermt2var_8f64_movddup_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8f64_movddup_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
@@ -184,11 +184,11 @@ define <8 x double> @combine_vpermt2var_8f64_movddup_mask(<8 x double> %x0, <8 x
define <8 x i64> @combine_vpermt2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_vpermt2var_8i64_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8i64_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x0, <8 x i64> %x1, i8 -1)
%res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 undef, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, <8 x i64> %res0, i8 -1)
@@ -196,7 +196,7 @@ define <8 x i64> @combine_vpermt2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1)
}
define <8 x i64> @combine_vpermt2var_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
; X32-LABEL: combine_vpermt2var_8i64_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
@@ -206,7 +206,7 @@ define <8 x i64> @combine_vpermt2var_8i64_identity_mask(<8 x i64> %x0, <8 x i64>
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8i64_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 {%k1} {z}
@@ -220,11 +220,11 @@ define <8 x i64> @combine_vpermt2var_8i64_identity_mask(<8 x i64> %x0, <8 x i64>
define <16 x float> @combine_vpermt2var_16f32_identity(<16 x float> %x0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %x0, <16 x float> %x1, i16 -1)
%res1 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x float> %res0, <16 x float> %res0, i16 -1)
@@ -232,7 +232,7 @@ define <16 x float> @combine_vpermt2var_16f32_identity(<16 x float> %x0, <16 x f
}
define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16f32_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovaps {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2 {%k1} {z}
@@ -241,7 +241,7 @@ define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <1
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovaps {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2 {%k1} {z}
@@ -255,13 +255,13 @@ define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <1
define <16 x float> @combine_vpermt2var_16f32_vmovddup(<16 x float> %x0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_vmovddup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; X32-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovddup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; X64-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
; X64-NEXT: retq
@@ -270,7 +270,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup(<16 x float> %x0, <16 x f
}
define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_vmovddup_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps (%eax), %zmm2
; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
@@ -279,7 +279,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovddup_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %zmm2
; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; X64-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1
@@ -291,14 +291,14 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <
}
define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; X32-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; X64-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
@@ -308,7 +308,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask(<16 x float> %x0, <1
}
define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps (%eax), %zmm2
@@ -318,7 +318,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask_load(<16 x float> *%
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %esi, %k1
; X64-NEXT: vmovaps (%rdi), %zmm2
; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
@@ -332,12 +332,12 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask_load(<16 x float> *%
define <16 x float> @combine_vpermt2var_16f32_vmovshdup(<16 x float> %x0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_vmovshdup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovshdup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>, <16 x float> %x0, <16 x float> %x1, i16 -1)
@@ -345,13 +345,13 @@ define <16 x float> @combine_vpermt2var_16f32_vmovshdup(<16 x float> %x0, <16 x
}
define <16 x float> @combine_vpermt2var_16f32_vmovshdup_load(<16 x float> *%p0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_vmovshdup_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovshdup_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
%x0 = load <16 x float>, <16 x float> *%p0
@@ -360,13 +360,13 @@ define <16 x float> @combine_vpermt2var_16f32_vmovshdup_load(<16 x float> *%p0,
}
define <16 x float> @combine_vpermt2var_16f32_vmovshdup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
@@ -376,12 +376,12 @@ define <16 x float> @combine_vpermt2var_16f32_vmovshdup_mask(<16 x float> %x0, <
define <16 x float> @combine_vpermt2var_16f32_vmovsldup(<16 x float> %x0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_vmovsldup:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovsldup:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>, <16 x float> %x0, <16 x float> %x1, i16 -1)
@@ -389,13 +389,13 @@ define <16 x float> @combine_vpermt2var_16f32_vmovsldup(<16 x float> %x0, <16 x
}
define <16 x float> @combine_vpermt2var_16f32_vmovsldup_load(<16 x float> *%p0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
%x0 = load <16 x float>, <16 x float> *%p0
@@ -404,13 +404,13 @@ define <16 x float> @combine_vpermt2var_16f32_vmovsldup_load(<16 x float> *%p0,
}
define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
@@ -419,14 +419,14 @@ define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask(<16 x float> %x0, <
}
define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %esi, %k1
; X64-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
@@ -437,12 +437,12 @@ define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask_load(<16 x float> *
define <16 x float> @combine_vpermt2var_16f32_vpermilps(<16 x float> %x0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_vpermilps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vpermilps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>, <16 x float> %x0, <16 x float> %x1, i16 -1)
@@ -450,13 +450,13 @@ define <16 x float> @combine_vpermt2var_16f32_vpermilps(<16 x float> %x0, <16 x
}
define <16 x float> @combine_vpermt2var_16f32_vpermilps_load(<16 x float> *%p0, <16 x float> %x1) {
; X32-LABEL: combine_vpermt2var_16f32_vpermilps_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpermilps {{.*#+}} zmm0 = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vpermilps_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} zmm0 = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X64-NEXT: retq
%x0 = load <16 x float>, <16 x float> *%p0
@@ -465,13 +465,13 @@ define <16 x float> @combine_vpermt2var_16f32_vpermilps_load(<16 x float> *%p0,
}
define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X64-NEXT: retq
@@ -480,14 +480,14 @@ define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask(<16 x float> %x0, <
}
define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask_load(<16 x float> *%p0, <16 x float> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %esi, %k1
; X64-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X64-NEXT: retq
@@ -498,11 +498,11 @@ define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask_load(<16 x float> *
define <16 x i32> @combine_vpermt2var_16i32_identity(<16 x i32> %x0, <16 x i32> %x1) {
; X32-LABEL: combine_vpermt2var_16i32_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16i32_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef>, <16 x i32> %x0, <16 x i32> %x1, i16 -1)
%res1 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 undef, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x i32> %res0, <16 x i32> %res0, i16 -1)
@@ -510,7 +510,7 @@ define <16 x i32> @combine_vpermt2var_16i32_identity(<16 x i32> %x0, <16 x i32>
}
define <16 x i32> @combine_vpermt2var_16i32_identity_mask(<16 x i32> %x0, <16 x i32> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16i32_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 {%k1} {z}
@@ -519,7 +519,7 @@ define <16 x i32> @combine_vpermt2var_16i32_identity_mask(<16 x i32> %x0, <16 x
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16i32_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 {%k1} {z}
@@ -533,11 +533,11 @@ define <16 x i32> @combine_vpermt2var_16i32_identity_mask(<16 x i32> %x0, <16 x
define <32 x i16> @combine_vpermt2var_32i16_identity(<32 x i16> %x0, <32 x i16> %x1) {
; X32-LABEL: combine_vpermt2var_32i16_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_32i16_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %x0, <32 x i16> %x1, i32 -1)
%res1 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 63, i16 30, i16 61, i16 28, i16 59, i16 26, i16 57, i16 24, i16 55, i16 22, i16 53, i16 20, i16 51, i16 18, i16 49, i16 16, i16 47, i16 46, i16 13, i16 44, i16 11, i16 42, i16 9, i16 40, i16 7, i16 38, i16 5, i16 36, i16 3, i16 34, i16 1, i16 32>, <32 x i16> %res0, <32 x i16> %res0, i32 -1)
@@ -545,7 +545,7 @@ define <32 x i16> @combine_vpermt2var_32i16_identity(<32 x i16> %x0, <32 x i16>
}
define <32 x i16> @combine_vpermt2var_32i16_identity_mask(<32 x i16> %x0, <32 x i16> %x1, i32 %m) {
; X32-LABEL: combine_vpermt2var_32i16_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 {%k1} {z}
@@ -554,7 +554,7 @@ define <32 x i16> @combine_vpermt2var_32i16_identity_mask(<32 x i16> %x0, <32 x
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_32i16_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 {%k1} {z}
@@ -568,11 +568,11 @@ define <32 x i16> @combine_vpermt2var_32i16_identity_mask(<32 x i16> %x0, <32 x
define <64 x i8> @combine_pshufb_identity(<64 x i8> %x0) {
; X32-LABEL: combine_pshufb_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%select = bitcast <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1> to <64 x i8>
%mask = bitcast <16 x i32> <i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 undef, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051, i32 202182159, i32 134810123, i32 67438087, i32 66051> to <64 x i8>
@@ -582,7 +582,7 @@ define <64 x i8> @combine_pshufb_identity(<64 x i8> %x0) {
}
define <64 x i8> @combine_pshufb_identity_mask(<64 x i8> %x0, i64 %m) {
; X32-LABEL: combine_pshufb_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
@@ -593,7 +593,7 @@ define <64 x i8> @combine_pshufb_identity_mask(<64 x i8> %x0, i64 %m) {
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: kmovq %rdi, %k1
@@ -611,12 +611,12 @@ define <64 x i8> @combine_pshufb_identity_mask(<64 x i8> %x0, i64 %m) {
define <32 x i16> @combine_permvar_as_vpbroadcastw512(<32 x i16> %x0) {
; X32-LABEL: combine_permvar_as_vpbroadcastw512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_as_vpbroadcastw512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %zmm0
; X64-NEXT: retq
%1 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %x0, <32 x i16> zeroinitializer, <32 x i16> undef, i32 -1)
@@ -625,12 +625,12 @@ define <32 x i16> @combine_permvar_as_vpbroadcastw512(<32 x i16> %x0) {
define <16 x i32> @combine_permvar_as_vpbroadcastd512(<16 x i32> %x0) {
; X32-LABEL: combine_permvar_as_vpbroadcastd512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_as_vpbroadcastd512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %zmm0
; X64-NEXT: retq
%1 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %x0, <16 x i32> zeroinitializer, <16 x i32> undef, i16 -1)
@@ -639,12 +639,12 @@ define <16 x i32> @combine_permvar_as_vpbroadcastd512(<16 x i32> %x0) {
define <8 x i64> @combine_permvar_as_vpbroadcastq512(<8 x i64> %x0) {
; X32-LABEL: combine_permvar_as_vpbroadcastq512:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_as_vpbroadcastq512:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %zmm0
; X64-NEXT: retq
%1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> zeroinitializer, <8 x i64> undef, i8 -1)
@@ -653,12 +653,12 @@ define <8 x i64> @combine_permvar_as_vpbroadcastq512(<8 x i64> %x0) {
define <8 x i64> @combine_permvar_8i64_as_permq(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_permvar_8i64_as_permq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8i64_as_permq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; X64-NEXT: retq
%1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 3, i64 2, i64 1, i64 undef, i64 undef, i64 6, i64 5, i64 4>, <8 x i64> %x1, i8 -1)
@@ -666,7 +666,7 @@ define <8 x i64> @combine_permvar_8i64_as_permq(<8 x i64> %x0, <8 x i64> %x1) {
}
define <8 x i64> @combine_permvar_8i64_as_permq_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
; X32-LABEL: combine_permvar_8i64_as_permq_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
@@ -674,7 +674,7 @@ define <8 x i64> @combine_permvar_8i64_as_permq_mask(<8 x i64> %x0, <8 x i64> %x
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8i64_as_permq_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -685,12 +685,12 @@ define <8 x i64> @combine_permvar_8i64_as_permq_mask(<8 x i64> %x0, <8 x i64> %x
define <8 x double> @combine_permvar_8f64_as_permpd(<8 x double> %x0, <8 x double> %x1) {
; X32-LABEL: combine_permvar_8f64_as_permpd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8f64_as_permpd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; X64-NEXT: retq
%1 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 3, i64 2, i64 1, i64 undef, i64 undef, i64 6, i64 5, i64 4>, <8 x double> %x1, i8 -1)
@@ -698,7 +698,7 @@ define <8 x double> @combine_permvar_8f64_as_permpd(<8 x double> %x0, <8 x doubl
}
define <8 x double> @combine_permvar_8f64_as_permpd_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
; X32-LABEL: combine_permvar_8f64_as_permpd_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
@@ -706,7 +706,7 @@ define <8 x double> @combine_permvar_8f64_as_permpd_mask(<8 x double> %x0, <8 x
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8f64_as_permpd_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
; X64-NEXT: vmovapd %zmm1, %zmm0
@@ -717,12 +717,12 @@ define <8 x double> @combine_permvar_8f64_as_permpd_mask(<8 x double> %x0, <8 x
define <16 x float> @combine_vpermilvar_16f32_230146759A8BCFDE(<16 x float> %x0) {
; X32-LABEL: combine_vpermilvar_16f32_230146759A8BCFDE:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,4,6,7,5,9,10,8,11,12,15,13,14]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_16f32_230146759A8BCFDE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[2,3,0,1,4,6,7,5,9,10,8,11,12,15,13,14]
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 0, i32 3, i32 2>, <16 x float> undef, i16 -1)
@@ -732,12 +732,12 @@ define <16 x float> @combine_vpermilvar_16f32_230146759A8BCFDE(<16 x float> %x0)
define <64 x i8> @combine_pshufb_as_pslldq(<64 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_pslldq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pslldq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
; X64-NEXT: retq
%res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>, <64 x i8> undef, i64 -1)
@@ -745,13 +745,13 @@ define <64 x i8> @combine_pshufb_as_pslldq(<64 x i8> %a0) {
}
define <64 x i8> @combine_pshufb_as_pslldq_mask(<64 x i8> %a0, i64 %m) {
; X32-LABEL: combine_pshufb_as_pslldq_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pslldq_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovq %rdi, %k1
; X64-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[16,17,18,19,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[32,33,34,35,36,37],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[48,49,50,51,52,53]
; X64-NEXT: retq
@@ -761,12 +761,12 @@ define <64 x i8> @combine_pshufb_as_pslldq_mask(<64 x i8> %a0, i64 %m) {
define <64 x i8> @combine_pshufb_as_psrldq(<64 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_psrldq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_psrldq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>, <64 x i8> undef, i64 -1)
@@ -774,13 +774,13 @@ define <64 x i8> @combine_pshufb_as_psrldq(<64 x i8> %a0) {
}
define <64 x i8> @combine_pshufb_as_psrldq_mask(<64 x i8> %a0, i64 %m) {
; X32-LABEL: combine_pshufb_as_psrldq_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; X32-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_psrldq_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovq %rdi, %k1
; X64-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[47],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[63],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
@@ -790,12 +790,12 @@ define <64 x i8> @combine_pshufb_as_psrldq_mask(<64 x i8> %a0, i64 %m) {
define <32 x i16> @combine_permvar_as_pshuflw(<32 x i16> %a0) {
; X32-LABEL: combine_permvar_as_pshuflw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_as_pshuflw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
; X64-NEXT: retq
%res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 0, i16 3, i16 2, i16 4, i16 5, i16 6, i16 7, i16 9, i16 8, i16 11, i16 10, i16 12, i16 13, i16 14, i16 15, i16 17, i16 16, i16 19, i16 18, i16 20, i16 21, i16 22, i16 23, i16 25, i16 24, i16 27, i16 26, i16 28, i16 29, i16 30, i16 31>, <32 x i16> undef, i32 -1)
@@ -804,12 +804,12 @@ define <32 x i16> @combine_permvar_as_pshuflw(<32 x i16> %a0) {
define <32 x i16> @combine_pshufb_as_pshufhw(<32 x i16> %a0) {
; X32-LABEL: combine_pshufb_as_pshufhw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pshufhw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
; X64-NEXT: retq
%res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 5, i16 4, i16 7, i16 6, i16 8, i16 9, i16 10, i16 11, i16 13, i16 12, i16 15, i16 14, i16 16, i16 17, i16 18, i16 19, i16 21, i16 20, i16 23, i16 22, i16 24, i16 25, i16 26, i16 27, i16 29, i16 28, i16 31, i16 30>, <32 x i16> undef, i32 -1)
@@ -818,12 +818,12 @@ define <32 x i16> @combine_pshufb_as_pshufhw(<32 x i16> %a0) {
define <32 x i16> @combine_vpermi2var_32i16_as_pshufb(<32 x i16> %a0) {
; X32-LABEL: combine_vpermi2var_32i16_as_pshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29,34,35,32,33,38,39,36,37,42,43,40,41,46,47,44,45,50,51,48,49,54,55,52,53,58,59,56,57,62,63,60,61]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_32i16_as_pshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29,34,35,32,33,38,39,36,37,42,43,40,41,46,47,44,45,50,51,48,49,54,55,52,53,58,59,56,57,62,63,60,61]
; X64-NEXT: retq
%res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 0, i16 3, i16 2, i16 4, i16 5, i16 6, i16 7, i16 9, i16 8, i16 11, i16 10, i16 12, i16 13, i16 14, i16 15, i16 17, i16 16, i16 19, i16 18, i16 20, i16 21, i16 22, i16 23, i16 25, i16 24, i16 27, i16 26, i16 28, i16 29, i16 30, i16 31>, <32 x i16> undef, i32 -1)
@@ -833,11 +833,11 @@ define <32 x i16> @combine_vpermi2var_32i16_as_pshufb(<32 x i16> %a0) {
define <8 x double> @combine_vpermi2var_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
; X32-LABEL: combine_vpermi2var_8f64_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_8f64_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x1, i8 -1)
%res1 = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, i8 -1)
@@ -846,12 +846,12 @@ define <8 x double> @combine_vpermi2var_8f64_identity(<8 x double> %x0, <8 x dou
define <8 x double> @combine_vpermi2var_8f64_as_shufpd(<8 x double> %x0, <8 x double> %x1) {
; X32-LABEL: combine_vpermi2var_8f64_as_shufpd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[1],zmm1[0],zmm0[2],zmm1[2],zmm0[5],zmm1[5],zmm0[6],zmm1[7]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_8f64_as_shufpd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[1],zmm1[0],zmm0[2],zmm1[2],zmm0[5],zmm1[5],zmm0[6],zmm1[7]
; X64-NEXT: retq
%1 = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> <i64 1, i64 8, i64 2, i64 10, i64 5, i64 13, i64 6, i64 15>, <8 x double> %x1, i8 -1)
@@ -860,11 +860,11 @@ define <8 x double> @combine_vpermi2var_8f64_as_shufpd(<8 x double> %x0, <8 x do
define <8 x i64> @combine_vpermi2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_vpermi2var_8i64_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_8i64_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x1, i8 -1)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %res0, <8 x i64> <i64 undef, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, i8 -1)
@@ -873,11 +873,11 @@ define <8 x i64> @combine_vpermi2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1)
define <16 x float> @combine_vpermi2var_16f32_identity(<16 x float> %x0, <16 x float> %x1) {
; X32-LABEL: combine_vpermi2var_16f32_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_16f32_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %x1, i16 -1)
%res1 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %res0, <16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x float> %res0, i16 -1)
@@ -886,11 +886,11 @@ define <16 x float> @combine_vpermi2var_16f32_identity(<16 x float> %x0, <16 x f
define <16 x i32> @combine_vpermi2var_16i32_identity(<16 x i32> %x0, <16 x i32> %x1) {
; X32-LABEL: combine_vpermi2var_16i32_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_16i32_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %x0, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef>, <16 x i32> %x1, i16 -1)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %res0, <16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 undef, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x i32> %res0, i16 -1)
@@ -899,12 +899,12 @@ define <16 x i32> @combine_vpermi2var_16i32_identity(<16 x i32> %x0, <16 x i32>
define <16 x float> @combine_vpermt2var_vpermi2var_16f32_as_unpckhps(<16 x float> %a0, <16 x float> %a1) {
; X32-LABEL: combine_vpermt2var_vpermi2var_16f32_as_unpckhps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpckhps {{.*#+}} zmm0 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_vpermi2var_16f32_as_unpckhps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpckhps {{.*#+}} zmm0 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %a0, <16 x i32> <i32 18, i32 2, i32 19, i32 3, i32 22, i32 6, i32 23, i32 7, i32 26, i32 10, i32 27, i32 11, i32 30, i32 14, i32 31, i32 15>, <16 x float> %a1, i16 -1)
@@ -913,12 +913,12 @@ define <16 x float> @combine_vpermt2var_vpermi2var_16f32_as_unpckhps(<16 x float
define <16 x i32> @vpermt2var_vpermi2var_16i32_as_unpckldq(<16 x i32> %a0, <16 x i32> %a1) {
; X32-LABEL: vpermt2var_vpermi2var_16i32_as_unpckldq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X32-NEXT: retl
;
; X64-LABEL: vpermt2var_vpermi2var_16i32_as_unpckldq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; X64-NEXT: retq
%res0 = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %a0, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>, <16 x i32> %a1, i16 -1)
@@ -927,11 +927,11 @@ define <16 x i32> @vpermt2var_vpermi2var_16i32_as_unpckldq(<16 x i32> %a0, <16 x
define <32 x i16> @combine_vpermi2var_32i16_identity(<32 x i16> %x0, <32 x i16> %x1) {
; X32-LABEL: combine_vpermi2var_32i16_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_32i16_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %x0, <32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %x1, i32 -1)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %res0, <32 x i16> <i16 63, i16 30, i16 61, i16 28, i16 59, i16 26, i16 57, i16 24, i16 55, i16 22, i16 53, i16 20, i16 51, i16 18, i16 49, i16 16, i16 47, i16 46, i16 13, i16 44, i16 11, i16 42, i16 9, i16 40, i16 7, i16 38, i16 5, i16 36, i16 3, i16 34, i16 1, i16 32>, <32 x i16> %res0, i32 -1)
@@ -940,13 +940,13 @@ define <32 x i16> @combine_vpermi2var_32i16_identity(<32 x i16> %x0, <32 x i16>
define <8 x double> @combine_vpermi2var_8f64_as_vpermpd(<8 x double> %x0, <8 x double> %x1) {
; X32-LABEL: combine_vpermi2var_8f64_as_vpermpd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
; X32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_8f64_as_vpermpd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} zmm1 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
@@ -957,13 +957,13 @@ define <8 x double> @combine_vpermi2var_8f64_as_vpermpd(<8 x double> %x0, <8 x d
define <8 x i64> @combine_vpermt2var_8i64_as_vpermq(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_vpermt2var_8i64_as_vpermq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
; X32-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8i64_as_vpermq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} zmm1 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
@@ -974,13 +974,13 @@ define <8 x i64> @combine_vpermt2var_8i64_as_vpermq(<8 x i64> %x0, <8 x i64> %x1
define <16 x float> @combine_vpermi2var_16f32_as_vpermps(<16 x float> %x0, <16 x float> %x1) {
; X32-LABEL: combine_vpermi2var_16f32_as_vpermps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
; X32-NEXT: vpermps %zmm0, %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_16f32_as_vpermps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
; X64-NEXT: vpermps %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
@@ -991,13 +991,13 @@ define <16 x float> @combine_vpermi2var_16f32_as_vpermps(<16 x float> %x0, <16 x
define <16 x i32> @combine_vpermt2var_16i32_as_vpermd(<16 x i32> %x0, <16 x i32> %x1) {
; X32-LABEL: combine_vpermt2var_16i32_as_vpermd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
; X32-NEXT: vpermps %zmm0, %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16i32_as_vpermd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} zmm1 = [7,7,5,5,3,3,1,1,15,15,13,13,11,11,9,9]
; X64-NEXT: vpermps %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
@@ -1008,13 +1008,13 @@ define <16 x i32> @combine_vpermt2var_16i32_as_vpermd(<16 x i32> %x0, <16 x i32>
define <32 x i16> @combine_vpermi2var_32i16_as_permw(<32 x i16> %x0, <32 x i16> %x1) {
; X32-LABEL: combine_vpermi2var_32i16_as_permw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,16,14,17,13,18,12,19,11,20,10,21,9,22,8,23,7,24,6,25,5,26,4,27,3,28,2,29,1,30,0,31]
; X32-NEXT: vpermw %zmm0, %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_32i16_as_permw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,16,14,17,13,18,12,19,11,20,10,21,9,22,8,23,7,24,6,25,5,26,4,27,3,28,2,29,1,30,0,31]
; X64-NEXT: vpermw %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
@@ -1025,14 +1025,14 @@ define <32 x i16> @combine_vpermi2var_32i16_as_permw(<32 x i16> %x0, <32 x i16>
define <8 x double> @combine_vpermi2var_vpermt2var_8f64_as_vperm2(<8 x double> %x0, <8 x double> %x1) {
; X32-LABEL: combine_vpermi2var_vpermt2var_8f64_as_vperm2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovapd {{.*#+}} zmm2 = [4,0,14,0,3,0,12,0,7,0,8,0,0,0,15,0]
; X32-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; X32-NEXT: vmovapd %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_vpermt2var_8f64_as_vperm2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovapd {{.*#+}} zmm2 = [4,14,3,12,7,8,0,15]
; X64-NEXT: vpermi2pd %zmm0, %zmm1, %zmm2
; X64-NEXT: vmovapd %zmm2, %zmm0
@@ -1044,13 +1044,13 @@ define <8 x double> @combine_vpermi2var_vpermt2var_8f64_as_vperm2(<8 x double> %
define <16 x i32> @combine_vpermi2var_vpermt2var_16i32_as_vpermd(<16 x i32> %x0, <16 x i32> %x1) {
; X32-LABEL: combine_vpermi2var_vpermt2var_16i32_as_vpermd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
; X32-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_vpermt2var_16i32_as_vpermd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
; X64-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
; X64-NEXT: retq
@@ -1061,14 +1061,14 @@ define <16 x i32> @combine_vpermi2var_vpermt2var_16i32_as_vpermd(<16 x i32> %x0,
define <32 x i16> @combine_vpermt2var_vpermi2var_32i16_as_permw(<32 x i16> %x0, <32 x i16> %x1) {
; X32-LABEL: combine_vpermt2var_vpermi2var_32i16_as_permw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [17,39,19,38,21,37,23,36,25,35,27,34,29,33,31,32,1,47,3,46,5,45,7,44,9,43,11,42,13,41,15,40]
; X32-NEXT: vpermi2w %zmm0, %zmm1, %zmm2
; X32-NEXT: vmovdqa64 %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_vpermi2var_32i16_as_permw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [17,39,19,38,21,37,23,36,25,35,27,34,29,33,31,32,1,47,3,46,5,45,7,44,9,43,11,42,13,41,15,40]
; X64-NEXT: vpermi2w %zmm0, %zmm1, %zmm2
; X64-NEXT: vmovdqa64 %zmm2, %zmm0
@@ -1080,14 +1080,14 @@ define <32 x i16> @combine_vpermt2var_vpermi2var_32i16_as_permw(<32 x i16> %x0,
define <8 x double> @combine_vpermi2var_vpermvar_8f64_as_vperm2_zero(<8 x double> %x0) {
; X32-LABEL: combine_vpermi2var_vpermvar_8f64_as_vperm2_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-NEXT: vmovapd {{.*#+}} zmm2 = [8,0,3,0,10,0,11,0,1,0,7,0,14,0,5,0]
; X32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_vpermvar_8f64_as_vperm2_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vmovapd {{.*#+}} zmm2 = [8,3,10,11,1,7,14,5]
; X64-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
@@ -1099,14 +1099,14 @@ define <8 x double> @combine_vpermi2var_vpermvar_8f64_as_vperm2_zero(<8 x double
define <16 x float> @combine_vpermi2var_vpermvar_16f32_as_vperm2_zero(<16 x float> %x0) {
; X32-LABEL: combine_vpermi2var_vpermvar_16f32_as_vperm2_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vmovaps {{.*#+}} zmm2 = [0,13,1,12,4,9,22,12,4,25,26,9,5,29,30,8]
; X32-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_vpermvar_16f32_as_vperm2_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vmovaps {{.*#+}} zmm2 = [0,13,1,12,4,9,22,12,4,25,26,9,5,29,30,8]
; X64-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
@@ -1118,12 +1118,12 @@ define <16 x float> @combine_vpermi2var_vpermvar_16f32_as_vperm2_zero(<16 x floa
define <8 x i64> @combine_vpermvar_insertion_as_broadcast_v8i64(i64 %a0) {
; X32-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovq %rdi, %xmm0
; X64-NEXT: vpbroadcastq %xmm0, %zmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
index e597968926a..f55aba61a1c 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
@@ -8,11 +8,11 @@ declare <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16>, <16 x i16
define <16 x i16> @combine_vpermt2var_16i16_identity(<16 x i16> %x0, <16 x i16> %x1) {
; X32-LABEL: combine_vpermt2var_16i16_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16i16_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %x0, <16 x i16> %x1, i16 -1)
%res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 30, i16 13, i16 28, i16 11, i16 26, i16 9, i16 24, i16 7, i16 22, i16 5, i16 20, i16 3, i16 18, i16 1, i16 16>, <16 x i16> %res0, <16 x i16> %res0, i16 -1)
@@ -20,7 +20,7 @@ define <16 x i16> @combine_vpermt2var_16i16_identity(<16 x i16> %x0, <16 x i16>
}
define <16 x i16> @combine_vpermt2var_16i16_identity_mask(<16 x i16> %x0, <16 x i16> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16i16_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovdqa {{.*#+}} ymm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 {%k1} {z}
@@ -29,7 +29,7 @@ define <16 x i16> @combine_vpermt2var_16i16_identity_mask(<16 x i16> %x0, <16 x
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16i16_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa {{.*#+}} ymm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 {%k1} {z}
@@ -43,13 +43,13 @@ define <16 x i16> @combine_vpermt2var_16i16_identity_mask(<16 x i16> %x0, <16 x
define <16 x i16> @combine_vpermi2var_16i16_as_permw(<16 x i16> %x0, <16 x i16> %x1) {
; X32-LABEL: combine_vpermi2var_16i16_as_permw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
; X32-NEXT: vpermw %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_16i16_as_permw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
; X64-NEXT: vpermw %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -60,13 +60,13 @@ define <16 x i16> @combine_vpermi2var_16i16_as_permw(<16 x i16> %x0, <16 x i16>
define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_vperm2(<16 x i16> %x0, <16 x i16> %x1) {
; X32-LABEL: combine_vpermt2var_vpermi2var_16i16_as_vperm2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa {{.*#+}} ymm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
; X32-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_vpermi2var_16i16_as_vperm2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*#+}} ymm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
; X64-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
; X64-NEXT: retq
@@ -77,12 +77,12 @@ define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_vperm2(<16 x i16> %x0,
define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_unpckhwd(<16 x i16> %a0, <16 x i16> %a1) {
; X32-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpckhwd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpckhwd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
; X64-NEXT: retq
%res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %a0, <16 x i16> <i16 20, i16 4, i16 21, i16 5, i16 22, i16 6, i16 23, i16 7, i16 28, i16 12, i16 29, i16 13, i16 30, i16 14, i16 31, i16 15>, <16 x i16> %a1, i16 -1)
@@ -91,12 +91,12 @@ define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_unpckhwd(<16 x i16> %a
define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_unpcklwd(<16 x i16> %a0, <16 x i16> %a1) {
; X32-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpcklwd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpcklwd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; X64-NEXT: retq
%res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 0, i16 16, i16 1, i16 17, i16 2, i16 18, i16 3, i16 19, i16 8, i16 24, i16 9, i16 25, i16 10, i16 26, i16 11, i16 27>, <16 x i16> %a0, <16 x i16> %a1, i16 -1)
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
index 6b64029bf91..44add0416f2 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
@@ -23,11 +23,11 @@ declare <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8>, <64 x i8>, <64 x
define <16 x i8> @combine_vpermt2var_16i8_identity(<16 x i8> %x0, <16 x i8> %x1) {
; X32-LABEL: combine_vpermt2var_16i8_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16i8_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %x0, <16 x i8> %x1, i16 -1)
%res1 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 30, i8 13, i8 28, i8 11, i8 26, i8 9, i8 24, i8 7, i8 22, i8 5, i8 20, i8 3, i8 18, i8 1, i8 16>, <16 x i8> %res0, <16 x i8> %res0, i16 -1)
@@ -35,7 +35,7 @@ define <16 x i8> @combine_vpermt2var_16i8_identity(<16 x i8> %x0, <16 x i8> %x1)
}
define <16 x i8> @combine_vpermt2var_16i8_identity_mask(<16 x i8> %x0, <16 x i8> %x1, i16 %m) {
; X32-LABEL: combine_vpermt2var_16i8_identity_mask:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: vpermi2b %xmm1, %xmm0, %xmm2 {%k1} {z}
@@ -44,7 +44,7 @@ define <16 x i8> @combine_vpermt2var_16i8_identity_mask(<16 x i8> %x0, <16 x i8>
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_16i8_identity_mask:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2b %xmm1, %xmm0, %xmm2 {%k1} {z}
@@ -58,12 +58,12 @@ define <16 x i8> @combine_vpermt2var_16i8_identity_mask(<16 x i8> %x0, <16 x i8>
define <16 x i8> @combine_vpermi2var_16i8_as_vpshufb(<16 x i8> %x0, <16 x i8> %x1) {
; X32-LABEL: combine_vpermi2var_16i8_as_vpshufb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_16i8_as_vpshufb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
; X64-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %x1, i16 -1)
@@ -72,13 +72,13 @@ define <16 x i8> @combine_vpermi2var_16i8_as_vpshufb(<16 x i8> %x0, <16 x i8> %x
}
define <32 x i8> @combine_vpermi2var_32i8_as_vpermb(<32 x i8> %x0, <32 x i8> %x1) {
; X32-LABEL: combine_vpermi2var_32i8_as_vpermb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
; X32-NEXT: vpermb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_32i8_as_vpermb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,0,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
; X64-NEXT: vpermb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -88,13 +88,13 @@ define <32 x i8> @combine_vpermi2var_32i8_as_vpermb(<32 x i8> %x0, <32 x i8> %x1
}
define <64 x i8> @combine_vpermi2var_64i8_as_vpermb(<64 x i8> %x0, <64 x i8> %x1) {
; X32-LABEL: combine_vpermi2var_64i8_as_vpermb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
; X32-NEXT: vpermb %zmm0, %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_64i8_as_vpermb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
; X64-NEXT: vpermb %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
@@ -105,7 +105,7 @@ define <64 x i8> @combine_vpermi2var_64i8_as_vpermb(<64 x i8> %x0, <64 x i8> %x1
define <16 x i8> @combine_vpermt2var_vpermi2var_16i8_as_vperm2(<16 x i8> %x0, <16 x i8> %x1) {
; X32-LABEL: combine_vpermt2var_vpermi2var_16i8_as_vperm2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [0,31,2,29,4,27,6,25,8,23,10,21,12,19,14,17]
; X32-NEXT: vpermi2b %xmm1, %xmm0, %xmm2
; X32-NEXT: vmovdqa {{.*#+}} xmm0 = [0,17,2,18,4,19,6,21,8,23,10,25,12,27,14,29]
@@ -113,7 +113,7 @@ define <16 x i8> @combine_vpermt2var_vpermi2var_16i8_as_vperm2(<16 x i8> %x0, <1
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_vpermi2var_16i8_as_vperm2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,31,2,29,4,27,6,25,8,23,10,21,12,19,14,17]
; X64-NEXT: vpermi2b %xmm1, %xmm0, %xmm2
; X64-NEXT: vmovdqa {{.*#+}} xmm0 = [0,17,2,18,4,19,6,21,8,23,10,25,12,27,14,29]
@@ -125,13 +125,13 @@ define <16 x i8> @combine_vpermt2var_vpermi2var_16i8_as_vperm2(<16 x i8> %x0, <1
}
define <32 x i8> @combine_vpermi2var_32i8_as_vperm2(<32 x i8> %x0, <32 x i8> %x1) {
; X32-LABEL: combine_vpermi2var_32i8_as_vperm2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa {{.*#+}} ymm2 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
; X32-NEXT: vpermt2b %ymm1, %ymm2, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_32i8_as_vperm2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*#+}} ymm2 = [0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
; X64-NEXT: vpermt2b %ymm1, %ymm2, %ymm0
; X64-NEXT: retq
@@ -141,13 +141,13 @@ define <32 x i8> @combine_vpermi2var_32i8_as_vperm2(<32 x i8> %x0, <32 x i8> %x1
}
define <64 x i8> @combine_vpermi2var_64i8_as_vperm2(<64 x i8> %x0, <64 x i8> %x1) {
; X32-LABEL: combine_vpermi2var_64i8_as_vperm2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,80,1,70,2,54,3,49,4,36,5,23,6,18,7,5,0,90,1,100,2,110,3,120,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
; X32-NEXT: vpermt2b %zmm1, %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_64i8_as_vperm2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,80,1,70,2,54,3,49,4,36,5,23,6,18,7,5,0,90,1,100,2,110,3,120,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19,0,32,1,23,2,22,3,21,4,22,5,21,6,20,7,19]
; X64-NEXT: vpermt2b %zmm1, %zmm2, %zmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
index 29e2124a168..27ccdefe4d5 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
@@ -10,12 +10,12 @@ declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
define <16 x i8> @combine_vpshufb_as_movzx(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_movzx:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vpshufb_as_movzx:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 undef, i8 undef, i8 -1, i8 -1, i8 -1, i8 -1>)
diff --git a/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll b/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll
index af69a5ac228..5da94190cca 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-sse4a.ll
@@ -10,7 +10,7 @@ declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
define <16 x i8> @combine_extrqi_pshufb_16i8(<16 x i8> %a0) {
; ALL-LABEL: combine_extrqi_pshufb_16i8:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[1,2],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 2, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -20,7 +20,7 @@ define <16 x i8> @combine_extrqi_pshufb_16i8(<16 x i8> %a0) {
define <8 x i16> @combine_extrqi_pshufb_8i16(<8 x i16> %a0) {
; ALL-LABEL: combine_extrqi_pshufb_8i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 2, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -32,18 +32,18 @@ define <8 x i16> @combine_extrqi_pshufb_8i16(<8 x i16> %a0) {
define <16 x i8> @combine_insertqi_pshufb_16i8(<16 x i8> %a0, <16 x i8> %a1) {
; SSSE3-LABEL: combine_insertqi_pshufb_16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: extrq {{.*#+}} xmm1 = xmm1[0,1],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE42-LABEL: combine_insertqi_pshufb_16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE42-NEXT: retq
;
; AVX-LABEL: combine_insertqi_pshufb_16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -53,18 +53,18 @@ define <16 x i8> @combine_insertqi_pshufb_16i8(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @combine_insertqi_pshufb_8i16(<8 x i16> %a0, <8 x i16> %a1) {
; SSSE3-LABEL: combine_insertqi_pshufb_8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: extrq {{.*#+}} xmm1 = xmm1[0,1],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE42-LABEL: combine_insertqi_pshufb_8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE42-NEXT: retq
;
; AVX-LABEL: combine_insertqi_pshufb_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -76,7 +76,7 @@ define <8 x i16> @combine_insertqi_pshufb_8i16(<8 x i16> %a0, <8 x i16> %a1) {
define <16 x i8> @combine_pshufb_insertqi_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; ALL-LABEL: combine_pshufb_insertqi_pshufb:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0],xmm1[0,1],xmm0[3,4,5,6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
diff --git a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index c17d45f6fd4..dbb86624cc3 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -11,12 +11,12 @@ declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
define <16 x i8> @combine_vpshufb_as_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vpshufb_as_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
@@ -27,12 +27,12 @@ define <16 x i8> @combine_vpshufb_as_zero(<16 x i8> %a0) {
define <16 x i8> @combine_vpshufb_as_movq(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_movq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vpshufb_as_movq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 128, i8 1, i8 128, i8 2, i8 128, i8 3, i8 128, i8 4, i8 128, i8 5, i8 128, i8 6, i8 128, i8 7, i8 128>)
@@ -42,28 +42,28 @@ define <16 x i8> @combine_vpshufb_as_movq(<16 x i8> %a0) {
define <2 x double> @combine_pshufb_as_movsd(<2 x double> %a0, <2 x double> %a1) {
; SSSE3-LABEL: combine_pshufb_as_movsd:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_pshufb_as_movsd:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_pshufb_as_movsd:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_pshufb_as_movsd:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_pshufb_as_movsd:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512F-NEXT: retq
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 3, i32 0>
@@ -75,27 +75,27 @@ define <2 x double> @combine_pshufb_as_movsd(<2 x double> %a0, <2 x double> %a1)
define <4 x float> @combine_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
; SSSE3-LABEL: combine_pshufb_as_movss:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_pshufb_as_movss:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_pshufb_as_movss:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_pshufb_as_movss:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_pshufb_as_movss:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512F-NEXT: retq
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 3, i32 2, i32 1>
@@ -107,17 +107,17 @@ define <4 x float> @combine_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
define <4 x i32> @combine_pshufb_as_zext(<16 x i8> %a0) {
; SSSE3-LABEL: combine_pshufb_as_zext:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_pshufb_as_zext:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_zext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: retq
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 2, i8 -1, i8 -1, i8 -1, i8 3, i8 -1, i8 -1, i8 -1>)
@@ -127,12 +127,12 @@ define <4 x i32> @combine_pshufb_as_zext(<16 x i8> %a0) {
define <2 x double> @combine_pshufb_as_vzmovl_64(<2 x double> %a0) {
; SSE-LABEL: combine_pshufb_as_vzmovl_64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_vzmovl_64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%1 = bitcast <2 x double> %a0 to <16 x i8>
@@ -143,32 +143,32 @@ define <2 x double> @combine_pshufb_as_vzmovl_64(<2 x double> %a0) {
define <4 x float> @combine_pshufb_as_vzmovl_32(<4 x float> %a0) {
; SSSE3-LABEL: combine_pshufb_as_vzmovl_32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_pshufb_as_vzmovl_32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_pshufb_as_vzmovl_32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_pshufb_as_vzmovl_32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_pshufb_as_vzmovl_32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512F-NEXT: retq
@@ -180,12 +180,12 @@ define <4 x float> @combine_pshufb_as_vzmovl_32(<4 x float> %a0) {
define <4 x float> @combine_pshufb_movddup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movddup:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,5,5,5,5,7,7,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_movddup:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,5,5,5,5,7,7,7,7]
; AVX-NEXT: retq
%1 = bitcast <4 x float> %a0 to <16 x i8>
@@ -197,12 +197,12 @@ define <4 x float> @combine_pshufb_movddup(<4 x float> %a0) {
define <4 x float> @combine_pshufb_movshdup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movshdup:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,7,7,7,7,7,7,7,3,3,3,3,3,3,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_movshdup:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,7,7,7,7,7,7,7,3,3,3,3,3,3,3,3]
; AVX-NEXT: retq
%1 = bitcast <4 x float> %a0 to <16 x i8>
@@ -214,12 +214,12 @@ define <4 x float> @combine_pshufb_movshdup(<4 x float> %a0) {
define <4 x float> @combine_pshufb_movsldup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movsldup:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,1,1,1,1,1,1,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_movsldup:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,1,1,1,1,1,1,1,1]
; AVX-NEXT: retq
%1 = bitcast <4 x float> %a0 to <16 x i8>
@@ -231,12 +231,12 @@ define <4 x float> @combine_pshufb_movsldup(<4 x float> %a0) {
define <16 x i8> @combine_pshufb_palignr(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_pshufb_palignr:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_palignr:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
@@ -246,12 +246,12 @@ define <16 x i8> @combine_pshufb_palignr(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @combine_pshufb_pslldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_pslldq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_pslldq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
@@ -261,12 +261,12 @@ define <16 x i8> @combine_pshufb_pslldq(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_psrldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_psrldq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_psrldq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
@@ -276,18 +276,18 @@ define <16 x i8> @combine_pshufb_psrldq(<16 x i8> %a0) {
define <16 x i8> @combine_and_pshufb(<16 x i8> %a0) {
; SSSE3-LABEL: combine_and_pshufb:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_and_pshufb:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_and_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-NEXT: retq
@@ -298,18 +298,18 @@ define <16 x i8> @combine_and_pshufb(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_and(<16 x i8> %a0) {
; SSSE3-LABEL: combine_pshufb_and:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_pshufb_and:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_pshufb_and:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-NEXT: retq
@@ -320,12 +320,12 @@ define <16 x i8> @combine_pshufb_and(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_palignr(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_palignr:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: palignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_palignr:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 undef, i8 undef, i8 0>)
@@ -334,12 +334,12 @@ define <16 x i8> @combine_pshufb_as_palignr(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_pslldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pslldq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_pslldq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>)
@@ -348,12 +348,12 @@ define <16 x i8> @combine_pshufb_as_pslldq(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_psrldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrldq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_psrldq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
@@ -362,12 +362,12 @@ define <16 x i8> @combine_pshufb_as_psrldq(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_psrlw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrlw:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_psrlw:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 128, i8 3, i8 128, i8 5, i8 128, i8 7, i8 128, i8 9, i8 128, i8 11, i8 128, i8 13, i8 128, i8 15, i8 128>)
@@ -376,12 +376,12 @@ define <16 x i8> @combine_pshufb_as_psrlw(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_pslld(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pslld:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $24, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_pslld:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $24, %xmm0, %xmm0
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 0, i8 128, i8 128, i8 128, i8 4, i8 128, i8 128, i8 128, i8 8, i8 128, i8 128, i8 128, i8 12>)
@@ -390,12 +390,12 @@ define <16 x i8> @combine_pshufb_as_pslld(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_psrlq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrlq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $40, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_psrlq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $40, %xmm0, %xmm0
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128>)
@@ -404,12 +404,12 @@ define <16 x i8> @combine_pshufb_as_psrlq(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_pshuflw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pshuflw:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_pshuflw:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
@@ -418,12 +418,12 @@ define <16 x i8> @combine_pshufb_as_pshuflw(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_pshufhw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pshufhw:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_pshufhw:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
@@ -432,12 +432,12 @@ define <16 x i8> @combine_pshufb_as_pshufhw(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_not_as_pshufw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_not_as_pshufw:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_not_as_pshufw:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
@@ -447,12 +447,12 @@ define <16 x i8> @combine_pshufb_not_as_pshufw(<16 x i8> %a0) {
define <16 x i8> @combine_vpshufb_as_pshuflw_not_pslld(<16 x i8> *%a0) {
; SSE-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
; AVX-NEXT: retq
%res0 = load <16 x i8>, <16 x i8> *%a0, align 16
@@ -462,12 +462,12 @@ define <16 x i8> @combine_vpshufb_as_pshuflw_not_pslld(<16 x i8> *%a0) {
define <16 x i8> @combine_pshufb_as_unary_unpcklbw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpcklbw:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_unary_unpcklbw:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 undef, i8 undef, i8 1, i8 2, i8 2, i8 3, i8 3, i8 4, i8 4, i8 5, i8 5, i8 6, i8 6, i8 7, i8 7>)
@@ -476,12 +476,12 @@ define <16 x i8> @combine_pshufb_as_unary_unpcklbw(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_unary_unpckhwd(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpckhwd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_unary_unpckhwd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 9, i8 8, i8 9, i8 10, i8 11, i8 10, i8 11, i8 12, i8 13, i8 12, i8 13, i8 14, i8 15, i8 undef, i8 undef>)
@@ -490,7 +490,7 @@ define <16 x i8> @combine_pshufb_as_unary_unpckhwd(<16 x i8> %a0) {
define <8 x i16> @combine_pshufb_as_unpacklo_undef(<16 x i8> %a0) {
; ALL-LABEL: combine_pshufb_as_unpacklo_undef:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 2, i8 3, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 6, i8 7>)
%2 = bitcast <16 x i8> %1 to <8 x i16>
@@ -500,7 +500,7 @@ define <8 x i16> @combine_pshufb_as_unpacklo_undef(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_unpackhi_undef(<16 x i8> %a0) {
; ALL-LABEL: combine_pshufb_as_unpackhi_undef:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 undef, i8 9, i8 undef, i8 10, i8 undef, i8 11, i8 undef, i8 12, i8 undef, i8 13, i8 undef, i8 14, i8 undef, i8 15, i8 undef>)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
@@ -509,14 +509,14 @@ define <16 x i8> @combine_pshufb_as_unpackhi_undef(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_unpacklo_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unpacklo_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_unpacklo_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: retq
@@ -526,13 +526,13 @@ define <16 x i8> @combine_pshufb_as_unpacklo_zero(<16 x i8> %a0) {
define <16 x i8> @combine_pshufb_as_unpackhi_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unpackhi_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufb_as_unpackhi_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-NEXT: retq
@@ -542,12 +542,12 @@ define <16 x i8> @combine_pshufb_as_unpackhi_zero(<16 x i8> %a0) {
define <16 x i8> @combine_psrlw_pshufb(<8 x i16> %a0) {
; SSE-LABEL: combine_psrlw_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_psrlw_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
%1 = lshr <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -558,12 +558,12 @@ define <16 x i8> @combine_psrlw_pshufb(<8 x i16> %a0) {
define <16 x i8> @combine_pslld_pshufb(<4 x i32> %a0) {
; SSE-LABEL: combine_pslld_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,1,0],zero,xmm0[6,5,4],zero,xmm0[10,9,8],zero,xmm0[14,13,12],zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pslld_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,1,0],zero,xmm0[6,5,4],zero,xmm0[10,9,8],zero,xmm0[14,13,12],zero
; AVX-NEXT: retq
%1 = shl <4 x i32> %a0, <i32 8, i32 8, i32 8, i32 8>
@@ -574,12 +574,12 @@ define <16 x i8> @combine_pslld_pshufb(<4 x i32> %a0) {
define <16 x i8> @combine_psrlq_pshufb(<2 x i64> %a0) {
; SSE-LABEL: combine_psrlq_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[7,6],zero,zero,zero,zero,zero,zero,xmm0[15,14]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_psrlq_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[7,6],zero,zero,zero,zero,zero,zero,xmm0[15,14]
; AVX-NEXT: retq
%1 = lshr <2 x i64> %a0, <i64 48, i64 48>
@@ -590,12 +590,12 @@ define <16 x i8> @combine_psrlq_pshufb(<2 x i64> %a0) {
define <16 x i8> @combine_unpckl_arg0_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_unpckl_arg0_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_unpckl_arg0_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
; AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -605,13 +605,13 @@ define <16 x i8> @combine_unpckl_arg0_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @combine_unpckl_arg1_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_unpckl_arg1_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_unpckl_arg1_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
; AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -621,12 +621,12 @@ define <16 x i8> @combine_unpckl_arg1_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @shuffle_combine_unpack_insert(<8 x i16> %a0) {
; SSE-LABEL: shuffle_combine_unpack_insert:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_combine_unpack_insert:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
; AVX-NEXT: retq
%1 = extractelement <8 x i16> %a0, i32 2
@@ -642,13 +642,13 @@ define <8 x i16> @shuffle_combine_unpack_insert(<8 x i16> %a0) {
define <16 x i8> @shuffle_combine_packssdw_pshufb(<4 x i32> %a0) {
; SSE-LABEL: shuffle_combine_packssdw_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[13,12,9,8,5,4,1,0,13,12,9,8,5,4,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_combine_packssdw_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13,12,9,8,5,4,1,0,13,12,9,8,5,4,1,0]
; AVX-NEXT: retq
@@ -662,13 +662,13 @@ declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind rea
define <16 x i8> @shuffle_combine_packsswb_pshufb(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_combine_packsswb_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_combine_packsswb_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0]
; AVX-NEXT: retq
@@ -682,12 +682,12 @@ declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <16 x i8> @shuffle_combine_packuswb_pshufb(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_combine_packuswb_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_combine_packuswb_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1]
; AVX-NEXT: retq
%1 = lshr <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -700,12 +700,12 @@ declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <16 x i8> @constant_fold_pshufb() {
; SSE-LABEL: constant_fold_pshufb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9>
; SSE-NEXT: retq
;
; AVX-LABEL: constant_fold_pshufb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9>
; AVX-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6>)
@@ -715,7 +715,7 @@ define <16 x i8> @constant_fold_pshufb() {
; FIXME - unnecessary pshufb/broadcast being used - pshufb mask only needs lowest byte.
define <16 x i8> @constant_fold_pshufb_2() {
; SSE-LABEL: constant_fold_pshufb_2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $2, %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
@@ -723,7 +723,7 @@ define <16 x i8> @constant_fold_pshufb_2() {
; SSE-NEXT: retq
;
; AVX1-LABEL: constant_fold_pshufb_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $2, %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -731,14 +731,14 @@ define <16 x i8> @constant_fold_pshufb_2() {
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_fold_pshufb_2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movl $2, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: constant_fold_pshufb_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movl $2, %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm0
@@ -749,19 +749,19 @@ define <16 x i8> @constant_fold_pshufb_2() {
define i32 @mask_zzz3_v16i8(<16 x i8> %a0) {
; SSSE3-LABEL: mask_zzz3_v16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[14,u,u,u,u,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mask_zzz3_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[14]
; SSE41-NEXT: pextrd $3, %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: mask_zzz3_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[14]
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: retq
@@ -774,19 +774,19 @@ define i32 @mask_zzz3_v16i8(<16 x i8> %a0) {
define i32 @mask_z1z3_v16i8(<16 x i8> %a0) {
; SSSE3-LABEL: mask_z1z3_v16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[10],zero,xmm0[14,u,u,u,u,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mask_z1z3_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,xmm0[10],zero,xmm0[14]
; SSE41-NEXT: pextrd $3, %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: mask_z1z3_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,xmm0[10],zero,xmm0[14]
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: retq
@@ -799,13 +799,13 @@ define i32 @mask_z1z3_v16i8(<16 x i8> %a0) {
define i32 @PR22415(double %a0) {
; SSE-LABEL: PR22415:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: PR22415:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-combining-xop.ll b/test/CodeGen/X86/vector-shuffle-combining-xop.ll
index 7387b957ced..83001cf5fb9 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-xop.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-xop.ll
@@ -14,12 +14,12 @@ declare <16 x i8> @llvm.x86.xop.vpperm(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
define <2 x double> @combine_vpermil2pd_identity(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: combine_vpermil2pd_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2pd_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
%res0 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a1, <2 x double> %a0, <2 x i64> <i64 2, i64 0>, i8 0)
@@ -29,12 +29,12 @@ define <2 x double> @combine_vpermil2pd_identity(<2 x double> %a0, <2 x double>
define <4 x double> @combine_vpermil2pd256_identity(<4 x double> %a0, <4 x double> %a1) {
; X32-LABEL: combine_vpermil2pd256_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2pd256_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm1, %ymm0
; X64-NEXT: retq
%res0 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a1, <4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>, i8 0)
@@ -44,12 +44,12 @@ define <4 x double> @combine_vpermil2pd256_identity(<4 x double> %a0, <4 x doubl
define <4 x double> @combine_vpermil2pd256_0z73(<4 x double> %a0, <4 x double> %a1) {
; X32-LABEL: combine_vpermil2pd256_0z73:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2pd {{.*#+}} ymm0 = ymm0[0],zero,ymm1[3],ymm0[3]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2pd256_0z73:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2pd {{.*#+}} ymm0 = ymm0[0],zero,ymm1[3],ymm0[3]
; X64-NEXT: retq
%res0 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 undef, i32 7, i32 3>
@@ -59,12 +59,12 @@ define <4 x double> @combine_vpermil2pd256_0z73(<4 x double> %a0, <4 x double> %
define <4 x float> @combine_vpermil2ps_identity(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: combine_vpermil2ps_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2ps_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
%res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a1, <4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>, i8 0)
@@ -74,14 +74,14 @@ define <4 x float> @combine_vpermil2ps_identity(<4 x float> %a0, <4 x float> %a1
define <4 x float> @combine_vpermil2ps_1z74(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: combine_vpermil2ps_1z74:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,0]
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2ps_1z74:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,0]
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
@@ -93,12 +93,12 @@ define <4 x float> @combine_vpermil2ps_1z74(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @combine_vpermil2ps_02zu(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: combine_vpermil2ps_02zu:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2ps_02zu:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; X64-NEXT: retq
%res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 undef>, i8 0)
@@ -107,12 +107,12 @@ define <4 x float> @combine_vpermil2ps_02zu(<4 x float> %a0, <4 x float> %a1) {
define <8 x float> @combine_vpermil2ps256_identity(<8 x float> %a0, <8 x float> %a1) {
; X32-LABEL: combine_vpermil2ps256_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2ps256_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %ymm1, %ymm0
; X64-NEXT: retq
%res0 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a1, <8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2>, i8 0)
@@ -122,12 +122,12 @@ define <8 x float> @combine_vpermil2ps256_identity(<8 x float> %a0, <8 x float>
define <8 x float> @combine_vpermil2ps256_08z945Az(<8 x float> %a0, <8 x float> %a1) {
; X32-LABEL: combine_vpermil2ps256_08z945Az:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm0[0],ymm1[0],zero,ymm1[1],ymm0[4,5],ymm1[6],zero
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2ps256_08z945Az:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm0[0],ymm1[0],zero,ymm1[1],ymm0[4,5],ymm1[6],zero
; X64-NEXT: retq
%res0 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 0, i32 1, i32 6, i32 7>, i8 0)
@@ -137,12 +137,12 @@ define <8 x float> @combine_vpermil2ps256_08z945Az(<8 x float> %a0, <8 x float>
define <8 x float> @combine_vpermil2ps256_zero(<8 x float> %a0, <8 x float> %a1) {
; X32-LABEL: combine_vpermil2ps256_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2ps256_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%res0 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a1, <8 x float> %a0, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11>, i8 2)
@@ -151,13 +151,13 @@ define <8 x float> @combine_vpermil2ps256_zero(<8 x float> %a0, <8 x float> %a1)
define <4 x float> @combine_vpermil2ps_blend_with_zero(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: combine_vpermil2ps_blend_with_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2ps_blend_with_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
@@ -167,12 +167,12 @@ define <4 x float> @combine_vpermil2ps_blend_with_zero(<4 x float> %a0, <4 x flo
define <2 x double> @combine_vpermil2pd_as_shufpd(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: combine_vpermil2pd_as_shufpd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2pd_as_shufpd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
; X64-NEXT: retq
%res0 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> <i64 2, i64 4>, i8 0)
@@ -181,12 +181,12 @@ define <2 x double> @combine_vpermil2pd_as_shufpd(<2 x double> %a0, <2 x double>
define <4 x double> @combine_vpermil2pd256_as_shufpd(<4 x double> %a0, <4 x double> %a1) {
; X32-LABEL: combine_vpermil2pd256_as_shufpd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermil2pd256_as_shufpd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
; X64-NEXT: retq
%res0 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x i64> <i64 0, i64 4, i64 2, i64 7>, i8 0)
@@ -195,12 +195,12 @@ define <4 x double> @combine_vpermil2pd256_as_shufpd(<4 x double> %a0, <4 x doub
define <16 x i8> @combine_vpperm_identity(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: combine_vpperm_identity:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpperm_identity:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16>)
@@ -210,12 +210,12 @@ define <16 x i8> @combine_vpperm_identity(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @combine_vpperm_zero(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: combine_vpperm_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpperm_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
@@ -226,12 +226,12 @@ define <16 x i8> @combine_vpperm_zero(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @combine_vpperm_identity_bitcast(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: combine_vpperm_identity_bitcast:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddq {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpperm_identity_bitcast:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: retq
%mask = bitcast <2 x i64> <i64 1084818905618843912, i64 506097522914230528> to <16 x i8>
@@ -245,13 +245,13 @@ define <16 x i8> @combine_vpperm_identity_bitcast(<16 x i8> %a0, <16 x i8> %a1)
define <16 x i8> @combine_vpperm_as_blend_with_zero(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: combine_vpperm_as_blend_with_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpperm_as_blend_with_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6,7]
; X64-NEXT: retq
@@ -261,12 +261,12 @@ define <16 x i8> @combine_vpperm_as_blend_with_zero(<16 x i8> %a0, <16 x i8> %a1
define <16 x i8> @combine_vpperm_as_unary_unpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: combine_vpperm_as_unary_unpckhbw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpperm_as_unary_unpckhbw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X64-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a0, <16 x i8> <i8 8, i8 undef, i8 9, i8 25, i8 10, i8 26, i8 11, i8 27, i8 12, i8 28, i8 13, i8 29, i8 14, i8 30, i8 15, i8 31>)
@@ -275,12 +275,12 @@ define <16 x i8> @combine_vpperm_as_unary_unpckhbw(<16 x i8> %a0, <16 x i8> %a1)
define <16 x i8> @combine_vpperm_as_unpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: combine_vpperm_as_unpckhbw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpperm_as_unpckhbw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; X64-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 8, i8 24, i8 9, i8 25, i8 10, i8 26, i8 11, i8 27, i8 12, i8 28, i8 13, i8 29, i8 14, i8 30, i8 15, i8 31>)
@@ -289,12 +289,12 @@ define <16 x i8> @combine_vpperm_as_unpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @combine_vpperm_as_unpcklbw(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: combine_vpperm_as_unpcklbw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpperm_as_unpcklbw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X64-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 16, i8 0, i8 17, i8 1, i8 18, i8 2, i8 19, i8 3, i8 20, i8 4, i8 21, i8 5, i8 22, i8 6, i8 23, i8 7>)
@@ -303,12 +303,12 @@ define <16 x i8> @combine_vpperm_as_unpcklbw(<16 x i8> %a0, <16 x i8> %a1) {
define <4 x i32> @combine_vpperm_10zz32BA(<4 x i32> %a0, <4 x i32> %a1) {
; X32-LABEL: combine_vpperm_10zz32BA:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpperm {{.*#+}} xmm0 = xmm0[2,3,0,1],zero,zero,zero,zero,xmm0[6,7,4,5],xmm1[6,7,4,5]
; X32-NEXT: retl
;
; X64-LABEL: combine_vpperm_10zz32BA:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpperm {{.*#+}} xmm0 = xmm0[2,3,0,1],zero,zero,zero,zero,xmm0[6,7,4,5],xmm1[6,7,4,5]
; X64-NEXT: retq
%res0 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -321,7 +321,7 @@ define <4 x i32> @combine_vpperm_10zz32BA(<4 x i32> %a0, <4 x i32> %a1) {
; FIXME: Duplicated load in i686
define void @buildvector_v4f32_0404(float %a, float %b, <4 x float>* %ptr) {
; X32-LABEL: buildvector_v4f32_0404:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -330,7 +330,7 @@ define void @buildvector_v4f32_0404(float %a, float %b, <4 x float>* %ptr) {
; X32-NEXT: retl
;
; X64-LABEL: buildvector_v4f32_0404:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[0],xmm1[0]
; X64-NEXT: vmovaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -344,7 +344,7 @@ define void @buildvector_v4f32_0404(float %a, float %b, <4 x float>* %ptr) {
define void @buildvector_v4f32_07z6(float %a, <4 x float> %b, <4 x float>* %ptr) {
; X32-LABEL: buildvector_v4f32_07z6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm1[0],xmm0[3],zero,xmm0[2]
@@ -352,7 +352,7 @@ define void @buildvector_v4f32_07z6(float %a, <4 x float> %b, <4 x float>* %ptr)
; X32-NEXT: retl
;
; X64-LABEL: buildvector_v4f32_07z6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[3],zero,xmm1[2]
; X64-NEXT: vmovaps %xmm0, (%rdi)
; X64-NEXT: retq
@@ -368,12 +368,12 @@ define void @buildvector_v4f32_07z6(float %a, <4 x float> %b, <4 x float>* %ptr)
define <2 x double> @constant_fold_vpermil2pd() {
; X32-LABEL: constant_fold_vpermil2pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpermil2pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; X64-NEXT: retq
%1 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> <double 1.0, double 2.0>, <2 x double> <double -2.0, double -1.0>, <2 x i64> <i64 4, i64 2>, i8 2)
@@ -382,12 +382,12 @@ define <2 x double> @constant_fold_vpermil2pd() {
define <4 x double> @constant_fold_vpermil2pd_256() {
; X32-LABEL: constant_fold_vpermil2pd_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [-4.000000e+00,0.000000e+00,4.000000e+00,3.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpermil2pd_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [-4.000000e+00,0.000000e+00,4.000000e+00,3.000000e+00]
; X64-NEXT: retq
%1 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>, <4 x double> <double -4.0, double -3.0, double -2.0, double -1.0>, <4 x i64> <i64 4, i64 8, i64 2, i64 0>, i8 2)
@@ -396,12 +396,12 @@ define <4 x double> @constant_fold_vpermil2pd_256() {
define <4 x float> @constant_fold_vpermil2ps() {
; X32-LABEL: constant_fold_vpermil2ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [-4.000000e+00,1.000000e+00,3.000000e+00,0.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpermil2ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [-4.000000e+00,1.000000e+00,3.000000e+00,0.000000e+00]
; X64-NEXT: retq
%1 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x float> <float -4.0, float -3.0, float -2.0, float -1.0>, <4 x i32> <i32 4, i32 0, i32 2, i32 8>, i8 2)
@@ -410,12 +410,12 @@ define <4 x float> @constant_fold_vpermil2ps() {
define <8 x float> @constant_fold_vpermil2ps_256() {
; X32-LABEL: constant_fold_vpermil2ps_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [-8.000000e+00,1.000000e+00,3.000000e+00,0.000000e+00,5.000000e+00,0.000000e+00,5.000000e+00,7.000000e+00]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpermil2ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [-8.000000e+00,1.000000e+00,3.000000e+00,0.000000e+00,5.000000e+00,0.000000e+00,5.000000e+00,7.000000e+00]
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x float> <float -8.0, float -7.0, float -6.0, float -5.0, float -4.0, float -3.0, float -2.0, float -1.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 8, i32 0, i32 8, i32 0, i32 2>, i8 2)
@@ -424,12 +424,12 @@ define <8 x float> @constant_fold_vpermil2ps_256() {
define <16 x i8> @constant_fold_vpperm() {
; X32-LABEL: constant_fold_vpperm:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_vpperm:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; X64-NEXT: retq
%1 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> <i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15>, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> <i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16>)
@@ -438,14 +438,14 @@ define <16 x i8> @constant_fold_vpperm() {
define <4 x float> @PR31296(i8* %in) {
; X32-LABEL: PR31296:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,mem[0]
; X32-NEXT: retl
;
; X64-LABEL: PR31296:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: vmovq %rax, %xmm0
; X64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,mem[0]
diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
index 6eacd7dd4ce..bfee2ddacdb 100644
--- a/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -18,7 +18,7 @@ declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8)
define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
; ALL-LABEL: combine_pshufd1:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: retq
entry:
%b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
@@ -28,7 +28,7 @@ entry:
define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
; ALL-LABEL: combine_pshufd2:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: retq
entry:
%b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
@@ -41,7 +41,7 @@ entry:
define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
; ALL-LABEL: combine_pshufd3:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: retq
entry:
%b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
@@ -54,12 +54,12 @@ entry:
define <4 x i32> @combine_pshufd4(<4 x i32> %a) {
; SSE-LABEL: combine_pshufd4:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufd4:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; AVX-NEXT: retq
entry:
@@ -73,12 +73,12 @@ entry:
define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
; SSE-LABEL: combine_pshufd5:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufd5:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; AVX-NEXT: retq
entry:
@@ -92,17 +92,17 @@ entry:
define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
; SSE-LABEL: combine_pshufd6:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_pshufd6:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_pshufd6:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
; AVX2-NEXT: retq
entry:
@@ -113,7 +113,7 @@ entry:
define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
; ALL-LABEL: combine_pshuflw1:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: retq
entry:
%b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
@@ -123,7 +123,7 @@ entry:
define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
; ALL-LABEL: combine_pshuflw2:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: retq
entry:
%b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
@@ -134,12 +134,12 @@ entry:
define <8 x i16> @combine_pshuflw3(<8 x i16> %a) {
; SSE-LABEL: combine_pshuflw3:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshuflw3:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; AVX-NEXT: retq
entry:
@@ -151,12 +151,12 @@ entry:
define <8 x i16> @combine_pshufhw1(<8 x i16> %a) {
; SSE-LABEL: combine_pshufhw1:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pshufhw1:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; AVX-NEXT: retq
entry:
@@ -168,13 +168,13 @@ entry:
define <4 x i32> @combine_bitwise_ops_test1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-NEXT: retq
@@ -186,13 +186,13 @@ define <4 x i32> @combine_bitwise_ops_test1(<4 x i32> %a, <4 x i32> %b, <4 x i32
define <4 x i32> @combine_bitwise_ops_test2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-NEXT: retq
@@ -204,13 +204,13 @@ define <4 x i32> @combine_bitwise_ops_test2(<4 x i32> %a, <4 x i32> %b, <4 x i32
define <4 x i32> @combine_bitwise_ops_test3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-NEXT: retq
@@ -222,13 +222,13 @@ define <4 x i32> @combine_bitwise_ops_test3(<4 x i32> %a, <4 x i32> %b, <4 x i32
define <4 x i32> @combine_bitwise_ops_test4(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-NEXT: retq
@@ -240,13 +240,13 @@ define <4 x i32> @combine_bitwise_ops_test4(<4 x i32> %a, <4 x i32> %b, <4 x i32
define <4 x i32> @combine_bitwise_ops_test5(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-NEXT: retq
@@ -258,13 +258,13 @@ define <4 x i32> @combine_bitwise_ops_test5(<4 x i32> %a, <4 x i32> %b, <4 x i32
define <4 x i32> @combine_bitwise_ops_test6(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-NEXT: retq
@@ -280,7 +280,7 @@ define <4 x i32> @combine_bitwise_ops_test6(<4 x i32> %a, <4 x i32> %b, <4 x i32
define <4 x i32> @combine_bitwise_ops_test1b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test1b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -288,7 +288,7 @@ define <4 x i32> @combine_bitwise_ops_test1b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test1b:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pand %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -296,19 +296,19 @@ define <4 x i32> @combine_bitwise_ops_test1b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test1b:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_bitwise_ops_test1b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_bitwise_ops_test1b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: retq
@@ -320,7 +320,7 @@ define <4 x i32> @combine_bitwise_ops_test1b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test2b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test2b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -328,7 +328,7 @@ define <4 x i32> @combine_bitwise_ops_test2b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test2b:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -336,19 +336,19 @@ define <4 x i32> @combine_bitwise_ops_test2b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test2b:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_bitwise_ops_test2b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_bitwise_ops_test2b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: retq
@@ -360,33 +360,33 @@ define <4 x i32> @combine_bitwise_ops_test2b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test3b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test3b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm0
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test3b:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm0
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test3b:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_bitwise_ops_test3b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_bitwise_ops_test3b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
@@ -399,7 +399,7 @@ define <4 x i32> @combine_bitwise_ops_test3b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test4b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
@@ -407,7 +407,7 @@ define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test4b:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pand %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
@@ -415,19 +415,19 @@ define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test4b:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_bitwise_ops_test4b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_bitwise_ops_test4b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
; AVX2-NEXT: retq
@@ -439,7 +439,7 @@ define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test5b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test5b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
@@ -447,7 +447,7 @@ define <4 x i32> @combine_bitwise_ops_test5b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test5b:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
@@ -455,19 +455,19 @@ define <4 x i32> @combine_bitwise_ops_test5b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test5b:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_bitwise_ops_test5b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_bitwise_ops_test5b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
; AVX2-NEXT: retq
@@ -479,33 +479,33 @@ define <4 x i32> @combine_bitwise_ops_test5b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test6b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test6b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm0
; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test6b:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm0
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test6b:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_bitwise_ops_test6b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_bitwise_ops_test6b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
@@ -518,13 +518,13 @@ define <4 x i32> @combine_bitwise_ops_test6b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test1c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test1c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test1c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
; AVX-NEXT: retq
@@ -536,13 +536,13 @@ define <4 x i32> @combine_bitwise_ops_test1c(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test2c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test2c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test2c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
; AVX-NEXT: retq
@@ -554,27 +554,27 @@ define <4 x i32> @combine_bitwise_ops_test2c(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test3c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm0
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test3c:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm0
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test3c:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test3c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX-NEXT: retq
@@ -586,14 +586,14 @@ define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test4c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test4c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test4c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
; AVX-NEXT: retq
@@ -605,14 +605,14 @@ define <4 x i32> @combine_bitwise_ops_test4c(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test5c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE-LABEL: combine_bitwise_ops_test5c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test5c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
; AVX-NEXT: retq
@@ -624,7 +624,7 @@ define <4 x i32> @combine_bitwise_ops_test5c(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test6c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test6c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm0
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,3]
@@ -632,7 +632,7 @@ define <4 x i32> @combine_bitwise_ops_test6c(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test6c:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm0
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,3]
@@ -640,13 +640,13 @@ define <4 x i32> @combine_bitwise_ops_test6c(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test6c:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: insertps {{.*#+}} xmm0 = zero,zero,xmm0[1,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test6c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[1,3]
; AVX-NEXT: retq
@@ -658,12 +658,12 @@ define <4 x i32> @combine_bitwise_ops_test6c(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_nested_undef_test1(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
@@ -673,12 +673,12 @@ define <4 x i32> @combine_nested_undef_test1(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test2(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
@@ -688,12 +688,12 @@ define <4 x i32> @combine_nested_undef_test2(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test3(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
@@ -703,17 +703,17 @@ define <4 x i32> @combine_nested_undef_test3(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test4(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 7, i32 1>
@@ -723,12 +723,12 @@ define <4 x i32> @combine_nested_undef_test4(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test5(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 5, i32 5, i32 2, i32 3>
@@ -738,12 +738,12 @@ define <4 x i32> @combine_nested_undef_test5(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test6(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
@@ -753,12 +753,12 @@ define <4 x i32> @combine_nested_undef_test6(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test7(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test7:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -768,12 +768,12 @@ define <4 x i32> @combine_nested_undef_test7(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test8(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -783,12 +783,12 @@ define <4 x i32> @combine_nested_undef_test8(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test9(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test9:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,2]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test9:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,2]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 3, i32 2, i32 5>
@@ -798,12 +798,12 @@ define <4 x i32> @combine_nested_undef_test9(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test10(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test10:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test10:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 5>
@@ -813,12 +813,12 @@ define <4 x i32> @combine_nested_undef_test10(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test11(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test11:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test11:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,1]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 2, i32 5, i32 4>
@@ -828,17 +828,17 @@ define <4 x i32> @combine_nested_undef_test11(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test12(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test12:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test12:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test12:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 0, i32 2, i32 4>
@@ -849,7 +849,7 @@ define <4 x i32> @combine_nested_undef_test12(<4 x i32> %A, <4 x i32> %B) {
; The following pair of shuffles is folded into vector %A.
define <4 x i32> @combine_nested_undef_test13(<4 x i32> %A, <4 x i32> %B) {
; ALL-LABEL: combine_nested_undef_test13:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 4, i32 2, i32 6>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 0, i32 2, i32 4>
@@ -859,12 +859,12 @@ define <4 x i32> @combine_nested_undef_test13(<4 x i32> %A, <4 x i32> %B) {
; The following pair of shuffles is folded into vector %B.
define <4 x i32> @combine_nested_undef_test14(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test14:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
@@ -881,35 +881,35 @@ define <4 x i32> @combine_nested_undef_test14(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test15(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: combine_nested_undef_test15:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,1]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_nested_undef_test15:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,1]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_nested_undef_test15:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test15:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test15:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss %xmm1, %xmm1
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
@@ -921,33 +921,33 @@ define <4 x i32> @combine_nested_undef_test15(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test16(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: combine_nested_undef_test16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_nested_undef_test16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_nested_undef_test16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
@@ -958,31 +958,31 @@ define <4 x i32> @combine_nested_undef_test16(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test17(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: combine_nested_undef_test17:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_nested_undef_test17:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_nested_undef_test17:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test17:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test17:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
; AVX2-NEXT: retq
@@ -993,12 +993,12 @@ define <4 x i32> @combine_nested_undef_test17(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test18(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test18:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test18:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[1,1,0,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
@@ -1008,31 +1008,31 @@ define <4 x i32> @combine_nested_undef_test18(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test19(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: combine_nested_undef_test19:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_nested_undef_test19:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_nested_undef_test19:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test19:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test19:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,0,0,0]
; AVX2-NEXT: retq
@@ -1043,33 +1043,33 @@ define <4 x i32> @combine_nested_undef_test19(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test20(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: combine_nested_undef_test20:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_nested_undef_test20:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_nested_undef_test20:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,3,0]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test20:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,3,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test20:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,3,0]
; AVX2-NEXT: retq
@@ -1080,31 +1080,31 @@ define <4 x i32> @combine_nested_undef_test20(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test21(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: combine_nested_undef_test21:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,0,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_nested_undef_test21:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,0,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_nested_undef_test21:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test21:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test21:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: retq
@@ -1119,12 +1119,12 @@ define <4 x i32> @combine_nested_undef_test21(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test22(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test22:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test22:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[1,1,1,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
@@ -1134,12 +1134,12 @@ define <4 x i32> @combine_nested_undef_test22(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test23(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test23:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test23:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[0,1,0,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
@@ -1149,12 +1149,12 @@ define <4 x i32> @combine_nested_undef_test23(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test24(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test24:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test24:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[0,3,2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
@@ -1164,17 +1164,17 @@ define <4 x i32> @combine_nested_undef_test24(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test25(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test25:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test25:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test25:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 5, i32 2, i32 4>
@@ -1184,12 +1184,12 @@ define <4 x i32> @combine_nested_undef_test25(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test26(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test26:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test26:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 2, i32 6, i32 7>
@@ -1199,17 +1199,17 @@ define <4 x i32> @combine_nested_undef_test26(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test27(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test27:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test27:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test27:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 2, i32 1, i32 5, i32 4>
@@ -1219,12 +1219,12 @@ define <4 x i32> @combine_nested_undef_test27(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test28(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: combine_nested_undef_test28:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_nested_undef_test28:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
@@ -1234,12 +1234,12 @@ define <4 x i32> @combine_nested_undef_test28(<4 x i32> %A, <4 x i32> %B) {
define <4 x float> @combine_test1(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1249,24 +1249,24 @@ define <4 x float> @combine_test1(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test2(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test2:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test2:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -1276,12 +1276,12 @@ define <4 x float> @combine_test2(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test3(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -1291,12 +1291,12 @@ define <4 x float> @combine_test3(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test4(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -1306,24 +1306,24 @@ define <4 x float> @combine_test4(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test5(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test5:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test5:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test5:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1333,12 +1333,12 @@ define <4 x float> @combine_test5(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @combine_test6(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1348,29 +1348,29 @@ define <4 x i32> @combine_test6(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test7(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_test7:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_test7:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -1380,12 +1380,12 @@ define <4 x i32> @combine_test7(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test8(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_test8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -1395,13 +1395,13 @@ define <4 x i32> @combine_test8(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test9(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_test9:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test9:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -1411,29 +1411,29 @@ define <4 x i32> @combine_test9(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test10(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test10:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test10:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test10:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_test10:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_test10:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1443,7 +1443,7 @@ define <4 x i32> @combine_test10(<4 x i32> %a, <4 x i32> %b) {
define <4 x float> @combine_test11(<4 x float> %a, <4 x float> %b) {
; ALL-LABEL: combine_test11:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
%2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1452,24 +1452,24 @@ define <4 x float> @combine_test11(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test12(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test12:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test12:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test12:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
@@ -1479,12 +1479,12 @@ define <4 x float> @combine_test12(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test13(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_test13:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test13:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1494,12 +1494,12 @@ define <4 x float> @combine_test13(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test14(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_test14:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
@@ -1509,24 +1509,24 @@ define <4 x float> @combine_test14(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test15(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test15:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test15:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test15:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test15:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
@@ -1536,7 +1536,7 @@ define <4 x float> @combine_test15(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @combine_test16(<4 x i32> %a, <4 x i32> %b) {
; ALL-LABEL: combine_test16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
%2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1545,29 +1545,29 @@ define <4 x i32> @combine_test16(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test17(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test17:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test17:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test17:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_test17:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_test17:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
@@ -1577,12 +1577,12 @@ define <4 x i32> @combine_test17(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test18(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_test18:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test18:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1592,12 +1592,12 @@ define <4 x i32> @combine_test18(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test19(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_test19:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test19:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
@@ -1607,29 +1607,29 @@ define <4 x i32> @combine_test19(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test20(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test20:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test20:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test20:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_test20:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_test20:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
@@ -1639,7 +1639,7 @@ define <4 x i32> @combine_test20(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
; SSE-LABEL: combine_test21:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -1647,7 +1647,7 @@ define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test21:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0]
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -1662,13 +1662,13 @@ define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
; SSE-LABEL: combine_test22:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test22:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: retq
@@ -1682,12 +1682,12 @@ define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
; PR22359
define void @combine_test23(<8 x float> %v, <2 x float>* %ptr) {
; SSE-LABEL: combine_test23:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movups %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test23:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovups %xmm0, (%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -1704,13 +1704,13 @@ define void @combine_test23(<8 x float> %v, <2 x float>* %ptr) {
define <4 x float> @combine_test1b(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_test1b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test1b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[0,1,2,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1720,23 +1720,23 @@ define <4 x float> @combine_test1b(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test2b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0,0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test2b:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test2b:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test2b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1746,25 +1746,25 @@ define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test3b(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test3b:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test3b:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test3b:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test3b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,3,2,3]
; AVX-NEXT: retq
@@ -1775,13 +1775,13 @@ define <4 x float> @combine_test3b(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_test4b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test4b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[1,1,2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1794,7 +1794,7 @@ define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test1c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1803,7 +1803,7 @@ define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test1c:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1812,21 +1812,21 @@ define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test1c:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_test1c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_test1c:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
@@ -1840,7 +1840,7 @@ define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test2c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1851,7 +1851,7 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test2c:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1862,14 +1862,14 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test2c:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test2c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -1883,7 +1883,7 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test3c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -1894,7 +1894,7 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test3c:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -1905,14 +1905,14 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test3c:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test3c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm0[1]
@@ -1926,7 +1926,7 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test4c:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1938,7 +1938,7 @@ define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test4c:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1950,21 +1950,21 @@ define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test4c:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_test4c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_test4c:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
@@ -2008,22 +2008,22 @@ define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_01:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_blend_01:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_blend_01:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_blend_01:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 undef, i32 2, i32 3>
@@ -2033,26 +2033,26 @@ define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_02:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_blend_02:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_blend_02:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_blend_02:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 undef, i32 3>
@@ -2062,24 +2062,24 @@ define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_blend_123(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_123:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_blend_123:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_blend_123:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_blend_123:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 undef, i32 undef>
@@ -2090,13 +2090,13 @@ define <4 x float> @combine_blend_123(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @combine_test_movhl_1(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_test_movhl_1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test_movhl_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 7, i32 5, i32 3>
@@ -2106,13 +2106,13 @@ define <4 x i32> @combine_test_movhl_1(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test_movhl_2(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_test_movhl_2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test_movhl_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 0, i32 3, i32 6>
@@ -2122,13 +2122,13 @@ define <4 x i32> @combine_test_movhl_2(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test_movhl_3(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: combine_test_movhl_3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_test_movhl_3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 7, i32 6, i32 3, i32 2>
@@ -2142,22 +2142,22 @@ define <4 x i32> @combine_test_movhl_3(<4 x i32> %a, <4 x i32> %b) {
define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
@@ -2167,12 +2167,12 @@ define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test2(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_undef_input_test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
@@ -2182,12 +2182,12 @@ define <4 x float> @combine_undef_input_test2(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test3(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_undef_input_test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -2197,12 +2197,12 @@ define <4 x float> @combine_undef_input_test3(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test4(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_undef_input_test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -2212,24 +2212,24 @@ define <4 x float> @combine_undef_input_test4(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test5(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test5:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test5:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test5:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
@@ -2243,7 +2243,7 @@ define <4 x float> @combine_undef_input_test5(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test6(<4 x float> %a) {
; ALL-LABEL: combine_undef_input_test6:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
%2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 5, i32 1, i32 2>
@@ -2252,22 +2252,22 @@ define <4 x float> @combine_undef_input_test6(<4 x float> %a) {
define <4 x float> @combine_undef_input_test7(<4 x float> %a) {
; SSE2-LABEL: combine_undef_input_test7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test7:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test7:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
@@ -2277,22 +2277,22 @@ define <4 x float> @combine_undef_input_test7(<4 x float> %a) {
define <4 x float> @combine_undef_input_test8(<4 x float> %a) {
; SSE2-LABEL: combine_undef_input_test8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -2302,12 +2302,12 @@ define <4 x float> @combine_undef_input_test8(<4 x float> %a) {
define <4 x float> @combine_undef_input_test9(<4 x float> %a) {
; SSE-LABEL: combine_undef_input_test9:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test9:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -2317,7 +2317,7 @@ define <4 x float> @combine_undef_input_test9(<4 x float> %a) {
define <4 x float> @combine_undef_input_test10(<4 x float> %a) {
; ALL-LABEL: combine_undef_input_test10:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
%2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 6, i32 7>
@@ -2326,22 +2326,22 @@ define <4 x float> @combine_undef_input_test10(<4 x float> %a) {
define <4 x float> @combine_undef_input_test11(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test11:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test11:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test11:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test11:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
@@ -2351,12 +2351,12 @@ define <4 x float> @combine_undef_input_test11(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test12(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_undef_input_test12:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
@@ -2366,12 +2366,12 @@ define <4 x float> @combine_undef_input_test12(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test13(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_undef_input_test13:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test13:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -2381,12 +2381,12 @@ define <4 x float> @combine_undef_input_test13(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test14(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_undef_input_test14:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -2396,24 +2396,24 @@ define <4 x float> @combine_undef_input_test14(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test15(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test15:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test15:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test15:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test15:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
@@ -2433,7 +2433,7 @@ define <4 x float> @combine_undef_input_test15(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test16(<4 x float> %a) {
; ALL-LABEL: combine_undef_input_test16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
%2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
@@ -2442,22 +2442,22 @@ define <4 x float> @combine_undef_input_test16(<4 x float> %a) {
define <4 x float> @combine_undef_input_test17(<4 x float> %a) {
; SSE2-LABEL: combine_undef_input_test17:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test17:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test17:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test17:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
@@ -2467,22 +2467,22 @@ define <4 x float> @combine_undef_input_test17(<4 x float> %a) {
define <4 x float> @combine_undef_input_test18(<4 x float> %a) {
; SSE2-LABEL: combine_undef_input_test18:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test18:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test18:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test18:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -2492,12 +2492,12 @@ define <4 x float> @combine_undef_input_test18(<4 x float> %a) {
define <4 x float> @combine_undef_input_test19(<4 x float> %a) {
; SSE-LABEL: combine_undef_input_test19:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test19:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -2507,7 +2507,7 @@ define <4 x float> @combine_undef_input_test19(<4 x float> %a) {
define <4 x float> @combine_undef_input_test20(<4 x float> %a) {
; ALL-LABEL: combine_undef_input_test20:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
%2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
@@ -2521,14 +2521,14 @@ define <4 x float> @combine_undef_input_test20(<4 x float> %a) {
define <8 x i32> @combine_unneeded_subvector1(<8 x i32> %a) {
; SSE-LABEL: combine_unneeded_subvector1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,2,1,0]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_unneeded_subvector1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2537,7 +2537,7 @@ define <8 x i32> @combine_unneeded_subvector1(<8 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_unneeded_subvector1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
@@ -2549,14 +2549,14 @@ define <8 x i32> @combine_unneeded_subvector1(<8 x i32> %a) {
define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: combine_unneeded_subvector2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[3,2,1,0]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,2,1,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_unneeded_subvector2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2565,7 +2565,7 @@ define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_unneeded_subvector2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
@@ -2577,26 +2577,26 @@ define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_insertps1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_insertps1:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_insertps1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_insertps1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3]
; AVX-NEXT: retq
@@ -2607,26 +2607,26 @@ define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_insertps2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_insertps2:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_insertps2:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_insertps2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3]
; AVX-NEXT: retq
@@ -2637,24 +2637,24 @@ define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_insertps3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_insertps3:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_insertps3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_insertps3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; AVX-NEXT: retq
@@ -2665,24 +2665,24 @@ define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_insertps4:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_insertps4:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_insertps4:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_insertps4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; AVX-NEXT: retq
@@ -2693,13 +2693,13 @@ define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
define void @combine_scalar_load_with_blend_with_zero(double* %a0, <4 x float>* %a1) {
; SSE-LABEL: combine_scalar_load_with_blend_with_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movaps %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: combine_scalar_load_with_blend_with_zero:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovaps %xmm0, (%rsi)
; AVX-NEXT: retq
@@ -2715,26 +2715,26 @@ define void @combine_scalar_load_with_blend_with_zero(double* %a0, <4 x float>*
; PR30371
define <4 x float> @combine_constant_insertion_v4f32(float %f) {
; SSE2-LABEL: combine_constant_insertion_v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm1 = <u,4,5,3>
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_constant_insertion_v4f32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movaps {{.*#+}} xmm1 = <u,4,5,3>
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_constant_insertion_v4f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_constant_insertion_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
; AVX-NEXT: retq
%a0 = insertelement <4 x float> undef, float %f, i32 0
@@ -2744,33 +2744,33 @@ define <4 x float> @combine_constant_insertion_v4f32(float %f) {
define <4 x i32> @combine_constant_insertion_v4i32(i32 %f) {
; SSE2-LABEL: combine_constant_insertion_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm1
; SSE2-NEXT: movaps {{.*#+}} xmm0 = <u,4,5,30>
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_constant_insertion_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd %edi, %xmm1
; SSSE3-NEXT: movaps {{.*#+}} xmm0 = <u,4,5,30>
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_constant_insertion_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_constant_insertion_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_constant_insertion_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
; AVX2-NEXT: retq
@@ -2781,7 +2781,7 @@ define <4 x i32> @combine_constant_insertion_v4i32(i32 %f) {
define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: PR22377:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
@@ -2790,7 +2790,7 @@ define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
; SSE-NEXT: retq
;
; AVX-LABEL: PR22377:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm1
@@ -2806,7 +2806,7 @@ entry:
define <4 x float> @PR22390(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: PR22390:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -2815,7 +2815,7 @@ define <4 x float> @PR22390(<4 x float> %a, <4 x float> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR22390:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
; SSSE3-NEXT: movaps %xmm0, %xmm2
; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -2824,14 +2824,14 @@ define <4 x float> @PR22390(<4 x float> %a, <4 x float> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR22390:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: PR22390:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,0,1,2]
; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
@@ -2845,7 +2845,7 @@ entry:
define <8 x float> @PR22412(<8 x float> %a, <8 x float> %b) {
; SSE2-LABEL: PR22412:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE2-NEXT: movapd %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[3,2]
@@ -2854,7 +2854,7 @@ define <8 x float> @PR22412(<8 x float> %a, <8 x float> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR22412:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSSE3-NEXT: movapd %xmm2, %xmm0
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[3,2]
@@ -2863,7 +2863,7 @@ define <8 x float> @PR22412(<8 x float> %a, <8 x float> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR22412:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
; SSE41-NEXT: movapd %xmm0, %xmm1
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm3[3,2]
@@ -2873,14 +2873,14 @@ define <8 x float> @PR22412(<8 x float> %a, <8 x float> %b) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: PR22412:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[3,2],ymm0[5,4],ymm1[7,6]
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR22412:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
@@ -2893,7 +2893,7 @@ entry:
define <4 x float> @PR30264(<4 x float> %x) {
; SSE2-LABEL: PR30264:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],mem[2,3]
@@ -2901,7 +2901,7 @@ define <4 x float> @PR30264(<4 x float> %x) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR30264:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],mem[2,3]
@@ -2909,14 +2909,14 @@ define <4 x float> @PR30264(<4 x float> %x) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR30264:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm1 = <u,u,4,1>
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm0[0],zero,xmm1[2,3]
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: PR30264:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = <u,u,4,1>
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2,3]
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-masked.ll b/test/CodeGen/X86/vector-shuffle-masked.ll
index 839ea71da9e..ee8ab50b588 100644
--- a/test/CodeGen/X86/vector-shuffle-masked.ll
+++ b/test/CodeGen/X86/vector-shuffle-masked.ll
@@ -3,7 +3,7 @@
define <4 x i32> @mask_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v4i32_1234:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} xmm2 {%k1} = xmm0[1,2,3],xmm1[0]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
@@ -17,7 +17,7 @@ define <4 x i32> @mask_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, <4 x i32>
define <4 x i32> @maskz_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v4i32_1234:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3],xmm1[0]
; CHECK-NEXT: retq
@@ -30,7 +30,7 @@ define <4 x i32> @maskz_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, i8 %mask)
define <4 x i32> @mask_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v4i32_2345:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} xmm2 {%k1} = xmm0[2,3],xmm1[0,1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
@@ -44,7 +44,7 @@ define <4 x i32> @mask_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, <4 x i32>
define <4 x i32> @maskz_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v4i32_2345:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} xmm0 {%k1} {z} = xmm0[2,3],xmm1[0,1]
; CHECK-NEXT: retq
@@ -57,7 +57,7 @@ define <4 x i32> @maskz_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, i8 %mask)
define <2 x i64> @mask_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v2i64_12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[0]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
@@ -71,7 +71,7 @@ define <2 x i64> @mask_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, <2 x i64> %p
define <2 x i64> @maskz_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v2i64_12:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[0]
; CHECK-NEXT: retq
@@ -84,7 +84,7 @@ define <2 x i64> @maskz_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
define <4 x i64> @mask_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v4i64_1234:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} ymm2 {%k1} = ymm0[1,2,3],ymm1[0]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0
@@ -98,7 +98,7 @@ define <4 x i64> @mask_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, <4 x i64>
define <4 x i64> @maskz_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v4i64_1234:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3],ymm1[0]
; CHECK-NEXT: retq
@@ -111,7 +111,7 @@ define <4 x i64> @maskz_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, i8 %mask)
define <4 x i64> @mask_shuffle_v4i64_1230(<4 x i64> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v4i64_1230:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[1,2,3,0]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -125,7 +125,7 @@ define <4 x i64> @mask_shuffle_v4i64_1230(<4 x i64> %a, <4 x i64> %passthru, i8
define <4 x i64> @maskz_shuffle_v4i64_1230(<4 x i64> %a, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v4i64_1230:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,0]
; CHECK-NEXT: retq
@@ -138,7 +138,7 @@ define <4 x i64> @maskz_shuffle_v4i64_1230(<4 x i64> %a, i8 %mask) {
define <8 x i32> @mask_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v8i32_12345678:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm2 {%k1} = ymm0[1,2,3,4,5,6,7],ymm1[0]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0
@@ -151,7 +151,7 @@ define <8 x i32> @mask_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, <8 x i
define <8 x i32> @maskz_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v8i32_12345678:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,4,5,6,7],ymm1[0]
; CHECK-NEXT: retq
@@ -163,7 +163,7 @@ define <8 x i32> @maskz_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, i8 %m
define <8 x i32> @mask_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v8i32_23456789:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm2 {%k1} = ymm0[2,3,4,5,6,7],ymm1[0,1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0
@@ -176,7 +176,7 @@ define <8 x i32> @mask_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, <8 x i
define <8 x i32> @maskz_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v8i32_23456789:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,4,5,6,7],ymm1[0,1]
; CHECK-NEXT: retq
@@ -188,7 +188,7 @@ define <8 x i32> @maskz_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, i8 %m
define <8 x i32> @mask_shuffle_v8i32_12345670(<8 x i32> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v8i32_12345670:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm1 {%k1} = ymm0[1,2,3,4,5,6,7,0]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -201,7 +201,7 @@ define <8 x i32> @mask_shuffle_v8i32_12345670(<8 x i32> %a, <8 x i32> %passthru,
define <8 x i32> @maskz_shuffle_v8i32_12345670(<8 x i32> %a, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v8i32_12345670:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,4,5,6,7,0]
; CHECK-NEXT: retq
@@ -213,7 +213,7 @@ define <8 x i32> @maskz_shuffle_v8i32_12345670(<8 x i32> %a, i8 %mask) {
define <8 x i32> @mask_shuffle_v8i32_23456701(<8 x i32> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v8i32_23456701:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
@@ -226,7 +226,7 @@ define <8 x i32> @mask_shuffle_v8i32_23456701(<8 x i32> %a, <8 x i32> %passthru,
define <8 x i32> @maskz_shuffle_v8i32_23456701(<8 x i32> %a, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v8i32_23456701:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -239,7 +239,7 @@ define <8 x i32> @maskz_shuffle_v8i32_23456701(<8 x i32> %a, i8 %mask) {
define <4 x i32> @mask_extract_v8i32_v4i32_0(<8 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i32_v4i32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -253,7 +253,7 @@ define <4 x i32> @mask_extract_v8i32_v4i32_0(<8 x i32> %a, <4 x i32> %passthru,
define <4 x i32> @mask_extract_v8i32_v4i32_0_z(<8 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i32_v4i32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -267,7 +267,7 @@ define <4 x i32> @mask_extract_v8i32_v4i32_0_z(<8 x i32> %a, i8 %mask) {
define <4 x i32> @mask_extract_v8i32_v4i32_1(<8 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i32_v4i32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x4 $1, %ymm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -282,7 +282,7 @@ define <4 x i32> @mask_extract_v8i32_v4i32_1(<8 x i32> %a, <4 x i32> %passthru,
define <4 x i32> @mask_extract_v8i32_v4i32_1_z(<8 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i32_v4i32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x4 $1, %ymm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -296,7 +296,7 @@ define <4 x i32> @mask_extract_v8i32_v4i32_1_z(<8 x i32> %a, i8 %mask) {
define <4 x float> @mask_extract_v8f32_v4f32_0(<8 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f32_v4f32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -310,7 +310,7 @@ define <4 x float> @mask_extract_v8f32_v4f32_0(<8 x float> %a, <4 x float> %pass
define <4 x float> @mask_extract_v8f32_v4f32_0_z(<8 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f32_v4f32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -324,7 +324,7 @@ define <4 x float> @mask_extract_v8f32_v4f32_0_z(<8 x float> %a, i8 %mask) {
define <4 x float> @mask_extract_v8f32_v4f32_1(<8 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f32_v4f32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x4 $1, %ymm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -339,7 +339,7 @@ define <4 x float> @mask_extract_v8f32_v4f32_1(<8 x float> %a, <4 x float> %pass
define <4 x float> @mask_extract_v8f32_v4f32_1_z(<8 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f32_v4f32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x4 $1, %ymm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -353,7 +353,7 @@ define <4 x float> @mask_extract_v8f32_v4f32_1_z(<8 x float> %a, i8 %mask) {
define <2 x i64> @mask_extract_v4i64_v2i64_0(<4 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v4i64_v2i64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -367,7 +367,7 @@ define <2 x i64> @mask_extract_v4i64_v2i64_0(<4 x i64> %a, <2 x i64> %passthru,
define <2 x i64> @mask_extract_v4i64_v2i64_0_z(<4 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v4i64_v2i64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -381,7 +381,7 @@ define <2 x i64> @mask_extract_v4i64_v2i64_0_z(<4 x i64> %a, i8 %mask) {
define <2 x i64> @mask_extract_v4i64_v2i64_1(<4 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v4i64_v2i64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x2 $1, %ymm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -396,7 +396,7 @@ define <2 x i64> @mask_extract_v4i64_v2i64_1(<4 x i64> %a, <2 x i64> %passthru,
define <2 x i64> @mask_extract_v4i64_v2i64_1_z(<4 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v4i64_v2i64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x2 $1, %ymm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -410,7 +410,7 @@ define <2 x i64> @mask_extract_v4i64_v2i64_1_z(<4 x i64> %a, i8 %mask) {
define <2 x double> @mask_extract_v4f64_v2f64_0(<4 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v4f64_v2f64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -424,7 +424,7 @@ define <2 x double> @mask_extract_v4f64_v2f64_0(<4 x double> %a, <2 x double> %p
define <2 x double> @mask_extract_v4f64_v2f64_0_z(<4 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v4f64_v2f64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -438,7 +438,7 @@ define <2 x double> @mask_extract_v4f64_v2f64_0_z(<4 x double> %a, i8 %mask) {
define <2 x double> @mask_extract_v4f64_v2f64_1(<4 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v4f64_v2f64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -453,7 +453,7 @@ define <2 x double> @mask_extract_v4f64_v2f64_1(<4 x double> %a, <2 x double> %p
define <2 x double> @mask_extract_v4f64_v2f64_1_z(<4 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v4f64_v2f64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -467,7 +467,7 @@ define <2 x double> @mask_extract_v4f64_v2f64_1_z(<4 x double> %a, i8 %mask) {
define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -481,7 +481,7 @@ define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru
define <4 x i32> @mask_extract_v16i32_v4i32_0_z(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -495,7 +495,7 @@ define <4 x i32> @mask_extract_v16i32_v4i32_0_z(<16 x i32> %a, i8 %mask) {
define <4 x i32> @mask_extract_v16i32_v4i32_1(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -510,7 +510,7 @@ define <4 x i32> @mask_extract_v16i32_v4i32_1(<16 x i32> %a, <4 x i32> %passthru
define <4 x i32> @mask_extract_v16i32_v4i32_1_z(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -524,7 +524,7 @@ define <4 x i32> @mask_extract_v16i32_v4i32_1_z(<16 x i32> %a, i8 %mask) {
define <4 x i32> @mask_extract_v16i32_v4i32_2(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -539,7 +539,7 @@ define <4 x i32> @mask_extract_v16i32_v4i32_2(<16 x i32> %a, <4 x i32> %passthru
define <4 x i32> @mask_extract_v16i32_v4i32_3(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x4 $3, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -554,7 +554,7 @@ define <4 x i32> @mask_extract_v16i32_v4i32_3(<16 x i32> %a, <4 x i32> %passthru
define <4 x float> @mask_extract_v16f32_v4f32_0(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -568,7 +568,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_0(<16 x float> %a, <4 x float> %pa
define <4 x float> @mask_extract_v16f32_v4f32_0_z(<16 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -582,7 +582,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_0_z(<16 x float> %a, i8 %mask) {
define <4 x float> @mask_extract_v16f32_v4f32_1(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -597,7 +597,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_1(<16 x float> %a, <4 x float> %pa
define <4 x float> @mask_extract_v16f32_v4f32_1_z(<16 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -611,7 +611,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_1_z(<16 x float> %a, i8 %mask) {
define <4 x float> @mask_extract_v16f32_v4f32_2(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -626,7 +626,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_2(<16 x float> %a, <4 x float> %pa
define <4 x float> @mask_extract_v16f32_v4f32_3(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -641,7 +641,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_3(<16 x float> %a, <4 x float> %pa
define <8 x i32> @mask_extract_v16i32_v8i32_0(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v8i32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -653,7 +653,7 @@ define <8 x i32> @mask_extract_v16i32_v8i32_0(<16 x i32> %a, <8 x i32> %passthru
define <8 x i32> @mask_extract_v16i32_v8i32_0_z(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v8i32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -665,7 +665,7 @@ define <8 x i32> @mask_extract_v16i32_v8i32_0_z(<16 x i32> %a, i8 %mask) {
define <8 x i32> @mask_extract_v16i32_v8i32_1(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v8i32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -678,7 +678,7 @@ define <8 x i32> @mask_extract_v16i32_v8i32_1(<16 x i32> %a, <8 x i32> %passthru
define <8 x i32> @mask_extract_v16i32_v8i32_1_z(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v8i32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -690,7 +690,7 @@ define <8 x i32> @mask_extract_v16i32_v8i32_1_z(<16 x i32> %a, i8 %mask) {
define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v8f32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -702,7 +702,7 @@ define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %pa
define <8 x float> @mask_extract_v16f32_v8f32_0_z(<16 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v8f32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -714,7 +714,7 @@ define <8 x float> @mask_extract_v16f32_v8f32_0_z(<16 x float> %a, i8 %mask) {
define <8 x float> @mask_extract_v16f32_v8f32_1(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v8f32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
@@ -727,7 +727,7 @@ define <8 x float> @mask_extract_v16f32_v8f32_1(<16 x float> %a, <8 x float> %pa
define <8 x float> @mask_extract_v16f32_v8f32_1_z(<16 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v8f32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -739,7 +739,7 @@ define <8 x float> @mask_extract_v16f32_v8f32_1_z(<16 x float> %a, i8 %mask) {
define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -753,7 +753,7 @@ define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru,
define <2 x i64> @mask_extract_v8i64_v2i64_0_z(<8 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -767,7 +767,7 @@ define <2 x i64> @mask_extract_v8i64_v2i64_0_z(<8 x i64> %a, i8 %mask) {
define <2 x i64> @mask_extract_v8i64_v2i64_1(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -782,7 +782,7 @@ define <2 x i64> @mask_extract_v8i64_v2i64_1(<8 x i64> %a, <2 x i64> %passthru,
define <2 x i64> @mask_extract_v8i64_v2i64_1_z(<8 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -796,7 +796,7 @@ define <2 x i64> @mask_extract_v8i64_v2i64_1_z(<8 x i64> %a, i8 %mask) {
define <2 x i64> @mask_extract_v8i64_v2i64_2(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x2 $2, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -811,7 +811,7 @@ define <2 x i64> @mask_extract_v8i64_v2i64_2(<8 x i64> %a, <2 x i64> %passthru,
define <2 x i64> @mask_extract_v8i64_v2i64_3(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x2 $3, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -826,7 +826,7 @@ define <2 x i64> @mask_extract_v8i64_v2i64_3(<8 x i64> %a, <2 x i64> %passthru,
define <2 x double> @mask_extract_v8f64_v2f64_0(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -840,7 +840,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_0(<8 x double> %a, <2 x double> %p
define <2 x double> @mask_extract_v8f64_v2f64_0_z(<8 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -854,7 +854,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_0_z(<8 x double> %a, i8 %mask) {
define <2 x double> @mask_extract_v8f64_v2f64_1(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -869,7 +869,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_1(<8 x double> %a, <2 x double> %p
define <2 x double> @mask_extract_v8f64_v2f64_1_z(<8 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -883,7 +883,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_1_z(<8 x double> %a, i8 %mask) {
define <2 x double> @mask_extract_v8f64_v2f64_2(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x2 $2, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -898,7 +898,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_2(<8 x double> %a, <2 x double> %p
define <2 x double> @mask_extract_v8f64_v2f64_3(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x2 $3, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -913,7 +913,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_3(<8 x double> %a, <2 x double> %p
define <4 x i64> @mask_extract_v8i64_v4i64_0(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v4i64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -926,7 +926,7 @@ define <4 x i64> @mask_extract_v8i64_v4i64_0(<8 x i64> %a, <4 x i64> %passthru,
define <4 x i64> @mask_extract_v8i64_v4i64_0_z(<8 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v4i64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -939,7 +939,7 @@ define <4 x i64> @mask_extract_v8i64_v4i64_0_z(<8 x i64> %a, i8 %mask) {
define <4 x i64> @mask_extract_v8i64_v4i64_1(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v4i64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -953,7 +953,7 @@ define <4 x i64> @mask_extract_v8i64_v4i64_1(<8 x i64> %a, <4 x i64> %passthru,
define <4 x i64> @mask_extract_v8i64_v4i64_1_z(<8 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v4i64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -966,7 +966,7 @@ define <4 x i64> @mask_extract_v8i64_v4i64_1_z(<8 x i64> %a, i8 %mask) {
define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v4f64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -979,7 +979,7 @@ define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %p
define <4 x double> @mask_extract_v8f64_v4f64_0_z(<8 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v4f64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -992,7 +992,7 @@ define <4 x double> @mask_extract_v8f64_v4f64_0_z(<8 x double> %a, i8 %mask) {
define <4 x double> @mask_extract_v8f64_v4f64_1(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v4f64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
@@ -1006,7 +1006,7 @@ define <4 x double> @mask_extract_v8f64_v4f64_1(<8 x double> %a, <4 x double> %p
define <4 x double> @mask_extract_v8f64_v4f64_1_z(<8 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v4f64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1019,7 +1019,7 @@ define <4 x double> @mask_extract_v8f64_v4f64_1_z(<8 x double> %a, i8 %mask) {
define <8 x i32> @mask_cast_extract_v8i64_v8i32_0(<8 x i64> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -1032,7 +1032,7 @@ define <8 x i32> @mask_cast_extract_v8i64_v8i32_0(<8 x i64> %a, <8 x i32> %passt
define <8 x i32> @mask_cast_extract_v8i64_v8i32_0_z(<8 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1045,7 +1045,7 @@ define <8 x i32> @mask_cast_extract_v8i64_v8i32_0_z(<8 x i64> %a, i8 %mask) {
define <8 x i32> @mask_cast_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -1059,7 +1059,7 @@ define <8 x i32> @mask_cast_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passt
define <8 x i32> @mask_cast_extract_v8i64_v8i32_1_z(<8 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1072,7 +1072,7 @@ define <8 x i32> @mask_cast_extract_v8i64_v8i32_1_z(<8 x i64> %a, i8 %mask) {
define <8 x float> @mask_cast_extract_v8f64_v8f32_0(<8 x double> %a, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -1085,7 +1085,7 @@ define <8 x float> @mask_cast_extract_v8f64_v8f32_0(<8 x double> %a, <8 x float>
define <8 x float> @mask_cast_extract_v8f64_v8f32_0_z(<8 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1098,7 +1098,7 @@ define <8 x float> @mask_cast_extract_v8f64_v8f32_0_z(<8 x double> %a, i8 %mask)
define <8 x float> @mask_cast_extract_v8f64_v8f32_1(<8 x double> %a, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
@@ -1112,7 +1112,7 @@ define <8 x float> @mask_cast_extract_v8f64_v8f32_1(<8 x double> %a, <8 x float>
define <8 x float> @mask_cast_extract_v8f64_v8f32_1_z(<8 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1125,7 +1125,7 @@ define <8 x float> @mask_cast_extract_v8f64_v8f32_1_z(<8 x double> %a, i8 %mask)
define <4 x i32> @mask_cast_extract_v8i64_v4i32_0(<8 x i64> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -1140,7 +1140,7 @@ define <4 x i32> @mask_cast_extract_v8i64_v4i32_0(<8 x i64> %a, <4 x i32> %passt
define <4 x i32> @mask_cast_extract_v8i64_v4i32_0_z(<8 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -1155,7 +1155,7 @@ define <4 x i32> @mask_cast_extract_v8i64_v4i32_0_z(<8 x i64> %a, i8 %mask) {
define <4 x i32> @mask_cast_extract_v8i64_v4i32_1(<8 x i64> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -1171,7 +1171,7 @@ define <4 x i32> @mask_cast_extract_v8i64_v4i32_1(<8 x i64> %a, <4 x i32> %passt
define <4 x i32> @mask_cast_extract_v8i64_v4i32_1_z(<8 x i64> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -1186,7 +1186,7 @@ define <4 x i32> @mask_cast_extract_v8i64_v4i32_1_z(<8 x i64> %a, i8 %mask) {
define <4 x float> @mask_cast_extract_v8f64_v4f32_0(<8 x double> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -1201,7 +1201,7 @@ define <4 x float> @mask_cast_extract_v8f64_v4f32_0(<8 x double> %a, <4 x float>
define <4 x float> @mask_cast_extract_v8f64_v4f32_0_z(<8 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -1216,7 +1216,7 @@ define <4 x float> @mask_cast_extract_v8f64_v4f32_0_z(<8 x double> %a, i8 %mask)
define <4 x float> @mask_cast_extract_v8f64_v4f32_1(<8 x double> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -1232,7 +1232,7 @@ define <4 x float> @mask_cast_extract_v8f64_v4f32_1(<8 x double> %a, <4 x float>
define <4 x float> @mask_cast_extract_v8f64_v4f32_1_z(<8 x double> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -1247,7 +1247,7 @@ define <4 x float> @mask_cast_extract_v8f64_v4f32_1_z(<8 x double> %a, i8 %mask)
define <4 x i64> @mask_cast_extract_v16i32_v4i64_0(<16 x i32> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -1261,7 +1261,7 @@ define <4 x i64> @mask_cast_extract_v16i32_v4i64_0(<16 x i32> %a, <4 x i64> %pas
define <4 x i64> @mask_cast_extract_v16i32_v4i64_0_z(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1275,7 +1275,7 @@ define <4 x i64> @mask_cast_extract_v16i32_v4i64_0_z(<16 x i32> %a, i8 %mask) {
define <4 x i64> @mask_cast_extract_v16i32_v4i64_1(<16 x i32> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -1290,7 +1290,7 @@ define <4 x i64> @mask_cast_extract_v16i32_v4i64_1(<16 x i32> %a, <4 x i64> %pas
define <4 x i64> @mask_cast_extract_v16i32_v4i64_1_z(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1304,7 +1304,7 @@ define <4 x i64> @mask_cast_extract_v16i32_v4i64_1_z(<16 x i32> %a, i8 %mask) {
define <4 x double> @mask_cast_extract_v16f32_v4f64_0(<16 x float> %a, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
@@ -1318,7 +1318,7 @@ define <4 x double> @mask_cast_extract_v16f32_v4f64_0(<16 x float> %a, <4 x doub
define <4 x double> @mask_cast_extract_v16f32_v4f64_0_z(<16 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1332,7 +1332,7 @@ define <4 x double> @mask_cast_extract_v16f32_v4f64_0_z(<16 x float> %a, i8 %mas
define <4 x double> @mask_cast_extract_v16f32_v4f64_1(<16 x float> %a, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
@@ -1347,7 +1347,7 @@ define <4 x double> @mask_cast_extract_v16f32_v4f64_1(<16 x float> %a, <4 x doub
define <4 x double> @mask_cast_extract_v16f32_v4f64_1_z(<16 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -1361,7 +1361,7 @@ define <4 x double> @mask_cast_extract_v16f32_v4f64_1_z(<16 x float> %a, i8 %mas
define <2 x i64> @mask_cast_extract_v16i32_v2i64_0(<16 x i32> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -1376,7 +1376,7 @@ define <2 x i64> @mask_cast_extract_v16i32_v2i64_0(<16 x i32> %a, <2 x i64> %pas
define <2 x i64> @mask_cast_extract_v16i32_v2i64_0_z(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -1391,7 +1391,7 @@ define <2 x i64> @mask_cast_extract_v16i32_v2i64_0_z(<16 x i32> %a, i8 %mask) {
define <2 x i64> @mask_cast_extract_v16i32_v2i64_1(<16 x i32> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -1407,7 +1407,7 @@ define <2 x i64> @mask_cast_extract_v16i32_v2i64_1(<16 x i32> %a, <2 x i64> %pas
define <2 x i64> @mask_cast_extract_v16i32_v2i64_1_z(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -1422,7 +1422,7 @@ define <2 x i64> @mask_cast_extract_v16i32_v2i64_1_z(<16 x i32> %a, i8 %mask) {
define <2 x double> @mask_cast_extract_v16f32_v2f64_0(<16 x float> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
@@ -1437,7 +1437,7 @@ define <2 x double> @mask_cast_extract_v16f32_v2f64_0(<16 x float> %a, <2 x doub
define <2 x double> @mask_cast_extract_v16f32_v2f64_0_z(<16 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_0_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -1452,7 +1452,7 @@ define <2 x double> @mask_cast_extract_v16f32_v2f64_0_z(<16 x float> %a, i8 %mas
define <2 x double> @mask_cast_extract_v16f32_v2f64_1(<16 x float> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -1468,7 +1468,7 @@ define <2 x double> @mask_cast_extract_v16f32_v2f64_1(<16 x float> %a, <2 x doub
define <2 x double> @mask_cast_extract_v16f32_v2f64_1_z(<16 x float> %a, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_1_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
@@ -1483,7 +1483,7 @@ define <2 x double> @mask_cast_extract_v16f32_v2f64_1_z(<16 x float> %a, i8 %mas
define <2 x double> @broadcast_v4f32_0101_from_v2f32_mask(double* %x, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: broadcast_v4f32_0101_from_v2f32_mask:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
; CHECK-NEXT: retq
@@ -1498,7 +1498,7 @@ define <2 x double> @broadcast_v4f32_0101_from_v2f32_mask(double* %x, <2 x doubl
define <2 x double> @broadcast_v4f32_0101_from_v2f32_maskz(double* %x, i8 %mask) {
; CHECK-LABEL: broadcast_v4f32_0101_from_v2f32_maskz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0]
; CHECK-NEXT: retq
@@ -1513,7 +1513,7 @@ define <2 x double> @broadcast_v4f32_0101_from_v2f32_maskz(double* %x, i8 %mask)
define <8 x float> @test_broadcast_2f64_8f32(<2 x double> *%p, i8 %mask) nounwind {
; CHECK-LABEL: test_broadcast_2f64_8f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -1527,7 +1527,7 @@ define <8 x float> @test_broadcast_2f64_8f32(<2 x double> *%p, i8 %mask) nounwin
define <8 x i32> @test_broadcast_2i64_8i32(<2 x i64> *%p, i8 %mask) nounwind {
; CHECK-LABEL: test_broadcast_2i64_8i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -1541,7 +1541,7 @@ define <8 x i32> @test_broadcast_2i64_8i32(<2 x i64> *%p, i8 %mask) nounwind {
define <16 x float> @test_broadcast_2f64_16f32(<2 x double> *%p, i16 %mask) nounwind {
; CHECK-LABEL: test_broadcast_2f64_16f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -1555,7 +1555,7 @@ define <16 x float> @test_broadcast_2f64_16f32(<2 x double> *%p, i16 %mask) noun
define <16 x i32> @test_broadcast_2i64_16i32(<2 x i64> *%p, i16 %mask) nounwind {
; CHECK-LABEL: test_broadcast_2i64_16i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -1569,7 +1569,7 @@ define <16 x i32> @test_broadcast_2i64_16i32(<2 x i64> *%p, i16 %mask) nounwind
define <16 x float> @test_broadcast_4f64_16f32(<4 x double> *%p, i16 %mask) nounwind {
; CHECK-LABEL: test_broadcast_4f64_16f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; CHECK-NEXT: retq
@@ -1583,7 +1583,7 @@ define <16 x float> @test_broadcast_4f64_16f32(<4 x double> *%p, i16 %mask) noun
define <16 x i32> @test_broadcast_4i64_16i32(<4 x i64> *%p, i16 %mask) nounwind {
; CHECK-LABEL: test_broadcast_4i64_16i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
; CHECK-NEXT: retq
@@ -1597,7 +1597,7 @@ define <16 x i32> @test_broadcast_4i64_16i32(<4 x i64> *%p, i16 %mask) nounwind
define <4 x double> @test_broadcast_4f32_4f64(<4 x float> *%p, i8 %mask) nounwind {
; CHECK-LABEL: test_broadcast_4f32_4f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
; CHECK-NEXT: retq
@@ -1612,7 +1612,7 @@ define <4 x double> @test_broadcast_4f32_4f64(<4 x float> *%p, i8 %mask) nounwin
define <4 x i64> @test_broadcast_4i32_4i64(<4 x i32> *%p, i8 %mask) nounwind {
; CHECK-LABEL: test_broadcast_4i32_4i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
; CHECK-NEXT: retq
@@ -1627,7 +1627,7 @@ define <4 x i64> @test_broadcast_4i32_4i64(<4 x i32> *%p, i8 %mask) nounwind {
define <8 x double> @test_broadcast_4f32_8f64(<4 x float> *%p, i8 %mask) nounwind {
; CHECK-LABEL: test_broadcast_4f32_8f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
; CHECK-NEXT: retq
@@ -1641,7 +1641,7 @@ define <8 x double> @test_broadcast_4f32_8f64(<4 x float> *%p, i8 %mask) nounwin
define <8 x i64> @test_broadcast_4i32_8i64(<4 x i32> *%p, i8 %mask) nounwind {
; CHECK-LABEL: test_broadcast_4i32_8i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
; CHECK-NEXT: retq
@@ -1655,7 +1655,7 @@ define <8 x i64> @test_broadcast_4i32_8i64(<4 x i32> *%p, i8 %mask) nounwind {
define <8 x double> @test_broadcast_8f32_8f64(<8 x float> *%p, i8 %mask) nounwind {
; CHECK-LABEL: test_broadcast_8f32_8f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -1669,7 +1669,7 @@ define <8 x double> @test_broadcast_8f32_8f64(<8 x float> *%p, i8 %mask) nounwin
define <8 x i64> @test_broadcast_8i32_8i64(<8 x i32> *%p, i8 %mask) nounwind {
; CHECK-LABEL: test_broadcast_8i32_8i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
@@ -1683,7 +1683,7 @@ define <8 x i64> @test_broadcast_8i32_8i64(<8 x i32> *%p, i8 %mask) nounwind {
define <4 x float> @test_broadcastf32x2_v4f32(<4 x float> %vec, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: test_broadcastf32x2_v4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
@@ -1697,7 +1697,7 @@ define <4 x float> @test_broadcastf32x2_v4f32(<4 x float> %vec, <4 x float> %pas
define <4 x float> @test_broadcastf32x2_v4f32_z(<4 x float> %vec, i8 %mask) {
; CHECK-LABEL: test_broadcastf32x2_v4f32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
@@ -1711,7 +1711,7 @@ define <4 x float> @test_broadcastf32x2_v4f32_z(<4 x float> %vec, i8 %mask) {
define <4 x i32> @test_broadcasti32x2_v4i32(<4 x i32> %vec, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: test_broadcasti32x2_v4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm1 {%k1} = xmm0[0,1,0,1]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
@@ -1725,7 +1725,7 @@ define <4 x i32> @test_broadcasti32x2_v4i32(<4 x i32> %vec, <4 x i32> %passthru,
define <4 x i32> @test_broadcasti32x2_v4i32_z(<4 x i32> %vec, i8 %mask) {
; CHECK-LABEL: test_broadcasti32x2_v4i32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,0,1]
; CHECK-NEXT: retq
@@ -1738,7 +1738,7 @@ define <4 x i32> @test_broadcasti32x2_v4i32_z(<4 x i32> %vec, i8 %mask) {
define <8 x float> @test_broadcastf32x2_v8f32(<8 x float> %vec, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: test_broadcastf32x2_v8f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vmovapd %ymm1, %ymm0
@@ -1751,7 +1751,7 @@ define <8 x float> @test_broadcastf32x2_v8f32(<8 x float> %vec, <8 x float> %pas
define <8 x float> @test_broadcastf32x2_v8f32_z(<8 x float> %vec, i8 %mask) {
; CHECK-LABEL: test_broadcastf32x2_v8f32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: retq
@@ -1763,7 +1763,7 @@ define <8 x float> @test_broadcastf32x2_v8f32_z(<8 x float> %vec, i8 %mask) {
define <8 x i32> @test_broadcasti32x2_v8i32(<8 x i32> %vec, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: test_broadcasti32x2_v8i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
@@ -1776,7 +1776,7 @@ define <8 x i32> @test_broadcasti32x2_v8i32(<8 x i32> %vec, <8 x i32> %passthru,
define <8 x i32> @test_broadcasti32x2_v8i32_z(<8 x i32> %vec, i8 %mask) {
; CHECK-LABEL: test_broadcasti32x2_v8i32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: retq
@@ -1788,7 +1788,7 @@ define <8 x i32> @test_broadcasti32x2_v8i32_z(<8 x i32> %vec, i8 %mask) {
define <16 x float> @test_broadcastf32x2_v16f32_z(<16 x float> %vec, i16 %mask) {
; CHECK-LABEL: test_broadcastf32x2_v16f32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: retq
@@ -1800,7 +1800,7 @@ define <16 x float> @test_broadcastf32x2_v16f32_z(<16 x float> %vec, i16 %mask)
define <16 x i32> @test_broadcasti32x2_v16i32(<16 x i32> %vec, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: test_broadcasti32x2_v16i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -1813,7 +1813,7 @@ define <16 x i32> @test_broadcasti32x2_v16i32(<16 x i32> %vec, <16 x i32> %passt
define <16 x float> @test_broadcastf32x2_v16f32(<16 x float> %vec, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: test_broadcastf32x2_v16f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vmovapd %zmm1, %zmm0
@@ -1826,7 +1826,7 @@ define <16 x float> @test_broadcastf32x2_v16f32(<16 x float> %vec, <16 x float>
define <16 x i32> @test_broadcasti32x2_v16i32_z(<16 x i32> %vec, i16 %mask) {
; CHECK-LABEL: test_broadcasti32x2_v16i32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: retq
@@ -1838,7 +1838,7 @@ define <16 x i32> @test_broadcasti32x2_v16i32_z(<16 x i32> %vec, i16 %mask) {
define <16 x i8> @mask_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: mask_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpalignr {{.*#+}} xmm2 {%k1} = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
@@ -1851,7 +1851,7 @@ define <16 x i8> @mask_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16(<16
define <16 x i8> @maskz_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: maskz_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpalignr {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
; CHECK-NEXT: retq
@@ -1863,7 +1863,7 @@ define <16 x i8> @maskz_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16(<16
define <16 x i8> @mask_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: mask_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpalignr {{.*#+}} xmm2 {%k1} = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
@@ -1876,7 +1876,7 @@ define <16 x i8> @mask_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19(<
define <16 x i8> @maskz_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: maskz_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpalignr {{.*#+}} xmm0 {%k1} {z} = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
; CHECK-NEXT: retq
@@ -1888,7 +1888,7 @@ define <16 x i8> @maskz_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19(
define <16 x i8> @mask_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_23(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: mask_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_23:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpalignr {{.*#+}} xmm2 {%k1} = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
@@ -1901,7 +1901,7 @@ define <16 x i8> @mask_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_
define <16 x i8> @maskz_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_23(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: maskz_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_23:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpalignr {{.*#+}} xmm0 {%k1} {z} = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-mmx.ll b/test/CodeGen/X86/vector-shuffle-mmx.ll
index 7a0814b0eb3..c235e83a0d5 100644
--- a/test/CodeGen/X86/vector-shuffle-mmx.ll
+++ b/test/CodeGen/X86/vector-shuffle-mmx.ll
@@ -6,7 +6,7 @@
define void @test0(<1 x i64>* %x) {
; X32-LABEL: test0:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -14,7 +14,7 @@ define void @test0(<1 x i64>* %x) {
; X32-NEXT: retl
;
; X64-LABEL: test0:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X64-NEXT: movq %xmm0, (%rdi)
@@ -30,7 +30,7 @@ entry:
define void @test1() {
; X32-LABEL: test1:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: pushl %edi
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: subl $16, %esp
@@ -49,7 +49,7 @@ define void @test1() {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %mm0
@@ -75,7 +75,7 @@ entry:
define void @test2() nounwind {
; X32-LABEL: test2:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl L_tmp_V2i$non_lazy_ptr, %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -83,7 +83,7 @@ define void @test2() nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movq _tmp_V2i@{{.*}}(%rip), %rax
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
diff --git a/test/CodeGen/X86/vector-shuffle-sse1.ll b/test/CodeGen/X86/vector-shuffle-sse1.ll
index cf8e8eb8a12..d3597564afd 100644
--- a/test/CodeGen/X86/vector-shuffle-sse1.ll
+++ b/test/CodeGen/X86/vector-shuffle-sse1.ll
@@ -3,7 +3,7 @@
define <4 x float> @shuffle_v4f32_0001(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_0001:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,1]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
@@ -12,7 +12,7 @@ define <4 x float> @shuffle_v4f32_0001(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0020(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_0020:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,2,0]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
@@ -21,7 +21,7 @@ define <4 x float> @shuffle_v4f32_0020(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0300(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_0300:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3,0,0]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
@@ -30,7 +30,7 @@ define <4 x float> @shuffle_v4f32_0300(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_1000(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_1000:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0,0,0]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
@@ -39,7 +39,7 @@ define <4 x float> @shuffle_v4f32_1000(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_2200(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_2200:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,2,0,0]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
@@ -48,7 +48,7 @@ define <4 x float> @shuffle_v4f32_2200(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_3330(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_3330:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,0]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
@@ -57,7 +57,7 @@ define <4 x float> @shuffle_v4f32_3330(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_3210(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_3210:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -66,7 +66,7 @@ define <4 x float> @shuffle_v4f32_3210(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0011(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_0011:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
@@ -75,7 +75,7 @@ define <4 x float> @shuffle_v4f32_0011(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_2233(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_2233:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
@@ -84,7 +84,7 @@ define <4 x float> @shuffle_v4f32_2233(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0022(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_0022:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -93,7 +93,7 @@ define <4 x float> @shuffle_v4f32_0022(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_1133(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_1133:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -102,7 +102,7 @@ define <4 x float> @shuffle_v4f32_1133(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0145(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_0145:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -111,7 +111,7 @@ define <4 x float> @shuffle_v4f32_0145(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_0101(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_0101:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -120,7 +120,7 @@ define <4 x float> @shuffle_v4f32_0101(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_2323(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_2323:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
@@ -129,7 +129,7 @@ define <4 x float> @shuffle_v4f32_2323(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_6723(<4 x float> %a, <4 x float> %b) {
; SSE1-LABEL: shuffle_v4f32_6723:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
@@ -138,7 +138,7 @@ define <4 x float> @shuffle_v4f32_6723(<4 x float> %a, <4 x float> %b) {
define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_4zzz:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE1-NEXT: movaps %xmm1, %xmm0
@@ -149,7 +149,7 @@ define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_z4zz(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_z4zz:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
@@ -160,7 +160,7 @@ define <4 x float> @shuffle_v4f32_z4zz(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_zz4z:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -172,7 +172,7 @@ define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_zuu4(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_zuu4:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; SSE1-NEXT: movaps %xmm1, %xmm0
@@ -183,7 +183,7 @@ define <4 x float> @shuffle_v4f32_zuu4(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_zzz7(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_zzz7:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
@@ -195,7 +195,7 @@ define <4 x float> @shuffle_v4f32_zzz7(<4 x float> %a) {
define <4 x float> @shuffle_v4f32_z6zz(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_z6zz:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,0]
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
@@ -206,7 +206,7 @@ define <4 x float> @shuffle_v4f32_z6zz(<4 x float> %a) {
define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
; SSE1-LABEL: insert_reg_and_zero_v4f32:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE1-NEXT: movaps %xmm1, %xmm0
@@ -218,7 +218,7 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
; SSE1-LABEL: insert_mem_and_zero_v4f32:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE1-NEXT: retq
%a = load float, float* %ptr
@@ -229,7 +229,7 @@ define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE1-LABEL: insert_mem_lo_v4f32:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movq (%rdi), %rax
; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; SSE1-NEXT: shrq $32, %rax
@@ -250,7 +250,7 @@ define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE1-LABEL: insert_mem_hi_v4f32:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movq (%rdi), %rax
; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; SSE1-NEXT: shrq $32, %rax
@@ -270,7 +270,7 @@ define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
define <4 x float> @shuffle_mem_v4f32_3210(<4 x float>* %ptr) {
; SSE1-LABEL: shuffle_mem_v4f32_3210:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movaps (%rdi), %xmm0
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; SSE1-NEXT: retq
@@ -281,7 +281,7 @@ define <4 x float> @shuffle_mem_v4f32_3210(<4 x float>* %ptr) {
define <4 x float> @shuffle_mem_v4f32_0145(<4 x float> %a, <4 x float>* %pb) {
; SSE1-LABEL: shuffle_mem_v4f32_0145:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSE1-NEXT: retq
%b = load <4 x float>, <4 x float>* %pb, align 16
@@ -291,7 +291,7 @@ define <4 x float> @shuffle_mem_v4f32_0145(<4 x float> %a, <4 x float>* %pb) {
define <4 x float> @shuffle_mem_v4f32_6723(<4 x float> %a, <4 x float>* %pb) {
; SSE1-LABEL: shuffle_mem_v4f32_6723:
-; SSE1: # BB#0:
+; SSE1: # %bb.0:
; SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSE1-NEXT: retq
%b = load <4 x float>, <4 x float>* %pb, align 16
diff --git a/test/CodeGen/X86/vector-shuffle-sse41.ll b/test/CodeGen/X86/vector-shuffle-sse41.ll
index be9a4b95077..bcf706fc06f 100644
--- a/test/CodeGen/X86/vector-shuffle-sse41.ll
+++ b/test/CodeGen/X86/vector-shuffle-sse41.ll
@@ -4,12 +4,12 @@
define <8 x i16> @blend_packusdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
; SSE41-LABEL: blend_packusdw:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: packusdw %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_packusdw:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
%p0 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
@@ -20,12 +20,12 @@ define <8 x i16> @blend_packusdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4
define <16 x i8> @blend_packuswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; SSE41-LABEL: blend_packuswb:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: packuswb %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_packuswb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
%p0 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -36,14 +36,14 @@ define <16 x i8> @blend_packuswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8
define <8 x i16> @blend_packusdw_packuswb(<4 x i32> %a0, <4 x i32> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; SSE41-LABEL: blend_packusdw_packuswb:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: packuswb %xmm3, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_packusdw_packuswb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm3, %xmm2, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
diff --git a/test/CodeGen/X86/vector-shuffle-sse4a.ll b/test/CodeGen/X86/vector-shuffle-sse4a.ll
index 64cc9af5a2a..501d9157260 100644
--- a/test/CodeGen/X86/vector-shuffle-sse4a.ll
+++ b/test/CodeGen/X86/vector-shuffle-sse4a.ll
@@ -10,7 +10,7 @@
; A length of zero is equivalent to a bit length of 64.
define <2 x i64> @extrqi_len0_idx0(<2 x i64> %a) {
; ALL-LABEL: extrqi_len0_idx0:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a, i8 0, i8 0)
ret <2 x i64> %1
@@ -18,7 +18,7 @@ define <2 x i64> @extrqi_len0_idx0(<2 x i64> %a) {
define <2 x i64> @extrqi_len8_idx16(<2 x i64> %a) {
; ALL-LABEL: extrqi_len8_idx16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a, i8 8, i8 16)
@@ -28,7 +28,7 @@ define <2 x i64> @extrqi_len8_idx16(<2 x i64> %a) {
; If the length + index exceeds the bottom 64 bits the result is undefined.
define <2 x i64> @extrqi_len32_idx48(<2 x i64> %a) {
; ALL-LABEL: extrqi_len32_idx48:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a, i8 32, i8 48)
@@ -37,17 +37,17 @@ define <2 x i64> @extrqi_len32_idx48(<2 x i64> %a) {
define <16 x i8> @shuf_0zzzuuuuuuuuuuuu(<16 x i8> %a0) {
; AMD10H-LABEL: shuf_0zzzuuuuuuuuuuuu:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuf_0zzzuuuuuuuuuuuu:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_0zzzuuuuuuuuuuuu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; BTVER2-NEXT: retq
%s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -56,7 +56,7 @@ define <16 x i8> @shuf_0zzzuuuuuuuuuuuu(<16 x i8> %a0) {
define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(<16 x i8> %a0) {
; AMD10H-LABEL: shuf_0zzzzzzz1zzzzzzz:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: movdqa %xmm0, %xmm1
; AMD10H-NEXT: extrq {{.*#+}} xmm1 = xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -64,12 +64,12 @@ define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(<16 x i8> %a0) {
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuf_0zzzzzzz1zzzzzzz:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_0zzzzzzz1zzzzzzz:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; BTVER2-NEXT: retq
%s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -78,7 +78,7 @@ define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(<16 x i8> %a0) {
define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(<16 x i8> %a0) {
; AMD10H-LABEL: shuf_2zzzzzzz3zzzzzzz:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: movdqa %xmm0, %xmm1
; AMD10H-NEXT: extrq {{.*#+}} xmm1 = xmm1[3],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -86,12 +86,12 @@ define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(<16 x i8> %a0) {
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuf_2zzzzzzz3zzzzzzz:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_2zzzzzzz3zzzzzzz:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsrld $16, %xmm0, %xmm0
; BTVER2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; BTVER2-NEXT: retq
@@ -101,17 +101,17 @@ define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(<16 x i8> %a0) {
define <16 x i8> @shuf_01zzuuuuuuuuuuuu(<16 x i8> %a0) {
; AMD10H-LABEL: shuf_01zzuuuuuuuuuuuu:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuf_01zzuuuuuuuuuuuu:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_01zzuuuuuuuuuuuu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; BTVER2-NEXT: retq
%s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -120,7 +120,7 @@ define <16 x i8> @shuf_01zzuuuuuuuuuuuu(<16 x i8> %a0) {
define <16 x i8> @shuf_01zzzzzz23zzzzzz(<16 x i8> %a0) {
; AMD10H-LABEL: shuf_01zzzzzz23zzzzzz:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: movdqa %xmm0, %xmm1
; AMD10H-NEXT: extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -128,12 +128,12 @@ define <16 x i8> @shuf_01zzzzzz23zzzzzz(<16 x i8> %a0) {
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuf_01zzzzzz23zzzzzz:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[2,3],zero,zero,zero,zero,zero,zero
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_01zzzzzz23zzzzzz:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; BTVER2-NEXT: retq
%s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 2, i32 3, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -142,7 +142,7 @@ define <16 x i8> @shuf_01zzzzzz23zzzzzz(<16 x i8> %a0) {
define <16 x i8> @shuf_1zzzuuuuuuuuuuuu(<16 x i8> %a0) {
; ALL-LABEL: shuf_1zzzuuuuuuuuuuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -151,7 +151,7 @@ define <16 x i8> @shuf_1zzzuuuuuuuuuuuu(<16 x i8> %a0) {
define <8 x i16> @shuf_1zzzuuuu(<8 x i16> %a0) {
; ALL-LABEL: shuf_1zzzuuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 8, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -160,7 +160,7 @@ define <8 x i16> @shuf_1zzzuuuu(<8 x i16> %a0) {
define <8 x i16> @shuf_12zzuuuu(<8 x i16> %a0) {
; ALL-LABEL: shuf_12zzuuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[2,3,4,5],zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 2, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -169,17 +169,17 @@ define <8 x i16> @shuf_12zzuuuu(<8 x i16> %a0) {
define <8 x i16> @shuf_012zuuuu(<8 x i16> %a0) {
; AMD10H-LABEL: shuf_012zuuuu:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuf_012zuuuu:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_012zuuuu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BTVER2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
; BTVER2-NEXT: retq
@@ -189,7 +189,7 @@ define <8 x i16> @shuf_012zuuuu(<8 x i16> %a0) {
define <8 x i16> @shuf_0zzz1zzz(<8 x i16> %a0) {
; AMD10H-LABEL: shuf_0zzz1zzz:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: movdqa %xmm0, %xmm1
; AMD10H-NEXT: extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; AMD10H-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -197,12 +197,12 @@ define <8 x i16> @shuf_0zzz1zzz(<8 x i16> %a0) {
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuf_0zzz1zzz:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[2,3],zero,zero,zero,zero,zero,zero
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_0zzz1zzz:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; BTVER2-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 8, i32 8, i32 1, i32 8, i32 8, i32 8>
@@ -211,19 +211,19 @@ define <8 x i16> @shuf_0zzz1zzz(<8 x i16> %a0) {
define <4 x i32> @shuf_0z1z(<4 x i32> %a0) {
; AMD10H-LABEL: shuf_0z1z:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: xorps %xmm1, %xmm1
; AMD10H-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuf_0z1z:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: xorps %xmm1, %xmm1
; BTVER1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuf_0z1z:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; BTVER2-NEXT: retq
%s = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 1, i32 4>
@@ -237,17 +237,17 @@ define <4 x i32> @shuf_0z1z(<4 x i32> %a0) {
; A length of zero is equivalent to a bit length of 64.
define <2 x i64> @insertqi_len0_idx0(<2 x i64> %a, <2 x i64> %b) {
; AMD10H-LABEL: insertqi_len0_idx0:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: movaps %xmm1, %xmm0
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: insertqi_len0_idx0:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: movaps %xmm1, %xmm0
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: insertqi_len0_idx0:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vmovaps %xmm1, %xmm0
; BTVER2-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a, <2 x i64> %b, i8 0, i8 0)
@@ -256,7 +256,7 @@ define <2 x i64> @insertqi_len0_idx0(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @insertqi_len8_idx16(<2 x i64> %a, <2 x i64> %b) {
; ALL-LABEL: insertqi_len8_idx16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3,4,5,6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a, <2 x i64> %b, i8 8, i8 16)
@@ -266,7 +266,7 @@ define <2 x i64> @insertqi_len8_idx16(<2 x i64> %a, <2 x i64> %b) {
; If the length + index exceeds the bottom 64 bits the result is undefined.
define <2 x i64> @insertqi_len32_idx48(<2 x i64> %a, <2 x i64> %b) {
; ALL-LABEL: insertqi_len32_idx48:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a, <2 x i64> %b, i8 32, i8 48)
@@ -275,7 +275,7 @@ define <2 x i64> @insertqi_len32_idx48(<2 x i64> %a, <2 x i64> %b) {
define <16 x i8> @shuf_0_0_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
; ALL-LABEL: shuf_0_0_2_3_uuuu_uuuu_uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 0, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -284,7 +284,7 @@ define <16 x i8> @shuf_0_0_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @shuf_0_16_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
; ALL-LABEL: shuf_0_16_2_3_uuuu_uuuu_uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3,4,5,6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -293,7 +293,7 @@ define <16 x i8> @shuf_0_16_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @shuf_16_1_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
; ALL-LABEL: shuf_16_1_2_3_uuuu_uuuu_uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 16, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -302,7 +302,7 @@ define <16 x i8> @shuf_16_1_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @shuf_0823uuuu(<8 x i16> %a0, <8 x i16> %a1) {
; ALL-LABEL: shuf_0823uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1],xmm0[4,5,6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -311,7 +311,7 @@ define <8 x i16> @shuf_0823uuuu(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i16> @shuf_0183uuuu(<8 x i16> %a0, <8 x i16> %a1) {
; ALL-LABEL: shuf_0183uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[0,1],xmm0[6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 1, i32 8, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -320,7 +320,7 @@ define <8 x i16> @shuf_0183uuuu(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i16> @shuf_0128uuuu(<8 x i16> %a0, <8 x i16> %a1) {
; ALL-LABEL: shuf_0128uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[0,1],xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -329,7 +329,7 @@ define <8 x i16> @shuf_0128uuuu(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i16> @shuf_0893uuuu(<8 x i16> %a0, <8 x i16> %a1) {
; ALL-LABEL: shuf_0893uuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1,2,3],xmm0[6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 9, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -338,7 +338,7 @@ define <8 x i16> @shuf_0893uuuu(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i16> @shuf_089Auuuu(<8 x i16> %a0, <8 x i16> %a1) {
; ALL-LABEL: shuf_089Auuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1,2,3,4,5],xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 9, i32 10, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -347,7 +347,7 @@ define <8 x i16> @shuf_089Auuuu(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i16> @shuf_089uuuuu(<8 x i16> %a0, <8 x i16> %a1) {
; ALL-LABEL: shuf_089uuuuu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1,2,3],xmm0[6,7,u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -361,7 +361,7 @@ define <8 x i16> @shuf_089uuuuu(<8 x i16> %a0, <8 x i16> %a1) {
; Out of range.
define <16 x i8> @shuffle_8_18_uuuuuuuuuuuuuu(<16 x i8> %a, <16 x i8> %b) {
; AMD10H-LABEL: shuffle_8_18_uuuuuuuuuuuuuu:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AMD10H-NEXT: andpd {{.*}}(%rip), %xmm0
; AMD10H-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -370,14 +370,14 @@ define <16 x i8> @shuffle_8_18_uuuuuuuuuuuuuu(<16 x i8> %a, <16 x i8> %b) {
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuffle_8_18_uuuuuuuuuuuuuu:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: psrld $16, %xmm1
; BTVER1-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; BTVER1-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuffle_8_18_uuuuuuuuuuuuuu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpsrld $16, %xmm1, %xmm1
; BTVER2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; BTVER2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -388,19 +388,19 @@ define <16 x i8> @shuffle_8_18_uuuuuuuuuuuuuu(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %v) {
; AMD10H-LABEL: shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AMD10H-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AMD10H-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,5,5,4,4,5,5,4,4,5,5,6,6,7,7]
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,5,5,4,4,5,5,4,4,5,5,6,6,7,7]
; BTVER2-NEXT: retq
%1 = shufflevector <16 x i8> %v, <16 x i8> zeroinitializer, <16 x i32> <i32 undef, i32 0, i32 5, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -409,18 +409,18 @@ define <16 x i8> @shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8
define <16 x i8> @shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %v) {
; AMD10H-LABEL: shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AMD10H: # BB#0:
+; AMD10H: # %bb.0:
; AMD10H-NEXT: psrlq $16, %xmm0
; AMD10H-NEXT: pand {{.*}}(%rip), %xmm0
; AMD10H-NEXT: retq
;
; BTVER1-LABEL: shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; BTVER1: # BB#0:
+; BTVER1: # %bb.0:
; BTVER1-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u],zero,xmm0[4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; BTVER1-NEXT: retq
;
; BTVER2-LABEL: shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u],zero,xmm0[4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; BTVER2-NEXT: retq
%1 = shufflevector <16 x i8> %v, <16 x i8> zeroinitializer, <16 x i32> <i32 undef, i32 16, i32 4, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -429,7 +429,7 @@ define <16 x i8> @shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i
define <16 x i8> @shuffle_uu_uu_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %v) {
; ALL-LABEL: shuffle_uu_uu_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: extrq {{.*#+}} xmm0 = xmm0[2,3,4],zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; ALL-NEXT: retq
%1 = shufflevector <16 x i8> %v, <16 x i8> zeroinitializer, <16 x i32> <i32 undef, i32 undef, i32 4, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/vector-shuffle-v1.ll b/test/CodeGen/X86/vector-shuffle-v1.ll
index 1a408fe1d59..eba442e3138 100644
--- a/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -5,12 +5,12 @@
define <2 x i1> @shuf2i1_1_0(<2 x i1> %a) {
; AVX512F-LABEL: shuf2i1_1_0:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf2i1_1_0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -22,7 +22,7 @@ define <2 x i1> @shuf2i1_1_0(<2 x i1> %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf2i1_1_0:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
@@ -36,14 +36,14 @@ define <2 x i1> @shuf2i1_1_0(<2 x i1> %a) {
define <2 x i1> @shuf2i1_1_2(<2 x i1> %a) {
; AVX512F-LABEL: shuf2i1_1_2:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movl $1, %eax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf2i1_1_2:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -58,7 +58,7 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf2i1_1_2:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
; VL_BW_DQ-NEXT: movb $1, %al
@@ -76,12 +76,12 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a) {
define <4 x i1> @shuf4i1_3_2_10(<4 x i1> %a) {
; AVX512F-LABEL: shuf4i1_3_2_10:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf4i1_3_2_10:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -93,7 +93,7 @@ define <4 x i1> @shuf4i1_3_2_10(<4 x i1> %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf4i1_3_2_10:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: vpslld $31, %xmm0, %xmm0
; VL_BW_DQ-NEXT: vptestmd %xmm0, %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2d %k0, %xmm0
@@ -107,7 +107,7 @@ define <4 x i1> @shuf4i1_3_2_10(<4 x i1> %a) {
define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %a1, <8 x i64> %b1) {
; AVX512F-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
@@ -120,7 +120,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512VL-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
@@ -134,7 +134,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
@@ -151,7 +151,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %
define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <16 x i32> %b, <16 x i32> %a1, <16 x i32> %b1) {
; AVX512F-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %zmm3, %zmm1, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
@@ -166,7 +166,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %zmm3, %zmm1, %k2
; AVX512VL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
@@ -181,7 +181,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <1
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; VL_BW_DQ-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
; VL_BW_DQ-NEXT: vpmovm2d %k1, %zmm0
@@ -200,7 +200,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <1
define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<32 x i1> %a) {
; AVX512F-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[3,6,u,12,3,7,7,0,3,6,1,13,3,u,7,0,u,u,22,u,u,u,u,u,u,u,u,u,u,21,u,u]
; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,6,u,u,u,u,u,u,u,u,u,u,5,u,u,19,22,u,28,19,23,23,16,19,22,17,29,19,u,23,16]
@@ -209,7 +209,7 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[3,6,u,12,3,7,7,0,3,6,1,13,3,u,7,0,u,u,22,u,u,u,u,u,u,u,u,u,u,21,u,u]
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,6,u,u,u,u,u,u,u,u,u,u,5,u,u,19,22,u,28,19,23,23,16,19,22,17,29,19,u,23,16]
@@ -218,7 +218,7 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: vpsllw $7, %ymm0, %ymm0
; VL_BW_DQ-NEXT: vpmovb2m %ymm0, %k0
; VL_BW_DQ-NEXT: vpmovm2w %k0, %zmm0
@@ -233,7 +233,7 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0
define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
; AVX512F-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -246,7 +246,7 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: kmovw %edi, %k1
; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -260,7 +260,7 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
@@ -276,7 +276,7 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
; AVX512F-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -290,7 +290,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: kmovw %edi, %k1
; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -304,7 +304,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -323,7 +323,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
; AVX512F-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
@@ -335,7 +335,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: kmovw %edi, %k1
; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512VL-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
@@ -347,7 +347,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
@@ -364,7 +364,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
; AVX512F-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -378,7 +378,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: kmovw %edi, %k1
; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -392,7 +392,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -411,7 +411,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
; AVX512F-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
@@ -425,7 +425,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: kmovw %edi, %k1
; AVX512VL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512VL-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
@@ -439,7 +439,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
@@ -458,7 +458,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
; AVX512F-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: movb $51, %al
; AVX512F-NEXT: kmovw %eax, %k2
@@ -474,7 +474,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: kmovw %edi, %k1
; AVX512VL-NEXT: movb $51, %al
; AVX512VL-NEXT: kmovw %eax, %k2
@@ -490,7 +490,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,6,1,0,3,7,7,1]
@@ -509,7 +509,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; AVX512F-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -525,7 +525,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -541,7 +541,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: vpsllw $15, %xmm0, %xmm0
; VL_BW_DQ-NEXT: vpmovw2m %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
@@ -561,7 +561,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
; AVX512F-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpbroadcastd %xmm0, %zmm0
@@ -573,7 +573,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: kmovw %edi, %k1
; AVX512VL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512VL-NEXT: vpbroadcastd %xmm0, %zmm0
@@ -585,7 +585,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2d %k0, %zmm0
; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0
@@ -602,7 +602,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
define i64 @shuf64i1_zero(i64 %a) {
; AVX512F-LABEL: shuf64i1_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: .cfi_def_cfa_offset 16
; AVX512F-NEXT: .cfi_offset %rbp, -16
@@ -634,7 +634,7 @@ define i64 @shuf64i1_zero(i64 %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuf64i1_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: .cfi_def_cfa_offset 16
; AVX512VL-NEXT: .cfi_offset %rbp, -16
@@ -666,7 +666,7 @@ define i64 @shuf64i1_zero(i64 %a) {
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf64i1_zero:
-; VL_BW_DQ: # BB#0:
+; VL_BW_DQ: # %bb.0:
; VL_BW_DQ-NEXT: kmovq %rdi, %k0
; VL_BW_DQ-NEXT: vpmovm2b %k0, %zmm0
; VL_BW_DQ-NEXT: vpbroadcastb %xmm0, %zmm0
diff --git a/test/CodeGen/X86/vector-shuffle-v48.ll b/test/CodeGen/X86/vector-shuffle-v48.ll
index cfccc40a15c..3042d117d33 100644
--- a/test/CodeGen/X86/vector-shuffle-v48.ll
+++ b/test/CodeGen/X86/vector-shuffle-v48.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=x86_64-pc-linux -mattr=+avx2 < %s | FileCheck %s
define <32 x i8> @foo(<48 x i8>* %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqu 32(%rdi), %xmm0
; CHECK-NEXT: vmovdqu (%rdi), %ymm1
; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
diff --git a/test/CodeGen/X86/vector-shuffle-variable-128.ll b/test/CodeGen/X86/vector-shuffle-variable-128.ll
index c0a58640b56..6a0474803c6 100644
--- a/test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ b/test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -11,7 +11,7 @@
define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i64 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andl $1, %esi
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
@@ -20,7 +20,7 @@ define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i6
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: andl $1, %esi
; AVX-NEXT: andl $1, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
@@ -36,7 +36,7 @@ define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i6
define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: andl $1, %edi
@@ -48,7 +48,7 @@ define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1)
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: andl $1, %edi
@@ -67,7 +67,7 @@ define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1)
define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -87,7 +87,7 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -107,7 +107,7 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -124,7 +124,7 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -152,7 +152,7 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -172,7 +172,7 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -192,7 +192,7 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -209,7 +209,7 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
@@ -237,7 +237,7 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -281,7 +281,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -325,7 +325,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -355,7 +355,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -404,7 +404,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -489,7 +489,7 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -574,7 +574,7 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -629,7 +629,7 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -723,7 +723,7 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movl (%rdi), %eax
; SSE2-NEXT: movl 4(%rdi), %ecx
; SSE2-NEXT: andl $3, %eax
@@ -743,7 +743,7 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movl (%rdi), %eax
; SSSE3-NEXT: movl 4(%rdi), %ecx
; SSSE3-NEXT: andl $3, %eax
@@ -763,7 +763,7 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movl (%rdi), %eax
; SSE41-NEXT: movl 4(%rdi), %ecx
; SSE41-NEXT: andl $3, %eax
@@ -780,7 +780,7 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movl (%rdi), %eax
; AVX-NEXT: movl 4(%rdi), %ecx
; AVX-NEXT: andl $3, %eax
@@ -816,7 +816,7 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzbl (%rdi), %eax
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl 15(%rdi), %edx
@@ -901,7 +901,7 @@ define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8*
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movzbl (%rdi), %eax
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movzbl 15(%rdi), %edx
@@ -986,7 +986,7 @@ define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8*
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movzbl (%rdi), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
@@ -1041,7 +1041,7 @@ define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8*
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movzbl (%rdi), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
@@ -1167,7 +1167,7 @@ define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8*
define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SSE-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
@@ -1184,7 +1184,7 @@ define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float>
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
@@ -1212,7 +1212,7 @@ define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float>
define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -1249,7 +1249,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -1286,7 +1286,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -1311,7 +1311,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
diff --git a/test/CodeGen/X86/vector-shuffle-variable-256.ll b/test/CodeGen/X86/vector-shuffle-variable-256.ll
index aa60e774232..7926fb93335 100644
--- a/test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ b/test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -8,7 +8,7 @@
define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; ALL-LABEL: var_shuffle_v4f64_v4f64_xxxx_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
@@ -39,7 +39,7 @@ define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0,
define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; ALL-LABEL: var_shuffle_v4f64_v4f64_uxx0_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
@@ -67,7 +67,7 @@ define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0,
define <4 x double> @var_shuffle_v4f64_v2f64_xxxx_i64(<2 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; ALL-LABEL: var_shuffle_v4f64_v2f64_xxxx_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: andl $1, %esi
; ALL-NEXT: andl $1, %edi
; ALL-NEXT: andl $1, %ecx
@@ -92,7 +92,7 @@ define <4 x double> @var_shuffle_v4f64_v2f64_xxxx_i64(<2 x double> %x, i64 %i0,
define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; ALL-LABEL: var_shuffle_v4i64_v4i64_xxxx_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
@@ -125,7 +125,7 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i
define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; ALL-LABEL: var_shuffle_v4i64_v4i64_xx00_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
@@ -153,7 +153,7 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; ALL-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: andl $1, %edi
; ALL-NEXT: andl $1, %esi
; ALL-NEXT: andl $1, %edx
@@ -180,7 +180,7 @@ define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i
define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
; ALL-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
@@ -235,7 +235,7 @@ define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0
define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; ALL-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; ALL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -284,7 +284,7 @@ define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0
define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
; AVX1-LABEL: var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -346,7 +346,7 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -443,7 +443,7 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; AVX1-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; AVX1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -499,7 +499,7 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
; AVX2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
; AVX2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -594,7 +594,7 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwind {
; ALL-LABEL: mem_shuffle_v4i64_v4i64_xxxx_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
@@ -639,7 +639,7 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwi
define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwind {
; ALL-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq (%rdi), %rax
; ALL-NEXT: movq 8(%rdi), %rcx
; ALL-NEXT: andl $1, %eax
diff --git a/test/CodeGen/X86/vector-sqrt.ll b/test/CodeGen/X86/vector-sqrt.ll
index c5ac4466b5f..1e6b3c1358b 100644
--- a/test/CodeGen/X86/vector-sqrt.ll
+++ b/test/CodeGen/X86/vector-sqrt.ll
@@ -4,7 +4,7 @@
; Function Attrs: nounwind readonly uwtable
define <2 x double> @sqrtd2(double* nocapture readonly %v) local_unnamed_addr #0 {
; CHECK-LABEL: sqrtd2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsqrtsd (%rdi), %xmm0, %xmm0
; CHECK-NEXT: vsqrtsd 8(%rdi), %xmm1, %xmm1
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -26,7 +26,7 @@ declare double @sqrt(double) local_unnamed_addr #1
; Function Attrs: nounwind readonly uwtable
define <4 x float> @sqrtf4(float* nocapture readonly %v) local_unnamed_addr #0 {
; CHECK-LABEL: sqrtf4:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsqrtss (%rdi), %xmm0, %xmm0
; CHECK-NEXT: vsqrtss 4(%rdi), %xmm1, %xmm1
; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm2
diff --git a/test/CodeGen/X86/vector-trunc-math.ll b/test/CodeGen/X86/vector-trunc-math.ll
index c399ea077cc..45479941143 100644
--- a/test/CodeGen/X86/vector-trunc-math.ll
+++ b/test/CodeGen/X86/vector-trunc-math.ll
@@ -12,14 +12,14 @@
define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_add_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddq %xmm3, %xmm1
; SSE-NEXT: paddq %xmm2, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
@@ -29,7 +29,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -38,7 +38,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -51,7 +51,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-LABEL: trunc_add_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddq %xmm6, %xmm2
; SSE-NEXT: paddq %xmm7, %xmm3
; SSE-NEXT: paddq %xmm4, %xmm0
@@ -70,7 +70,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -91,7 +91,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -106,7 +106,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -118,7 +118,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: trunc_add_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: paddd %xmm3, %xmm1
; SSE-NEXT: pslld $16, %xmm1
@@ -129,7 +129,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -142,7 +142,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -151,7 +151,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -164,7 +164,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE-LABEL: trunc_add_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm2
@@ -192,7 +192,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -229,7 +229,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm7, %ymm3, %ymm3
@@ -257,7 +257,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpaddq %zmm3, %zmm1, %zmm1
; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
@@ -273,7 +273,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: trunc_add_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm4, %xmm0
; SSE-NEXT: paddd %xmm5, %xmm1
; SSE-NEXT: paddd %xmm6, %xmm2
@@ -289,7 +289,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -310,7 +310,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -326,7 +326,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -338,7 +338,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: trunc_add_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddw %xmm2, %xmm0
; SSE-NEXT: paddw %xmm3, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -348,7 +348,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -361,7 +361,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -372,7 +372,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_add_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -380,7 +380,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_add_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -388,7 +388,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_add_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -401,7 +401,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
; SSE-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm2
; SSE-NEXT: psrad $16, %xmm2
; SSE-NEXT: pslld $16, %xmm1
@@ -413,7 +413,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -425,7 +425,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -434,7 +434,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovsxbw %xmm0, %xmm0
@@ -454,13 +454,13 @@ define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_add_const_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_const_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
@@ -468,7 +468,7 @@ define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_const_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
@@ -476,7 +476,7 @@ define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_const_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
@@ -489,7 +489,7 @@ define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-LABEL: trunc_add_const_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -505,7 +505,7 @@ define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_const_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -521,7 +521,7 @@ define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_const_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -534,7 +534,7 @@ define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_const_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -546,7 +546,7 @@ define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-LABEL: trunc_add_const_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: pslld $16, %xmm0
@@ -556,7 +556,7 @@ define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_const_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -567,7 +567,7 @@ define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_const_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
@@ -575,7 +575,7 @@ define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_const_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
@@ -588,7 +588,7 @@ define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_add_const_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm8, %xmm7
; SSE-NEXT: pand %xmm8, %xmm6
@@ -609,7 +609,7 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_const_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
@@ -635,7 +635,7 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_const_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -660,7 +660,7 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_const_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm1, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -675,7 +675,7 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_add_const_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm4, %xmm2
@@ -688,7 +688,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_const_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
@@ -704,7 +704,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_const_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -719,7 +719,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_add_const_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -731,7 +731,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: trunc_add_const_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
@@ -740,7 +740,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -751,7 +751,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -762,7 +762,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
@@ -770,7 +770,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
@@ -778,7 +778,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
@@ -795,14 +795,14 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_sub_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubq %xmm3, %xmm1
; SSE-NEXT: psubq %xmm2, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
@@ -812,7 +812,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -821,7 +821,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -834,7 +834,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-LABEL: trunc_sub_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubq %xmm6, %xmm2
; SSE-NEXT: psubq %xmm7, %xmm3
; SSE-NEXT: psubq %xmm4, %xmm0
@@ -853,7 +853,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -874,7 +874,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -889,7 +889,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -901,7 +901,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: trunc_sub_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm2, %xmm0
; SSE-NEXT: psubd %xmm3, %xmm1
; SSE-NEXT: pslld $16, %xmm1
@@ -912,7 +912,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -925,7 +925,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -934,7 +934,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -947,7 +947,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE-LABEL: trunc_sub_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm2
@@ -975,7 +975,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1012,7 +1012,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubq %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpsubq %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm7, %ymm3, %ymm3
@@ -1040,7 +1040,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq %zmm3, %zmm1, %zmm1
; AVX512-NEXT: vpsubq %zmm2, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
@@ -1056,7 +1056,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: trunc_sub_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm4, %xmm0
; SSE-NEXT: psubd %xmm5, %xmm1
; SSE-NEXT: psubd %xmm6, %xmm2
@@ -1072,7 +1072,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1093,7 +1093,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -1109,7 +1109,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -1121,7 +1121,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: trunc_sub_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubw %xmm2, %xmm0
; SSE-NEXT: psubw %xmm3, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -1131,7 +1131,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1144,7 +1144,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1155,7 +1155,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_sub_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -1163,7 +1163,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_sub_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1171,7 +1171,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_sub_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -1188,7 +1188,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
@@ -1198,7 +1198,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
@@ -1210,7 +1210,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -1219,7 +1219,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1232,7 +1232,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm4
; SSE-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
@@ -1255,7 +1255,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
@@ -1277,7 +1277,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -1292,7 +1292,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -1304,7 +1304,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: psubd {{.*}}(%rip), %xmm1
; SSE-NEXT: pslld $16, %xmm1
@@ -1315,7 +1315,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
@@ -1327,7 +1327,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -1336,7 +1336,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1349,7 +1349,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm8
; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
@@ -1380,7 +1380,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm4
; AVX1-NEXT: vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
@@ -1416,7 +1416,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm3, %ymm3
@@ -1444,7 +1444,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm1, %zmm1
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
@@ -1460,7 +1460,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: psubd {{.*}}(%rip), %xmm1
; SSE-NEXT: psubd {{.*}}(%rip), %xmm2
@@ -1476,7 +1476,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
@@ -1495,7 +1495,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -1511,7 +1511,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -1523,7 +1523,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psubw {{.*}}(%rip), %xmm0
; SSE-NEXT: psubw {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -1533,7 +1533,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
@@ -1545,7 +1545,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1556,7 +1556,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -1564,7 +1564,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1572,7 +1572,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -1589,7 +1589,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_mul_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm3, %xmm4
@@ -1614,7 +1614,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1624,7 +1624,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -1634,7 +1634,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_mul_v4i64_v4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
@@ -1644,7 +1644,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
@@ -1654,7 +1654,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
@@ -1669,7 +1669,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-LABEL: trunc_mul_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
@@ -1696,7 +1696,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4],xmm5[5,6,7]
@@ -1721,7 +1721,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -1742,7 +1742,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_mul_v8i64_v8i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovqw %zmm1, %xmm1
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
; AVX512F-NEXT: vpmullw %xmm1, %xmm0, %xmm0
@@ -1750,7 +1750,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_v8i64_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovqw %zmm1, %xmm1
; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
@@ -1758,7 +1758,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_v8i64_v8i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
@@ -1770,7 +1770,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: trunc_mul_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1793,7 +1793,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1806,7 +1806,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -1815,7 +1815,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_mul_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1828,7 +1828,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE-LABEL: trunc_mul_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movdqa %xmm0, %xmm9
; SSE-NEXT: psrlq $32, %xmm9
@@ -1936,7 +1936,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm8
; AVX1-NEXT: vpmuludq %xmm4, %xmm8, %xmm8
; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm9
@@ -2029,7 +2029,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -2065,7 +2065,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_mul_v16i64_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovqd %zmm3, %ymm3
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vpmulld %ymm3, %ymm1, %ymm1
@@ -2078,7 +2078,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_v16i64_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovqd %zmm3, %ymm3
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vpmulld %ymm3, %ymm1, %ymm1
@@ -2091,7 +2091,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_v16i64_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullq %zmm3, %zmm1, %zmm1
; AVX512DQ-NEXT: vpmullq %zmm2, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
@@ -2107,7 +2107,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: trunc_mul_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -2147,7 +2147,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -2168,7 +2168,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -2184,7 +2184,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_mul_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2196,7 +2196,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: trunc_mul_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm3, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -2206,7 +2206,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -2219,7 +2219,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -2230,7 +2230,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_mul_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -2238,7 +2238,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2246,7 +2246,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -2259,7 +2259,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
; SSE-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT: pslld $16, %xmm2
@@ -2271,7 +2271,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -2283,7 +2283,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -2292,7 +2292,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -2312,7 +2312,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,3]
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
@@ -2333,7 +2333,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_const_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -2341,7 +2341,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_const_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -2349,7 +2349,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_mul_const_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -2362,7 +2362,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -2378,7 +2378,7 @@ define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_const_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -2394,7 +2394,7 @@ define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_const_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -2407,7 +2407,7 @@ define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_mul_const_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2419,7 +2419,7 @@ define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: pslld $16, %xmm0
@@ -2429,7 +2429,7 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_const_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -2440,7 +2440,7 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_const_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2448,7 +2448,7 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_mul_const_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2461,7 +2461,7 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm8
; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
@@ -2539,7 +2539,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_const_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm4
; AVX1-NEXT: vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
@@ -2614,7 +2614,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_const_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm2, %xmm2
@@ -2642,7 +2642,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_mul_const_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm1, %ymm1
@@ -2658,7 +2658,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,1,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm0
@@ -2702,7 +2702,7 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_const_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -2721,7 +2721,7 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_const_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -2737,7 +2737,7 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_mul_const_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2749,7 +2749,7 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -2759,7 +2759,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2771,7 +2771,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -2782,7 +2782,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -2790,7 +2790,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2798,7 +2798,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -2815,14 +2815,14 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_and_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps %xmm3, %xmm1
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -2830,7 +2830,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -2839,7 +2839,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2852,7 +2852,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-LABEL: trunc_and_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: pand %xmm7, %xmm3
; SSE-NEXT: pand %xmm4, %xmm0
@@ -2871,7 +2871,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -2888,7 +2888,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -2903,7 +2903,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2915,7 +2915,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: trunc_and_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
@@ -2926,7 +2926,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -2937,7 +2937,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -2946,7 +2946,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2959,7 +2959,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE-LABEL: trunc_and_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm2
@@ -2987,7 +2987,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm5, %ymm1, %ymm1
; AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2
@@ -3016,7 +3016,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm7, %ymm3, %ymm3
@@ -3044,7 +3044,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm3, %zmm1, %zmm1
; AVX512-NEXT: vpandq %zmm2, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
@@ -3060,7 +3060,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: trunc_and_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE-NEXT: pand %xmm8, %xmm7
; SSE-NEXT: pand %xmm3, %xmm7
@@ -3076,7 +3076,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -3093,7 +3093,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -3109,7 +3109,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3121,7 +3121,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: trunc_and_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm1, %xmm3
@@ -3131,7 +3131,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3142,7 +3142,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3153,7 +3153,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_and_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -3161,7 +3161,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_and_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3169,7 +3169,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_and_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -3186,13 +3186,13 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_and_const_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_const_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -3200,7 +3200,7 @@ define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_const_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -3208,7 +3208,7 @@ define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_const_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3221,7 +3221,7 @@ define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-LABEL: trunc_and_const_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -3237,7 +3237,7 @@ define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_const_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -3253,7 +3253,7 @@ define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_const_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -3266,7 +3266,7 @@ define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_const_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3278,7 +3278,7 @@ define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-LABEL: trunc_and_const_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: pslld $16, %xmm0
@@ -3288,7 +3288,7 @@ define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_const_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -3299,7 +3299,7 @@ define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_const_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3307,7 +3307,7 @@ define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_const_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3320,7 +3320,7 @@ define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_and_const_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm8, %xmm7
; SSE-NEXT: pand %xmm8, %xmm6
@@ -3341,7 +3341,7 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_const_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
@@ -3367,7 +3367,7 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_const_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -3392,7 +3392,7 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_const_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm1, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -3407,7 +3407,7 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_and_const_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm4, %xmm2
@@ -3420,7 +3420,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_const_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
@@ -3436,7 +3436,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_const_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -3451,7 +3451,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_and_const_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3463,7 +3463,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: trunc_and_const_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
@@ -3472,7 +3472,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -3483,7 +3483,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -3494,7 +3494,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3502,7 +3502,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3510,7 +3510,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3527,14 +3527,14 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_xor_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm3, %xmm1
; SSE-NEXT: xorps %xmm2, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -3542,7 +3542,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -3551,7 +3551,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3564,7 +3564,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-LABEL: trunc_xor_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm6, %xmm2
; SSE-NEXT: pxor %xmm7, %xmm3
; SSE-NEXT: pxor %xmm4, %xmm0
@@ -3583,7 +3583,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -3600,7 +3600,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -3615,7 +3615,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3627,7 +3627,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: trunc_xor_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
@@ -3638,7 +3638,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -3649,7 +3649,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -3658,7 +3658,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3671,7 +3671,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE-LABEL: trunc_xor_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm2
@@ -3699,7 +3699,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1
; AVX1-NEXT: vxorps %ymm6, %ymm2, %ymm2
@@ -3728,7 +3728,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm7, %ymm3, %ymm3
@@ -3756,7 +3756,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxorq %zmm3, %zmm1, %zmm1
; AVX512-NEXT: vpxorq %zmm2, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
@@ -3772,7 +3772,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: trunc_xor_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm4, %xmm0
; SSE-NEXT: pxor %xmm5, %xmm1
; SSE-NEXT: pxor %xmm6, %xmm2
@@ -3788,7 +3788,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -3805,7 +3805,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -3821,7 +3821,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3833,7 +3833,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: trunc_xor_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -3843,7 +3843,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3854,7 +3854,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3865,7 +3865,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_xor_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -3873,7 +3873,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_xor_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3881,7 +3881,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_xor_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -3898,13 +3898,13 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_xor_const_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: xorps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_const_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
@@ -3912,7 +3912,7 @@ define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_const_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
@@ -3920,7 +3920,7 @@ define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_const_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -3933,7 +3933,7 @@ define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-LABEL: trunc_xor_const_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -3949,7 +3949,7 @@ define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_const_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -3965,7 +3965,7 @@ define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_const_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -3978,7 +3978,7 @@ define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_const_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3990,7 +3990,7 @@ define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-LABEL: trunc_xor_const_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: pslld $16, %xmm0
@@ -4000,7 +4000,7 @@ define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_const_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -4011,7 +4011,7 @@ define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_const_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4019,7 +4019,7 @@ define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_const_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4032,7 +4032,7 @@ define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_xor_const_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm8, %xmm7
; SSE-NEXT: pand %xmm8, %xmm6
@@ -4053,7 +4053,7 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_const_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
@@ -4079,7 +4079,7 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_const_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -4104,7 +4104,7 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_const_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm1, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -4119,7 +4119,7 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_xor_const_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm4, %xmm2
@@ -4132,7 +4132,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_const_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
@@ -4148,7 +4148,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_const_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -4163,7 +4163,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_xor_const_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4175,7 +4175,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: trunc_xor_const_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
@@ -4184,7 +4184,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -4195,7 +4195,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -4206,7 +4206,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4214,7 +4214,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4222,7 +4222,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4239,14 +4239,14 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: trunc_or_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: orps %xmm3, %xmm1
; SSE-NEXT: orps %xmm2, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -4254,7 +4254,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -4263,7 +4263,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -4276,7 +4276,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-LABEL: trunc_or_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: por %xmm6, %xmm2
; SSE-NEXT: por %xmm7, %xmm3
; SSE-NEXT: por %xmm4, %xmm0
@@ -4295,7 +4295,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -4312,7 +4312,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -4327,7 +4327,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4339,7 +4339,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: trunc_or_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
@@ -4350,7 +4350,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -4361,7 +4361,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -4370,7 +4370,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -4383,7 +4383,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE-LABEL: trunc_or_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm2
@@ -4411,7 +4411,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm1
; AVX1-NEXT: vorps %ymm6, %ymm2, %ymm2
@@ -4440,7 +4440,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm7, %ymm3, %ymm3
@@ -4468,7 +4468,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm3, %zmm1, %zmm1
; AVX512-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
@@ -4484,7 +4484,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: trunc_or_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: por %xmm5, %xmm1
; SSE-NEXT: por %xmm6, %xmm2
@@ -4500,7 +4500,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -4517,7 +4517,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -4533,7 +4533,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4545,7 +4545,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind
define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: trunc_or_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -4555,7 +4555,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4566,7 +4566,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4577,7 +4577,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_or_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -4585,7 +4585,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_or_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -4593,7 +4593,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_or_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -4610,13 +4610,13 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_or_const_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: orps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_const_v4i64_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vorps {{.*}}(%rip), %xmm0, %xmm0
@@ -4624,7 +4624,7 @@ define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_const_v4i64_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vorps {{.*}}(%rip), %xmm0, %xmm0
@@ -4632,7 +4632,7 @@ define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_const_v4i64_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4645,7 +4645,7 @@ define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-LABEL: trunc_or_const_v8i64_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -4661,7 +4661,7 @@ define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_const_v8i64_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -4677,7 +4677,7 @@ define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_const_v8i64_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -4690,7 +4690,7 @@ define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_const_v8i64_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4702,7 +4702,7 @@ define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-LABEL: trunc_or_const_v8i32_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: pslld $16, %xmm0
@@ -4712,7 +4712,7 @@ define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_const_v8i32_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -4723,7 +4723,7 @@ define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_const_v8i32_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4731,7 +4731,7 @@ define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_const_v8i32_v8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4744,7 +4744,7 @@ define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_or_const_v16i64_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm8, %xmm7
; SSE-NEXT: pand %xmm8, %xmm6
@@ -4765,7 +4765,7 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_const_v16i64_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
@@ -4791,7 +4791,7 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_const_v16i64_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -4816,7 +4816,7 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_const_v16i64_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm1, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -4831,7 +4831,7 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_or_const_v16i32_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm4, %xmm2
@@ -4844,7 +4844,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_const_v16i32_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
@@ -4860,7 +4860,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_const_v16i32_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -4875,7 +4875,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_or_const_v16i32_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4887,7 +4887,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: trunc_or_const_v16i16_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
@@ -4896,7 +4896,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -4907,7 +4907,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -4918,7 +4918,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4926,7 +4926,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4934,7 +4934,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4951,7 +4951,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: mul_add_const_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
@@ -4982,7 +4982,7 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
; SSE-NEXT: retq
;
; AVX-LABEL: mul_add_const_v4i64_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -4996,7 +4996,7 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: mul_add_self_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrad $31, %xmm3
@@ -5037,7 +5037,7 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwin
; SSE-NEXT: retq
;
; AVX-LABEL: mul_add_self_v4i64_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -5051,7 +5051,7 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwin
define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: mul_add_multiuse_v4i64_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
@@ -5081,7 +5081,7 @@ define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nou
; SSE-NEXT: retq
;
; AVX-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/vector-trunc.ll b/test/CodeGen/X86/vector-trunc.ll
index b5ba1ff24da..62428a558de 100644
--- a/test/CodeGen/X86/vector-trunc.ll
+++ b/test/CodeGen/X86/vector-trunc.ll
@@ -11,14 +11,14 @@
define <8 x i32> @trunc8i64_8i32(<8 x i64> %a) {
; SSE-LABEL: trunc8i64_8i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -27,7 +27,7 @@ define <8 x i32> @trunc8i64_8i32(<8 x i64> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i64_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -36,7 +36,7 @@ define <8 x i32> @trunc8i64_8i32(<8 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: retq
entry:
@@ -46,7 +46,7 @@ entry:
define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
; SSE2-LABEL: trunc8i64_8i32_ashr:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
@@ -61,7 +61,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i64_8i32_ashr:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
; SSSE3-NEXT: psrad $31, %xmm3
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
@@ -76,7 +76,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i64_8i32_ashr:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE41-NEXT: psrad $31, %xmm3
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
@@ -89,7 +89,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i32_ashr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrad $31, %xmm2, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
@@ -104,7 +104,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i64_8i32_ashr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[1,3,2,3,5,7,6,7]
@@ -113,7 +113,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i32_ashr:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: retq
@@ -125,7 +125,7 @@ entry:
define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
; SSE-LABEL: trunc8i64_8i32_lshr:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
@@ -136,7 +136,7 @@ define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i32_lshr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -149,7 +149,7 @@ define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i64_8i32_lshr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -160,7 +160,7 @@ define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i32_lshr:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrlq $32, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: retq
@@ -172,7 +172,7 @@ entry:
define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; SSE2-LABEL: trunc8i64_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -187,7 +187,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i64_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -202,7 +202,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i64_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pxor %xmm4, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
@@ -214,7 +214,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -229,7 +229,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i64_8i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -242,7 +242,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -253,7 +253,7 @@ entry:
define void @trunc8i64_8i8(<8 x i64> %a) {
; SSE-LABEL: trunc8i64_8i8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm4, %xmm2
@@ -267,7 +267,7 @@ define void @trunc8i64_8i8(<8 x i64> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
@@ -284,7 +284,7 @@ define void @trunc8i64_8i8(<8 x i64> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i64_8i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -298,7 +298,7 @@ define void @trunc8i64_8i8(<8 x i64> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovqb %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -310,7 +310,7 @@ entry:
define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; SSE2-LABEL: trunc8i32_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
@@ -319,7 +319,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i32_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
@@ -327,7 +327,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i32_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
@@ -335,7 +335,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i32_8i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -345,7 +345,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i32_8i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -353,7 +353,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -361,13 +361,13 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i16:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -375,7 +375,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i16:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
@@ -386,14 +386,14 @@ entry:
define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; SSE-LABEL: trunc8i32_8i16_ashr:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i32_8i16_ashr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
@@ -402,7 +402,7 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i32_8i16_ashr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -410,7 +410,7 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16_ashr:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -418,14 +418,14 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i16_ashr:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i16_ashr:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -433,7 +433,7 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i16_ashr:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512BWVL-NEXT: vzeroupper
@@ -446,7 +446,7 @@ entry:
define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; SSE2-LABEL: trunc8i32_8i16_lshr:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: pslld $16, %xmm1
@@ -457,7 +457,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i32_8i16_lshr:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,10,11,14,15,14,15,255,255]
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
@@ -465,14 +465,14 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i32_8i16_lshr:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrld $16, %xmm1
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i32_8i16_lshr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -481,7 +481,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i32_8i16_lshr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
@@ -489,7 +489,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16_lshr:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -497,14 +497,14 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i16_lshr:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i16_lshr:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -512,7 +512,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i16_lshr:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512BWVL-NEXT: vzeroupper
@@ -525,7 +525,7 @@ entry:
define void @trunc8i32_8i8(<8 x i32> %a) {
; SSE2-LABEL: trunc8i32_8i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -535,7 +535,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i32_8i8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
@@ -544,7 +544,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i32_8i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
@@ -553,7 +553,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i32_8i8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -564,7 +564,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i32_8i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -573,7 +573,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -582,13 +582,13 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i8:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovdb %ymm0, (%rax)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -597,7 +597,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i8:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
@@ -609,7 +609,7 @@ entry:
define void @trunc16i32_16i16(<16 x i32> %a) {
; SSE2-LABEL: trunc16i32_16i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
@@ -625,7 +625,7 @@ define void @trunc16i32_16i16(<16 x i32> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i32_16i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pslld $16, %xmm1
; SSSE3-NEXT: psrad $16, %xmm1
; SSSE3-NEXT: pslld $16, %xmm0
@@ -641,7 +641,7 @@ define void @trunc16i32_16i16(<16 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i32_16i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pxor %xmm4, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
@@ -654,7 +654,7 @@ define void @trunc16i32_16i16(<16 x i32> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3],xmm2[4],xmm3[5],xmm2[6],xmm3[7]
@@ -670,7 +670,7 @@ define void @trunc16i32_16i16(<16 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -682,7 +682,7 @@ define void @trunc16i32_16i16(<16 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovdw %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -694,7 +694,7 @@ entry:
define void @trunc16i32_16i16_ashr(<16 x i32> %a) {
; SSE-LABEL: trunc16i32_16i16_ashr:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $16, %xmm3
; SSE-NEXT: psrad $16, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -706,7 +706,7 @@ define void @trunc16i32_16i16_ashr(<16 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i16_ashr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrad $16, %xmm2, %xmm2
; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
@@ -721,7 +721,7 @@ define void @trunc16i32_16i16_ashr(<16 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i16_ashr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $16, %ymm1, %ymm1
; AVX2-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
@@ -731,7 +731,7 @@ define void @trunc16i32_16i16_ashr(<16 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i16_ashr:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrld $16, %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
@@ -745,7 +745,7 @@ entry:
define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
; SSE2-LABEL: trunc16i32_16i16_lshr:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrld $16, %xmm2
; SSE2-NEXT: psrld $16, %xmm3
; SSE2-NEXT: psrld $16, %xmm0
@@ -765,7 +765,7 @@ define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i32_16i16_lshr:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrld $16, %xmm2
; SSSE3-NEXT: psrld $16, %xmm3
; SSSE3-NEXT: psrld $16, %xmm0
@@ -785,7 +785,7 @@ define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i32_16i16_lshr:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrld $16, %xmm3
; SSE41-NEXT: psrld $16, %xmm2
; SSE41-NEXT: packusdw %xmm3, %xmm2
@@ -797,7 +797,7 @@ define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i16_lshr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -812,7 +812,7 @@ define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i16_lshr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
@@ -822,7 +822,7 @@ define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i16_lshr:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrld $16, %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
@@ -836,7 +836,7 @@ entry:
define void @trunc16i32_16i8(<16 x i32> %a) {
; SSE-LABEL: trunc16i32_16i8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm4, %xmm2
@@ -849,7 +849,7 @@ define void @trunc16i32_16i8(<16 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
@@ -865,7 +865,7 @@ define void @trunc16i32_16i8(<16 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -880,7 +880,7 @@ define void @trunc16i32_16i8(<16 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovdb %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -892,7 +892,7 @@ entry:
define void @trunc16i32_16i8_ashr(<16 x i32> %a) {
; SSE-LABEL: trunc16i32_16i8_ashr:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $24, %xmm1
; SSE-NEXT: psrad $24, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -904,7 +904,7 @@ define void @trunc16i32_16i8_ashr(<16 x i32> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i8_ashr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrad $24, %xmm2, %xmm2
; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
@@ -919,7 +919,7 @@ define void @trunc16i32_16i8_ashr(<16 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i8_ashr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $24, %ymm1, %ymm1
; AVX2-NEXT: vpsrad $24, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -931,7 +931,7 @@ define void @trunc16i32_16i8_ashr(<16 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i8_ashr:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrld $24, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
@@ -945,7 +945,7 @@ entry:
define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
; SSE2-LABEL: trunc16i32_16i8_lshr:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrld $24, %xmm1
; SSE2-NEXT: psrld $24, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
@@ -957,7 +957,7 @@ define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i32_16i8_lshr:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrld $24, %xmm1
; SSSE3-NEXT: psrld $24, %xmm0
; SSSE3-NEXT: packuswb %xmm1, %xmm0
@@ -969,7 +969,7 @@ define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i32_16i8_lshr:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrld $24, %xmm1
; SSE41-NEXT: psrld $24, %xmm0
; SSE41-NEXT: packssdw %xmm1, %xmm0
@@ -981,7 +981,7 @@ define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i8_lshr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrld $24, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $24, %xmm0, %xmm0
@@ -996,7 +996,7 @@ define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i8_lshr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrld $24, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $24, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
@@ -1008,7 +1008,7 @@ define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i8_lshr:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrld $24, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
@@ -1023,7 +1023,7 @@ entry:
;PR25684
define void @trunc16i16_16i8(<16 x i16> %a) {
; SSE2-LABEL: trunc16i16_16i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -1032,7 +1032,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i16_16i8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
@@ -1041,7 +1041,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i16_16i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
@@ -1050,7 +1050,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i16_16i8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1061,7 +1061,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i16_16i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1072,7 +1072,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc16i16_16i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vmovdqu %xmm0, (%rax)
@@ -1080,7 +1080,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc16i16_16i8:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vmovdqu %xmm0, (%rax)
@@ -1088,7 +1088,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc16i16_16i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
@@ -1096,7 +1096,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc16i16_16i8:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
@@ -1108,7 +1108,7 @@ entry:
define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; SSE-LABEL: trunc16i16_16i8_ashr:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psraw $8, %xmm1
; SSE-NEXT: psraw $8, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
@@ -1116,7 +1116,7 @@ define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i16_16i8_ashr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
@@ -1126,7 +1126,7 @@ define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i16_16i8_ashr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -1135,7 +1135,7 @@ define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc16i16_16i8_ashr:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -1144,7 +1144,7 @@ define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc16i16_16i8_ashr:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
@@ -1153,7 +1153,7 @@ define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc16i16_16i8_ashr:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
@@ -1161,7 +1161,7 @@ define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc16i16_16i8_ashr:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
@@ -1175,7 +1175,7 @@ entry:
define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; SSE-LABEL: trunc16i16_16i8_lshr:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
@@ -1183,7 +1183,7 @@ define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i16_16i8_lshr:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
@@ -1193,7 +1193,7 @@ define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i16_16i8_lshr:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
@@ -1202,7 +1202,7 @@ define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc16i16_16i8_lshr:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -1211,7 +1211,7 @@ define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc16i16_16i8_lshr:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
@@ -1220,7 +1220,7 @@ define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc16i16_16i8_lshr:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
@@ -1228,7 +1228,7 @@ define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc16i16_16i8_lshr:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
@@ -1242,7 +1242,7 @@ entry:
define void @trunc32i16_32i8(<32 x i16> %a) {
; SSE2-LABEL: trunc32i16_32i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm0
@@ -1255,7 +1255,7 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc32i16_32i8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm4, %xmm1
; SSSE3-NEXT: pshufb %xmm4, %xmm0
@@ -1268,7 +1268,7 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc32i16_32i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm4, %xmm1
; SSE41-NEXT: pshufb %xmm4, %xmm0
@@ -1281,7 +1281,7 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc32i16_32i8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -1297,7 +1297,7 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc32i16_32i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -1313,7 +1313,7 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc32i16_32i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
@@ -1324,7 +1324,7 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc32i16_32i8:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vpmovsxwd %ymm1, %zmm1
@@ -1335,13 +1335,13 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc32i16_32i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovwb %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc32i16_32i8:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
@@ -1353,14 +1353,14 @@ entry:
define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
; SSE-LABEL: trunc2x4i64_8i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc2x4i64_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -1369,7 +1369,7 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc2x4i64_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -1378,7 +1378,7 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc2x4i64_8i32:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
@@ -1387,14 +1387,14 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc2x4i64_8i32:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
; AVX512VL-NEXT: vpmovqd %ymm1, %xmm1
; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc2x4i64_8i32:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
@@ -1403,7 +1403,7 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc2x4i64_8i32:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovqd %ymm0, %xmm0
; AVX512BWVL-NEXT: vpmovqd %ymm1, %xmm1
; AVX512BWVL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -1417,7 +1417,7 @@ entry:
define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: trunc2x4i64_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1432,7 +1432,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc2x4i64_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1447,7 +1447,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc2x4i64_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
@@ -1462,7 +1462,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc2x4i64_8i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -1475,7 +1475,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc2x4i64_8i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -1488,7 +1488,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc2x4i64_8i16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
@@ -1501,7 +1501,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc2x4i64_8i16:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
; AVX512VL-NEXT: vpmovqd %ymm1, %xmm1
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -1515,7 +1515,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc2x4i64_8i16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
@@ -1528,7 +1528,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc2x4i64_8i16:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovqd %ymm0, %xmm0
; AVX512BWVL-NEXT: vpmovqd %ymm1, %xmm1
; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -1549,17 +1549,17 @@ entry:
define <4 x i32> @trunc2x2i64_4i32(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: trunc2x2i64_4i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX-LABEL: trunc2x2i64_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc2x2i64_4i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512-NEXT: retq
entry:
@@ -1571,37 +1571,37 @@ entry:
define i64 @trunc2i64_i64(<2 x i64> %inval) {
; SSE-LABEL: trunc2i64_i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX-LABEL: trunc2i64_i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc2i64_i64:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc2i64_i64:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovqd %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc2i64_i64:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc2i64_i64:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovqd %xmm0, -{{[0-9]+}}(%rsp)
; AVX512BWVL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512BWVL-NEXT: retq
@@ -1613,7 +1613,7 @@ entry:
define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: trunc2x4i32_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -1624,7 +1624,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc2x4i32_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
@@ -1632,7 +1632,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc2x4i32_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
@@ -1640,7 +1640,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc2x4i32_8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
@@ -1648,7 +1648,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc2x4i32_8i16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
@@ -1656,7 +1656,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc2x4i32_8i16:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -1667,7 +1667,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc2x4i32_8i16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
@@ -1675,7 +1675,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc2x4i32_8i16:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -1694,7 +1694,7 @@ entry:
; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
define i64 @trunc4i32_i64(<4 x i32> %inval) {
; SSE2-LABEL: trunc4i32_i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1702,43 +1702,43 @@ define i64 @trunc4i32_i64(<4 x i32> %inval) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc4i32_i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT: movq %xmm0, %rax
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc4i32_i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT: movq %xmm0, %rax
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc4i32_i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc4i32_i64:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc4i32_i64:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc4i32_i64:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc4i32_i64:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512BWVL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512BWVL-NEXT: retq
@@ -1750,7 +1750,7 @@ entry:
define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: trunc2x8i16_16i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -1758,7 +1758,7 @@ define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc2x8i16_16i8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
@@ -1766,7 +1766,7 @@ define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc2x8i16_16i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
@@ -1774,7 +1774,7 @@ define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc2x8i16_16i8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
@@ -1782,7 +1782,7 @@ define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc2x8i16_16i8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX512-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpshufb %xmm2, %xmm0, %xmm0
@@ -1798,50 +1798,50 @@ entry:
; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
define i64 @trunc8i16_i64(<8 x i16> %inval) {
; SSE2-LABEL: trunc8i16_i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i16_i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movq %xmm0, %rax
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i16_i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE41-NEXT: movq %xmm0, %rax
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc8i16_i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc8i16_i64:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i16_i64:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512VL-NEXT: vmovq %xmm0, %rax
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i16_i64:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i16_i64:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovwb %xmm0, -{{[0-9]+}}(%rsp)
; AVX512BWVL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512BWVL-NEXT: retq
@@ -1853,32 +1853,32 @@ entry:
define <16 x i8> @trunc16i64_16i8_const() {
; SSE-LABEL: trunc16i64_16i8_const:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: trunc16i64_16i8_const:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc16i64_16i8_const:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc16i64_16i8_const:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc16i64_16i8_const:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc16i64_16i8_const:
-; AVX512BWVL: # BB#0: # %entry
+; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512BWVL-NEXT: retq
@@ -1890,7 +1890,7 @@ entry:
define void @PR34773(i16* %a0, i8* %a1) {
; SSE-LABEL: PR34773:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqu (%rdi), %xmm0
; SSE-NEXT: movdqu 16(%rdi), %xmm1
; SSE-NEXT: movdqu 32(%rdi), %xmm2
@@ -1906,7 +1906,7 @@ define void @PR34773(i16* %a0, i8* %a1) {
; SSE-NEXT: retq
;
; AVX1-LABEL: PR34773:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1923,7 +1923,7 @@ define void @PR34773(i16* %a0, i8* %a1) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR34773:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
@@ -1938,7 +1938,7 @@ define void @PR34773(i16* %a0, i8* %a1) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: PR34773:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqu (%rdi), %ymm0
; AVX512F-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
@@ -1953,7 +1953,7 @@ define void @PR34773(i16* %a0, i8* %a1) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: PR34773:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqu (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
@@ -1968,7 +1968,7 @@ define void @PR34773(i16* %a0, i8* %a1) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: PR34773:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqu (%rdi), %ymm0
; AVX512BW-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm0, %ymm0
@@ -1981,7 +1981,7 @@ define void @PR34773(i16* %a0, i8* %a1) {
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: PR34773:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $8, (%rdi), %ymm0
; AVX512BWVL-NEXT: vpsrlw $8, 32(%rdi), %ymm1
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi)
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
index 8a3f9621b33..15ff8f78d32 100644
--- a/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -16,7 +16,7 @@
define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubq %xmm0, %xmm2
@@ -41,7 +41,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv2i64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: pxor %xmm2, %xmm2
; SSE3-NEXT: psubq %xmm0, %xmm2
@@ -66,7 +66,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: psubq %xmm0, %xmm2
@@ -87,7 +87,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubq %xmm0, %xmm2
@@ -108,7 +108,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -126,7 +126,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv2i64:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -138,7 +138,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv2i64:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; BITALG_NOVLX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -156,7 +156,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv2i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; BITALG-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -174,7 +174,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: testv2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: psubq %xmm0, %xmm2
@@ -198,7 +198,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubq %xmm0, %xmm2
@@ -223,7 +223,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv2i64u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: pxor %xmm2, %xmm2
; SSE3-NEXT: psubq %xmm0, %xmm2
@@ -248,7 +248,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv2i64u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: psubq %xmm0, %xmm2
@@ -269,7 +269,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv2i64u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubq %xmm0, %xmm2
@@ -290,7 +290,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: testv2i64u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -308,7 +308,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv2i64u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -326,7 +326,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv2i64u:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -336,7 +336,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv2i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX512CD-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -347,7 +347,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv2i64u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -359,7 +359,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv2i64u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; BITALG_NOVLX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -377,7 +377,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv2i64u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; BITALG-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -395,7 +395,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: testv2i64u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: psubq %xmm0, %xmm2
@@ -419,7 +419,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubd %xmm0, %xmm2
@@ -449,7 +449,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv4i32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: pxor %xmm2, %xmm2
; SSE3-NEXT: psubd %xmm0, %xmm2
@@ -479,7 +479,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: psubd %xmm0, %xmm2
@@ -505,7 +505,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubd %xmm0, %xmm2
@@ -530,7 +530,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: testv4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -552,7 +552,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -574,7 +574,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv4i32:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX512CDVL-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -596,7 +596,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv4i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX512CD-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -618,7 +618,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv4i32:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -630,7 +630,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i32:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; BITALG_NOVLX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -652,7 +652,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv4i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; BITALG-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -674,7 +674,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: testv4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: psubd %xmm0, %xmm2
@@ -703,7 +703,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubd %xmm0, %xmm2
@@ -733,7 +733,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv4i32u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: pxor %xmm2, %xmm2
; SSE3-NEXT: psubd %xmm0, %xmm2
@@ -763,7 +763,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv4i32u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: psubd %xmm0, %xmm2
@@ -789,7 +789,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv4i32u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubd %xmm0, %xmm2
@@ -814,7 +814,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX1-LABEL: testv4i32u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -836,7 +836,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i32u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -858,7 +858,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv4i32u:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -868,7 +868,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv4i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubd %xmm0, %xmm1, %xmm1
; AVX512CD-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -879,7 +879,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv4i32u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -891,7 +891,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i32u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; BITALG_NOVLX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -913,7 +913,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv4i32u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; BITALG-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -935,7 +935,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: testv4i32u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: psubd %xmm0, %xmm2
@@ -964,7 +964,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: psubw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
@@ -991,7 +991,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv8i16:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: psubw %xmm0, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
@@ -1018,7 +1018,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: psubw %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm1
@@ -1041,7 +1041,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: psubw %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
@@ -1064,7 +1064,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1084,7 +1084,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i16:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1097,7 +1097,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i16:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1109,7 +1109,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv8i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; BITALG-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1119,7 +1119,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: testv8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: psubw %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
@@ -1146,7 +1146,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: psubw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
@@ -1173,7 +1173,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv8i16u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: psubw %xmm0, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
@@ -1200,7 +1200,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv8i16u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: psubw %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm1
@@ -1223,7 +1223,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv8i16u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: psubw %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
@@ -1246,7 +1246,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv8i16u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1266,7 +1266,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i16u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1279,7 +1279,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i16u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1291,7 +1291,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv8i16u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; BITALG-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1301,7 +1301,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: testv8i16u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: psubw %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
@@ -1328,7 +1328,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: psubb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
@@ -1351,7 +1351,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv16i8:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: psubb %xmm0, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
@@ -1374,7 +1374,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: psubb %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm1
@@ -1393,7 +1393,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: psubb %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
@@ -1412,7 +1412,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1429,7 +1429,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i8:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1442,7 +1442,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv16i8:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1454,7 +1454,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; BITALG-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1464,7 +1464,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: testv16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: psubb %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
@@ -1487,7 +1487,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: psubb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
@@ -1510,7 +1510,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv16i8u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: psubb %xmm0, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
@@ -1533,7 +1533,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv16i8u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: psubb %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm1
@@ -1552,7 +1552,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv16i8u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: psubb %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
@@ -1571,7 +1571,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: testv16i8u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1588,7 +1588,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i8u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1601,7 +1601,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv16i8u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1613,7 +1613,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i8u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; BITALG-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1623,7 +1623,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: testv16i8u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: psubb %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
@@ -1646,37 +1646,37 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
define <2 x i64> @foldv2i64() nounwind {
; SSE-LABEL: foldv2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $8, %eax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: foldv2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movl $8, %eax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: foldv2i64:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: movl $8, %eax
; AVX512VPOPCNTDQ-NEXT: vmovq %rax, %xmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv2i64:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: movl $8, %eax
; BITALG_NOVLX-NEXT: vmovq %rax, %xmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv2i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: movl $8, %eax
; BITALG-NEXT: vmovq %rax, %xmm0
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: foldv2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl $8, %eax
; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
@@ -1686,37 +1686,37 @@ define <2 x i64> @foldv2i64() nounwind {
define <2 x i64> @foldv2i64u() nounwind {
; SSE-LABEL: foldv2i64u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $8, %eax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: foldv2i64u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movl $8, %eax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: foldv2i64u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: movl $8, %eax
; AVX512VPOPCNTDQ-NEXT: vmovq %rax, %xmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv2i64u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: movl $8, %eax
; BITALG_NOVLX-NEXT: vmovq %rax, %xmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv2i64u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: movl $8, %eax
; BITALG-NEXT: vmovq %rax, %xmm0
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: foldv2i64u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl $8, %eax
; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
@@ -1726,32 +1726,32 @@ define <2 x i64> @foldv2i64u() nounwind {
define <4 x i32> @foldv4i32() nounwind {
; SSE-LABEL: foldv4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: foldv4i32:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv4i32:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv4i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
; X32-SSE-NEXT: retl
%out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
@@ -1760,32 +1760,32 @@ define <4 x i32> @foldv4i32() nounwind {
define <4 x i32> @foldv4i32u() nounwind {
; SSE-LABEL: foldv4i32u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv4i32u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: foldv4i32u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv4i32u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv4i32u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
; X32-SSE-NEXT: retl
%out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
@@ -1794,32 +1794,32 @@ define <4 x i32> @foldv4i32u() nounwind {
define <8 x i16> @foldv8i16() nounwind {
; SSE-LABEL: foldv8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: foldv8i16:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv8i16:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv8i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; X32-SSE-NEXT: retl
%out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
@@ -1828,32 +1828,32 @@ define <8 x i16> @foldv8i16() nounwind {
define <8 x i16> @foldv8i16u() nounwind {
; SSE-LABEL: foldv8i16u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv8i16u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: foldv8i16u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv8i16u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv8i16u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; X32-SSE-NEXT: retl
%out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
@@ -1862,32 +1862,32 @@ define <8 x i16> @foldv8i16u() nounwind {
define <16 x i8> @foldv16i8() nounwind {
; SSE-LABEL: foldv16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: foldv16i8:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv16i8:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv16i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; X32-SSE-NEXT: retl
%out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
@@ -1896,32 +1896,32 @@ define <16 x i8> @foldv16i8() nounwind {
define <16 x i8> @foldv16i8u() nounwind {
; SSE-LABEL: foldv16i8u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; SSE-NEXT: retq
;
; AVX-LABEL: foldv16i8u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; AVX-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: foldv16i8u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv16i8u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv16i8u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; BITALG-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; X32-SSE-NEXT: retl
%out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
index 6de28399a2f..cc2bcd8710d 100644
--- a/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -12,7 +12,7 @@
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm3
@@ -42,7 +42,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -60,7 +60,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv4i64:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -78,7 +78,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv4i64:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -96,7 +96,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv4i64:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -107,7 +107,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; BITALG_NOVLX-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -125,7 +125,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv4i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; BITALG-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -143,7 +143,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: testv4i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -164,7 +164,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm3
@@ -194,7 +194,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i64u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -212,7 +212,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv4i64u:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -222,7 +222,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv4i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubq %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -232,7 +232,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv4i64u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -243,7 +243,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; BITALG_NOVLX-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -261,7 +261,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv4i64u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; BITALG-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -279,7 +279,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: testv4i64u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -300,7 +300,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3
@@ -338,7 +338,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -360,7 +360,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv8i32:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -382,7 +382,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv8i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -404,7 +404,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i32:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -415,7 +415,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; BITALG_NOVLX-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -437,7 +437,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv8i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; BITALG-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -459,7 +459,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: testv8i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -485,7 +485,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3
@@ -523,7 +523,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv8i32u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -545,7 +545,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv8i32u:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -555,7 +555,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv8i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubd %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -565,7 +565,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i32u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -576,7 +576,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; BITALG_NOVLX-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -598,7 +598,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv8i32u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; BITALG-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -620,7 +620,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: testv8i32u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -646,7 +646,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm2
@@ -680,7 +680,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -700,7 +700,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv16i16:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -720,7 +720,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv16i16:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -740,7 +740,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i16:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -752,7 +752,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv16i16:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -763,7 +763,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; BITALG-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -773,7 +773,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: testv16i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -797,7 +797,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm2
@@ -831,7 +831,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv16i16u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -851,7 +851,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv16i16u:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -871,7 +871,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv16i16u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -891,7 +891,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i16u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -903,7 +903,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv16i16u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -914,7 +914,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; BITALG-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -924,7 +924,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: testv16i16u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -948,7 +948,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm3
@@ -976,7 +976,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -993,7 +993,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv32i8:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1010,7 +1010,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv32i8:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1027,7 +1027,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv32i8:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1044,7 +1044,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv32i8:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1055,7 +1055,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; BITALG-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1065,7 +1065,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: testv32i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1086,7 +1086,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm3
@@ -1114,7 +1114,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv32i8u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1131,7 +1131,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX2-NEXT: retq
;
; AVX512CDVL-LABEL: testv32i8u:
-; AVX512CDVL: # BB#0:
+; AVX512CDVL: # %bb.0:
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1148,7 +1148,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX512CDVL-NEXT: retq
;
; AVX512CD-LABEL: testv32i8u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1165,7 +1165,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv32i8u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1182,7 +1182,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv32i8u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1193,7 +1193,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; BITALG-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1203,7 +1203,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: testv32i8u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -1224,22 +1224,22 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
define <4 x i64> @foldv4i64() nounwind {
; AVX-LABEL: foldv4i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv4i64:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv4i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: foldv4i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
@@ -1248,22 +1248,22 @@ define <4 x i64> @foldv4i64() nounwind {
define <4 x i64> @foldv4i64u() nounwind {
; AVX-LABEL: foldv4i64u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv4i64u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv4i64u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: foldv4i64u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
@@ -1272,22 +1272,22 @@ define <4 x i64> @foldv4i64u() nounwind {
define <8 x i32> @foldv8i32() nounwind {
; AVX-LABEL: foldv8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv8i32:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv8i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: foldv8i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
; X32-AVX-NEXT: retl
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
@@ -1296,22 +1296,22 @@ define <8 x i32> @foldv8i32() nounwind {
define <8 x i32> @foldv8i32u() nounwind {
; AVX-LABEL: foldv8i32u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv8i32u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv8i32u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: foldv8i32u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
; X32-AVX-NEXT: retl
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
@@ -1320,22 +1320,22 @@ define <8 x i32> @foldv8i32u() nounwind {
define <16 x i16> @foldv16i16() nounwind {
; AVX-LABEL: foldv16i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv16i16:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv16i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: foldv16i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
; X32-AVX-NEXT: retl
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
@@ -1344,22 +1344,22 @@ define <16 x i16> @foldv16i16() nounwind {
define <16 x i16> @foldv16i16u() nounwind {
; AVX-LABEL: foldv16i16u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv16i16u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv16i16u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: foldv16i16u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
; X32-AVX-NEXT: retl
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
@@ -1368,22 +1368,22 @@ define <16 x i16> @foldv16i16u() nounwind {
define <32 x i8> @foldv32i8() nounwind {
; AVX-LABEL: foldv32i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv32i8:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv32i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: foldv32i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
; X32-AVX-NEXT: retl
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
@@ -1392,22 +1392,22 @@ define <32 x i8> @foldv32i8() nounwind {
define <32 x i8> @foldv32i8u() nounwind {
; AVX-LABEL: foldv32i8u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
; AVX-NEXT: retq
;
; BITALG_NOVLX-LABEL: foldv32i8u:
-; BITALG_NOVLX: # BB#0:
+; BITALG_NOVLX: # %bb.0:
; BITALG_NOVLX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: foldv32i8u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
; BITALG-NEXT: retq
;
; X32-AVX-LABEL: foldv32i8u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
; X32-AVX-NEXT: retl
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
diff --git a/test/CodeGen/X86/vector-tzcnt-512.ll b/test/CodeGen/X86/vector-tzcnt-512.ll
index 40dea81a10f..37c86f7f81a 100644
--- a/test/CodeGen/X86/vector-tzcnt-512.ll
+++ b/test/CodeGen/X86/vector-tzcnt-512.ll
@@ -7,7 +7,7 @@
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -35,7 +35,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv8i64:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -53,7 +53,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -71,7 +71,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i64:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -81,7 +81,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv8i64:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; BITALG-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -103,7 +103,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -113,7 +113,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv8i64u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -123,7 +123,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv8i64u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -141,7 +141,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv8i64u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -151,7 +151,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv8i64u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; BITALG-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -173,7 +173,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -209,7 +209,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv16i32:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -231,7 +231,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -253,7 +253,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i32:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -263,7 +263,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv16i32:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; BITALG-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -289,7 +289,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -299,7 +299,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv16i32u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -309,7 +309,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv16i32u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -331,7 +331,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv16i32u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -341,7 +341,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv16i32u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; BITALG-NEXT: vpandq %zmm2, %zmm0, %zmm0
@@ -367,7 +367,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CD-LABEL: testv32i16:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512CD-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -399,7 +399,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv32i16:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDBW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -419,7 +419,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -439,7 +439,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv32i16:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -457,7 +457,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv32i16:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; BITALG-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -471,7 +471,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CD-LABEL: testv32i16u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512CD-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -503,7 +503,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv32i16u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDBW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -523,7 +523,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv32i16u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -543,7 +543,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv32i16u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -561,7 +561,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv32i16u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; BITALG-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -575,7 +575,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CD-LABEL: testv64i8:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512CD-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -601,7 +601,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv64i8:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDBW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -618,7 +618,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -635,7 +635,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv64i8:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -661,7 +661,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv64i8:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; BITALG-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -675,7 +675,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CD-LABEL: testv64i8u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512CD-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -701,7 +701,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv64i8u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDBW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -718,7 +718,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv64i8u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
@@ -735,7 +735,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512BW-NEXT: retq
;
; AVX512VPOPCNTDQ-LABEL: testv64i8u:
-; AVX512VPOPCNTDQ: # BB#0:
+; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
@@ -761,7 +761,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv64i8u:
-; BITALG: # BB#0:
+; BITALG: # %bb.0:
; BITALG-NEXT: vpxor %xmm1, %xmm1, %xmm1
; BITALG-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; BITALG-NEXT: vpandq %zmm1, %zmm0, %zmm0
diff --git a/test/CodeGen/X86/vector-unsigned-cmp.ll b/test/CodeGen/X86/vector-unsigned-cmp.ll
index 3e4b9aedf2b..f4fd54f8da9 100644
--- a/test/CodeGen/X86/vector-unsigned-cmp.ll
+++ b/test/CodeGen/X86/vector-unsigned-cmp.ll
@@ -10,7 +10,7 @@
define <2 x i1> @ugt_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE-LABEL: ugt_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $1, %xmm0
; SSE-NEXT: psrlq $1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
@@ -27,7 +27,7 @@ define <2 x i1> @ugt_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: ugt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
@@ -40,7 +40,7 @@ define <2 x i1> @ugt_v2i64(<2 x i64> %x, <2 x i64> %y) {
define <2 x i1> @ult_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE-LABEL: ult_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $1, %xmm0
; SSE-NEXT: psrlq $1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
@@ -57,7 +57,7 @@ define <2 x i1> @ult_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: ult_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -70,7 +70,7 @@ define <2 x i1> @ult_v2i64(<2 x i64> %x, <2 x i64> %y) {
define <2 x i1> @uge_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE-LABEL: uge_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $1, %xmm0
; SSE-NEXT: psrlq $1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
@@ -89,7 +89,7 @@ define <2 x i1> @uge_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: uge_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -104,7 +104,7 @@ define <2 x i1> @uge_v2i64(<2 x i64> %x, <2 x i64> %y) {
define <2 x i1> @ule_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE-LABEL: ule_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlq $1, %xmm0
; SSE-NEXT: psrlq $1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
@@ -123,7 +123,7 @@ define <2 x i1> @ule_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: ule_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
@@ -138,14 +138,14 @@ define <2 x i1> @ule_v2i64(<2 x i64> %x, <2 x i64> %y) {
define <4 x i1> @ugt_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: ugt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $1, %xmm0
; SSE-NEXT: psrld $1, %xmm1
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ugt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -158,7 +158,7 @@ define <4 x i1> @ugt_v4i32(<4 x i32> %x, <4 x i32> %y) {
define <4 x i1> @ult_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: ult_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrld $1, %xmm0
; SSE-NEXT: psrld $1, %xmm1
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
@@ -166,7 +166,7 @@ define <4 x i1> @ult_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: ult_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
@@ -179,7 +179,7 @@ define <4 x i1> @ult_v4i32(<4 x i32> %x, <4 x i32> %y) {
define <4 x i1> @uge_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: uge_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
@@ -188,7 +188,7 @@ define <4 x i1> @uge_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: uge_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pmaxud %xmm0, %xmm1
@@ -196,7 +196,7 @@ define <4 x i1> @uge_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE41-NEXT: retq
;
; AVX-LABEL: uge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
@@ -210,7 +210,7 @@ define <4 x i1> @uge_v4i32(<4 x i32> %x, <4 x i32> %y) {
define <4 x i1> @ule_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: ule_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
@@ -219,7 +219,7 @@ define <4 x i1> @ule_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: ule_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pminud %xmm0, %xmm1
@@ -227,7 +227,7 @@ define <4 x i1> @ule_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE41-NEXT: retq
;
; AVX-LABEL: ule_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm1
@@ -241,14 +241,14 @@ define <4 x i1> @ule_v4i32(<4 x i32> %x, <4 x i32> %y) {
define <8 x i1> @ugt_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE-LABEL: ugt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ugt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
@@ -261,7 +261,7 @@ define <8 x i1> @ugt_v8i16(<8 x i16> %x, <8 x i16> %y) {
define <8 x i1> @ult_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE-LABEL: ult_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: pcmpgtw %xmm0, %xmm1
@@ -269,7 +269,7 @@ define <8 x i1> @ult_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: ult_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
@@ -282,7 +282,7 @@ define <8 x i1> @ult_v8i16(<8 x i16> %x, <8 x i16> %y) {
define <8 x i1> @uge_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE2-LABEL: uge_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: psubusw %xmm0, %xmm1
@@ -291,7 +291,7 @@ define <8 x i1> @uge_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: uge_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: psrlw $1, %xmm0
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
@@ -299,7 +299,7 @@ define <8 x i1> @uge_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE41-NEXT: retq
;
; AVX-LABEL: uge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
@@ -313,7 +313,7 @@ define <8 x i1> @uge_v8i16(<8 x i16> %x, <8 x i16> %y) {
define <8 x i1> @ule_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE2-LABEL: ule_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: psubusw %xmm1, %xmm0
@@ -322,7 +322,7 @@ define <8 x i1> @ule_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: ule_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: psrlw $1, %xmm0
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pminuw %xmm0, %xmm1
@@ -330,7 +330,7 @@ define <8 x i1> @ule_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE41-NEXT: retq
;
; AVX-LABEL: ule_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm1
@@ -344,7 +344,7 @@ define <8 x i1> @ule_v8i16(<8 x i16> %x, <8 x i16> %y) {
define <16 x i1> @ugt_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: ugt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT: pand %xmm2, %xmm0
@@ -354,7 +354,7 @@ define <16 x i1> @ugt_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: ugt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -370,7 +370,7 @@ define <16 x i1> @ugt_v16i8(<16 x i8> %x, <16 x i8> %y) {
define <16 x i1> @ult_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: ult_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT: pand %xmm2, %xmm0
@@ -381,7 +381,7 @@ define <16 x i1> @ult_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: ult_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -397,7 +397,7 @@ define <16 x i1> @ult_v16i8(<16 x i8> %x, <16 x i8> %y) {
define <16 x i1> @uge_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: uge_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT: pand %xmm2, %xmm0
@@ -408,7 +408,7 @@ define <16 x i1> @uge_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: uge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -425,7 +425,7 @@ define <16 x i1> @uge_v16i8(<16 x i8> %x, <16 x i8> %y) {
define <16 x i1> @ule_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: ule_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT: pand %xmm2, %xmm0
@@ -436,7 +436,7 @@ define <16 x i1> @ule_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: ule_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vector-zext.ll b/test/CodeGen/X86/vector-zext.ll
index b0544169dad..94eadd8c1aa 100644
--- a/test/CodeGen/X86/vector-zext.ll
+++ b/test/CodeGen/X86/vector-zext.ll
@@ -9,24 +9,24 @@
define <8 x i16> @zext_16i8_to_8i16(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_16i8_to_8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: retq
entry:
@@ -38,7 +38,7 @@ entry:
; PR17654
define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; SSE2-LABEL: zext_16i8_to_16i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
@@ -46,7 +46,7 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_16i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
@@ -54,7 +54,7 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_16i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -62,7 +62,7 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_16i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -70,12 +70,12 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_16i16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512-NEXT: retq
entry:
@@ -85,7 +85,7 @@ entry:
define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; SSE2-LABEL: zext_32i8_to_32i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -97,7 +97,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_32i8_to_32i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm1, %xmm3
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm4, %xmm4
@@ -109,7 +109,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_32i8_to_32i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -121,7 +121,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_32i8_to_32i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
@@ -135,7 +135,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_32i8_to_32i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -143,7 +143,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: zext_32i8_to_32i16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -151,7 +151,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: zext_32i8_to_32i16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: retq
entry:
@@ -161,26 +161,26 @@ entry:
define <4 x i32> @zext_16i8_to_4i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_16i8_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -191,7 +191,7 @@ entry:
define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -201,7 +201,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -211,7 +211,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -219,7 +219,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -227,12 +227,12 @@ define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -243,7 +243,7 @@ entry:
define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_16i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm1
@@ -258,7 +258,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_16i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm3, %xmm1
@@ -273,7 +273,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_16i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
@@ -285,7 +285,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_16i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -299,7 +299,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -307,7 +307,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_16i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -317,7 +317,7 @@ entry:
define <2 x i64> @zext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -325,17 +325,17 @@ define <2 x i64> @zext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_16i8_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -346,7 +346,7 @@ entry:
define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -357,14 +357,14 @@ define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2],zero,zero,zero,zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -372,7 +372,7 @@ define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -380,12 +380,12 @@ define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -396,7 +396,7 @@ entry:
define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
@@ -413,7 +413,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,128,128,128,128,128,128,128,1,128,128,128,128,128,128,128]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
@@ -426,7 +426,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $16, %xmm1
@@ -439,7 +439,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
@@ -453,7 +453,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
@@ -461,7 +461,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -472,24 +472,24 @@ entry:
define <4 x i32> @zext_8i16_to_4i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_8i16_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: retq
entry:
@@ -500,7 +500,7 @@ entry:
define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -508,7 +508,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -516,7 +516,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -524,7 +524,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -532,12 +532,12 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i16_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
entry:
@@ -547,7 +547,7 @@ entry:
define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i16_to_16i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -559,7 +559,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i16_to_16i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm1, %xmm3
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm4, %xmm4
@@ -571,7 +571,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i16_to_16i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -583,7 +583,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i16_to_16i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
@@ -597,7 +597,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i16_to_16i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -605,7 +605,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i16_to_16i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: retq
entry:
@@ -615,26 +615,26 @@ entry:
define <2 x i64> @zext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_8i16_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -645,7 +645,7 @@ entry:
define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -655,7 +655,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -665,7 +665,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -673,7 +673,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -681,12 +681,12 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i16_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -697,7 +697,7 @@ entry:
define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm1
@@ -712,7 +712,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm3, %xmm1
@@ -727,7 +727,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
@@ -739,7 +739,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
@@ -753,7 +753,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -761,7 +761,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i16_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -771,24 +771,24 @@ entry:
define <2 x i64> @zext_4i32_to_2i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_4i32_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_4i32_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_4i32_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_4i32_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
entry:
@@ -799,7 +799,7 @@ entry:
define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -807,7 +807,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps %xmm0, %xmm1
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -815,7 +815,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
@@ -823,7 +823,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -831,12 +831,12 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_4i32_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: retq
entry:
@@ -846,7 +846,7 @@ entry:
define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i32_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: xorps %xmm4, %xmm4
@@ -858,7 +858,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i32_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps %xmm1, %xmm3
; SSSE3-NEXT: movaps %xmm0, %xmm1
; SSSE3-NEXT: xorps %xmm4, %xmm4
@@ -870,7 +870,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i32_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -882,7 +882,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i32_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
@@ -896,7 +896,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i32_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -904,7 +904,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i32_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512-NEXT: retq
entry:
@@ -914,7 +914,7 @@ entry:
define <2 x i64> @load_zext_2i8_to_2i64(<2 x i8> *%ptr) {
; SSE2-LABEL: load_zext_2i8_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -924,19 +924,19 @@ define <2 x i64> @load_zext_2i8_to_2i64(<2 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_2i8_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movzwl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_2i8_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_2i8_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -947,7 +947,7 @@ entry:
define <4 x i32> @load_zext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSE2-LABEL: load_zext_4i8_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -955,7 +955,7 @@ define <4 x i32> @load_zext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i8_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -963,12 +963,12 @@ define <4 x i32> @load_zext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i8_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_4i8_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -979,7 +979,7 @@ entry:
define <4 x i64> @load_zext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-LABEL: load_zext_4i8_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -990,7 +990,7 @@ define <4 x i64> @load_zext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i8_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -998,25 +998,25 @@ define <4 x i64> @load_zext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i8_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_4i8_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_4i8_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_4i8_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1027,26 +1027,26 @@ entry:
define <8 x i16> @load_zext_8i8_to_8i16(<8 x i8> *%ptr) {
; SSE2-LABEL: load_zext_8i8_to_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_8i8_to_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_8i8_to_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_8i8_to_8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX-NEXT: retq
entry:
@@ -1057,7 +1057,7 @@ entry:
define <8 x i32> @load_zext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSE2-LABEL: load_zext_8i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1067,7 +1067,7 @@ define <8 x i32> @load_zext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_8i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1077,25 +1077,25 @@ define <8 x i32> @load_zext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_8i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_8i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_8i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_8i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1106,7 +1106,7 @@ entry:
define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; SSE2-LABEL: load_zext_16i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1116,7 +1116,7 @@ define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_16i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1126,7 +1126,7 @@ define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_16i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
@@ -1134,7 +1134,7 @@ define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_16i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1143,12 +1143,12 @@ define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_16i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_16i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1160,7 +1160,7 @@ entry:
define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE2-LABEL: load_zext_8i8_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
@@ -1177,7 +1177,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_8i8_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,128,128,128,128,128,128,128,1,128,128,128,128,128,128,128]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
@@ -1191,7 +1191,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_8i8_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
@@ -1199,7 +1199,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_8i8_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1209,13 +1209,13 @@ define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_8i8_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_8i8_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1226,7 +1226,7 @@ entry:
define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-LABEL: load_zext_16i8_to_16i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -1235,7 +1235,7 @@ define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_16i8_to_16i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm1, %xmm0
@@ -1244,25 +1244,25 @@ define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_16i8_to_16i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_16i8_to_16i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_16i8_to_16i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_16i8_to_16i16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512-NEXT: retq
entry:
@@ -1273,7 +1273,7 @@ entry:
define <2 x i64> @load_zext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSE2-LABEL: load_zext_2i16_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1281,7 +1281,7 @@ define <2 x i64> @load_zext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_2i16_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1289,12 +1289,12 @@ define <2 x i64> @load_zext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_2i16_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_2i16_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX-NEXT: retq
entry:
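; The hunks in this file cut off at "entry:", eliding the IR bodies. A
; minimal sketch of the load_zext shape these CHECK lines verify — the
; name and body below are assumptions mirroring the function label, not
; lines from the file:
define <2 x i64> @load_zext_sketch(<2 x i16>* %ptr) {
entry:
  %X = load <2 x i16>, <2 x i16>* %ptr
  %Y = zext <2 x i16> %X to <2 x i64>
  ret <2 x i64> %Y
}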
@@ -1305,26 +1305,26 @@ entry:
define <4 x i32> @load_zext_4i16_to_4i32(<4 x i16> *%ptr) {
; SSE2-LABEL: load_zext_4i16_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i16_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i16_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_4i16_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX-NEXT: retq
entry:
@@ -1335,7 +1335,7 @@ entry:
define <4 x i64> @load_zext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-LABEL: load_zext_4i16_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -1345,7 +1345,7 @@ define <4 x i64> @load_zext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i16_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -1355,25 +1355,25 @@ define <4 x i64> @load_zext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i16_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_4i16_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_4i16_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_4i16_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1384,7 +1384,7 @@ entry:
define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSE2-LABEL: load_zext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -1393,7 +1393,7 @@ define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm1, %xmm0
@@ -1402,25 +1402,25 @@ define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_8i16_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512-NEXT: retq
entry:
@@ -1431,26 +1431,26 @@ entry:
define <2 x i64> @load_zext_2i32_to_2i64(<2 x i32> *%ptr) {
; SSE2-LABEL: load_zext_2i32_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_2i32_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_2i32_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_2i32_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; AVX-NEXT: retq
entry:
@@ -1461,7 +1461,7 @@ entry:
define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSE2-LABEL: load_zext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm1
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movaps %xmm1, %xmm0
@@ -1470,7 +1470,7 @@ define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps (%rdi), %xmm1
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: movaps %xmm1, %xmm0
@@ -1479,25 +1479,25 @@ define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_4i32_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX512-NEXT: retq
entry:
@@ -1508,7 +1508,7 @@ entry:
define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSE2-LABEL: zext_8i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1518,7 +1518,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand {{.*}}(%rip), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
@@ -1528,7 +1528,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1537,7 +1537,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1546,13 +1546,13 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
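; By contrast, the zext_* tests above take a value argument rather than
; loading from memory, which is why their codegen starts from %xmm0. A
; sketch under that assumption (hypothetical name):
define <8 x i32> @zext_sketch(<8 x i8> %z) {
entry:
  %t = zext <8 x i8> %z to <8 x i32>
  ret <8 x i32> %t
}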
@@ -1563,7 +1563,7 @@ entry:
define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1571,7 +1571,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1579,7 +1579,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
@@ -1587,7 +1587,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1595,12 +1595,12 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_8i16_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
entry:
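; The shuf_zext_* tests express the zero extension as a shufflevector
; interleaving with zeros followed by a bitcast. The hunk above cuts off
; at "entry:", so this body is a plausible reconstruction (name and mask
; are assumptions):
define <8 x i32> @shuf_zext_sketch(<8 x i16> %A) {
entry:
  %B = shufflevector <8 x i16> %A, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8, i32 4, i32 8, i32 5, i32 8, i32 6, i32 8, i32 7, i32 8>
  %Z = bitcast <16 x i16> %B to <8 x i32>
  ret <8 x i32> %Z
}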
@@ -1611,7 +1611,7 @@ entry:
define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -1619,7 +1619,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps %xmm0, %xmm1
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -1627,7 +1627,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
@@ -1635,7 +1635,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1643,12 +1643,12 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_4i32_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: retq
entry:
@@ -1659,7 +1659,7 @@ entry:
define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; SSE2-LABEL: shuf_zext_8i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: packuswb %xmm1, %xmm1
@@ -1671,7 +1671,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand {{.*}}(%rip), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
@@ -1681,7 +1681,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1690,7 +1690,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_8i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1699,13 +1699,13 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_8i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_8i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512-NEXT: retq
@@ -1717,7 +1717,7 @@ entry:
define <2 x i64> @shuf_zext_16i8_to_2i64_offset6(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1725,18 +1725,18 @@ define <2 x i64> @shuf_zext_16i8_to_2i64_offset6(<16 x i8> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrlq $48, %xmm0
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
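; The *_offset* variants start the extension at a nonzero source lane; a
; sketch of the offset-6 case checked above, inferred from the SSSE3
; pshufb mask (xmm0[6] and xmm0[7]) rather than taken from the file:
define <2 x i64> @shuf_zext_offset_sketch(<16 x i8> %A) {
entry:
  %B = shufflevector <16 x i8> %A, <16 x i8> zeroinitializer, <16 x i32> <i32 6, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 7, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %Z = bitcast <16 x i8> %B to <2 x i64>
  ret <2 x i64> %Z
}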
@@ -1748,7 +1748,7 @@ entry:
define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1761,14 +1761,14 @@ define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[11],zero,zero,zero,zero,zero,zero,zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[13],zero,zero,zero,zero,zero,zero,zero,xmm1[14],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrldq {{.*#+}} xmm1 = xmm1[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
@@ -1778,7 +1778,7 @@ define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -1787,13 +1787,13 @@ define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
@@ -1805,7 +1805,7 @@ entry:
define <2 x i64> @shuf_zext_8i16_to_2i64_offset6(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1813,18 +1813,18 @@ define <2 x i64> @shuf_zext_8i16_to_2i64_offset6(<8 x i16> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,7],zero,zero,zero,zero,zero,zero,xmm0[8,9],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
@@ -1836,7 +1836,7 @@ entry:
define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1846,7 +1846,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1856,7 +1856,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1865,7 +1865,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1874,13 +1874,13 @@ define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512-NEXT: retq
@@ -1892,27 +1892,27 @@ entry:
define <4 x i32> @shuf_zext_8i16_to_4i32_offset1(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: retq
@@ -1924,7 +1924,7 @@ entry:
define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1933,7 +1933,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: pxor %xmm2, %xmm2
@@ -1942,7 +1942,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; SSE41-NEXT: pxor %xmm2, %xmm2
@@ -1951,7 +1951,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1960,13 +1960,13 @@ define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
@@ -1978,7 +1978,7 @@ entry:
define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1986,7 +1986,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtabl
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1994,7 +1994,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtabl
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
@@ -2003,7 +2003,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtabl
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -2013,13 +2013,13 @@ define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtabl
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
@@ -2031,13 +2031,13 @@ entry:
define <2 x i64> @shuf_zext_4i32_to_2i64_offset2(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE-LABEL: shuf_zext_4i32_to_2i64_offset2:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuf_zext_4i32_to_2i64_offset2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: retq
@@ -2049,7 +2049,7 @@ entry:
define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,0,4294967295,0]
; SSE2-NEXT: pand %xmm1, %xmm0
@@ -2057,7 +2057,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,0,4294967295,0]
; SSSE3-NEXT: pand %xmm1, %xmm0
@@ -2065,7 +2065,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
@@ -2073,7 +2073,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -2081,13 +2081,13 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: retq
@@ -2099,7 +2099,7 @@ entry:
define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; SSE2-LABEL: zext_32i8_to_32i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -2131,7 +2131,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_32i8_to_32i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -2163,7 +2163,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_32i8_to_32i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
@@ -2190,7 +2190,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_32i8_to_32i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -2214,7 +2214,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_32i8_to_32i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -2230,7 +2230,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_32i8_to_32i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -2242,7 +2242,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
define <2 x i32> @zext_2i8_to_2i32(<2 x i8>* %addr) {
; SSE2-LABEL: zext_2i8_to_2i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -2253,7 +2253,7 @@ define <2 x i32> @zext_2i8_to_2i32(<2 x i8>* %addr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_2i8_to_2i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movzwl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2261,13 +2261,13 @@ define <2 x i32> @zext_2i8_to_2i32(<2 x i8>* %addr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_2i8_to_2i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: paddq %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_2i8_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
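; For reference, each check prefix in this file (SSE2, SSSE3, SSE41,
; AVX1, AVX2, AVX512) corresponds to one RUN line of roughly this shape
; (exact triple and feature flags here are assumptions):
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2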
diff --git a/test/CodeGen/X86/vector-zmov.ll b/test/CodeGen/X86/vector-zmov.ll
index 106177ecda8..6f2f78263b2 100644
--- a/test/CodeGen/X86/vector-zmov.ll
+++ b/test/CodeGen/X86/vector-zmov.ll
@@ -7,12 +7,12 @@
define <4 x i32> @load_zmov_4i32_to_0zzz(<4 x i32> *%ptr) {
; SSE-LABEL: load_zmov_4i32_to_0zzz:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: load_zmov_4i32_to_0zzz:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -23,12 +23,12 @@ entry:
define <2 x i64> @load_zmov_2i64_to_0z(<2 x i64> *%ptr) {
; SSE-LABEL: load_zmov_2i64_to_0z:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: load_zmov_2i64_to_0z:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
entry:
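; The load_zmov_* tests check that a load whose shuffle keeps lane 0 and
; zeros the rest folds to a single movss/movsd. The hunk cuts off at
; "entry:", so this body is an assumption matching the CHECK lines:
define <4 x i32> @load_zmov_sketch(<4 x i32>* %ptr) {
entry:
  %X = load <4 x i32>, <4 x i32>* %ptr
  %Y = shufflevector <4 x i32> %X, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
  ret <4 x i32> %Y
}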
diff --git a/test/CodeGen/X86/viabs.ll b/test/CodeGen/X86/viabs.ll
index 19b86572320..fa8bbaa6554 100644
--- a/test/CodeGen/X86/viabs.ll
+++ b/test/CodeGen/X86/viabs.ll
@@ -8,7 +8,7 @@
define <4 x i32> @test_abs_gt_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_abs_gt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm0
@@ -16,22 +16,22 @@ define <4 x i32> @test_abs_gt_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_gt_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_gt_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsd %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsd %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_gt_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <4 x i32> zeroinitializer, %a
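; The visible tail above is the start of the compare-and-select abs
; idiom these tests lower to pabsd/vpabsd; one plausible completion
; (the compare direction varies per test name: gt/ge/lt/le):
define <4 x i32> @abs_sketch(<4 x i32> %a) nounwind {
  %neg = sub <4 x i32> zeroinitializer, %a
  %cmp = icmp sgt <4 x i32> %a, %neg
  %abs = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %neg
  ret <4 x i32> %abs
}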
@@ -42,7 +42,7 @@ define <4 x i32> @test_abs_gt_v4i32(<4 x i32> %a) nounwind {
define <4 x i32> @test_abs_ge_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_abs_ge_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm0
@@ -50,22 +50,22 @@ define <4 x i32> @test_abs_ge_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_ge_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_ge_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsd %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_ge_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsd %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_ge_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <4 x i32> zeroinitializer, %a
@@ -76,7 +76,7 @@ define <4 x i32> @test_abs_ge_v4i32(<4 x i32> %a) nounwind {
define <8 x i16> @test_abs_gt_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: test_abs_gt_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psraw $15, %xmm1
; SSE2-NEXT: paddw %xmm1, %xmm0
@@ -84,27 +84,27 @@ define <8 x i16> @test_abs_gt_v8i16(<8 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_gt_v8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsw %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_gt_v8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsw %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsw %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_abs_gt_v8i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpabsw %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
; AVX512F-NEXT: retq # encoding: [0xc3]
;
; AVX512BW-LABEL: test_abs_gt_v8i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpabsw %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
; AVX512BW-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <8 x i16> zeroinitializer, %a
@@ -115,7 +115,7 @@ define <8 x i16> @test_abs_gt_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_abs_lt_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_abs_lt_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm0
@@ -123,27 +123,27 @@ define <16 x i8> @test_abs_lt_v16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_lt_v16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsb %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_lt_v16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsb %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_lt_v16i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsb %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_abs_lt_v16i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpabsb %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
; AVX512F-NEXT: retq # encoding: [0xc3]
;
; AVX512BW-LABEL: test_abs_lt_v16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpabsb %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
; AVX512BW-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <16 x i8> zeroinitializer, %a
@@ -154,7 +154,7 @@ define <16 x i8> @test_abs_lt_v16i8(<16 x i8> %a) nounwind {
define <4 x i32> @test_abs_le_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_abs_le_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm0
@@ -162,22 +162,22 @@ define <4 x i32> @test_abs_le_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_le_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_le_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsd %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsd %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_le_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <4 x i32> zeroinitializer, %a
@@ -188,7 +188,7 @@ define <4 x i32> @test_abs_le_v4i32(<4 x i32> %a) nounwind {
define <8 x i32> @test_abs_gt_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: test_abs_gt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: paddd %xmm2, %xmm0
@@ -200,13 +200,13 @@ define <8 x i32> @test_abs_gt_v8i32(<8 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_gt_v8i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsd %xmm0, %xmm0
; SSSE3-NEXT: pabsd %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_gt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsd %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpabsd %xmm0, %xmm0
@@ -214,12 +214,12 @@ define <8 x i32> @test_abs_gt_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsd %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_gt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsd %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1e,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <8 x i32> zeroinitializer, %a
@@ -230,7 +230,7 @@ define <8 x i32> @test_abs_gt_v8i32(<8 x i32> %a) nounwind {
define <8 x i32> @test_abs_ge_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: test_abs_ge_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: paddd %xmm2, %xmm0
@@ -242,13 +242,13 @@ define <8 x i32> @test_abs_ge_v8i32(<8 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_ge_v8i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsd %xmm0, %xmm0
; SSSE3-NEXT: pabsd %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_ge_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsd %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpabsd %xmm0, %xmm0
@@ -256,12 +256,12 @@ define <8 x i32> @test_abs_ge_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_ge_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsd %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_ge_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsd %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1e,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <8 x i32> zeroinitializer, %a
@@ -272,7 +272,7 @@ define <8 x i32> @test_abs_ge_v8i32(<8 x i32> %a) nounwind {
define <16 x i16> @test_abs_gt_v16i16(<16 x i16> %a) nounwind {
; SSE2-LABEL: test_abs_gt_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: paddw %xmm2, %xmm0
@@ -284,13 +284,13 @@ define <16 x i16> @test_abs_gt_v16i16(<16 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_gt_v16i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsw %xmm0, %xmm0
; SSSE3-NEXT: pabsw %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_gt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsw %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpabsw %xmm0, %xmm0
@@ -298,17 +298,17 @@ define <16 x i16> @test_abs_gt_v16i16(<16 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsw %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_abs_gt_v16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpabsw %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1d,0xc0]
; AVX512F-NEXT: retq # encoding: [0xc3]
;
; AVX512BW-LABEL: test_abs_gt_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpabsw %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1d,0xc0]
; AVX512BW-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <16 x i16> zeroinitializer, %a
@@ -319,7 +319,7 @@ define <16 x i16> @test_abs_gt_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @test_abs_lt_v32i8(<32 x i8> %a) nounwind {
; SSE2-LABEL: test_abs_lt_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpgtb %xmm0, %xmm3
@@ -331,13 +331,13 @@ define <32 x i8> @test_abs_lt_v32i8(<32 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_lt_v32i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsb %xmm0, %xmm0
; SSSE3-NEXT: pabsb %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_lt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsb %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpabsb %xmm0, %xmm0
@@ -345,17 +345,17 @@ define <32 x i8> @test_abs_lt_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_lt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsb %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_abs_lt_v32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpabsb %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1c,0xc0]
; AVX512F-NEXT: retq # encoding: [0xc3]
;
; AVX512BW-LABEL: test_abs_lt_v32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpabsb %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1c,0xc0]
; AVX512BW-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <32 x i8> zeroinitializer, %a
@@ -366,7 +366,7 @@ define <32 x i8> @test_abs_lt_v32i8(<32 x i8> %a) nounwind {
define <8 x i32> @test_abs_le_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: test_abs_le_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: paddd %xmm2, %xmm0
@@ -378,13 +378,13 @@ define <8 x i32> @test_abs_le_v8i32(<8 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_le_v8i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsd %xmm0, %xmm0
; SSSE3-NEXT: pabsd %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_le_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsd %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpabsd %xmm0, %xmm0
@@ -392,12 +392,12 @@ define <8 x i32> @test_abs_le_v8i32(<8 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsd %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_le_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsd %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1e,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <8 x i32> zeroinitializer, %a
@@ -408,7 +408,7 @@ define <8 x i32> @test_abs_le_v8i32(<8 x i32> %a) nounwind {
define <16 x i32> @test_abs_le_16i32(<16 x i32> %a) nounwind {
; SSE2-LABEL: test_abs_le_16i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: paddd %xmm4, %xmm0
@@ -428,7 +428,7 @@ define <16 x i32> @test_abs_le_16i32(<16 x i32> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_le_16i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsd %xmm0, %xmm0
; SSSE3-NEXT: pabsd %xmm1, %xmm1
; SSSE3-NEXT: pabsd %xmm2, %xmm2
@@ -436,7 +436,7 @@ define <16 x i32> @test_abs_le_16i32(<16 x i32> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_le_16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsd %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpabsd %xmm0, %xmm0
@@ -448,13 +448,13 @@ define <16 x i32> @test_abs_le_16i32(<16 x i32> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsd %ymm0, %ymm0
; AVX2-NEXT: vpabsd %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_le_16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsd %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0x1e,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <16 x i32> zeroinitializer, %a
@@ -465,7 +465,7 @@ define <16 x i32> @test_abs_le_16i32(<16 x i32> %a) nounwind {
define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
; SSE-LABEL: test_abs_ge_v2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -474,7 +474,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_abs_ge_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
@@ -482,7 +482,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_ge_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
@@ -490,7 +490,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_ge_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsq %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x1f,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <2 x i64> zeroinitializer, %a
@@ -501,7 +501,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
; SSE-LABEL: test_abs_gt_v4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
@@ -515,7 +515,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_abs_gt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm3
@@ -528,7 +528,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
@@ -536,7 +536,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_gt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsq %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x1f,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <4 x i64> zeroinitializer, %a
@@ -547,7 +547,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
; SSE-LABEL: test_abs_le_v8i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrad $31, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
@@ -571,7 +571,7 @@ define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_abs_le_v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
@@ -592,7 +592,7 @@ define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
@@ -603,7 +603,7 @@ define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_le_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsq %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x1f,0xc0]
; AVX512-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <8 x i64> zeroinitializer, %a
@@ -614,7 +614,7 @@ define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
; SSE-LABEL: test_abs_le_v8i64_fold:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqu (%rdi), %xmm0
; SSE-NEXT: movdqu 16(%rdi), %xmm1
; SSE-NEXT: movdqu 32(%rdi), %xmm2
@@ -642,7 +642,7 @@ define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
; SSE-NEXT: retq
;
; AVX1-LABEL: test_abs_le_v8i64_fold:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -665,7 +665,7 @@ define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v8i64_fold:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -678,7 +678,7 @@ define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_le_v8i64_fold:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpabsq (%rdi), %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x1f,0x07]
; AVX512-NEXT: retq # encoding: [0xc3]
%a = load <8 x i64>, <8 x i64>* %a.ptr, align 8
@@ -690,7 +690,7 @@ define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
define <64 x i8> @test_abs_lt_v64i8(<64 x i8> %a) nounwind {
; SSE2-LABEL: test_abs_lt_v64i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pcmpgtb %xmm0, %xmm5
@@ -710,7 +710,7 @@ define <64 x i8> @test_abs_lt_v64i8(<64 x i8> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_lt_v64i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsb %xmm0, %xmm0
; SSSE3-NEXT: pabsb %xmm1, %xmm1
; SSSE3-NEXT: pabsb %xmm2, %xmm2
@@ -718,7 +718,7 @@ define <64 x i8> @test_abs_lt_v64i8(<64 x i8> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_lt_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsb %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpabsb %xmm0, %xmm0
@@ -730,19 +730,19 @@ define <64 x i8> @test_abs_lt_v64i8(<64 x i8> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_lt_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsb %ymm0, %ymm0
; AVX2-NEXT: vpabsb %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_abs_lt_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpabsb %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1c,0xc0]
; AVX512F-NEXT: vpabsb %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x7d,0x1c,0xc9]
; AVX512F-NEXT: retq # encoding: [0xc3]
;
; AVX512BW-LABEL: test_abs_lt_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpabsb %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0x1c,0xc0]
; AVX512BW-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <64 x i8> zeroinitializer, %a
@@ -753,7 +753,7 @@ define <64 x i8> @test_abs_lt_v64i8(<64 x i8> %a) nounwind {
define <32 x i16> @test_abs_gt_v32i16(<32 x i16> %a) nounwind {
; SSE2-LABEL: test_abs_gt_v32i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psraw $15, %xmm4
; SSE2-NEXT: paddw %xmm4, %xmm0
@@ -773,7 +773,7 @@ define <32 x i16> @test_abs_gt_v32i16(<32 x i16> %a) nounwind {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_abs_gt_v32i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pabsw %xmm0, %xmm0
; SSSE3-NEXT: pabsw %xmm1, %xmm1
; SSSE3-NEXT: pabsw %xmm2, %xmm2
@@ -781,7 +781,7 @@ define <32 x i16> @test_abs_gt_v32i16(<32 x i16> %a) nounwind {
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_abs_gt_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpabsw %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpabsw %xmm0, %xmm0
@@ -793,19 +793,19 @@ define <32 x i16> @test_abs_gt_v32i16(<32 x i16> %a) nounwind {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpabsw %ymm0, %ymm0
; AVX2-NEXT: vpabsw %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_abs_gt_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpabsw %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1d,0xc0]
; AVX512F-NEXT: vpabsw %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x7d,0x1d,0xc9]
; AVX512F-NEXT: retq # encoding: [0xc3]
;
; AVX512BW-LABEL: test_abs_gt_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpabsw %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0x1d,0xc0]
; AVX512BW-NEXT: retq # encoding: [0xc3]
%tmp1neg = sub <32 x i16> zeroinitializer, %a
diff --git a/test/CodeGen/X86/vmovq.ll b/test/CodeGen/X86/vmovq.ll
index 5c1ff7d06ee..2b4ae679573 100644
--- a/test/CodeGen/X86/vmovq.ll
+++ b/test/CodeGen/X86/vmovq.ll
@@ -4,7 +4,7 @@
define <2 x i64> @PR25554(<2 x i64> %v0, <2 x i64> %v1) {
; SSE-LABEL: PR25554:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: por %xmm1, %xmm0
@@ -13,7 +13,7 @@ define <2 x i64> @PR25554(<2 x i64> %v0, <2 x i64> %v1) {
; SSE-NEXT: retq
;
; AVX-LABEL: PR25554:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: movl $1, %eax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll b/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
index e490798bfd5..7f37bcc9778 100644
--- a/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
+++ b/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
@@ -4,7 +4,7 @@
declare i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_vpshufbitqmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshufbitqmb %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -17,7 +17,7 @@ define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
declare i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b, i32 %mask)
define i32 @test_vpshufbitqmb_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
; CHECK-LABEL: test_vpshufbitqmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshufbitqmb %ymm1, %ymm0, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
@@ -30,7 +30,7 @@ define i32 @test_vpshufbitqmb_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
declare i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
define i64 @test_vpshufbitqmb_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; CHECK-LABEL: test_vpshufbitqmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vpshufbitqmb %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovq %k0, %rax
diff --git a/test/CodeGen/X86/vselect-2.ll b/test/CodeGen/X86/vselect-2.ll
index 1ffcdb09c55..f31f6f6597c 100644
--- a/test/CodeGen/X86/vselect-2.ll
+++ b/test/CodeGen/X86/vselect-2.ll
@@ -6,23 +6,23 @@
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
%select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %B
@@ -31,22 +31,22 @@ define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test2:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
%select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x i32> %A, <4 x i32> %B
@@ -55,18 +55,18 @@ define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
; SSE2-LABEL: test3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
%select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x float> %A, <4 x float> %B
@@ -75,17 +75,17 @@ define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
; SSE2-LABEL: test4:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test4:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: retq
%select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x float> %A, <4 x float> %B
diff --git a/test/CodeGen/X86/vselect-avx.ll b/test/CodeGen/X86/vselect-avx.ll
index 11886cd11c5..9c2ae113c14 100644
--- a/test/CodeGen/X86/vselect-avx.ll
+++ b/test/CodeGen/X86/vselect-avx.ll
@@ -17,7 +17,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define void @test(<4 x i16>* %a, <4 x i16>* %b) {
; AVX-LABEL: test:
-; AVX: ## BB#0: ## %body
+; AVX: ## %bb.0: ## %body
; AVX-NEXT: movq {{.*}}(%rip), %rax
; AVX-NEXT: movq %rax, (%rdi)
; AVX-NEXT: movq {{.*}}(%rip), %rax
@@ -39,7 +39,7 @@ body:
define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
; AVX1-LABEL: test2:
-; AVX1: ## BB#0: ## %bb
+; AVX1: ## %bb.0: ## %bb
; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
@@ -54,7 +54,7 @@ define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test2:
-; AVX2: ## BB#0: ## %bb
+; AVX2: ## %bb.0: ## %bb
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: movq (%rdi,%rsi,8), %rax
@@ -84,7 +84,7 @@ bb:
define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) {
; AVX1-LABEL: test3:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
@@ -106,7 +106,7 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,
; AVX1-NEXT: retq
;
; AVX2-LABEL: test3:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
@@ -142,7 +142,7 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,
define <32 x i8> @PR22706(<32 x i1> %x) {
; AVX1-LABEL: PR22706:
-; AVX1: ## BB#0:
+; AVX1: ## %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
@@ -159,7 +159,7 @@ define <32 x i8> @PR22706(<32 x i1> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR22706:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/vselect-constants.ll b/test/CodeGen/X86/vselect-constants.ll
index 4ce2ecfa739..d1931844190 100644
--- a/test/CodeGen/X86/vselect-constants.ll
+++ b/test/CodeGen/X86/vselect-constants.ll
@@ -15,7 +15,7 @@
define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_C1_or_C2_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
@@ -25,7 +25,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
; SSE-NEXT: retq
;
; AVX-LABEL: sel_C1_or_C2_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
@@ -36,7 +36,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_C1_or_C2_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
@@ -45,7 +45,7 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_C1_or_C2_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
@@ -57,13 +57,13 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_Cplus1_or_C_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_Cplus1_or_C_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -73,7 +73,7 @@ define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_Cplus1_or_C_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
; SSE-NEXT: psubd %xmm0, %xmm1
@@ -81,7 +81,7 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_Cplus1_or_C_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
@@ -93,14 +93,14 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_Cminus1_or_C_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_Cminus1_or_C_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
@@ -111,13 +111,13 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_Cminus1_or_C_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_Cminus1_or_C_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -128,13 +128,13 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_minus1_or_0_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_minus1_or_0_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -144,12 +144,12 @@ define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_minus1_or_0_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_minus1_or_0_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%cond = icmp eq <4 x i32> %x, %y
@@ -159,14 +159,14 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_0_or_minus1_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_0_or_minus1_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -177,14 +177,14 @@ define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_0_or_minus1_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_0_or_minus1_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -196,12 +196,12 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_1_or_0_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_1_or_0_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -210,13 +210,13 @@ define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_1_or_0_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_1_or_0_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -227,12 +227,12 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_0_or_1_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andnps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_0_or_1_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandnps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -241,13 +241,13 @@ define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_0_or_1_vec:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: pandn {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_0_or_1_vec:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpandn {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/vselect-minmax.ll b/test/CodeGen/X86/vselect-minmax.ll
index 5524eaf397c..11edc6a6be5 100644
--- a/test/CodeGen/X86/vselect-minmax.ll
+++ b/test/CodeGen/X86/vselect-minmax.ll
@@ -8,7 +8,7 @@
define <16 x i8> @test1(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test1:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -17,12 +17,12 @@ define <16 x i8> @test1(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test1:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test1:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -33,7 +33,7 @@ entry:
define <16 x i8> @test2(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
@@ -45,12 +45,12 @@ define <16 x i8> @test2(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test2:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -61,7 +61,7 @@ entry:
define <16 x i8> @test3(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test3:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -71,12 +71,12 @@ define <16 x i8> @test3(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test3:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test3:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -87,7 +87,7 @@ entry:
define <16 x i8> @test4(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test4:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtb %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
@@ -99,12 +99,12 @@ define <16 x i8> @test4(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test4:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test4:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -115,12 +115,12 @@ entry:
define <16 x i8> @test5(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: test5:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test5:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -131,12 +131,12 @@ entry:
define <16 x i8> @test6(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: test6:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test6:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -147,12 +147,12 @@ entry:
define <16 x i8> @test7(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: test7:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test7:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -163,12 +163,12 @@ entry:
define <16 x i8> @test8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: test8:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -179,12 +179,12 @@ entry:
define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test9:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test9:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -195,12 +195,12 @@ entry:
define <8 x i16> @test10(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test10:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test10:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -211,12 +211,12 @@ entry:
define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test11:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test11:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -227,12 +227,12 @@ entry:
define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test12:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test12:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -243,7 +243,7 @@ entry:
define <8 x i16> @test13(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test13:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -255,12 +255,12 @@ define <8 x i16> @test13(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test13:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test13:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -271,7 +271,7 @@ entry:
define <8 x i16> @test14(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test14:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psubusw %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -282,12 +282,12 @@ define <8 x i16> @test14(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test14:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test14:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -298,7 +298,7 @@ entry:
define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test15:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -310,12 +310,12 @@ define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test15:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test15:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -326,7 +326,7 @@ entry:
define <8 x i16> @test16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psubusw %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -337,12 +337,12 @@ define <8 x i16> @test16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test16:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -353,7 +353,7 @@ entry:
define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test17:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -362,12 +362,12 @@ define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test17:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test17:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -378,7 +378,7 @@ entry:
define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test18:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
@@ -390,12 +390,12 @@ define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test18:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test18:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -406,7 +406,7 @@ entry:
define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test19:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -416,12 +416,12 @@ define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test19:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test19:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -432,7 +432,7 @@ entry:
define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test20:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
@@ -444,12 +444,12 @@ define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test20:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test20:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -460,7 +460,7 @@ entry:
define <4 x i32> @test21(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test21:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -472,12 +472,12 @@ define <4 x i32> @test21(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test21:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test21:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -488,7 +488,7 @@ entry:
define <4 x i32> @test22(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test22:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
@@ -503,12 +503,12 @@ define <4 x i32> @test22(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test22:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test22:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -519,7 +519,7 @@ entry:
define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test23:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -531,12 +531,12 @@ define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test23:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test23:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -547,7 +547,7 @@ entry:
define <4 x i32> @test24(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test24:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
@@ -562,12 +562,12 @@ define <4 x i32> @test24(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test24:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test24:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -578,7 +578,7 @@ entry:
define <32 x i8> @test25(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: test25:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -592,13 +592,13 @@ define <32 x i8> @test25(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test25:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm2, %xmm0
; SSE4-NEXT: pminsb %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test25:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -607,12 +607,12 @@ define <32 x i8> @test25(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test25:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test25:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -623,7 +623,7 @@ entry:
define <32 x i8> @test26(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: test26:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pcmpgtb %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
@@ -643,13 +643,13 @@ define <32 x i8> @test26(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test26:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm2, %xmm0
; SSE4-NEXT: pminsb %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test26:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -658,12 +658,12 @@ define <32 x i8> @test26(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test26:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test26:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -674,7 +674,7 @@ entry:
define <32 x i8> @test27(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: test27:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
@@ -690,13 +690,13 @@ define <32 x i8> @test27(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test27:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm2, %xmm0
; SSE4-NEXT: pmaxsb %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test27:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -705,12 +705,12 @@ define <32 x i8> @test27(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test27:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test27:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -721,7 +721,7 @@ entry:
define <32 x i8> @test28(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: test28:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtb %xmm1, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
@@ -741,13 +741,13 @@ define <32 x i8> @test28(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test28:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm2, %xmm0
; SSE4-NEXT: pmaxsb %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test28:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -756,12 +756,12 @@ define <32 x i8> @test28(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test28:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test28:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -772,13 +772,13 @@ entry:
define <32 x i8> @test29(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: test29:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm2, %xmm0
; SSE-NEXT: pminub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test29:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -787,12 +787,12 @@ define <32 x i8> @test29(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test29:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test29:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -803,13 +803,13 @@ entry:
define <32 x i8> @test30(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: test30:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm2, %xmm0
; SSE-NEXT: pminub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test30:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -818,12 +818,12 @@ define <32 x i8> @test30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test30:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test30:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -834,13 +834,13 @@ entry:
define <32 x i8> @test31(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: test31:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm2, %xmm0
; SSE-NEXT: pmaxub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test31:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -849,12 +849,12 @@ define <32 x i8> @test31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test31:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test31:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -865,13 +865,13 @@ entry:
define <32 x i8> @test32(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: test32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm2, %xmm0
; SSE-NEXT: pmaxub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -880,12 +880,12 @@ define <32 x i8> @test32(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test32:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -896,13 +896,13 @@ entry:
define <16 x i16> @test33(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: test33:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm2, %xmm0
; SSE-NEXT: pminsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test33:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -911,12 +911,12 @@ define <16 x i16> @test33(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test33:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test33:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -927,13 +927,13 @@ entry:
define <16 x i16> @test34(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: test34:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm2, %xmm0
; SSE-NEXT: pminsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test34:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -942,12 +942,12 @@ define <16 x i16> @test34(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test34:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test34:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -958,13 +958,13 @@ entry:
define <16 x i16> @test35(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: test35:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm2, %xmm0
; SSE-NEXT: pmaxsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test35:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -973,12 +973,12 @@ define <16 x i16> @test35(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test35:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test35:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -989,13 +989,13 @@ entry:
define <16 x i16> @test36(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: test36:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm2, %xmm0
; SSE-NEXT: pmaxsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test36:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -1004,12 +1004,12 @@ define <16 x i16> @test36(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test36:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test36:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1020,7 +1020,7 @@ entry:
define <16 x i16> @test37(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: test37:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1040,13 +1040,13 @@ define <16 x i16> @test37(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test37:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm2, %xmm0
; SSE4-NEXT: pminuw %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test37:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -1055,12 +1055,12 @@ define <16 x i16> @test37(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test37:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test37:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1071,7 +1071,7 @@ entry:
define <16 x i16> @test38(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: test38:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubusw %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm6
@@ -1090,13 +1090,13 @@ define <16 x i16> @test38(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test38:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm2, %xmm0
; SSE4-NEXT: pminuw %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test38:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -1105,12 +1105,12 @@ define <16 x i16> @test38(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test38:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test38:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1121,7 +1121,7 @@ entry:
define <16 x i16> @test39(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: test39:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pxor %xmm5, %xmm6
@@ -1142,13 +1142,13 @@ define <16 x i16> @test39(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test39:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm2, %xmm0
; SSE4-NEXT: pmaxuw %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test39:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -1157,12 +1157,12 @@ define <16 x i16> @test39(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test39:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test39:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1173,7 +1173,7 @@ entry:
define <16 x i16> @test40(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: test40:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psubusw %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
@@ -1190,13 +1190,13 @@ define <16 x i16> @test40(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test40:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm2, %xmm0
; SSE4-NEXT: pmaxuw %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test40:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -1205,12 +1205,12 @@ define <16 x i16> @test40(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test40:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test40:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1221,7 +1221,7 @@ entry:
define <8 x i32> @test41(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test41:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1235,13 +1235,13 @@ define <8 x i32> @test41(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test41:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm2, %xmm0
; SSE4-NEXT: pminsd %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test41:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -1250,12 +1250,12 @@ define <8 x i32> @test41(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test41:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test41:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1266,7 +1266,7 @@ entry:
define <8 x i32> @test42(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test42:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
@@ -1286,13 +1286,13 @@ define <8 x i32> @test42(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test42:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm2, %xmm0
; SSE4-NEXT: pminsd %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test42:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -1301,12 +1301,12 @@ define <8 x i32> @test42(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test42:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test42:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1317,7 +1317,7 @@ entry:
define <8 x i32> @test43(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test43:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
@@ -1333,13 +1333,13 @@ define <8 x i32> @test43(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test43:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm2, %xmm0
; SSE4-NEXT: pmaxsd %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test43:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -1348,12 +1348,12 @@ define <8 x i32> @test43(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test43:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test43:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1364,7 +1364,7 @@ entry:
define <8 x i32> @test44(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test44:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
@@ -1384,13 +1384,13 @@ define <8 x i32> @test44(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test44:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm2, %xmm0
; SSE4-NEXT: pmaxsd %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test44:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -1399,12 +1399,12 @@ define <8 x i32> @test44(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test44:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test44:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1415,7 +1415,7 @@ entry:
define <8 x i32> @test45(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test45:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1435,13 +1435,13 @@ define <8 x i32> @test45(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test45:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm2, %xmm0
; SSE4-NEXT: pminud %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test45:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -1450,12 +1450,12 @@ define <8 x i32> @test45(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test45:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test45:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1466,7 +1466,7 @@ entry:
define <8 x i32> @test46(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test46:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm4
@@ -1492,13 +1492,13 @@ define <8 x i32> @test46(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test46:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm2, %xmm0
; SSE4-NEXT: pminud %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test46:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -1507,12 +1507,12 @@ define <8 x i32> @test46(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test46:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test46:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1523,7 +1523,7 @@ entry:
define <8 x i32> @test47(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test47:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pxor %xmm5, %xmm6
@@ -1544,13 +1544,13 @@ define <8 x i32> @test47(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test47:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm2, %xmm0
; SSE4-NEXT: pmaxud %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test47:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -1559,12 +1559,12 @@ define <8 x i32> @test47(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test47:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test47:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1575,7 +1575,7 @@ entry:
define <8 x i32> @test48(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test48:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm4
@@ -1601,13 +1601,13 @@ define <8 x i32> @test48(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test48:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm2, %xmm0
; SSE4-NEXT: pmaxud %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test48:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -1616,12 +1616,12 @@ define <8 x i32> @test48(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test48:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test48:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -1632,7 +1632,7 @@ entry:
define <16 x i8> @test49(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test49:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
@@ -1642,12 +1642,12 @@ define <16 x i8> @test49(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test49:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test49:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1658,7 +1658,7 @@ entry:
define <16 x i8> @test50(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test50:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
@@ -1670,12 +1670,12 @@ define <16 x i8> @test50(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test50:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test50:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1686,7 +1686,7 @@ entry:
define <16 x i8> @test51(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test51:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
@@ -1696,12 +1696,12 @@ define <16 x i8> @test51(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test51:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test51:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1712,7 +1712,7 @@ entry:
define <16 x i8> @test52(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test52:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtb %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
@@ -1724,12 +1724,12 @@ define <16 x i8> @test52(<16 x i8> %a, <16 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test52:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test52:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1740,12 +1740,12 @@ entry:
define <16 x i8> @test53(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: test53:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test53:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1756,12 +1756,12 @@ entry:
define <16 x i8> @test54(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: test54:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test54:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1772,12 +1772,12 @@ entry:
define <16 x i8> @test55(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: test55:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test55:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1788,12 +1788,12 @@ entry:
define <16 x i8> @test56(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: test56:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test56:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1804,12 +1804,12 @@ entry:
define <8 x i16> @test57(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test57:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test57:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1820,12 +1820,12 @@ entry:
define <8 x i16> @test58(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test58:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test58:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1836,12 +1836,12 @@ entry:
define <8 x i16> @test59(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test59:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test59:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1852,12 +1852,12 @@ entry:
define <8 x i16> @test60(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test60:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test60:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1868,7 +1868,7 @@ entry:
define <8 x i16> @test61(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test61:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1881,12 +1881,12 @@ define <8 x i16> @test61(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test61:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test61:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1897,7 +1897,7 @@ entry:
define <8 x i16> @test62(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test62:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psubusw %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1909,12 +1909,12 @@ define <8 x i16> @test62(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test62:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test62:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1925,7 +1925,7 @@ entry:
define <8 x i16> @test63(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test63:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1938,12 +1938,12 @@ define <8 x i16> @test63(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test63:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test63:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1954,7 +1954,7 @@ entry:
define <8 x i16> @test64(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: psubusw %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1966,12 +1966,12 @@ define <8 x i16> @test64(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test64:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -1982,7 +1982,7 @@ entry:
define <4 x i32> @test65(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test65:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
@@ -1992,12 +1992,12 @@ define <4 x i32> @test65(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test65:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test65:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -2008,7 +2008,7 @@ entry:
define <4 x i32> @test66(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test66:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
@@ -2020,12 +2020,12 @@ define <4 x i32> @test66(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test66:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test66:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -2036,7 +2036,7 @@ entry:
define <4 x i32> @test67(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test67:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
@@ -2046,12 +2046,12 @@ define <4 x i32> @test67(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test67:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test67:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -2062,7 +2062,7 @@ entry:
define <4 x i32> @test68(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test68:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
@@ -2074,12 +2074,12 @@ define <4 x i32> @test68(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test68:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test68:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -2090,7 +2090,7 @@ entry:
define <4 x i32> @test69(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test69:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -2103,12 +2103,12 @@ define <4 x i32> @test69(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test69:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test69:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -2119,7 +2119,7 @@ entry:
define <4 x i32> @test70(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test70:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
@@ -2134,12 +2134,12 @@ define <4 x i32> @test70(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test70:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test70:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -2150,7 +2150,7 @@ entry:
define <4 x i32> @test71(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test71:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -2163,12 +2163,12 @@ define <4 x i32> @test71(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test71:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test71:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -2179,7 +2179,7 @@ entry:
define <4 x i32> @test72(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test72:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
@@ -2194,12 +2194,12 @@ define <4 x i32> @test72(<4 x i32> %a, <4 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test72:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm1, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test72:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -2210,7 +2210,7 @@ entry:
define <32 x i8> @test73(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: test73:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -2226,13 +2226,13 @@ define <32 x i8> @test73(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test73:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm2, %xmm0
; SSE4-NEXT: pmaxsb %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test73:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -2241,12 +2241,12 @@ define <32 x i8> @test73(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test73:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test73:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2257,7 +2257,7 @@ entry:
define <32 x i8> @test74(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: test74:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pcmpgtb %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
@@ -2277,13 +2277,13 @@ define <32 x i8> @test74(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test74:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm2, %xmm0
; SSE4-NEXT: pmaxsb %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test74:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -2292,12 +2292,12 @@ define <32 x i8> @test74(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test74:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test74:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2308,7 +2308,7 @@ entry:
define <32 x i8> @test75(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: test75:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
@@ -2324,13 +2324,13 @@ define <32 x i8> @test75(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test75:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm2, %xmm0
; SSE4-NEXT: pminsb %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test75:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -2339,12 +2339,12 @@ define <32 x i8> @test75(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test75:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test75:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2355,7 +2355,7 @@ entry:
define <32 x i8> @test76(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: test76:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtb %xmm1, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
@@ -2375,13 +2375,13 @@ define <32 x i8> @test76(<32 x i8> %a, <32 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test76:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm2, %xmm0
; SSE4-NEXT: pminsb %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test76:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -2390,12 +2390,12 @@ define <32 x i8> @test76(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test76:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test76:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2406,13 +2406,13 @@ entry:
define <32 x i8> @test77(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: test77:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm2, %xmm0
; SSE-NEXT: pmaxub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test77:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -2421,12 +2421,12 @@ define <32 x i8> @test77(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test77:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test77:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2437,13 +2437,13 @@ entry:
define <32 x i8> @test78(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: test78:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm2, %xmm0
; SSE-NEXT: pmaxub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test78:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -2452,12 +2452,12 @@ define <32 x i8> @test78(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test78:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test78:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2468,13 +2468,13 @@ entry:
define <32 x i8> @test79(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: test79:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm2, %xmm0
; SSE-NEXT: pminub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test79:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -2483,12 +2483,12 @@ define <32 x i8> @test79(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test79:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test79:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2499,13 +2499,13 @@ entry:
define <32 x i8> @test80(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: test80:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm2, %xmm0
; SSE-NEXT: pminub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test80:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -2514,12 +2514,12 @@ define <32 x i8> @test80(<32 x i8> %a, <32 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test80:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test80:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2530,13 +2530,13 @@ entry:
define <16 x i16> @test81(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: test81:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm2, %xmm0
; SSE-NEXT: pmaxsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test81:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -2545,12 +2545,12 @@ define <16 x i16> @test81(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test81:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test81:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2561,13 +2561,13 @@ entry:
define <16 x i16> @test82(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: test82:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm2, %xmm0
; SSE-NEXT: pmaxsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test82:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -2576,12 +2576,12 @@ define <16 x i16> @test82(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test82:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test82:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2592,13 +2592,13 @@ entry:
define <16 x i16> @test83(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: test83:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm2, %xmm0
; SSE-NEXT: pminsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test83:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -2607,12 +2607,12 @@ define <16 x i16> @test83(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test83:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test83:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2623,13 +2623,13 @@ entry:
define <16 x i16> @test84(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: test84:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm2, %xmm0
; SSE-NEXT: pminsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test84:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -2638,12 +2638,12 @@ define <16 x i16> @test84(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test84:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test84:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2654,7 +2654,7 @@ entry:
define <16 x i16> @test85(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: test85:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pxor %xmm4, %xmm6
@@ -2676,13 +2676,13 @@ define <16 x i16> @test85(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test85:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm2, %xmm0
; SSE4-NEXT: pmaxuw %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test85:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -2691,12 +2691,12 @@ define <16 x i16> @test85(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test85:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test85:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2707,7 +2707,7 @@ entry:
define <16 x i16> @test86(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: test86:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubusw %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm6
@@ -2726,13 +2726,13 @@ define <16 x i16> @test86(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test86:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm2, %xmm0
; SSE4-NEXT: pmaxuw %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test86:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -2741,12 +2741,12 @@ define <16 x i16> @test86(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test86:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test86:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2757,7 +2757,7 @@ entry:
define <16 x i16> @test87(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: test87:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pxor %xmm4, %xmm6
@@ -2779,13 +2779,13 @@ define <16 x i16> @test87(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test87:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm2, %xmm0
; SSE4-NEXT: pminuw %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test87:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -2794,12 +2794,12 @@ define <16 x i16> @test87(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test87:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test87:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2810,7 +2810,7 @@ entry:
define <16 x i16> @test88(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: test88:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psubusw %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm6
@@ -2829,13 +2829,13 @@ define <16 x i16> @test88(<16 x i16> %a, <16 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test88:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm2, %xmm0
; SSE4-NEXT: pminuw %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test88:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -2844,12 +2844,12 @@ define <16 x i16> @test88(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test88:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test88:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2860,7 +2860,7 @@ entry:
define <8 x i32> @test89(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test89:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -2876,13 +2876,13 @@ define <8 x i32> @test89(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test89:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm2, %xmm0
; SSE4-NEXT: pmaxsd %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test89:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -2891,12 +2891,12 @@ define <8 x i32> @test89(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test89:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test89:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2907,7 +2907,7 @@ entry:
define <8 x i32> @test90(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test90:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
@@ -2927,13 +2927,13 @@ define <8 x i32> @test90(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test90:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm2, %xmm0
; SSE4-NEXT: pmaxsd %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test90:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -2942,12 +2942,12 @@ define <8 x i32> @test90(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test90:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test90:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -2958,7 +2958,7 @@ entry:
define <8 x i32> @test91(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test91:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
@@ -2974,13 +2974,13 @@ define <8 x i32> @test91(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test91:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm2, %xmm0
; SSE4-NEXT: pminsd %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test91:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -2989,12 +2989,12 @@ define <8 x i32> @test91(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test91:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test91:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -3005,7 +3005,7 @@ entry:
define <8 x i32> @test92(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test92:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
@@ -3025,13 +3025,13 @@ define <8 x i32> @test92(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test92:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm2, %xmm0
; SSE4-NEXT: pminsd %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test92:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -3040,12 +3040,12 @@ define <8 x i32> @test92(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test92:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test92:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -3056,7 +3056,7 @@ entry:
define <8 x i32> @test93(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test93:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pxor %xmm4, %xmm6
@@ -3078,13 +3078,13 @@ define <8 x i32> @test93(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test93:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm2, %xmm0
; SSE4-NEXT: pmaxud %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test93:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -3093,12 +3093,12 @@ define <8 x i32> @test93(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test93:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test93:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -3109,7 +3109,7 @@ entry:
define <8 x i32> @test94(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test94:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm4
@@ -3135,13 +3135,13 @@ define <8 x i32> @test94(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test94:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm2, %xmm0
; SSE4-NEXT: pmaxud %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test94:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -3150,12 +3150,12 @@ define <8 x i32> @test94(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test94:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test94:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -3166,7 +3166,7 @@ entry:
define <8 x i32> @test95(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test95:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pxor %xmm4, %xmm6
@@ -3188,13 +3188,13 @@ define <8 x i32> @test95(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test95:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm2, %xmm0
; SSE4-NEXT: pminud %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test95:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -3203,12 +3203,12 @@ define <8 x i32> @test95(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test95:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test95:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -3219,7 +3219,7 @@ entry:
define <8 x i32> @test96(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: test96:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm4
@@ -3245,13 +3245,13 @@ define <8 x i32> @test96(<8 x i32> %a, <8 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test96:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm2, %xmm0
; SSE4-NEXT: pminud %xmm3, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test96:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -3260,12 +3260,12 @@ define <8 x i32> @test96(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test96:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test96:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
entry:
@@ -3278,7 +3278,7 @@ entry:
define <64 x i8> @test97(<64 x i8> %a, <64 x i8> %b) {
; SSE2-LABEL: test97:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: pcmpgtb %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm6, %xmm9
@@ -3302,7 +3302,7 @@ define <64 x i8> @test97(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test97:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm4, %xmm0
; SSE4-NEXT: pminsb %xmm5, %xmm1
; SSE4-NEXT: pminsb %xmm6, %xmm2
@@ -3310,7 +3310,7 @@ define <64 x i8> @test97(<64 x i8> %a, <64 x i8> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test97:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsb %xmm4, %xmm5, %xmm4
@@ -3324,13 +3324,13 @@ define <64 x i8> @test97(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test97:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test97:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3341,7 +3341,7 @@ entry:
define <64 x i8> @test98(<64 x i8> %a, <64 x i8> %b) {
; SSE2-LABEL: test98:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm8, %xmm12
@@ -3377,7 +3377,7 @@ define <64 x i8> @test98(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test98:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm4, %xmm0
; SSE4-NEXT: pminsb %xmm5, %xmm1
; SSE4-NEXT: pminsb %xmm6, %xmm2
@@ -3385,7 +3385,7 @@ define <64 x i8> @test98(<64 x i8> %a, <64 x i8> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test98:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsb %xmm4, %xmm5, %xmm4
@@ -3399,13 +3399,13 @@ define <64 x i8> @test98(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test98:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test98:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3416,7 +3416,7 @@ entry:
define <64 x i8> @test99(<64 x i8> %a, <64 x i8> %b) {
; SSE2-LABEL: test99:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: pcmpgtb %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm9
@@ -3443,7 +3443,7 @@ define <64 x i8> @test99(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test99:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm4, %xmm0
; SSE4-NEXT: pmaxsb %xmm5, %xmm1
; SSE4-NEXT: pmaxsb %xmm6, %xmm2
@@ -3451,7 +3451,7 @@ define <64 x i8> @test99(<64 x i8> %a, <64 x i8> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test99:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
@@ -3465,13 +3465,13 @@ define <64 x i8> @test99(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test99:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test99:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3482,7 +3482,7 @@ entry:
define <64 x i8> @test100(<64 x i8> %a, <64 x i8> %b) {
; SSE2-LABEL: test100:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
@@ -3518,7 +3518,7 @@ define <64 x i8> @test100(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test100:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm4, %xmm0
; SSE4-NEXT: pmaxsb %xmm5, %xmm1
; SSE4-NEXT: pmaxsb %xmm6, %xmm2
@@ -3526,7 +3526,7 @@ define <64 x i8> @test100(<64 x i8> %a, <64 x i8> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test100:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
@@ -3540,13 +3540,13 @@ define <64 x i8> @test100(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test100:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test100:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3557,7 +3557,7 @@ entry:
define <64 x i8> @test101(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: test101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm4, %xmm0
; SSE-NEXT: pminub %xmm5, %xmm1
; SSE-NEXT: pminub %xmm6, %xmm2
@@ -3565,7 +3565,7 @@ define <64 x i8> @test101(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
@@ -3579,13 +3579,13 @@ define <64 x i8> @test101(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminub %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminub %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test101:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminub %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3596,7 +3596,7 @@ entry:
define <64 x i8> @test102(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: test102:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm4, %xmm0
; SSE-NEXT: pminub %xmm5, %xmm1
; SSE-NEXT: pminub %xmm6, %xmm2
@@ -3604,7 +3604,7 @@ define <64 x i8> @test102(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test102:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
@@ -3618,13 +3618,13 @@ define <64 x i8> @test102(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test102:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminub %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminub %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test102:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminub %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3635,7 +3635,7 @@ entry:
define <64 x i8> @test103(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: test103:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm4, %xmm0
; SSE-NEXT: pmaxub %xmm5, %xmm1
; SSE-NEXT: pmaxub %xmm6, %xmm2
@@ -3643,7 +3643,7 @@ define <64 x i8> @test103(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test103:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxub %xmm4, %xmm5, %xmm4
@@ -3657,13 +3657,13 @@ define <64 x i8> @test103(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test103:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxub %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxub %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test103:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3674,7 +3674,7 @@ entry:
define <64 x i8> @test104(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: test104:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm4, %xmm0
; SSE-NEXT: pmaxub %xmm5, %xmm1
; SSE-NEXT: pmaxub %xmm6, %xmm2
@@ -3682,7 +3682,7 @@ define <64 x i8> @test104(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test104:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxub %xmm4, %xmm5, %xmm4
@@ -3696,13 +3696,13 @@ define <64 x i8> @test104(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test104:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxub %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxub %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test104:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3713,7 +3713,7 @@ entry:
define <32 x i16> @test105(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: test105:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm4, %xmm0
; SSE-NEXT: pminsw %xmm5, %xmm1
; SSE-NEXT: pminsw %xmm6, %xmm2
@@ -3721,7 +3721,7 @@ define <32 x i16> @test105(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test105:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsw %xmm4, %xmm5, %xmm4
@@ -3735,13 +3735,13 @@ define <32 x i16> @test105(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test105:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test105:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3752,7 +3752,7 @@ entry:
define <32 x i16> @test106(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: test106:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm4, %xmm0
; SSE-NEXT: pminsw %xmm5, %xmm1
; SSE-NEXT: pminsw %xmm6, %xmm2
@@ -3760,7 +3760,7 @@ define <32 x i16> @test106(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test106:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsw %xmm4, %xmm5, %xmm4
@@ -3774,13 +3774,13 @@ define <32 x i16> @test106(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test106:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test106:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3791,7 +3791,7 @@ entry:
define <32 x i16> @test107(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: test107:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm4, %xmm0
; SSE-NEXT: pmaxsw %xmm5, %xmm1
; SSE-NEXT: pmaxsw %xmm6, %xmm2
@@ -3799,7 +3799,7 @@ define <32 x i16> @test107(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test107:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
@@ -3813,13 +3813,13 @@ define <32 x i16> @test107(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test107:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test107:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3830,7 +3830,7 @@ entry:
define <32 x i16> @test108(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: test108:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm4, %xmm0
; SSE-NEXT: pmaxsw %xmm5, %xmm1
; SSE-NEXT: pmaxsw %xmm6, %xmm2
@@ -3838,7 +3838,7 @@ define <32 x i16> @test108(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test108:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
@@ -3852,13 +3852,13 @@ define <32 x i16> @test108(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test108:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test108:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3869,7 +3869,7 @@ entry:
define <32 x i16> @test109(<32 x i16> %a, <32 x i16> %b) {
; SSE2-LABEL: test109:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm3, %xmm9
; SSE2-NEXT: pxor %xmm10, %xmm9
@@ -3905,7 +3905,7 @@ define <32 x i16> @test109(<32 x i16> %a, <32 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test109:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm4, %xmm0
; SSE4-NEXT: pminuw %xmm5, %xmm1
; SSE4-NEXT: pminuw %xmm6, %xmm2
@@ -3913,7 +3913,7 @@ define <32 x i16> @test109(<32 x i16> %a, <32 x i16> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test109:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
@@ -3927,13 +3927,13 @@ define <32 x i16> @test109(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test109:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test109:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -3944,7 +3944,7 @@ entry:
define <32 x i16> @test110(<32 x i16> %a, <32 x i16> %b) {
; SSE2-LABEL: test110:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm10
@@ -3974,7 +3974,7 @@ define <32 x i16> @test110(<32 x i16> %a, <32 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test110:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm4, %xmm0
; SSE4-NEXT: pminuw %xmm5, %xmm1
; SSE4-NEXT: pminuw %xmm6, %xmm2
@@ -3982,7 +3982,7 @@ define <32 x i16> @test110(<32 x i16> %a, <32 x i16> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test110:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
@@ -3996,13 +3996,13 @@ define <32 x i16> @test110(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test110:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test110:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -4013,7 +4013,7 @@ entry:
define <32 x i16> @test111(<32 x i16> %a, <32 x i16> %b) {
; SSE2-LABEL: test111:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm7, %xmm9
; SSE2-NEXT: pxor %xmm11, %xmm9
@@ -4052,7 +4052,7 @@ define <32 x i16> @test111(<32 x i16> %a, <32 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test111:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm4, %xmm0
; SSE4-NEXT: pmaxuw %xmm5, %xmm1
; SSE4-NEXT: pmaxuw %xmm6, %xmm2
@@ -4060,7 +4060,7 @@ define <32 x i16> @test111(<32 x i16> %a, <32 x i16> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test111:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxuw %xmm4, %xmm5, %xmm4
@@ -4074,13 +4074,13 @@ define <32 x i16> @test111(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test111:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test111:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -4091,7 +4091,7 @@ entry:
define <32 x i16> @test112(<32 x i16> %a, <32 x i16> %b) {
; SSE2-LABEL: test112:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: psubusw %xmm3, %xmm8
; SSE2-NEXT: pxor %xmm9, %xmm9
@@ -4120,7 +4120,7 @@ define <32 x i16> @test112(<32 x i16> %a, <32 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test112:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm4, %xmm0
; SSE4-NEXT: pmaxuw %xmm5, %xmm1
; SSE4-NEXT: pmaxuw %xmm6, %xmm2
@@ -4128,7 +4128,7 @@ define <32 x i16> @test112(<32 x i16> %a, <32 x i16> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test112:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxuw %xmm4, %xmm5, %xmm4
@@ -4142,13 +4142,13 @@ define <32 x i16> @test112(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test112:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test112:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -4159,7 +4159,7 @@ entry:
define <16 x i32> @test113(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test113:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: pcmpgtd %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm6, %xmm9
@@ -4183,7 +4183,7 @@ define <16 x i32> @test113(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test113:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm4, %xmm0
; SSE4-NEXT: pminsd %xmm5, %xmm1
; SSE4-NEXT: pminsd %xmm6, %xmm2
@@ -4191,7 +4191,7 @@ define <16 x i32> @test113(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test113:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsd %xmm4, %xmm5, %xmm4
@@ -4205,13 +4205,13 @@ define <16 x i32> @test113(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test113:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test113:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4222,7 +4222,7 @@ entry:
define <16 x i32> @test114(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test114:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm8, %xmm12
@@ -4258,7 +4258,7 @@ define <16 x i32> @test114(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test114:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm4, %xmm0
; SSE4-NEXT: pminsd %xmm5, %xmm1
; SSE4-NEXT: pminsd %xmm6, %xmm2
@@ -4266,7 +4266,7 @@ define <16 x i32> @test114(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test114:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsd %xmm4, %xmm5, %xmm4
@@ -4280,13 +4280,13 @@ define <16 x i32> @test114(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test114:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test114:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4297,7 +4297,7 @@ entry:
define <16 x i32> @test115(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test115:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: pcmpgtd %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm9
@@ -4324,7 +4324,7 @@ define <16 x i32> @test115(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test115:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm4, %xmm0
; SSE4-NEXT: pmaxsd %xmm5, %xmm1
; SSE4-NEXT: pmaxsd %xmm6, %xmm2
@@ -4332,7 +4332,7 @@ define <16 x i32> @test115(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test115:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsd %xmm4, %xmm5, %xmm4
@@ -4346,13 +4346,13 @@ define <16 x i32> @test115(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test115:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test115:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4363,7 +4363,7 @@ entry:
define <16 x i32> @test116(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test116:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
@@ -4399,7 +4399,7 @@ define <16 x i32> @test116(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test116:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm4, %xmm0
; SSE4-NEXT: pmaxsd %xmm5, %xmm1
; SSE4-NEXT: pmaxsd %xmm6, %xmm2
@@ -4407,7 +4407,7 @@ define <16 x i32> @test116(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test116:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsd %xmm4, %xmm5, %xmm4
@@ -4421,13 +4421,13 @@ define <16 x i32> @test116(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test116:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test116:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4438,7 +4438,7 @@ entry:
define <16 x i32> @test117(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test117:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm9
; SSE2-NEXT: pxor %xmm10, %xmm9
@@ -4474,7 +4474,7 @@ define <16 x i32> @test117(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test117:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm4, %xmm0
; SSE4-NEXT: pminud %xmm5, %xmm1
; SSE4-NEXT: pminud %xmm6, %xmm2
@@ -4482,7 +4482,7 @@ define <16 x i32> @test117(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test117:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminud %xmm4, %xmm5, %xmm4
@@ -4496,13 +4496,13 @@ define <16 x i32> @test117(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test117:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test117:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminud %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4513,7 +4513,7 @@ entry:
define <16 x i32> @test118(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test118:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa {{.*#+}} xmm14 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm7, %xmm0
@@ -4561,7 +4561,7 @@ define <16 x i32> @test118(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test118:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm4, %xmm0
; SSE4-NEXT: pminud %xmm5, %xmm1
; SSE4-NEXT: pminud %xmm6, %xmm2
@@ -4569,7 +4569,7 @@ define <16 x i32> @test118(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test118:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminud %xmm4, %xmm5, %xmm4
@@ -4583,13 +4583,13 @@ define <16 x i32> @test118(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test118:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test118:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminud %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4600,7 +4600,7 @@ entry:
define <16 x i32> @test119(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test119:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm7, %xmm9
; SSE2-NEXT: pxor %xmm11, %xmm9
@@ -4639,7 +4639,7 @@ define <16 x i32> @test119(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test119:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm4, %xmm0
; SSE4-NEXT: pmaxud %xmm5, %xmm1
; SSE4-NEXT: pmaxud %xmm6, %xmm2
@@ -4647,7 +4647,7 @@ define <16 x i32> @test119(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test119:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxud %xmm4, %xmm5, %xmm4
@@ -4661,13 +4661,13 @@ define <16 x i32> @test119(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test119:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxud %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test119:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4678,7 +4678,7 @@ entry:
define <16 x i32> @test120(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test120:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa {{.*#+}} xmm14 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm0
@@ -4726,7 +4726,7 @@ define <16 x i32> @test120(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test120:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm4, %xmm0
; SSE4-NEXT: pmaxud %xmm5, %xmm1
; SSE4-NEXT: pmaxud %xmm6, %xmm2
@@ -4734,7 +4734,7 @@ define <16 x i32> @test120(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test120:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxud %xmm4, %xmm5, %xmm4
@@ -4748,13 +4748,13 @@ define <16 x i32> @test120(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test120:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxud %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test120:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4765,7 +4765,7 @@ entry:
define <8 x i64> @test121(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test121:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: pxor %xmm9, %xmm8
@@ -4829,7 +4829,7 @@ define <8 x i64> @test121(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test121:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa %xmm7, %xmm9
; SSE4-NEXT: pcmpgtq %xmm3, %xmm9
@@ -4853,7 +4853,7 @@ define <8 x i64> @test121(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test121:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -4869,7 +4869,7 @@ define <8 x i64> @test121(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test121:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm4
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm5
; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
@@ -4877,7 +4877,7 @@ define <8 x i64> @test121(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test121:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -4888,7 +4888,7 @@ entry:
define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test122:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
@@ -4969,7 +4969,7 @@ define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test122:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa %xmm3, %xmm9
; SSE4-NEXT: pcmpgtq %xmm7, %xmm9
@@ -4997,7 +4997,7 @@ define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test122:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -5018,7 +5018,7 @@ define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test122:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
; AVX2-NEXT: vpxor %ymm5, %ymm4, %ymm4
@@ -5029,7 +5029,7 @@ define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test122:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -5040,7 +5040,7 @@ entry:
define <8 x i64> @test123(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test123:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: pxor %xmm9, %xmm8
@@ -5104,7 +5104,7 @@ define <8 x i64> @test123(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test123:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa %xmm3, %xmm9
; SSE4-NEXT: pcmpgtq %xmm7, %xmm9
@@ -5127,7 +5127,7 @@ define <8 x i64> @test123(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test123:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -5143,7 +5143,7 @@ define <8 x i64> @test123(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test123:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm5
; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
@@ -5151,7 +5151,7 @@ define <8 x i64> @test123(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test123:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -5162,7 +5162,7 @@ entry:
define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test124:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
@@ -5244,7 +5244,7 @@ define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test124:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa %xmm7, %xmm9
; SSE4-NEXT: pcmpgtq %xmm3, %xmm9
@@ -5273,7 +5273,7 @@ define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test124:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -5294,7 +5294,7 @@ define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test124:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm4
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
; AVX2-NEXT: vpxor %ymm5, %ymm4, %ymm4
@@ -5305,7 +5305,7 @@ define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test124:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -5316,7 +5316,7 @@ entry:
define <8 x i64> @test125(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test125:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: pxor %xmm9, %xmm8
@@ -5380,7 +5380,7 @@ define <8 x i64> @test125(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test125:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm3, %xmm10
@@ -5416,7 +5416,7 @@ define <8 x i64> @test125(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test125:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
@@ -5441,7 +5441,7 @@ define <8 x i64> @test125(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test125:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm6
@@ -5454,7 +5454,7 @@ define <8 x i64> @test125(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test125:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -5465,7 +5465,7 @@ entry:
define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test126:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
@@ -5546,7 +5546,7 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test126:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm9
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm7, %xmm10
@@ -5587,7 +5587,7 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test126:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
@@ -5617,7 +5617,7 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test126:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm6
@@ -5633,7 +5633,7 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test126:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -5644,7 +5644,7 @@ entry:
define <8 x i64> @test127(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test127:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: pxor %xmm9, %xmm8
@@ -5708,7 +5708,7 @@ define <8 x i64> @test127(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test127:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm7, %xmm10
@@ -5744,7 +5744,7 @@ define <8 x i64> @test127(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test127:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
@@ -5769,7 +5769,7 @@ define <8 x i64> @test127(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test127:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm6
@@ -5782,7 +5782,7 @@ define <8 x i64> @test127(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test127:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -5793,7 +5793,7 @@ entry:
define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test128:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
@@ -5875,7 +5875,7 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test128:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm9
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm3, %xmm10
@@ -5916,7 +5916,7 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test128:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
@@ -5946,7 +5946,7 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test128:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm6
@@ -5962,7 +5962,7 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test128:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -5973,7 +5973,7 @@ entry:
define <64 x i8> @test129(<64 x i8> %a, <64 x i8> %b) {
; SSE2-LABEL: test129:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm7, %xmm3
; SSE2-NEXT: pcmpgtb %xmm8, %xmm3
@@ -6001,7 +6001,7 @@ define <64 x i8> @test129(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test129:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm4, %xmm0
; SSE4-NEXT: pmaxsb %xmm5, %xmm1
; SSE4-NEXT: pmaxsb %xmm6, %xmm2
@@ -6009,7 +6009,7 @@ define <64 x i8> @test129(<64 x i8> %a, <64 x i8> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test129:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
@@ -6023,13 +6023,13 @@ define <64 x i8> @test129(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test129:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test129:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6040,7 +6040,7 @@ entry:
define <64 x i8> @test130(<64 x i8> %a, <64 x i8> %b) {
; SSE2-LABEL: test130:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm8
; SSE2-NEXT: movdqa %xmm3, %xmm12
; SSE2-NEXT: pcmpgtb %xmm7, %xmm12
@@ -6076,7 +6076,7 @@ define <64 x i8> @test130(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test130:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsb %xmm4, %xmm0
; SSE4-NEXT: pmaxsb %xmm5, %xmm1
; SSE4-NEXT: pmaxsb %xmm6, %xmm2
@@ -6084,7 +6084,7 @@ define <64 x i8> @test130(<64 x i8> %a, <64 x i8> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test130:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
@@ -6098,13 +6098,13 @@ define <64 x i8> @test130(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test130:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test130:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6115,7 +6115,7 @@ entry:
define <64 x i8> @test131(<64 x i8> %a, <64 x i8> %b) {
; SSE2-LABEL: test131:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: pcmpgtb %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm9
@@ -6142,7 +6142,7 @@ define <64 x i8> @test131(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test131:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm4, %xmm0
; SSE4-NEXT: pminsb %xmm5, %xmm1
; SSE4-NEXT: pminsb %xmm6, %xmm2
@@ -6150,7 +6150,7 @@ define <64 x i8> @test131(<64 x i8> %a, <64 x i8> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test131:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsb %xmm4, %xmm5, %xmm4
@@ -6164,13 +6164,13 @@ define <64 x i8> @test131(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test131:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test131:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6181,7 +6181,7 @@ entry:
define <64 x i8> @test132(<64 x i8> %a, <64 x i8> %b) {
; SSE2-LABEL: test132:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
@@ -6217,7 +6217,7 @@ define <64 x i8> @test132(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test132:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsb %xmm4, %xmm0
; SSE4-NEXT: pminsb %xmm5, %xmm1
; SSE4-NEXT: pminsb %xmm6, %xmm2
@@ -6225,7 +6225,7 @@ define <64 x i8> @test132(<64 x i8> %a, <64 x i8> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test132:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsb %xmm4, %xmm5, %xmm4
@@ -6239,13 +6239,13 @@ define <64 x i8> @test132(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test132:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test132:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6256,7 +6256,7 @@ entry:
define <64 x i8> @test133(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: test133:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm4, %xmm0
; SSE-NEXT: pmaxub %xmm5, %xmm1
; SSE-NEXT: pmaxub %xmm6, %xmm2
@@ -6264,7 +6264,7 @@ define <64 x i8> @test133(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test133:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxub %xmm4, %xmm5, %xmm4
@@ -6278,13 +6278,13 @@ define <64 x i8> @test133(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test133:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxub %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxub %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test133:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6295,7 +6295,7 @@ entry:
define <64 x i8> @test134(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: test134:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxub %xmm4, %xmm0
; SSE-NEXT: pmaxub %xmm5, %xmm1
; SSE-NEXT: pmaxub %xmm6, %xmm2
@@ -6303,7 +6303,7 @@ define <64 x i8> @test134(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test134:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxub %xmm4, %xmm5, %xmm4
@@ -6317,13 +6317,13 @@ define <64 x i8> @test134(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test134:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxub %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxub %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test134:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6334,7 +6334,7 @@ entry:
define <64 x i8> @test135(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: test135:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm4, %xmm0
; SSE-NEXT: pminub %xmm5, %xmm1
; SSE-NEXT: pminub %xmm6, %xmm2
@@ -6342,7 +6342,7 @@ define <64 x i8> @test135(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test135:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
@@ -6356,13 +6356,13 @@ define <64 x i8> @test135(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test135:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminub %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminub %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test135:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminub %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6373,7 +6373,7 @@ entry:
define <64 x i8> @test136(<64 x i8> %a, <64 x i8> %b) {
; SSE-LABEL: test136:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminub %xmm4, %xmm0
; SSE-NEXT: pminub %xmm5, %xmm1
; SSE-NEXT: pminub %xmm6, %xmm2
@@ -6381,7 +6381,7 @@ define <64 x i8> @test136(<64 x i8> %a, <64 x i8> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test136:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
@@ -6395,13 +6395,13 @@ define <64 x i8> @test136(<64 x i8> %a, <64 x i8> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test136:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminub %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminub %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test136:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminub %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6412,7 +6412,7 @@ entry:
define <32 x i16> @test137(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: test137:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm4, %xmm0
; SSE-NEXT: pmaxsw %xmm5, %xmm1
; SSE-NEXT: pmaxsw %xmm6, %xmm2
@@ -6420,7 +6420,7 @@ define <32 x i16> @test137(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test137:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
@@ -6434,13 +6434,13 @@ define <32 x i16> @test137(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test137:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test137:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6451,7 +6451,7 @@ entry:
define <32 x i16> @test138(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: test138:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmaxsw %xmm4, %xmm0
; SSE-NEXT: pmaxsw %xmm5, %xmm1
; SSE-NEXT: pmaxsw %xmm6, %xmm2
@@ -6459,7 +6459,7 @@ define <32 x i16> @test138(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test138:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
@@ -6473,13 +6473,13 @@ define <32 x i16> @test138(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test138:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test138:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6490,7 +6490,7 @@ entry:
define <32 x i16> @test139(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: test139:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm4, %xmm0
; SSE-NEXT: pminsw %xmm5, %xmm1
; SSE-NEXT: pminsw %xmm6, %xmm2
@@ -6498,7 +6498,7 @@ define <32 x i16> @test139(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test139:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsw %xmm4, %xmm5, %xmm4
@@ -6512,13 +6512,13 @@ define <32 x i16> @test139(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test139:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test139:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6529,7 +6529,7 @@ entry:
define <32 x i16> @test140(<32 x i16> %a, <32 x i16> %b) {
; SSE-LABEL: test140:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pminsw %xmm4, %xmm0
; SSE-NEXT: pminsw %xmm5, %xmm1
; SSE-NEXT: pminsw %xmm6, %xmm2
@@ -6537,7 +6537,7 @@ define <32 x i16> @test140(<32 x i16> %a, <32 x i16> %b) {
; SSE-NEXT: retq
;
; AVX1-LABEL: test140:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsw %xmm4, %xmm5, %xmm4
@@ -6551,13 +6551,13 @@ define <32 x i16> @test140(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test140:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test140:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6568,7 +6568,7 @@ entry:
define <32 x i16> @test141(<32 x i16> %a, <32 x i16> %b) {
; SSE2-LABEL: test141:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm11
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm3, %xmm9
@@ -6608,7 +6608,7 @@ define <32 x i16> @test141(<32 x i16> %a, <32 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test141:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm4, %xmm0
; SSE4-NEXT: pmaxuw %xmm5, %xmm1
; SSE4-NEXT: pmaxuw %xmm6, %xmm2
@@ -6616,7 +6616,7 @@ define <32 x i16> @test141(<32 x i16> %a, <32 x i16> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test141:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxuw %xmm4, %xmm5, %xmm4
@@ -6630,13 +6630,13 @@ define <32 x i16> @test141(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test141:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test141:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6647,7 +6647,7 @@ entry:
define <32 x i16> @test142(<32 x i16> %a, <32 x i16> %b) {
; SSE2-LABEL: test142:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm10
@@ -6677,7 +6677,7 @@ define <32 x i16> @test142(<32 x i16> %a, <32 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test142:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxuw %xmm4, %xmm0
; SSE4-NEXT: pmaxuw %xmm5, %xmm1
; SSE4-NEXT: pmaxuw %xmm6, %xmm2
@@ -6685,7 +6685,7 @@ define <32 x i16> @test142(<32 x i16> %a, <32 x i16> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test142:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxuw %xmm4, %xmm5, %xmm4
@@ -6699,13 +6699,13 @@ define <32 x i16> @test142(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test142:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test142:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6716,7 +6716,7 @@ entry:
define <32 x i16> @test143(<32 x i16> %a, <32 x i16> %b) {
; SSE2-LABEL: test143:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm11
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm7, %xmm9
@@ -6756,7 +6756,7 @@ define <32 x i16> @test143(<32 x i16> %a, <32 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test143:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm4, %xmm0
; SSE4-NEXT: pminuw %xmm5, %xmm1
; SSE4-NEXT: pminuw %xmm6, %xmm2
@@ -6764,7 +6764,7 @@ define <32 x i16> @test143(<32 x i16> %a, <32 x i16> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test143:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
@@ -6778,13 +6778,13 @@ define <32 x i16> @test143(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test143:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test143:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6795,7 +6795,7 @@ entry:
define <32 x i16> @test144(<32 x i16> %a, <32 x i16> %b) {
; SSE2-LABEL: test144:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm10
@@ -6828,7 +6828,7 @@ define <32 x i16> @test144(<32 x i16> %a, <32 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test144:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminuw %xmm4, %xmm0
; SSE4-NEXT: pminuw %xmm5, %xmm1
; SSE4-NEXT: pminuw %xmm6, %xmm2
@@ -6836,7 +6836,7 @@ define <32 x i16> @test144(<32 x i16> %a, <32 x i16> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test144:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
@@ -6850,13 +6850,13 @@ define <32 x i16> @test144(<32 x i16> %a, <32 x i16> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test144:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminuw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test144:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
entry:
@@ -6867,7 +6867,7 @@ entry:
define <16 x i32> @test145(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test145:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm7, %xmm3
; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
@@ -6895,7 +6895,7 @@ define <16 x i32> @test145(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test145:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm4, %xmm0
; SSE4-NEXT: pmaxsd %xmm5, %xmm1
; SSE4-NEXT: pmaxsd %xmm6, %xmm2
@@ -6903,7 +6903,7 @@ define <16 x i32> @test145(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test145:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsd %xmm4, %xmm5, %xmm4
@@ -6917,13 +6917,13 @@ define <16 x i32> @test145(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test145:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test145:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -6934,7 +6934,7 @@ entry:
define <16 x i32> @test146(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test146:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm8
; SSE2-NEXT: movdqa %xmm3, %xmm12
; SSE2-NEXT: pcmpgtd %xmm7, %xmm12
@@ -6970,7 +6970,7 @@ define <16 x i32> @test146(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test146:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxsd %xmm4, %xmm0
; SSE4-NEXT: pmaxsd %xmm5, %xmm1
; SSE4-NEXT: pmaxsd %xmm6, %xmm2
@@ -6978,7 +6978,7 @@ define <16 x i32> @test146(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test146:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxsd %xmm4, %xmm5, %xmm4
@@ -6992,13 +6992,13 @@ define <16 x i32> @test146(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test146:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test146:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7009,7 +7009,7 @@ entry:
define <16 x i32> @test147(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test147:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: pcmpgtd %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm9
@@ -7036,7 +7036,7 @@ define <16 x i32> @test147(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test147:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm4, %xmm0
; SSE4-NEXT: pminsd %xmm5, %xmm1
; SSE4-NEXT: pminsd %xmm6, %xmm2
@@ -7044,7 +7044,7 @@ define <16 x i32> @test147(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test147:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsd %xmm4, %xmm5, %xmm4
@@ -7058,13 +7058,13 @@ define <16 x i32> @test147(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test147:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test147:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7075,7 +7075,7 @@ entry:
define <16 x i32> @test148(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test148:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
@@ -7111,7 +7111,7 @@ define <16 x i32> @test148(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test148:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminsd %xmm4, %xmm0
; SSE4-NEXT: pminsd %xmm5, %xmm1
; SSE4-NEXT: pminsd %xmm6, %xmm2
@@ -7119,7 +7119,7 @@ define <16 x i32> @test148(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test148:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminsd %xmm4, %xmm5, %xmm4
@@ -7133,13 +7133,13 @@ define <16 x i32> @test148(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test148:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test148:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7150,7 +7150,7 @@ entry:
define <16 x i32> @test149(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test149:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm11
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm9
@@ -7190,7 +7190,7 @@ define <16 x i32> @test149(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test149:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm4, %xmm0
; SSE4-NEXT: pmaxud %xmm5, %xmm1
; SSE4-NEXT: pmaxud %xmm6, %xmm2
@@ -7198,7 +7198,7 @@ define <16 x i32> @test149(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test149:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxud %xmm4, %xmm5, %xmm4
@@ -7212,13 +7212,13 @@ define <16 x i32> @test149(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test149:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxud %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test149:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7229,7 +7229,7 @@ entry:
define <16 x i32> @test150(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test150:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa {{.*#+}} xmm14 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm7, %xmm0
@@ -7277,7 +7277,7 @@ define <16 x i32> @test150(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test150:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pmaxud %xmm4, %xmm0
; SSE4-NEXT: pmaxud %xmm5, %xmm1
; SSE4-NEXT: pmaxud %xmm6, %xmm2
@@ -7285,7 +7285,7 @@ define <16 x i32> @test150(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test150:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpmaxud %xmm4, %xmm5, %xmm4
@@ -7299,13 +7299,13 @@ define <16 x i32> @test150(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test150:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmaxud %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test150:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7316,7 +7316,7 @@ entry:
define <16 x i32> @test151(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test151:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm11
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm7, %xmm9
@@ -7356,7 +7356,7 @@ define <16 x i32> @test151(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test151:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm4, %xmm0
; SSE4-NEXT: pminud %xmm5, %xmm1
; SSE4-NEXT: pminud %xmm6, %xmm2
@@ -7364,7 +7364,7 @@ define <16 x i32> @test151(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test151:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminud %xmm4, %xmm5, %xmm4
@@ -7378,13 +7378,13 @@ define <16 x i32> @test151(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test151:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test151:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminud %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7395,7 +7395,7 @@ entry:
define <16 x i32> @test152(<16 x i32> %a, <16 x i32> %b) {
; SSE2-LABEL: test152:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa {{.*#+}} xmm14 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm0
@@ -7443,7 +7443,7 @@ define <16 x i32> @test152(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test152:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: pminud %xmm4, %xmm0
; SSE4-NEXT: pminud %xmm5, %xmm1
; SSE4-NEXT: pminud %xmm6, %xmm2
@@ -7451,7 +7451,7 @@ define <16 x i32> @test152(<16 x i32> %a, <16 x i32> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test152:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpminud %xmm4, %xmm5, %xmm4
@@ -7465,13 +7465,13 @@ define <16 x i32> @test152(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test152:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpminud %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test152:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminud %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7484,7 +7484,7 @@ entry:
define <8 x i64> @test153(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test153:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: pxor %xmm11, %xmm8
@@ -7552,7 +7552,7 @@ define <8 x i64> @test153(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test153:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa %xmm7, %xmm9
; SSE4-NEXT: pcmpgtq %xmm3, %xmm9
@@ -7573,7 +7573,7 @@ define <8 x i64> @test153(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test153:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -7589,7 +7589,7 @@ define <8 x i64> @test153(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test153:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm4
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm5
; AVX2-NEXT: vblendvpd %ymm5, %ymm2, %ymm0, %ymm0
@@ -7597,7 +7597,7 @@ define <8 x i64> @test153(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test153:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7608,7 +7608,7 @@ entry:
define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test154:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
@@ -7689,7 +7689,7 @@ define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test154:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa %xmm3, %xmm9
; SSE4-NEXT: pcmpgtq %xmm7, %xmm9
@@ -7714,7 +7714,7 @@ define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test154:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -7735,7 +7735,7 @@ define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test154:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
; AVX2-NEXT: vpxor %ymm5, %ymm4, %ymm4
@@ -7746,7 +7746,7 @@ define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test154:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7757,7 +7757,7 @@ entry:
define <8 x i64> @test155(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test155:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: pxor %xmm11, %xmm8
@@ -7825,7 +7825,7 @@ define <8 x i64> @test155(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test155:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa %xmm3, %xmm9
; SSE4-NEXT: pcmpgtq %xmm7, %xmm9
@@ -7845,7 +7845,7 @@ define <8 x i64> @test155(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test155:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -7861,7 +7861,7 @@ define <8 x i64> @test155(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test155:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm5
; AVX2-NEXT: vblendvpd %ymm5, %ymm2, %ymm0, %ymm0
@@ -7869,7 +7869,7 @@ define <8 x i64> @test155(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test155:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -7880,7 +7880,7 @@ entry:
define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test156:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
@@ -7962,7 +7962,7 @@ define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test156:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa %xmm7, %xmm9
; SSE4-NEXT: pcmpgtq %xmm3, %xmm9
@@ -7988,7 +7988,7 @@ define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test156:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -8009,7 +8009,7 @@ define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test156:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm4
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
; AVX2-NEXT: vpxor %ymm5, %ymm4, %ymm4
@@ -8020,7 +8020,7 @@ define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test156:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -8031,7 +8031,7 @@ entry:
define <8 x i64> @test157(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test157:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: pxor %xmm11, %xmm8
@@ -8099,7 +8099,7 @@ define <8 x i64> @test157(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test157:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm3, %xmm10
@@ -8132,7 +8132,7 @@ define <8 x i64> @test157(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test157:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
@@ -8157,7 +8157,7 @@ define <8 x i64> @test157(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test157:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm6
@@ -8170,7 +8170,7 @@ define <8 x i64> @test157(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test157:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -8181,7 +8181,7 @@ entry:
define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test158:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
@@ -8262,7 +8262,7 @@ define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test158:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm7, %xmm10
@@ -8300,7 +8300,7 @@ define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test158:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
@@ -8330,7 +8330,7 @@ define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test158:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm6
@@ -8346,7 +8346,7 @@ define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test158:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -8357,7 +8357,7 @@ entry:
define <8 x i64> @test159(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test159:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: pxor %xmm11, %xmm8
@@ -8425,7 +8425,7 @@ define <8 x i64> @test159(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test159:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm7, %xmm10
@@ -8458,7 +8458,7 @@ define <8 x i64> @test159(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test159:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
@@ -8483,7 +8483,7 @@ define <8 x i64> @test159(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test159:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm6
@@ -8496,7 +8496,7 @@ define <8 x i64> @test159(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test159:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -8507,7 +8507,7 @@ entry:
define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test160:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
@@ -8589,7 +8589,7 @@ define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test160:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm8
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm3, %xmm10
@@ -8627,7 +8627,7 @@ define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test160:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
@@ -8657,7 +8657,7 @@ define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test160:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm6
@@ -8673,7 +8673,7 @@ define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test160:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpminuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
entry:
@@ -8684,7 +8684,7 @@ entry:
define <4 x i64> @test161(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test161:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -8718,7 +8718,7 @@ define <4 x i64> @test161(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test161:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa %xmm3, %xmm5
; SSE4-NEXT: pcmpgtq %xmm1, %xmm5
@@ -8732,7 +8732,7 @@ define <4 x i64> @test161(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test161:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -8742,13 +8742,13 @@ define <4 x i64> @test161(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test161:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test161:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -8759,7 +8759,7 @@ entry:
define <4 x i64> @test162(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test162:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -8799,7 +8799,7 @@ define <4 x i64> @test162(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test162:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa %xmm1, %xmm5
; SSE4-NEXT: pcmpgtq %xmm3, %xmm5
@@ -8815,7 +8815,7 @@ define <4 x i64> @test162(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test162:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -8828,7 +8828,7 @@ define <4 x i64> @test162(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test162:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
@@ -8836,7 +8836,7 @@ define <4 x i64> @test162(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test162:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -8847,7 +8847,7 @@ entry:
define <4 x i64> @test163(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test163:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -8881,7 +8881,7 @@ define <4 x i64> @test163(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test163:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa %xmm1, %xmm5
; SSE4-NEXT: pcmpgtq %xmm3, %xmm5
@@ -8894,7 +8894,7 @@ define <4 x i64> @test163(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test163:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -8904,13 +8904,13 @@ define <4 x i64> @test163(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test163:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test163:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -8921,7 +8921,7 @@ entry:
define <4 x i64> @test164(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test164:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -8961,7 +8961,7 @@ define <4 x i64> @test164(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test164:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa %xmm3, %xmm5
; SSE4-NEXT: pcmpgtq %xmm1, %xmm5
@@ -8978,7 +8978,7 @@ define <4 x i64> @test164(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test164:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -8991,7 +8991,7 @@ define <4 x i64> @test164(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test164:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
@@ -8999,7 +8999,7 @@ define <4 x i64> @test164(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test164:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9010,7 +9010,7 @@ entry:
define <4 x i64> @test165(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test165:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -9044,7 +9044,7 @@ define <4 x i64> @test165(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test165:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm1, %xmm6
@@ -9064,7 +9064,7 @@ define <4 x i64> @test165(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test165:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -9079,7 +9079,7 @@ define <4 x i64> @test165(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test165:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
@@ -9088,7 +9088,7 @@ define <4 x i64> @test165(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test165:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9099,7 +9099,7 @@ entry:
define <4 x i64> @test166(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test166:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -9139,7 +9139,7 @@ define <4 x i64> @test166(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test166:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm3, %xmm6
@@ -9162,7 +9162,7 @@ define <4 x i64> @test166(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test166:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -9180,7 +9180,7 @@ define <4 x i64> @test166(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test166:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
@@ -9191,7 +9191,7 @@ define <4 x i64> @test166(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test166:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9202,7 +9202,7 @@ entry:
define <4 x i64> @test167(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test167:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -9236,7 +9236,7 @@ define <4 x i64> @test167(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test167:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm3, %xmm6
@@ -9256,7 +9256,7 @@ define <4 x i64> @test167(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test167:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -9271,7 +9271,7 @@ define <4 x i64> @test167(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test167:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
@@ -9280,7 +9280,7 @@ define <4 x i64> @test167(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test167:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9291,7 +9291,7 @@ entry:
define <4 x i64> @test168(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test168:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -9331,7 +9331,7 @@ define <4 x i64> @test168(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test168:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm1, %xmm6
@@ -9354,7 +9354,7 @@ define <4 x i64> @test168(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test168:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -9372,7 +9372,7 @@ define <4 x i64> @test168(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test168:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
@@ -9383,7 +9383,7 @@ define <4 x i64> @test168(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test168:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9394,7 +9394,7 @@ entry:
define <4 x i64> @test169(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test169:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm4
@@ -9430,7 +9430,7 @@ define <4 x i64> @test169(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test169:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa %xmm3, %xmm5
; SSE4-NEXT: pcmpgtq %xmm1, %xmm5
@@ -9443,7 +9443,7 @@ define <4 x i64> @test169(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test169:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -9453,13 +9453,13 @@ define <4 x i64> @test169(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test169:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test169:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9470,7 +9470,7 @@ entry:
define <4 x i64> @test170(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test170:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -9510,7 +9510,7 @@ define <4 x i64> @test170(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test170:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa %xmm1, %xmm5
; SSE4-NEXT: pcmpgtq %xmm3, %xmm5
@@ -9525,7 +9525,7 @@ define <4 x i64> @test170(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test170:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -9538,7 +9538,7 @@ define <4 x i64> @test170(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test170:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
@@ -9546,7 +9546,7 @@ define <4 x i64> @test170(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test170:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9557,7 +9557,7 @@ entry:
define <4 x i64> @test171(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test171:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm4
@@ -9593,7 +9593,7 @@ define <4 x i64> @test171(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test171:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa %xmm1, %xmm5
; SSE4-NEXT: pcmpgtq %xmm3, %xmm5
@@ -9605,7 +9605,7 @@ define <4 x i64> @test171(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test171:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -9615,13 +9615,13 @@ define <4 x i64> @test171(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test171:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test171:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9632,7 +9632,7 @@ entry:
define <4 x i64> @test172(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test172:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -9672,7 +9672,7 @@ define <4 x i64> @test172(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test172:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa %xmm3, %xmm5
; SSE4-NEXT: pcmpgtq %xmm1, %xmm5
@@ -9688,7 +9688,7 @@ define <4 x i64> @test172(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test172:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -9701,7 +9701,7 @@ define <4 x i64> @test172(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test172:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
@@ -9709,7 +9709,7 @@ define <4 x i64> @test172(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test172:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9720,7 +9720,7 @@ entry:
define <4 x i64> @test173(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test173:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm4
@@ -9756,7 +9756,7 @@ define <4 x i64> @test173(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test173:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm1, %xmm6
@@ -9775,7 +9775,7 @@ define <4 x i64> @test173(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test173:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -9790,7 +9790,7 @@ define <4 x i64> @test173(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test173:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
@@ -9799,7 +9799,7 @@ define <4 x i64> @test173(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test173:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9810,7 +9810,7 @@ entry:
define <4 x i64> @test174(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test174:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -9850,7 +9850,7 @@ define <4 x i64> @test174(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test174:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm3, %xmm6
@@ -9872,7 +9872,7 @@ define <4 x i64> @test174(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test174:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -9890,7 +9890,7 @@ define <4 x i64> @test174(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test174:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
@@ -9901,7 +9901,7 @@ define <4 x i64> @test174(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test174:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -9912,7 +9912,7 @@ entry:
define <4 x i64> @test175(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test175:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm4
@@ -9948,7 +9948,7 @@ define <4 x i64> @test175(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test175:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm3, %xmm6
@@ -9967,7 +9967,7 @@ define <4 x i64> @test175(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test175:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -9982,7 +9982,7 @@ define <4 x i64> @test175(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test175:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
@@ -9991,7 +9991,7 @@ define <4 x i64> @test175(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test175:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -10002,7 +10002,7 @@ entry:
define <4 x i64> @test176(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: test176:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -10042,7 +10042,7 @@ define <4 x i64> @test176(<4 x i64> %a, <4 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test176:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm1, %xmm6
@@ -10064,7 +10064,7 @@ define <4 x i64> @test176(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test176:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10082,7 +10082,7 @@ define <4 x i64> @test176(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test176:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
@@ -10093,7 +10093,7 @@ define <4 x i64> @test176(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test176:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
entry:
@@ -10104,7 +10104,7 @@ entry:
define <2 x i64> @test177(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test177:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10123,7 +10123,7 @@ define <2 x i64> @test177(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test177:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm0
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
@@ -10132,19 +10132,19 @@ define <2 x i64> @test177(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test177:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test177:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test177:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10155,7 +10155,7 @@ entry:
define <2 x i64> @test178(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test178:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10177,7 +10177,7 @@ define <2 x i64> @test178(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test178:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: pcmpgtq %xmm1, %xmm0
; SSE4-NEXT: pcmpeqd %xmm3, %xmm3
@@ -10187,7 +10187,7 @@ define <2 x i64> @test178(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test178:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10195,7 +10195,7 @@ define <2 x i64> @test178(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test178:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10203,7 +10203,7 @@ define <2 x i64> @test178(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test178:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10214,7 +10214,7 @@ entry:
define <2 x i64> @test179(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test179:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10233,7 +10233,7 @@ define <2 x i64> @test179(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test179:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: pcmpgtq %xmm1, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -10241,19 +10241,19 @@ define <2 x i64> @test179(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test179:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test179:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test179:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10264,7 +10264,7 @@ entry:
define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test180:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10286,7 +10286,7 @@ define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test180:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
@@ -10297,7 +10297,7 @@ define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test180:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10305,7 +10305,7 @@ define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test180:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10313,7 +10313,7 @@ define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test180:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10324,7 +10324,7 @@ entry:
define <2 x i64> @test181(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test181:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10343,7 +10343,7 @@ define <2 x i64> @test181(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test181:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm2, %xmm3
@@ -10355,7 +10355,7 @@ define <2 x i64> @test181(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test181:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -10364,7 +10364,7 @@ define <2 x i64> @test181(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test181:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -10373,7 +10373,7 @@ define <2 x i64> @test181(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test181:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10384,7 +10384,7 @@ entry:
define <2 x i64> @test182(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test182:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10406,7 +10406,7 @@ define <2 x i64> @test182(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test182:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm1, %xmm0
@@ -10420,7 +10420,7 @@ define <2 x i64> @test182(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test182:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -10431,7 +10431,7 @@ define <2 x i64> @test182(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test182:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -10442,7 +10442,7 @@ define <2 x i64> @test182(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test182:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10453,7 +10453,7 @@ entry:
define <2 x i64> @test183(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test183:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10472,7 +10472,7 @@ define <2 x i64> @test183(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test183:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm1, %xmm3
@@ -10484,7 +10484,7 @@ define <2 x i64> @test183(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test183:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -10493,7 +10493,7 @@ define <2 x i64> @test183(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test183:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -10502,7 +10502,7 @@ define <2 x i64> @test183(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test183:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10513,7 +10513,7 @@ entry:
define <2 x i64> @test184(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test184:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10535,7 +10535,7 @@ define <2 x i64> @test184(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test184:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: pxor %xmm3, %xmm0
@@ -10548,7 +10548,7 @@ define <2 x i64> @test184(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test184:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -10559,7 +10559,7 @@ define <2 x i64> @test184(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test184:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -10570,7 +10570,7 @@ define <2 x i64> @test184(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test184:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10581,7 +10581,7 @@ entry:
define <2 x i64> @test185(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test185:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10601,7 +10601,7 @@ define <2 x i64> @test185(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test185:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm0
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
@@ -10610,19 +10610,19 @@ define <2 x i64> @test185(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test185:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test185:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test185:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10633,7 +10633,7 @@ entry:
define <2 x i64> @test186(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test186:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10655,7 +10655,7 @@ define <2 x i64> @test186(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test186:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: pcmpgtq %xmm1, %xmm0
; SSE4-NEXT: pcmpeqd %xmm3, %xmm3
@@ -10665,7 +10665,7 @@ define <2 x i64> @test186(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test186:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10673,7 +10673,7 @@ define <2 x i64> @test186(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test186:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10681,7 +10681,7 @@ define <2 x i64> @test186(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test186:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10692,7 +10692,7 @@ entry:
define <2 x i64> @test187(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test187:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10712,7 +10712,7 @@ define <2 x i64> @test187(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test187:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: pcmpgtq %xmm1, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
@@ -10720,19 +10720,19 @@ define <2 x i64> @test187(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test187:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test187:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test187:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10743,7 +10743,7 @@ entry:
define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test188:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10765,7 +10765,7 @@ define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test188:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
@@ -10776,7 +10776,7 @@ define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test188:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10784,7 +10784,7 @@ define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test188:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -10792,7 +10792,7 @@ define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test188:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminsq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10803,7 +10803,7 @@ entry:
define <2 x i64> @test189(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test189:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10823,7 +10823,7 @@ define <2 x i64> @test189(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test189:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm2, %xmm3
@@ -10835,7 +10835,7 @@ define <2 x i64> @test189(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test189:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -10844,7 +10844,7 @@ define <2 x i64> @test189(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test189:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -10853,7 +10853,7 @@ define <2 x i64> @test189(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test189:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10864,7 +10864,7 @@ entry:
define <2 x i64> @test190(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test190:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10886,7 +10886,7 @@ define <2 x i64> @test190(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test190:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm1, %xmm0
@@ -10900,7 +10900,7 @@ define <2 x i64> @test190(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test190:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -10911,7 +10911,7 @@ define <2 x i64> @test190(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test190:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -10922,7 +10922,7 @@ define <2 x i64> @test190(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test190:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10933,7 +10933,7 @@ entry:
define <2 x i64> @test191(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test191:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -10953,7 +10953,7 @@ define <2 x i64> @test191(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test191:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: movdqa %xmm1, %xmm3
@@ -10965,7 +10965,7 @@ define <2 x i64> @test191(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test191:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -10974,7 +10974,7 @@ define <2 x i64> @test191(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test191:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -10983,7 +10983,7 @@ define <2 x i64> @test191(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test191:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
@@ -10994,7 +10994,7 @@ entry:
define <2 x i64> @test192(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test192:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -11016,7 +11016,7 @@ define <2 x i64> @test192(<2 x i64> %a, <2 x i64> %b) {
; SSE2-NEXT: retq
;
; SSE4-LABEL: test192:
-; SSE4: # BB#0: # %entry
+; SSE4: # %bb.0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: pxor %xmm3, %xmm0
@@ -11029,7 +11029,7 @@ define <2 x i64> @test192(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: retq
;
; AVX1-LABEL: test192:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -11040,7 +11040,7 @@ define <2 x i64> @test192(<2 x i64> %a, <2 x i64> %b) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: test192:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -11051,7 +11051,7 @@ define <2 x i64> @test192(<2 x i64> %a, <2 x i64> %b) {
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test192:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpminuq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/vselect-packss.ll b/test/CodeGen/X86/vselect-packss.ll
index cab8521160c..2cd22fc4541 100644
--- a/test/CodeGen/X86/vselect-packss.ll
+++ b/test/CodeGen/X86/vselect-packss.ll
@@ -14,7 +14,7 @@
define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2, <16 x i8> %a3) {
; SSE2-LABEL: vselect_packss_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqw %xmm3, %xmm1
; SSE2-NEXT: pcmpeqw %xmm2, %xmm0
; SSE2-NEXT: packsswb %xmm1, %xmm0
@@ -24,7 +24,7 @@ define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8
; SSE2-NEXT: retq
;
; SSE42-LABEL: vselect_packss_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqw %xmm3, %xmm1
; SSE42-NEXT: pcmpeqw %xmm2, %xmm0
; SSE42-NEXT: packsswb %xmm1, %xmm0
@@ -33,7 +33,7 @@ define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8
; SSE42-NEXT: retq
;
; AVX1-LABEL: vselect_packss_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpeqw %xmm4, %xmm5, %xmm4
@@ -44,7 +44,7 @@ define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8
; AVX1-NEXT: retq
;
; AVX2-LABEL: vselect_packss_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -53,7 +53,7 @@ define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8
; AVX2-NEXT: retq
;
; AVX512NOBW-LABEL: vselect_packss_v16i16:
-; AVX512NOBW: # BB#0:
+; AVX512NOBW: # %bb.0:
; AVX512NOBW-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX512NOBW-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512NOBW-NEXT: vpmovdb %zmm0, %xmm0
@@ -62,7 +62,7 @@ define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8
; AVX512NOBW-NEXT: retq
;
; AVX512BWNOVL-LABEL: vselect_packss_v16i16:
-; AVX512BWNOVL: # BB#0:
+; AVX512BWNOVL: # %bb.0:
; AVX512BWNOVL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX512BWNOVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWNOVL-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
@@ -70,7 +70,7 @@ define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8
; AVX512BWNOVL-NEXT: retq
;
; AVX512BWVL-LABEL: vselect_packss_v16i16:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
; AVX512BWVL-NEXT: vpmovm2b %k0, %xmm0
; AVX512BWVL-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
@@ -87,7 +87,7 @@ define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8
define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8> %a2, <16 x i8> %a3) {
; SSE2-LABEL: vselect_packss_v16i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm7, %xmm3
; SSE2-NEXT: pcmpeqd %xmm6, %xmm2
; SSE2-NEXT: packssdw %xmm3, %xmm2
@@ -102,7 +102,7 @@ define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8
; SSE2-NEXT: retq
;
; SSE42-LABEL: vselect_packss_v16i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE42-NEXT: pcmpeqd %xmm7, %xmm3
; SSE42-NEXT: pcmpeqd %xmm6, %xmm2
@@ -116,7 +116,7 @@ define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8
; SSE42-NEXT: retq
;
; AVX1-LABEL: vselect_packss_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm7, %xmm6
@@ -133,7 +133,7 @@ define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8
; AVX1-NEXT: retq
;
; AVX2-LABEL: vselect_packss_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -147,7 +147,7 @@ define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8
; AVX2-NEXT: retq
;
; AVX512NOBW-LABEL: vselect_packss_v16i32:
-; AVX512NOBW: # BB#0:
+; AVX512NOBW: # %bb.0:
; AVX512NOBW-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; AVX512NOBW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512NOBW-NEXT: vpmovdb %zmm0, %xmm0
@@ -156,7 +156,7 @@ define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8
; AVX512NOBW-NEXT: retq
;
; AVX512BWNOVL-LABEL: vselect_packss_v16i32:
-; AVX512BWNOVL: # BB#0:
+; AVX512BWNOVL: # %bb.0:
; AVX512BWNOVL-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512BWNOVL-NEXT: vpmovm2b %k0, %zmm0
; AVX512BWNOVL-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
@@ -164,7 +164,7 @@ define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8
; AVX512BWNOVL-NEXT: retq
;
; AVX512BWVL-LABEL: vselect_packss_v16i32:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512BWVL-NEXT: vpmovm2b %k0, %xmm0
; AVX512BWVL-NEXT: vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
@@ -181,7 +181,7 @@ define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8
define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8> %a2, <16 x i8> %a3) {
; SSE2-LABEL: vselect_packss_v16i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[1,0,3,2]
; SSE2-NEXT: pand %xmm7, %xmm8
@@ -221,7 +221,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
; SSE2-NEXT: retq
;
; SSE42-LABEL: vselect_packss_v16i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq {{[0-9]+}}(%rsp), %xmm7
; SSE42-NEXT: pcmpeqq {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: packssdw %xmm7, %xmm6
@@ -244,7 +244,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
; SSE42-NEXT: retq
;
; AVX1-LABEL: vselect_packss_v16i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
; AVX1-NEXT: vpcmpeqq %xmm8, %xmm9, %xmm8
@@ -275,7 +275,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
; AVX1-NEXT: retq
;
; AVX2-LABEL: vselect_packss_v16i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqq %ymm7, %ymm3, %ymm3
; AVX2-NEXT: vpcmpeqq %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -295,7 +295,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
; AVX2-NEXT: retq
;
; AVX512NOBW-LABEL: vselect_packss_v16i64:
-; AVX512NOBW: # BB#0:
+; AVX512NOBW: # %bb.0:
; AVX512NOBW-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; AVX512NOBW-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; AVX512NOBW-NEXT: kunpckbw %k0, %k1, %k1
@@ -306,7 +306,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
; AVX512NOBW-NEXT: retq
;
; AVX512BWNOVL-LABEL: vselect_packss_v16i64:
-; AVX512BWNOVL: # BB#0:
+; AVX512BWNOVL: # %bb.0:
; AVX512BWNOVL-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; AVX512BWNOVL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; AVX512BWNOVL-NEXT: kunpckbw %k0, %k1, %k0
@@ -316,7 +316,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
; AVX512BWNOVL-NEXT: retq
;
; AVX512BWVL-LABEL: vselect_packss_v16i64:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; AVX512BWVL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; AVX512BWVL-NEXT: kunpckbw %k0, %k1, %k0
@@ -339,7 +339,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2, <16 x i8> %a3) {
; SSE2-LABEL: vselect_packss:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqw %xmm3, %xmm1
; SSE2-NEXT: pcmpeqw %xmm2, %xmm0
; SSE2-NEXT: packsswb %xmm1, %xmm0
@@ -349,7 +349,7 @@ define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2,
; SSE2-NEXT: retq
;
; SSE42-LABEL: vselect_packss:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqw %xmm3, %xmm1
; SSE42-NEXT: pcmpeqw %xmm2, %xmm0
; SSE42-NEXT: packsswb %xmm1, %xmm0
@@ -358,7 +358,7 @@ define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2,
; SSE42-NEXT: retq
;
; AVX1-LABEL: vselect_packss:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpeqw %xmm4, %xmm5, %xmm4
@@ -369,7 +369,7 @@ define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2,
; AVX1-NEXT: retq
;
; AVX2-LABEL: vselect_packss:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -378,7 +378,7 @@ define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2,
; AVX2-NEXT: retq
;
; AVX512NOBW-LABEL: vselect_packss:
-; AVX512NOBW: # BB#0:
+; AVX512NOBW: # %bb.0:
; AVX512NOBW-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX512NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512NOBW-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -387,7 +387,7 @@ define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2,
; AVX512NOBW-NEXT: retq
;
; AVX512BWNOVL-LABEL: vselect_packss:
-; AVX512BWNOVL: # BB#0:
+; AVX512BWNOVL: # %bb.0:
; AVX512BWNOVL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX512BWNOVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWNOVL-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -396,7 +396,7 @@ define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2,
; AVX512BWNOVL-NEXT: retq
;
; AVX512BWVL-LABEL: vselect_packss:
-; AVX512BWVL: # BB#0:
+; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
; AVX512BWVL-NEXT: vpmovm2w %k0, %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/CodeGen/X86/vselect-pcmp.ll b/test/CodeGen/X86/vselect-pcmp.ll
index d162e342e5c..c13aa717237 100644
--- a/test/CodeGen/X86/vselect-pcmp.ll
+++ b/test/CodeGen/X86/vselect-pcmp.ll
@@ -13,12 +13,12 @@
define <16 x i8> @signbit_sel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) {
; AVX12-LABEL: signbit_sel_v16i8:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX12-NEXT: retq
;
; AVX512-LABEL: signbit_sel_v16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
@@ -32,7 +32,7 @@ define <16 x i8> @signbit_sel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask)
define <8 x i16> @signbit_sel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) {
; AVX-LABEL: signbit_sel_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
@@ -44,12 +44,12 @@ define <8 x i16> @signbit_sel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask)
define <4 x i32> @signbit_sel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
; AVX12F-LABEL: signbit_sel_v4i32:
-; AVX12F: # BB#0:
+; AVX12F: # %bb.0:
; AVX12F-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX12F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
; AVX512VL-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
@@ -61,12 +61,12 @@ define <4 x i32> @signbit_sel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask)
define <2 x i64> @signbit_sel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
; AVX12F-LABEL: signbit_sel_v2i64:
-; AVX12F: # BB#0:
+; AVX12F: # %bb.0:
; AVX12F-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX12F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpgtq %xmm2, %xmm3, %k1
; AVX512VL-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
@@ -78,12 +78,12 @@ define <2 x i64> @signbit_sel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask)
define <4 x float> @signbit_sel_v4f32(<4 x float> %x, <4 x float> %y, <4 x i32> %mask) {
; AVX12F-LABEL: signbit_sel_v4f32:
-; AVX12F: # BB#0:
+; AVX12F: # %bb.0:
; AVX12F-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX12F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
; AVX512VL-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
@@ -95,12 +95,12 @@ define <4 x float> @signbit_sel_v4f32(<4 x float> %x, <4 x float> %y, <4 x i32>
define <2 x double> @signbit_sel_v2f64(<2 x double> %x, <2 x double> %y, <2 x i64> %mask) {
; AVX12F-LABEL: signbit_sel_v2f64:
-; AVX12F: # BB#0:
+; AVX12F: # %bb.0:
; AVX12F-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX12F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpgtq %xmm2, %xmm3, %k1
; AVX512VL-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
@@ -114,7 +114,7 @@ define <2 x double> @signbit_sel_v2f64(<2 x double> %x, <2 x double> %y, <2 x i6
define <32 x i8> @signbit_sel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %mask) {
; AVX1-LABEL: signbit_sel_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtb %xmm3, %xmm4, %xmm3
@@ -126,12 +126,12 @@ define <32 x i8> @signbit_sel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %mask)
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_sel_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_sel_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -145,7 +145,7 @@ define <32 x i8> @signbit_sel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %mask)
define <16 x i16> @signbit_sel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %mask) {
; AVX1-LABEL: signbit_sel_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm3, %xmm4, %xmm3
@@ -157,14 +157,14 @@ define <16 x i16> @signbit_sel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_sel_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_sel_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -176,12 +176,12 @@ define <16 x i16> @signbit_sel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
define <8 x i32> @signbit_sel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask) {
; AVX12-LABEL: signbit_sel_v8i32:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
; AVX12-NEXT: retq
;
; AVX512F-LABEL: signbit_sel_v8i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
@@ -192,7 +192,7 @@ define <8 x i32> @signbit_sel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpgtd %ymm2, %ymm3, %k1
; AVX512VL-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
@@ -204,12 +204,12 @@ define <8 x i32> @signbit_sel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask)
define <4 x i64> @signbit_sel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask) {
; AVX12F-LABEL: signbit_sel_v4i64:
-; AVX12F: # BB#0:
+; AVX12F: # %bb.0:
; AVX12F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX12F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpgtq %ymm2, %ymm3, %k1
; AVX512VL-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
@@ -221,12 +221,12 @@ define <4 x i64> @signbit_sel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask)
define <4 x double> @signbit_sel_v4f64(<4 x double> %x, <4 x double> %y, <4 x i64> %mask) {
; AVX12F-LABEL: signbit_sel_v4f64:
-; AVX12F: # BB#0:
+; AVX12F: # %bb.0:
; AVX12F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX12F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpgtq %ymm2, %ymm3, %k1
; AVX512VL-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
@@ -240,7 +240,7 @@ define <4 x double> @signbit_sel_v4f64(<4 x double> %x, <4 x double> %y, <4 x i6
define <4 x double> @signbit_sel_v4f64_small_mask(<4 x double> %x, <4 x double> %y, <4 x i32> %mask) {
; AVX1-LABEL: signbit_sel_v4f64_small_mask:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxdq %xmm2, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
@@ -249,19 +249,19 @@ define <4 x double> @signbit_sel_v4f64_small_mask(<4 x double> %x, <4 x double>
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_sel_v4f64_small_mask:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: signbit_sel_v4f64_small_mask:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxdq %xmm2, %ymm2
; AVX512F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4f64_small_mask:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
; AVX512VL-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
@@ -275,13 +275,13 @@ define <4 x double> @signbit_sel_v4f64_small_mask(<4 x double> %x, <4 x double>
define <8 x double> @signbit_sel_v8f64(<8 x double> %x, <8 x double> %y, <8 x i64> %mask) {
; AVX12-LABEL: signbit_sel_v8f64:
-; AVX12: # BB#0:
+; AVX12: # %bb.0:
; AVX12-NEXT: vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
; AVX12-NEXT: vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
; AVX12-NEXT: retq
;
; AVX512-LABEL: signbit_sel_v8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512-NEXT: vpcmpgtq %zmm2, %zmm3, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
@@ -297,14 +297,14 @@ define <8 x double> @signbit_sel_v8f64(<8 x double> %x, <8 x double> %y, <8 x i6
define <4 x float> @signbit_sel_v4f32_fcmp(<4 x float> %x, <4 x float> %y, <4 x float> %mask) #0 {
; AVX12F-LABEL: signbit_sel_v4f32_fcmp:
-; AVX12F: # BB#0:
+; AVX12F: # %bb.0:
; AVX12F-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX12F-NEXT: vcmpltps %xmm2, %xmm0, %xmm2
; AVX12F-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX12F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4f32_fcmp:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vcmpltps %xmm2, %xmm0, %k1
; AVX512VL-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
diff --git a/test/CodeGen/X86/vselect-zero.ll b/test/CodeGen/X86/vselect-zero.ll
index 400933a9aff..8eb137a61ff 100644
--- a/test/CodeGen/X86/vselect-zero.ll
+++ b/test/CodeGen/X86/vselect-zero.ll
@@ -8,14 +8,14 @@
define <4 x i32> @test1(<4 x i1> %cond, <4 x i32> %x) {
; SSE-LABEL: test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
@@ -26,13 +26,13 @@ define <4 x i32> @test1(<4 x i1> %cond, <4 x i32> %x) {
define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %x) {
; SSE-LABEL: test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpneqps %xmm1, %xmm0
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpneqps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -43,13 +43,13 @@ define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %x) {
define float @fsel(float %a, float %b, float %x) {
; SSE-LABEL: fsel:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpeqss %xmm1, %xmm0
; SSE-NEXT: andnps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsel:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vandnps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/vselect.ll b/test/CodeGen/X86/vselect.ll
index e7bb0c02fc1..985f6a861b9 100644
--- a/test/CodeGen/X86/vselect.ll
+++ b/test/CodeGen/X86/vselect.ll
@@ -9,18 +9,18 @@
define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: test1:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test1:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: test1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x float> %a, <4 x float> %b
@@ -29,18 +29,18 @@ define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: test2:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test2:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: test2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
@@ -49,17 +49,17 @@ define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: test3:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test3:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: test3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
@@ -68,12 +68,12 @@ define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test4(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
@@ -82,11 +82,11 @@ define <4 x float> @test4(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: test5:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
@@ -94,11 +94,11 @@ define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: test6:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> %a, <8 x i16> %a
ret <8 x i16> %1
@@ -106,23 +106,23 @@ define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test7(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test7:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test7:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test7:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test7:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
@@ -131,22 +131,22 @@ define <8 x i16> @test7(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test8(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
@@ -155,12 +155,12 @@ define <8 x i16> @test8(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test9:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test9:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
@@ -169,11 +169,11 @@ define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test10(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test10:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: test10:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
@@ -181,7 +181,7 @@ define <8 x i16> @test10(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test11:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andnps %xmm1, %xmm2
@@ -189,12 +189,12 @@ define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: test11:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: test11:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; AVX-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 true, i1 true, i1 false, i1 undef, i1 true, i1 true, i1 undef>, <8 x i16> %a, <8 x i16> %b
@@ -203,12 +203,12 @@ define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test12:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test12:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 undef, i1 false, i1 false, i1 false, i1 false, i1 undef>, <8 x i16> %a, <8 x i16> %b
@@ -217,12 +217,12 @@ define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test13(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test13:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test13:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = select <8 x i1> <i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>, <8 x i16> %a, <8 x i16> %b
@@ -232,11 +232,11 @@ define <8 x i16> @test13(<8 x i16> %a, <8 x i16> %b) {
; Fold (vselect (build_vector AllOnes), N1, N2) -> N1
define <4 x float> @test14(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test14:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: test14:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 undef, i1 true, i1 undef>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
@@ -244,11 +244,11 @@ define <4 x float> @test14(<4 x float> %a, <4 x float> %b) {
define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test15:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: test15:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 true, i1 true, i1 undef>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
@@ -257,12 +257,12 @@ define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
; Fold (vselect (build_vector AllZeros), N1, N2) -> N2
define <4 x float> @test16(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: test16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 undef, i1 false, i1 undef>, <4 x float> %a, <4 x float> %b
@@ -271,12 +271,12 @@ define <4 x float> @test16(<4 x float> %a, <4 x float> %b) {
define <8 x i16> @test17(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: test17:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test17:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm1, %xmm0
; AVX-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 undef, i1 undef, i1 false, i1 false, i1 undef>, <8 x i16> %a, <8 x i16> %b
@@ -285,17 +285,17 @@ define <8 x i16> @test17(<8 x i16> %a, <8 x i16> %b) {
define <4 x float> @test18(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: test18:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test18:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: test18:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
@@ -304,22 +304,22 @@ define <4 x float> @test18(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test19:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test19:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test19:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test19:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> %a, <4 x i32> %b
@@ -328,17 +328,17 @@ define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
define <2 x double> @test20(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: test20:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test20:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: test20:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: retq
%1 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %b
@@ -347,22 +347,22 @@ define <2 x double> @test20(<2 x double> %a, <2 x double> %b) {
define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test21:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test21:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test21:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test21:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
%1 = select <2 x i1> <i1 false, i1 true>, <2 x i64> %a, <2 x i64> %b
@@ -371,18 +371,18 @@ define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
define <4 x float> @test22(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: test22:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test22:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: test22:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
@@ -391,23 +391,23 @@ define <4 x float> @test22(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: test23:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test23:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test23:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test23:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %a, <4 x i32> %b
@@ -416,18 +416,18 @@ define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
define <2 x double> @test24(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: test24:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test24:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: test24:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
%1 = select <2 x i1> <i1 true, i1 false>, <2 x double> %a, <2 x double> %b
@@ -436,23 +436,23 @@ define <2 x double> @test24(<2 x double> %a, <2 x double> %b) {
define <2 x i64> @test25(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: test25:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test25:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: test25:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test25:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
%1 = select <2 x i1> <i1 true, i1 false>, <2 x i64> %a, <2 x i64> %b
@@ -461,14 +461,14 @@ define <2 x i64> @test25(<2 x i64> %a, <2 x i64> %b) {
define <4 x float> @select_of_shuffles_0(<2 x float> %a0, <2 x float> %b0, <2 x float> %a1, <2 x float> %b1) {
; SSE-LABEL: select_of_shuffles_0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE-NEXT: subps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_of_shuffles_0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX-NEXT: vsubps %xmm1, %xmm0, %xmm0
@@ -486,7 +486,7 @@ define <4 x float> @select_of_shuffles_0(<2 x float> %a0, <2 x float> %b0, <2 x
; PR20677
define <16 x double> @select_illegal(<16 x double> %a, <16 x double> %b) {
; SSE-LABEL: select_illegal:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm4
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm5
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm6
@@ -503,7 +503,7 @@ define <16 x double> @select_illegal(<16 x double> %a, <16 x double> %b) {
; SSE-NEXT: retq
;
; AVX-LABEL: select_illegal:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps %ymm6, %ymm2
; AVX-NEXT: vmovaps %ymm7, %ymm3
; AVX-NEXT: retq
diff --git a/test/CodeGen/X86/vshift-1.ll b/test/CodeGen/X86/vshift-1.ll
index a31adc33790..a2e1e7a641c 100644
--- a/test/CodeGen/X86/vshift-1.ll
+++ b/test/CodeGen/X86/vshift-1.ll
@@ -7,14 +7,14 @@
define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
; X32-LABEL: shift1a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: psllq $32, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift1a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllq $32, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -26,7 +26,7 @@ entry:
define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
; X32-LABEL: shift1b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
@@ -35,7 +35,7 @@ define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift1b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsi, %xmm1
; X64-NEXT: psllq %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -51,14 +51,14 @@ entry:
define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
; X32-LABEL: shift2a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pslld $5, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift2a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pslld $5, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -70,7 +70,7 @@ entry:
define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-LABEL: shift2b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: pslld %xmm1, %xmm0
@@ -78,7 +78,7 @@ define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %esi, %xmm1
; X64-NEXT: pslld %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -95,14 +95,14 @@ entry:
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
; X32-LABEL: shift3a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: psllw $5, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift3a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllw $5, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -115,7 +115,7 @@ entry:
; Make sure the shift amount is properly zero extended.
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-LABEL: shift3b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movd %ecx, %xmm1
@@ -124,7 +124,7 @@ define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift3b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movzwl %si, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psllw %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vshift-2.ll b/test/CodeGen/X86/vshift-2.ll
index a381637b40a..6b01a8acdf4 100644
--- a/test/CodeGen/X86/vshift-2.ll
+++ b/test/CodeGen/X86/vshift-2.ll
@@ -7,14 +7,14 @@
define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
; X32-LABEL: shift1a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: psrlq $32, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift1a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrlq $32, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -26,7 +26,7 @@ entry:
define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
; X32-LABEL: shift1b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
@@ -35,7 +35,7 @@ define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift1b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsi, %xmm1
; X64-NEXT: psrlq %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -50,14 +50,14 @@ entry:
define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
; X32-LABEL: shift2a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: psrld $17, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift2a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrld $17, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -69,7 +69,7 @@ entry:
define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-LABEL: shift2b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psrld %xmm1, %xmm0
@@ -77,7 +77,7 @@ define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %esi, %xmm1
; X64-NEXT: psrld %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -95,14 +95,14 @@ entry:
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
; X32-LABEL: shift3a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: psrlw $5, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift3a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrlw $5, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -115,7 +115,7 @@ entry:
; properly zero extend the shift amount
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-LABEL: shift3b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movd %ecx, %xmm1
@@ -124,7 +124,7 @@ define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift3b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movzwl %si, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psrlw %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vshift-3.ll b/test/CodeGen/X86/vshift-3.ll
index c59dacec6e3..57261ab8a55 100644
--- a/test/CodeGen/X86/vshift-3.ll
+++ b/test/CodeGen/X86/vshift-3.ll
@@ -9,7 +9,7 @@
define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
; X32-LABEL: shift1a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; X32-NEXT: psrad $31, %xmm0
@@ -19,7 +19,7 @@ define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift1a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; X64-NEXT: psrad $31, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -34,14 +34,14 @@ entry:
define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
; X32-LABEL: shift2a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: psrad $5, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift2a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrad $5, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -53,7 +53,7 @@ entry:
define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-LABEL: shift2b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psrad %xmm1, %xmm0
@@ -61,7 +61,7 @@ define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %esi, %xmm1
; X64-NEXT: psrad %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -78,14 +78,14 @@ entry:
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
; X32-LABEL: shift3a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: psraw $5, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift3a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psraw $5, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -97,7 +97,7 @@ entry:
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-LABEL: shift3b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movd %ecx, %xmm1
@@ -106,7 +106,7 @@ define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift3b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movzwl %si, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psraw %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vshift-4.ll b/test/CodeGen/X86/vshift-4.ll
index a47f5641972..a49d6f38449 100644
--- a/test/CodeGen/X86/vshift-4.ll
+++ b/test/CodeGen/X86/vshift-4.ll
@@ -7,14 +7,14 @@
define void @shift1a(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
; X32-LABEL: shift1a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: psllq %xmm1, %xmm0
; X32-NEXT: movdqa %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: shift1a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllq %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
@@ -28,7 +28,7 @@ entry:
; shift1b can't use a packed shift but can shift lanes separately and shuffle back together
define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
; X32-LABEL: shift1b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movdqa %xmm0, %xmm2
; X32-NEXT: psllq %xmm1, %xmm2
@@ -39,7 +39,7 @@ define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift1b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllq %xmm1, %xmm2
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -56,7 +56,7 @@ entry:
define void @shift2a(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-LABEL: shift2a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT: xorps %xmm2, %xmm2
@@ -66,7 +66,7 @@ define void @shift2a(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift2a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT: xorps %xmm2, %xmm2
; X64-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -82,7 +82,7 @@ entry:
define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-LABEL: shift2b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT: xorps %xmm2, %xmm2
@@ -92,7 +92,7 @@ define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift2b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT: xorps %xmm2, %xmm2
; X64-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -108,7 +108,7 @@ entry:
define void @shift2c(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-LABEL: shift2c:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-NEXT: xorps %xmm2, %xmm2
@@ -118,7 +118,7 @@ define void @shift2c(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift2c:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-NEXT: xorps %xmm2, %xmm2
; X64-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -134,7 +134,7 @@ entry:
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst, <8 x i16> %amt) nounwind {
; X32-LABEL: shift3a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pextrw $6, %xmm1, %ecx
; X32-NEXT: movd %ecx, %xmm1
@@ -143,7 +143,7 @@ define void @shift3a(<8 x i16> %val, <8 x i16>* %dst, <8 x i16> %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift3a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pextrw $6, %xmm1, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psllw %xmm1, %xmm0
@@ -158,7 +158,7 @@ entry:
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-LABEL: shift3b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movd %ecx, %xmm1
@@ -167,7 +167,7 @@ define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift3b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movzwl %si, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psllw %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vshift-5.ll b/test/CodeGen/X86/vshift-5.ll
index 38b391b6439..0fe0f8a5e22 100644
--- a/test/CodeGen/X86/vshift-5.ll
+++ b/test/CodeGen/X86/vshift-5.ll
@@ -6,7 +6,7 @@
define void @shift5a(<4 x i32> %val, <4 x i32>* %dst, i32* %pamt) nounwind {
; X32-LABEL: shift5a:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -15,7 +15,7 @@ define void @shift5a(<4 x i32> %val, <4 x i32>* %dst, i32* %pamt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift5a:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: pslld %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -32,7 +32,7 @@ entry:
define void @shift5b(<4 x i32> %val, <4 x i32>* %dst, i32* %pamt) nounwind {
; X32-LABEL: shift5b:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -41,7 +41,7 @@ define void @shift5b(<4 x i32> %val, <4 x i32>* %dst, i32* %pamt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift5b:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: psrad %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -58,7 +58,7 @@ entry:
define void @shift5c(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-LABEL: shift5c:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: pslld %xmm1, %xmm0
@@ -66,7 +66,7 @@ define void @shift5c(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift5c:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %esi, %xmm1
; X64-NEXT: pslld %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
@@ -82,7 +82,7 @@ entry:
define void @shift5d(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-LABEL: shift5d:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psrad %xmm1, %xmm0
@@ -90,7 +90,7 @@ define void @shift5d(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shift5d:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %esi, %xmm1
; X64-NEXT: psrad %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
diff --git a/test/CodeGen/X86/vshift-6.ll b/test/CodeGen/X86/vshift-6.ll
index 36d428cb9cf..5cfa38ab833 100644
--- a/test/CodeGen/X86/vshift-6.ll
+++ b/test/CodeGen/X86/vshift-6.ll
@@ -26,7 +26,7 @@
define <16 x i8> @do_not_crash(i8*, i32*, i64*, i32, i64, i8) {
; X32-LABEL: do_not_crash:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movb %al, (%ecx)
@@ -63,7 +63,7 @@ define <16 x i8> @do_not_crash(i8*, i32*, i64*, i32, i64, i8) {
; X32-NEXT: retl
;
; X64-LABEL: do_not_crash:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movb %r9b, (%rdi)
; X64-NEXT: movd %r9d, %xmm0
; X64-NEXT: psllq $56, %xmm0
diff --git a/test/CodeGen/X86/vsplit-and.ll b/test/CodeGen/X86/vsplit-and.ll
index e7ff6639110..26bbcdbe5d9 100644
--- a/test/CodeGen/X86/vsplit-and.ll
+++ b/test/CodeGen/X86/vsplit-and.ll
@@ -3,7 +3,7 @@
define void @t0(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind readonly {
; CHECK-LABEL: t0:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: pcmpeqq %xmm2, %xmm0
; CHECK-NEXT: pcmpeqq %xmm2, %xmm1
@@ -22,7 +22,7 @@ define void @t0(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind read
define void @t2(<3 x i64>* %dst, <3 x i64> %src1, <3 x i64> %src2) nounwind readonly {
; CHECK-LABEL: t2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %r9, %xmm1
; CHECK-NEXT: movq %r8, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
diff --git a/test/CodeGen/X86/vzero-excess.ll b/test/CodeGen/X86/vzero-excess.ll
index 9ddafec6518..62525ec580f 100644
--- a/test/CodeGen/X86/vzero-excess.ll
+++ b/test/CodeGen/X86/vzero-excess.ll
@@ -6,7 +6,7 @@
define <4 x float> @zeroupper_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
; CHECK-LABEL: zeroupper_v4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $48, %rsp
; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
@@ -33,7 +33,7 @@ define <4 x float> @zeroupper_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
define <8 x float> @zeroupper_v8f32(<8 x float> %x) nounwind {
; CHECK-LABEL: zeroupper_v8f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK-NEXT: vzeroupper
@@ -48,7 +48,7 @@ define <8 x float> @zeroupper_v8f32(<8 x float> %x) nounwind {
define <4 x float> @zeroall_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
; CHECK-LABEL: zeroall_v4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $48, %rsp
; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
@@ -75,7 +75,7 @@ define <4 x float> @zeroall_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
define <8 x float> @zeroall_v8f32(<8 x float> %x) nounwind {
; CHECK-LABEL: zeroall_v8f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK-NEXT: vzeroall
diff --git a/test/CodeGen/X86/wide-fma-contraction.ll b/test/CodeGen/X86/wide-fma-contraction.ll
index b9976155fcf..3ee09dd8f80 100644
--- a/test/CodeGen/X86/wide-fma-contraction.ll
+++ b/test/CodeGen/X86/wide-fma-contraction.ll
@@ -6,7 +6,7 @@
; CHECK-NOFMA-LABEL: fmafunc
define <16 x float> @fmafunc(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-LABEL: fmafunc:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %ebp, -8
@@ -21,7 +21,7 @@ define <16 x float> @fmafunc(<16 x float> %a, <16 x float> %b, <16 x float> %c)
; CHECK-NEXT: retl
;
; CHECK-NOFMA-LABEL: fmafunc:
-; CHECK-NOFMA: ## BB#0:
+; CHECK-NOFMA: ## %bb.0:
; CHECK-NOFMA-NEXT: pushl %ebp
; CHECK-NOFMA-NEXT: .cfi_def_cfa_offset 8
; CHECK-NOFMA-NEXT: .cfi_offset %ebp, -8
diff --git a/test/CodeGen/X86/wide-integer-cmp.ll b/test/CodeGen/X86/wide-integer-cmp.ll
index 97460b36a74..e7956c65345 100644
--- a/test/CodeGen/X86/wide-integer-cmp.ll
+++ b/test/CodeGen/X86/wide-integer-cmp.ll
@@ -3,14 +3,14 @@
define i32 @branch_eq(i64 %a, i64 %b) {
; CHECK-LABEL: branch_eq:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: xorl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: xorl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: orl %ecx, %eax
; CHECK-NEXT: jne .LBB0_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB0_2: # %bb2
@@ -27,13 +27,13 @@ bb2:
define i32 @branch_slt(i64 %a, i64 %b) {
; CHECK-LABEL: branch_slt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: jge .LBB1_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB1_2: # %bb2
@@ -50,13 +50,13 @@ bb2:
define i32 @branch_ule(i64 %a, i64 %b) {
; CHECK-LABEL: branch_ule:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: jb .LBB2_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB2_2: # %bb2
@@ -73,7 +73,7 @@ bb2:
define i32 @set_gt(i64 %a, i64 %b) {
; CHECK-LABEL: set_gt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
@@ -89,7 +89,7 @@ entry:
define i32 @test_wide(i128 %a, i128 %b) {
; CHECK-LABEL: test_wide:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %esi, -8
@@ -102,7 +102,7 @@ define i32 @test_wide(i128 %a, i128 %b) {
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: jge .LBB4_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
@@ -123,11 +123,11 @@ bb2:
; sure the code can handle that.
define i32 @test_carry_false(i64 %a, i64 %b) {
; CHECK-LABEL: test_carry_false:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: jge .LBB5_2
-; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB5_2: # %bb2
diff --git a/test/CodeGen/X86/widen_arith-1.ll b/test/CodeGen/X86/widen_arith-1.ll
index a1e9b53638c..d6607e8b98f 100644
--- a/test/CodeGen/X86/widen_arith-1.ll
+++ b/test/CodeGen/X86/widen_arith-1.ll
@@ -3,7 +3,7 @@
define void @update(<3 x i8>* %dst, <3 x i8>* %src, i32 %n) nounwind {
; CHECK-LABEL: update:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: movl $0, (%esp)
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
@@ -26,7 +26,7 @@ define void @update(<3 x i8>* %dst, <3 x i8>* %src, i32 %n) nounwind {
; CHECK-NEXT: movl (%esp), %eax
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: jl .LBB0_2
-; CHECK-NEXT: # BB#3: # %afterfor
+; CHECK-NEXT: # %bb.3: # %afterfor
; CHECK-NEXT: addl $12, %esp
; CHECK-NEXT: retl
entry:
diff --git a/test/CodeGen/X86/widen_arith-2.ll b/test/CodeGen/X86/widen_arith-2.ll
index ec1ecb41f18..aa2573f9b2c 100644
--- a/test/CodeGen/X86/widen_arith-2.ll
+++ b/test/CodeGen/X86/widen_arith-2.ll
@@ -5,7 +5,7 @@
define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; CHECK-LABEL: update:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: movl $0, (%esp)
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
@@ -32,7 +32,7 @@ define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; CHECK-NEXT: movl (%esp), %eax
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: jl .LBB0_2
-; CHECK-NEXT: # BB#3: # %afterfor
+; CHECK-NEXT: # %bb.3: # %afterfor
; CHECK-NEXT: addl $12, %esp
; CHECK-NEXT: retl
entry:
diff --git a/test/CodeGen/X86/widen_arith-3.ll b/test/CodeGen/X86/widen_arith-3.ll
index d53e8285922..aa656de2342 100644
--- a/test/CodeGen/X86/widen_arith-3.ll
+++ b/test/CodeGen/X86/widen_arith-3.ll
@@ -8,7 +8,7 @@
define void @update(<3 x i16>* %dst, <3 x i16>* %src, i32 %n) nounwind {
; CHECK-LABEL: update:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: andl $-8, %esp
@@ -39,7 +39,7 @@ define void @update(<3 x i16>* %dst, <3 x i16>* %src, i32 %n) nounwind {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: cmpl 16(%ebp), %eax
; CHECK-NEXT: jl .LBB0_2
-; CHECK-NEXT: # BB#3: # %afterfor
+; CHECK-NEXT: # %bb.3: # %afterfor
; CHECK-NEXT: movl %ebp, %esp
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/widen_arith-4.ll b/test/CodeGen/X86/widen_arith-4.ll
index 987c32009e3..e3e2b1d1fb5 100644
--- a/test/CodeGen/X86/widen_arith-4.ll
+++ b/test/CodeGen/X86/widen_arith-4.ll
@@ -5,7 +5,7 @@
define void @update(<5 x i16>* %dst, <5 x i16>* %src, i32 %n) nounwind {
; CHECK-LABEL: update:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
@@ -34,7 +34,7 @@ define void @update(<5 x i16>* %dst, <5 x i16>* %src, i32 %n) nounwind {
; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT: cmpl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT: jl .LBB0_2
-; CHECK-NEXT: # BB#3: # %afterfor
+; CHECK-NEXT: # %bb.3: # %afterfor
; CHECK-NEXT: retq
entry:
%dst.addr = alloca <5 x i16>*
diff --git a/test/CodeGen/X86/widen_arith-5.ll b/test/CodeGen/X86/widen_arith-5.ll
index 005c2a41be4..b76895503ba 100644
--- a/test/CodeGen/X86/widen_arith-5.ll
+++ b/test/CodeGen/X86/widen_arith-5.ll
@@ -5,7 +5,7 @@
define void @update(<3 x i32>* %dst, <3 x i32>* %src, i32 %n) nounwind {
; CHECK-LABEL: update:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
@@ -33,7 +33,7 @@ define void @update(<3 x i32>* %dst, <3 x i32>* %src, i32 %n) nounwind {
; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT: cmpl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT: jl .LBB0_2
-; CHECK-NEXT: # BB#3: # %afterfor
+; CHECK-NEXT: # %bb.3: # %afterfor
; CHECK-NEXT: retq
entry:
%dst.addr = alloca <3 x i32>*
diff --git a/test/CodeGen/X86/widen_arith-6.ll b/test/CodeGen/X86/widen_arith-6.ll
index 0421915154e..73b8f4ea276 100644
--- a/test/CodeGen/X86/widen_arith-6.ll
+++ b/test/CodeGen/X86/widen_arith-6.ll
@@ -5,7 +5,7 @@
define void @update(<3 x float>* %dst, <3 x float>* %src, i32 %n) nounwind {
; CHECK-LABEL: update:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: andl $-16, %esp
@@ -35,7 +35,7 @@ define void @update(<3 x float>* %dst, <3 x float>* %src, i32 %n) nounwind {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: cmpl 16(%ebp), %eax
; CHECK-NEXT: jl .LBB0_2
-; CHECK-NEXT: # BB#3: # %afterfor
+; CHECK-NEXT: # %bb.3: # %afterfor
; CHECK-NEXT: movl %ebp, %esp
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/widen_bitops-0.ll b/test/CodeGen/X86/widen_bitops-0.ll
index ab27d497ebb..f8843fd8ce2 100644
--- a/test/CodeGen/X86/widen_bitops-0.ll
+++ b/test/CodeGen/X86/widen_bitops-0.ll
@@ -8,13 +8,13 @@
define i24 @and_i24_as_v3i8(i24 %a, i24 %b) nounwind {
; X32-SSE-LABEL: and_i24_as_v3i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: andl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_i24_as_v3i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -27,13 +27,13 @@ define i24 @and_i24_as_v3i8(i24 %a, i24 %b) nounwind {
define i24 @xor_i24_as_v3i8(i24 %a, i24 %b) nounwind {
; X32-SSE-LABEL: xor_i24_as_v3i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_i24_as_v3i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -46,13 +46,13 @@ define i24 @xor_i24_as_v3i8(i24 %a, i24 %b) nounwind {
define i24 @or_i24_as_v3i8(i24 %a, i24 %b) nounwind {
; X32-SSE-LABEL: or_i24_as_v3i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: orl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_i24_as_v3i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: orl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -69,13 +69,13 @@ define i24 @or_i24_as_v3i8(i24 %a, i24 %b) nounwind {
define i24 @and_i24_as_v8i3(i24 %a, i24 %b) nounwind {
; X32-SSE-LABEL: and_i24_as_v8i3:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: andl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_i24_as_v8i3:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -88,13 +88,13 @@ define i24 @and_i24_as_v8i3(i24 %a, i24 %b) nounwind {
define i24 @xor_i24_as_v8i3(i24 %a, i24 %b) nounwind {
; X32-SSE-LABEL: xor_i24_as_v8i3:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_i24_as_v8i3:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -107,13 +107,13 @@ define i24 @xor_i24_as_v8i3(i24 %a, i24 %b) nounwind {
define i24 @or_i24_as_v8i3(i24 %a, i24 %b) nounwind {
; X32-SSE-LABEL: or_i24_as_v8i3:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: orl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_i24_as_v8i3:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: orl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -130,7 +130,7 @@ define i24 @or_i24_as_v8i3(i24 %a, i24 %b) nounwind {
define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-LABEL: and_v3i8_as_i24:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
@@ -147,7 +147,7 @@ define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_v3i8_as_i24:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movd %ecx, %xmm0
; X64-SSE-NEXT: pinsrd $1, %r8d, %xmm0
; X64-SSE-NEXT: pinsrd $2, %r9d, %xmm0
@@ -171,7 +171,7 @@ define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-LABEL: xor_v3i8_as_i24:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
@@ -188,7 +188,7 @@ define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_v3i8_as_i24:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movd %ecx, %xmm0
; X64-SSE-NEXT: pinsrd $1, %r8d, %xmm0
; X64-SSE-NEXT: pinsrd $2, %r9d, %xmm0
@@ -212,7 +212,7 @@ define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-LABEL: or_v3i8_as_i24:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
@@ -229,7 +229,7 @@ define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_v3i8_as_i24:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movd %ecx, %xmm0
; X64-SSE-NEXT: pinsrd $1, %r8d, %xmm0
; X64-SSE-NEXT: pinsrd $2, %r9d, %xmm0
@@ -257,12 +257,12 @@ define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
define <8 x i3> @and_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
; X32-SSE-LABEL: and_v8i3_as_i24:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: andps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_v8i3_as_i24:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <8 x i3> %a to i24
@@ -274,12 +274,12 @@ define <8 x i3> @and_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
define <8 x i3> @xor_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
; X32-SSE-LABEL: xor_v8i3_as_i24:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_v8i3_as_i24:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <8 x i3> %a to i24
@@ -291,12 +291,12 @@ define <8 x i3> @xor_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
define <8 x i3> @or_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
; X32-SSE-LABEL: or_v8i3_as_i24:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: orps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_v8i3_as_i24:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: orps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <8 x i3> %a to i24
diff --git a/test/CodeGen/X86/widen_bitops-1.ll b/test/CodeGen/X86/widen_bitops-1.ll
index f2a6b22c2af..fa41b1643ff 100644
--- a/test/CodeGen/X86/widen_bitops-1.ll
+++ b/test/CodeGen/X86/widen_bitops-1.ll
@@ -8,13 +8,13 @@
define i32 @and_i32_as_v4i8(i32 %a, i32 %b) nounwind {
; X32-SSE-LABEL: and_i32_as_v4i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: andl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_i32_as_v4i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -27,13 +27,13 @@ define i32 @and_i32_as_v4i8(i32 %a, i32 %b) nounwind {
define i32 @xor_i32_as_v4i8(i32 %a, i32 %b) nounwind {
; X32-SSE-LABEL: xor_i32_as_v4i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_i32_as_v4i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -46,13 +46,13 @@ define i32 @xor_i32_as_v4i8(i32 %a, i32 %b) nounwind {
define i32 @or_i32_as_v4i8(i32 %a, i32 %b) nounwind {
; X32-SSE-LABEL: or_i32_as_v4i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: orl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_i32_as_v4i8:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: orl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -69,13 +69,13 @@ define i32 @or_i32_as_v4i8(i32 %a, i32 %b) nounwind {
define i32 @and_i32_as_v8i4(i32 %a, i32 %b) nounwind {
; X32-SSE-LABEL: and_i32_as_v8i4:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: andl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_i32_as_v8i4:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -88,13 +88,13 @@ define i32 @and_i32_as_v8i4(i32 %a, i32 %b) nounwind {
define i32 @xor_i32_as_v8i4(i32 %a, i32 %b) nounwind {
; X32-SSE-LABEL: xor_i32_as_v8i4:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_i32_as_v8i4:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -107,13 +107,13 @@ define i32 @xor_i32_as_v8i4(i32 %a, i32 %b) nounwind {
define i32 @or_i32_as_v8i4(i32 %a, i32 %b) nounwind {
; X32-SSE-LABEL: or_i32_as_v8i4:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: orl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_i32_as_v8i4:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: orl %esi, %edi
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: retq
@@ -130,12 +130,12 @@ define i32 @or_i32_as_v8i4(i32 %a, i32 %b) nounwind {
define <4 x i8> @and_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
; X32-SSE-LABEL: and_v4i8_as_i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: andps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_v4i8_as_i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <4 x i8> %a to i32
@@ -147,12 +147,12 @@ define <4 x i8> @and_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
define <4 x i8> @xor_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
; X32-SSE-LABEL: xor_v4i8_as_i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_v4i8_as_i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <4 x i8> %a to i32
@@ -164,12 +164,12 @@ define <4 x i8> @xor_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
define <4 x i8> @or_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
; X32-SSE-LABEL: or_v4i8_as_i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: orps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_v4i8_as_i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: orps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <4 x i8> %a to i32
@@ -185,12 +185,12 @@ define <4 x i8> @or_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
define <8 x i4> @and_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
; X32-SSE-LABEL: and_v8i4_as_i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: andps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_v8i4_as_i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <8 x i4> %a to i32
@@ -202,12 +202,12 @@ define <8 x i4> @and_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
define <8 x i4> @xor_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
; X32-SSE-LABEL: xor_v8i4_as_i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_v8i4_as_i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <8 x i4> %a to i32
@@ -219,12 +219,12 @@ define <8 x i4> @xor_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
define <8 x i4> @or_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
; X32-SSE-LABEL: or_v8i4_as_i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: orps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_v8i4_as_i32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: orps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%1 = bitcast <8 x i4> %a to i32
diff --git a/test/CodeGen/X86/widen_cast-1.ll b/test/CodeGen/X86/widen_cast-1.ll
index 65c8db155e3..41da54cf110 100644
--- a/test/CodeGen/X86/widen_cast-1.ll
+++ b/test/CodeGen/X86/widen_cast-1.ll
@@ -8,7 +8,7 @@
define void @convert(<2 x i32>* %dst, <4 x i16>* %src) nounwind {
; CHECK-LABEL: convert:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movl $0, (%esp)
; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
@@ -33,7 +33,7 @@ define void @convert(<2 x i32>* %dst, <4 x i16>* %src) nounwind {
; CHECK-NEXT: retl
;
; ATOM-LABEL: convert:
-; ATOM: # BB#0: # %entry
+; ATOM: # %bb.0: # %entry
; ATOM-NEXT: pushl %eax
; ATOM-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; ATOM-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
diff --git a/test/CodeGen/X86/widen_cast-2.ll b/test/CodeGen/X86/widen_cast-2.ll
index 8caa962e4ec..03d4700c064 100644
--- a/test/CodeGen/X86/widen_cast-2.ll
+++ b/test/CodeGen/X86/widen_cast-2.ll
@@ -4,7 +4,7 @@
define void @convert(<7 x i32>* %dst, <14 x i16>* %src) nounwind {
; CHECK-LABEL: convert:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movl $0, (%esp)
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
diff --git a/test/CodeGen/X86/widen_cast-3.ll b/test/CodeGen/X86/widen_cast-3.ll
index a50e199cd10..18a04c48a59 100644
--- a/test/CodeGen/X86/widen_cast-3.ll
+++ b/test/CodeGen/X86/widen_cast-3.ll
@@ -6,7 +6,7 @@
define void @convert(<12 x i8>* %dst.addr, <3 x i32> %src) nounwind {
; X86-LABEL: convert:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: pcmpeqd %xmm1, %xmm1
; X86-NEXT: psubd %xmm1, %xmm0
@@ -16,7 +16,7 @@ define void @convert(<12 x i8>* %dst.addr, <3 x i32> %src) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: convert:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqd %xmm1, %xmm1
; X64-NEXT: psubd %xmm1, %xmm0
; X64-NEXT: pextrd $2, %xmm0, 8(%rdi)
diff --git a/test/CodeGen/X86/widen_cast-4.ll b/test/CodeGen/X86/widen_cast-4.ll
index 5c352124725..c3fa2f5454e 100644
--- a/test/CodeGen/X86/widen_cast-4.ll
+++ b/test/CodeGen/X86/widen_cast-4.ll
@@ -6,7 +6,7 @@
define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; NARROW-LABEL: update:
-; NARROW: # BB#0: # %entry
+; NARROW: # %bb.0: # %entry
; NARROW-NEXT: subl $12, %esp
; NARROW-NEXT: movl $0, (%esp)
; NARROW-NEXT: pcmpeqd %xmm0, %xmm0
@@ -35,12 +35,12 @@ define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; NARROW-NEXT: movl (%esp), %eax
; NARROW-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NARROW-NEXT: jl .LBB0_2
-; NARROW-NEXT: # BB#3: # %afterfor
+; NARROW-NEXT: # %bb.3: # %afterfor
; NARROW-NEXT: addl $12, %esp
; NARROW-NEXT: retl
;
; WIDE-LABEL: update:
-; WIDE: # BB#0: # %entry
+; WIDE: # %bb.0: # %entry
; WIDE-NEXT: subl $12, %esp
; WIDE-NEXT: movl $0, (%esp)
; WIDE-NEXT: pcmpeqd %xmm0, %xmm0
@@ -72,7 +72,7 @@ define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; WIDE-NEXT: movl (%esp), %eax
; WIDE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; WIDE-NEXT: jl .LBB0_2
-; WIDE-NEXT: # BB#3: # %afterfor
+; WIDE-NEXT: # %bb.3: # %afterfor
; WIDE-NEXT: addl $12, %esp
; WIDE-NEXT: retl
entry:
diff --git a/test/CodeGen/X86/widen_cast-5.ll b/test/CodeGen/X86/widen_cast-5.ll
index 986fa4743c6..b0363d02302 100644
--- a/test/CodeGen/X86/widen_cast-5.ll
+++ b/test/CodeGen/X86/widen_cast-5.ll
@@ -6,7 +6,7 @@
define void @convert(<2 x i32>* %dst.addr, i64 %src) nounwind {
; X86-LABEL: convert:
-; X86: ## BB#0: ## %entry
+; X86: ## %bb.0: ## %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; X86-NEXT: pxor LCPI0_0, %xmm0
@@ -15,7 +15,7 @@ define void @convert(<2 x i32>* %dst.addr, i64 %src) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: convert:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movq %rsi, %xmm0
; X64-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; X64-NEXT: pxor {{.*}}(%rip), %xmm0
diff --git a/test/CodeGen/X86/widen_cast-6.ll b/test/CodeGen/X86/widen_cast-6.ll
index 347d5245f76..c75c3597eae 100644
--- a/test/CodeGen/X86/widen_cast-6.ll
+++ b/test/CodeGen/X86/widen_cast-6.ll
@@ -6,14 +6,14 @@
define i32 @return_v2hi() nounwind {
; X86-LABEL: return_v2hi:
-; X86: ## BB#0: ## %entry
+; X86: ## %bb.0: ## %entry
; X86-NEXT: pushl %eax
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: popl %ecx
; X86-NEXT: retl
;
; X64-LABEL: return_v2hi:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/widen_compare-1.ll b/test/CodeGen/X86/widen_compare-1.ll
index e8d993d2280..9c0fb0e7461 100644
--- a/test/CodeGen/X86/widen_compare-1.ll
+++ b/test/CodeGen/X86/widen_compare-1.ll
@@ -6,12 +6,12 @@
define <2 x i16> @compare_v2i64_to_v2i16(<2 x i16>* %src) nounwind {
; X86-LABEL: compare_v2i64_to_v2i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pcmpeqd %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: compare_v2i64_to_v2i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqd %xmm0, %xmm0
; X64-NEXT: retq
%val = load <2 x i16>, <2 x i16>* %src, align 4
diff --git a/test/CodeGen/X86/widen_conv-1.ll b/test/CodeGen/X86/widen_conv-1.ll
index c548fc2c77e..7e0f999bc10 100644
--- a/test/CodeGen/X86/widen_conv-1.ll
+++ b/test/CodeGen/X86/widen_conv-1.ll
@@ -6,7 +6,7 @@
define void @convert_v2i64_to_v2i32(<2 x i32>* %dst.addr, <2 x i64> %src) nounwind {
; X86-LABEL: convert_v2i64_to_v2i32:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -14,7 +14,7 @@ define void @convert_v2i64_to_v2i32(<2 x i32>* %dst.addr, <2 x i64> %src) nounwi
; X86-NEXT: retl
;
; X64-LABEL: convert_v2i64_to_v2i32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: paddd {{.*}}(%rip), %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: movq %xmm0, (%rdi)
@@ -30,7 +30,7 @@ entry:
define void @convert_v3i32_to_v3i8(<3 x i8>* %dst.addr, <3 x i32>* %src.addr) nounwind {
; X86-LABEL: convert_v3i32_to_v3i8:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -44,7 +44,7 @@ define void @convert_v3i32_to_v3i8(<3 x i8>* %dst.addr, <3 x i32>* %src.addr) no
; X86-NEXT: retl
;
; X64-LABEL: convert_v3i32_to_v3i8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa (%rsi), %xmm0
; X64-NEXT: pcmpeqd %xmm1, %xmm1
; X64-NEXT: psubd %xmm1, %xmm0
@@ -64,7 +64,7 @@ entry:
define void @convert_v5i16_to_v5i8(<5 x i8>* %dst.addr, <5 x i16>* %src.addr) nounwind {
; X86-LABEL: convert_v5i16_to_v5i8:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -82,7 +82,7 @@ define void @convert_v5i16_to_v5i8(<5 x i8>* %dst.addr, <5 x i16>* %src.addr) no
; X86-NEXT: retl
;
; X64-LABEL: convert_v5i16_to_v5i8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa (%rsi), %xmm0
; X64-NEXT: pcmpeqd %xmm1, %xmm1
; X64-NEXT: psubw %xmm1, %xmm0
diff --git a/test/CodeGen/X86/widen_conv-2.ll b/test/CodeGen/X86/widen_conv-2.ll
index 015b0faa982..3a39cbfba2e 100644
--- a/test/CodeGen/X86/widen_conv-2.ll
+++ b/test/CodeGen/X86/widen_conv-2.ll
@@ -6,7 +6,7 @@
define void @convert_v2i16_v2i32(<2 x i32>* %dst.addr, <2 x i16> %src) nounwind {
; X86-LABEL: convert_v2i16_v2i32:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: psllq $48, %xmm0
; X86-NEXT: psrad $16, %xmm0
@@ -15,7 +15,7 @@ define void @convert_v2i16_v2i32(<2 x i32>* %dst.addr, <2 x i16> %src) nounwind
; X86-NEXT: retl
;
; X64-LABEL: convert_v2i16_v2i32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllq $48, %xmm0
; X64-NEXT: psrad $16, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
diff --git a/test/CodeGen/X86/widen_conv-3.ll b/test/CodeGen/X86/widen_conv-3.ll
index 3b20f351571..186e43e213b 100644
--- a/test/CodeGen/X86/widen_conv-3.ll
+++ b/test/CodeGen/X86/widen_conv-3.ll
@@ -8,7 +8,7 @@
define void @convert_v2i16_to_v2f32(<2 x float>* %dst.addr, <2 x i16> %src) nounwind {
; X86-SSE2-LABEL: convert_v2i16_to_v2f32:
-; X86-SSE2: # BB#0: # %entry
+; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: psllq $48, %xmm0
; X86-SSE2-NEXT: psrad $16, %xmm0
@@ -20,7 +20,7 @@ define void @convert_v2i16_to_v2f32(<2 x float>* %dst.addr, <2 x i16> %src) noun
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: convert_v2i16_to_v2f32:
-; X86-SSE42: # BB#0: # %entry
+; X86-SSE42: # %bb.0: # %entry
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: psllq $48, %xmm0
; X86-SSE42-NEXT: psrad $16, %xmm0
@@ -31,7 +31,7 @@ define void @convert_v2i16_to_v2f32(<2 x float>* %dst.addr, <2 x i16> %src) noun
; X86-SSE42-NEXT: retl
;
; X64-LABEL: convert_v2i16_to_v2f32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllq $48, %xmm0
; X64-NEXT: psrad $16, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -48,7 +48,7 @@ entry:
define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr) nounwind {
; X86-SSE2-LABEL: convert_v3i8_to_v3f32:
-; X86-SSE2: # BB#0: # %entry
+; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: movl %esp, %ebp
; X86-SSE2-NEXT: pushl %esi
@@ -84,7 +84,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: convert_v3i8_to_v3f32:
-; X86-SSE42: # BB#0: # %entry
+; X86-SSE42: # %bb.0: # %entry
; X86-SSE42-NEXT: pushl %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -103,7 +103,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE42-NEXT: retl
;
; X64-SSE2-LABEL: convert_v3i8_to_v3f32:
-; X64-SSE2: # BB#0: # %entry
+; X64-SSE2: # %bb.0: # %entry
; X64-SSE2-NEXT: movzwl (%rsi), %eax
; X64-SSE2-NEXT: movq %rax, %xmm0
; X64-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -126,7 +126,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: convert_v3i8_to_v3f32:
-; X64-SSE42: # BB#0: # %entry
+; X64-SSE42: # %bb.0: # %entry
; X64-SSE42-NEXT: movzbl 2(%rsi), %eax
; X64-SSE42-NEXT: movzwl (%rsi), %ecx
; X64-SSE42-NEXT: movq %rcx, %xmm0
diff --git a/test/CodeGen/X86/widen_conv-4.ll b/test/CodeGen/X86/widen_conv-4.ll
index 6dc938893d3..4fa3bd52211 100644
--- a/test/CodeGen/X86/widen_conv-4.ll
+++ b/test/CodeGen/X86/widen_conv-4.ll
@@ -8,7 +8,7 @@
define void @convert_v7i16_v7f32(<7 x float>* %dst.addr, <7 x i16> %src) nounwind {
; X86-SSE2-LABEL: convert_v7i16_v7f32:
-; X86-SSE2: # BB#0: # %entry
+; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -26,7 +26,7 @@ define void @convert_v7i16_v7f32(<7 x float>* %dst.addr, <7 x i16> %src) nounwin
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: convert_v7i16_v7f32:
-; X86-SSE42: # BB#0: # %entry
+; X86-SSE42: # %bb.0: # %entry
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: pxor %xmm1, %xmm1
; X86-SSE42-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -40,7 +40,7 @@ define void @convert_v7i16_v7f32(<7 x float>* %dst.addr, <7 x i16> %src) nounwin
; X86-SSE42-NEXT: retl
;
; X64-SSE2-LABEL: convert_v7i16_v7f32:
-; X64-SSE2: # BB#0: # %entry
+; X64-SSE2: # %bb.0: # %entry
; X64-SSE2-NEXT: pxor %xmm1, %xmm1
; X64-SSE2-NEXT: movdqa %xmm0, %xmm2
; X64-SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -54,7 +54,7 @@ define void @convert_v7i16_v7f32(<7 x float>* %dst.addr, <7 x i16> %src) nounwin
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: convert_v7i16_v7f32:
-; X64-SSE42: # BB#0: # %entry
+; X64-SSE42: # %bb.0: # %entry
; X64-SSE42-NEXT: pxor %xmm1, %xmm1
; X64-SSE42-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-SSE42-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -74,7 +74,7 @@ entry:
define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr) nounwind {
; X86-SSE2-LABEL: convert_v3i8_to_v3f32:
-; X86-SSE2: # BB#0: # %entry
+; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: movl %esp, %ebp
; X86-SSE2-NEXT: pushl %esi
@@ -110,7 +110,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: convert_v3i8_to_v3f32:
-; X86-SSE42: # BB#0: # %entry
+; X86-SSE42: # %bb.0: # %entry
; X86-SSE42-NEXT: pushl %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -128,7 +128,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE42-NEXT: retl
;
; X64-SSE2-LABEL: convert_v3i8_to_v3f32:
-; X64-SSE2: # BB#0: # %entry
+; X64-SSE2: # %bb.0: # %entry
; X64-SSE2-NEXT: movzwl (%rsi), %eax
; X64-SSE2-NEXT: movq %rax, %xmm0
; X64-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -151,7 +151,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: convert_v3i8_to_v3f32:
-; X64-SSE42: # BB#0: # %entry
+; X64-SSE42: # %bb.0: # %entry
; X64-SSE42-NEXT: movzbl 2(%rsi), %eax
; X64-SSE42-NEXT: movzwl (%rsi), %ecx
; X64-SSE42-NEXT: movq %rcx, %xmm0
diff --git a/test/CodeGen/X86/widen_conversions.ll b/test/CodeGen/X86/widen_conversions.ll
index 9945e26c550..acd8c78fa2d 100644
--- a/test/CodeGen/X86/widen_conversions.ll
+++ b/test/CodeGen/X86/widen_conversions.ll
@@ -4,7 +4,7 @@
define <4 x i32> @zext_v4i8_to_v4i32(<4 x i8>* %ptr) {
; X86-LABEL: zext_v4i8_to_v4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: pxor %xmm1, %xmm1
@@ -13,7 +13,7 @@ define <4 x i32> @zext_v4i8_to_v4i32(<4 x i8>* %ptr) {
; X86-NEXT: retl
;
; X64-LABEL: zext_v4i8_to_v4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
diff --git a/test/CodeGen/X86/widen_extract-1.ll b/test/CodeGen/X86/widen_extract-1.ll
index d75fedc3203..024187f1f84 100644
--- a/test/CodeGen/X86/widen_extract-1.ll
+++ b/test/CodeGen/X86/widen_extract-1.ll
@@ -6,14 +6,14 @@
define void @convert(<2 x double>* %dst.addr, <3 x double> %src) {
; X32-LABEL: convert:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: movaps %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: convert:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/widen_load-0.ll b/test/CodeGen/X86/widen_load-0.ll
index f998cf77048..01e813a78ad 100644
--- a/test/CodeGen/X86/widen_load-0.ll
+++ b/test/CodeGen/X86/widen_load-0.ll
@@ -8,7 +8,7 @@
define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
; X86-LABEL: short2_int_swap:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -20,7 +20,7 @@ define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwin
; X86-NEXT: retl
;
; X64-LABEL: short2_int_swap:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl (%rsi), %eax
; X64-NEXT: movl (%rdi), %ecx
; X64-NEXT: movl %eax, (%rdi)
diff --git a/test/CodeGen/X86/widen_load-2.ll b/test/CodeGen/X86/widen_load-2.ll
index 9fc0805b899..7d773c779ed 100644
--- a/test/CodeGen/X86/widen_load-2.ll
+++ b/test/CodeGen/X86/widen_load-2.ll
@@ -8,7 +8,7 @@
%i32vec3 = type <3 x i32>
define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; X86-LABEL: add3i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -20,7 +20,7 @@ define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; X86-NEXT: retl $4
;
; X64-LABEL: add3i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa (%rsi), %xmm0
; X64-NEXT: paddd (%rdx), %xmm0
; X64-NEXT: pextrd $2, %xmm0, 8(%rdi)
@@ -36,7 +36,7 @@ define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; X86-LABEL: add3i32_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -53,7 +53,7 @@ define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; X86-NEXT: retl $4
;
; X64-LABEL: add3i32_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pinsrd $2, 8(%rsi), %xmm0
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
@@ -73,7 +73,7 @@ define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
%i32vec7 = type <7 x i32>
define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) {
; X86-LABEL: add7i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -88,7 +88,7 @@ define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) {
; X86-NEXT: retl $4
;
; X64-LABEL: add7i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa (%rsi), %xmm0
; X64-NEXT: movdqa 16(%rsi), %xmm1
; X64-NEXT: paddd (%rdx), %xmm0
@@ -108,7 +108,7 @@ define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) {
%i32vec12 = type <12 x i32>
define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) {
; X86-LABEL: add12i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -124,7 +124,7 @@ define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) {
; X86-NEXT: retl $4
;
; X64-LABEL: add12i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa (%rsi), %xmm0
; X64-NEXT: movdqa 16(%rsi), %xmm1
; X64-NEXT: movdqa 32(%rsi), %xmm2
@@ -147,7 +147,7 @@ define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) {
%i16vec3 = type <3 x i16>
define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind {
; X86-LABEL: add3i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -170,7 +170,7 @@ define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp
; X86-NEXT: retl $4
;
; X64-LABEL: add3i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-NEXT: pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-NEXT: paddd %xmm0, %xmm1
@@ -189,7 +189,7 @@ define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp
%i16vec4 = type <4 x i16>
define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind {
; X86-LABEL: add4i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -200,7 +200,7 @@ define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp
; X86-NEXT: retl $4
;
; X64-LABEL: add4i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: paddw %xmm0, %xmm1
@@ -217,7 +217,7 @@ define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp
%i16vec12 = type <12 x i16>
define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind {
; X86-LABEL: add12i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -231,7 +231,7 @@ define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12*
; X86-NEXT: retl $4
;
; X64-LABEL: add12i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa (%rsi), %xmm0
; X64-NEXT: movdqa 16(%rsi), %xmm1
; X64-NEXT: paddw (%rdx), %xmm0
@@ -250,7 +250,7 @@ define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12*
%i16vec18 = type <18 x i16>
define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind {
; X86-LABEL: add18i16:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -266,7 +266,7 @@ define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18*
; X86-NEXT: retl $4
;
; X64-LABEL: add18i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa (%rsi), %xmm0
; X64-NEXT: movdqa 16(%rsi), %xmm1
; X64-NEXT: movdqa 32(%rsi), %xmm2
@@ -289,7 +289,7 @@ define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18*
%i8vec3 = type <3 x i8>
define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind {
; X86-LABEL: add3i8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -304,7 +304,7 @@ define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) no
; X86-NEXT: retl $4
;
; X64-LABEL: add3i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X64-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X64-NEXT: paddd %xmm0, %xmm1
@@ -323,7 +323,7 @@ define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) no
%i8vec31 = type <31 x i8>
define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind {
; X86-LABEL: add31i8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -340,7 +340,7 @@ define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp
; X86-NEXT: retl $4
;
; X64-LABEL: add31i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa (%rsi), %xmm0
; X64-NEXT: movdqa 16(%rsi), %xmm1
; X64-NEXT: paddb (%rdx), %xmm0
@@ -363,7 +363,7 @@ define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp
%i8vec3pack = type { <3 x i8>, i8 }
define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pack* %rot) nounwind {
; X86-LABEL: rot:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -385,7 +385,7 @@ define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pa
; X86-NEXT: retl $4
;
; X64-LABEL: rot:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa {{.*#+}} xmm0 = [40606,158]
; X64-NEXT: pextrw $0, %xmm0, (%rsi)
; X64-NEXT: movb $-98, 2(%rsi)
diff --git a/test/CodeGen/X86/widen_load-3.ll b/test/CodeGen/X86/widen_load-3.ll
index bc36c5fbd57..ce358d91479 100644
--- a/test/CodeGen/X86/widen_load-3.ll
+++ b/test/CodeGen/X86/widen_load-3.ll
@@ -10,7 +10,7 @@
define <7 x i64> @load7_aligned(<7 x i64>* %x) {
; X86-SSE-LABEL: load7_aligned:
-; X86-SSE: # BB#0:
+; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movaps (%ecx), %xmm0
@@ -26,7 +26,7 @@ define <7 x i64> @load7_aligned(<7 x i64>* %x) {
; X86-SSE-NEXT: retl $4
;
; X86-AVX-LABEL: load7_aligned:
-; X86-AVX: # BB#0:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: vmovaps (%ecx), %ymm0
@@ -40,7 +40,7 @@ define <7 x i64> @load7_aligned(<7 x i64>* %x) {
; X86-AVX-NEXT: retl $4
;
; X64-SSE-LABEL: load7_aligned:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movaps (%rsi), %xmm0
; X64-SSE-NEXT: movaps 16(%rsi), %xmm1
; X64-SSE-NEXT: movaps 32(%rsi), %xmm2
@@ -53,7 +53,7 @@ define <7 x i64> @load7_aligned(<7 x i64>* %x) {
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: load7_aligned:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps (%rsi), %ymm0
; X64-AVX-NEXT: vmovaps 32(%rsi), %ymm1
; X64-AVX-NEXT: vmovaps %ymm0, (%rdi)
@@ -69,7 +69,7 @@ define <7 x i64> @load7_aligned(<7 x i64>* %x) {
define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
; X86-SSE-LABEL: load7_unaligned:
-; X86-SSE: # BB#0:
+; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movups (%ecx), %xmm0
@@ -85,7 +85,7 @@ define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
; X86-SSE-NEXT: retl $4
;
; X86-AVX-LABEL: load7_unaligned:
-; X86-AVX: # BB#0:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: vmovups (%ecx), %ymm0
@@ -100,7 +100,7 @@ define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
; X86-AVX-NEXT: retl $4
;
; X64-SSE-LABEL: load7_unaligned:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movups (%rsi), %xmm0
; X64-SSE-NEXT: movups 16(%rsi), %xmm1
; X64-SSE-NEXT: movups 32(%rsi), %xmm2
@@ -113,7 +113,7 @@ define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: load7_unaligned:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovups (%rsi), %ymm0
; X64-AVX-NEXT: vmovups 32(%rsi), %xmm1
; X64-AVX-NEXT: movq 48(%rsi), %rax
diff --git a/test/CodeGen/X86/widen_shuffle-1.ll b/test/CodeGen/X86/widen_shuffle-1.ll
index aeb4e213006..c0387b3878a 100644
--- a/test/CodeGen/X86/widen_shuffle-1.ll
+++ b/test/CodeGen/X86/widen_shuffle-1.ll
@@ -5,7 +5,7 @@
; widening shuffle v3float and then a add
define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
; X86-LABEL: shuf:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addps %xmm1, %xmm0
; X86-NEXT: extractps $2, %xmm0, 8(%eax)
@@ -14,7 +14,7 @@ define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) n
; X86-NEXT: retl
;
; X64-LABEL: shuf:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: extractps $2, %xmm0, 8(%rdi)
; X64-NEXT: movlps %xmm0, (%rdi)
@@ -30,7 +30,7 @@ entry:
; widening shuffle v3float with a different mask and then a add
define void @shuf2(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
; X86-LABEL: shuf2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; X86-NEXT: addps %xmm1, %xmm0
@@ -40,7 +40,7 @@ define void @shuf2(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2)
; X86-NEXT: retl
;
; X64-LABEL: shuf2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: extractps $2, %xmm0, 8(%rdi)
@@ -58,14 +58,14 @@ entry:
; opA with opB, the DAG will produce new operations with opA.
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
; X86-LABEL: shuf3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-NEXT: movaps %xmm1, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shuf3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-NEXT: movaps %xmm1, (%rdi)
; X64-NEXT: retq
@@ -88,7 +88,7 @@ entry:
; PR10421: make sure we correctly handle extreme widening with CONCAT_VECTORS
define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
; X86-LABEL: shuf4:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X86-NEXT: pshufb %xmm2, %xmm1
; X86-NEXT: pshufb %xmm2, %xmm0
@@ -96,7 +96,7 @@ define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
; X86-NEXT: retl
;
; X64-LABEL: shuf4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X64-NEXT: pshufb %xmm2, %xmm1
; X64-NEXT: pshufb %xmm2, %xmm0
@@ -109,14 +109,14 @@ define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
; PR11389: another CONCAT_VECTORS case
define void @shuf5(<8 x i8>* %p) nounwind {
; X86-LABEL: shuf5:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: movsd %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shuf5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movq %rax, (%rdi)
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/widened-broadcast.ll b/test/CodeGen/X86/widened-broadcast.ll
index 82500329977..564371c7321 100644
--- a/test/CodeGen/X86/widened-broadcast.ll
+++ b/test/CodeGen/X86/widened-broadcast.ll
@@ -9,18 +9,18 @@
define <4 x float> @load_splat_4f32_4f32_0101(<4 x float>* %ptr) nounwind uwtable readnone ssp {
; SSE2-LABEL: load_splat_4f32_4f32_0101:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE42-LABEL: load_splat_4f32_4f32_0101:
-; SSE42: # BB#0: # %entry
+; SSE42: # %bb.0: # %entry
; SSE42-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE42-NEXT: retq
;
; AVX-LABEL: load_splat_4f32_4f32_0101:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
entry:
@@ -31,31 +31,31 @@ entry:
define <8 x float> @load_splat_8f32_4f32_01010101(<4 x float>* %ptr) nounwind uwtable readnone ssp {
; SSE2-LABEL: load_splat_8f32_4f32_01010101:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: load_splat_8f32_4f32_01010101:
-; SSE42: # BB#0: # %entry
+; SSE42: # %bb.0: # %entry
; SSE42-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE42-NEXT: movapd %xmm0, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: load_splat_8f32_4f32_01010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_8f32_4f32_01010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8f32_4f32_01010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -66,20 +66,20 @@ entry:
define <8 x float> @load_splat_8f32_8f32_01010101(<8 x float>* %ptr) nounwind uwtable readnone ssp {
; SSE2-LABEL: load_splat_8f32_8f32_01010101:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: load_splat_8f32_8f32_01010101:
-; SSE42: # BB#0: # %entry
+; SSE42: # %bb.0: # %entry
; SSE42-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE42-NEXT: movapd %xmm0, %xmm1
; SSE42-NEXT: retq
;
; AVX-LABEL: load_splat_8f32_8f32_01010101:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
entry:
@@ -90,22 +90,22 @@ entry:
define <4 x i32> @load_splat_4i32_4i32_0101(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_4i32_4i32_0101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_4i32_4i32_0101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_4i32_4i32_0101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_4i32_4i32_0101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -116,24 +116,24 @@ entry:
define <8 x i32> @load_splat_8i32_4i32_01010101(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_8i32_4i32_01010101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_8i32_4i32_01010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_8i32_4i32_01010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i32_4i32_01010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -144,24 +144,24 @@ entry:
define <8 x i32> @load_splat_8i32_8i32_01010101(<8 x i32>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_8i32_8i32_01010101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_8i32_8i32_01010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_8i32_8i32_01010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i32_8i32_01010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -172,22 +172,22 @@ entry:
define <8 x i16> @load_splat_8i16_8i16_01010101(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_8i16_8i16_01010101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_8i16_8i16_01010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_8i16_8i16_01010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i16_8i16_01010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastss (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -198,22 +198,22 @@ entry:
define <8 x i16> @load_splat_8i16_8i16_01230123(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_8i16_8i16_01230123:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_8i16_8i16_01230123:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_8i16_8i16_01230123:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i16_8i16_01230123:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -224,24 +224,24 @@ entry:
define <16 x i16> @load_splat_16i16_8i16_0101010101010101(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_16i16_8i16_0101010101010101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_16i16_8i16_0101010101010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_16i16_8i16_0101010101010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i16_8i16_0101010101010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastss (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -252,24 +252,24 @@ entry:
define <16 x i16> @load_splat_16i16_8i16_0123012301230123(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_16i16_8i16_0123012301230123:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_16i16_8i16_0123012301230123:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_16i16_8i16_0123012301230123:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i16_8i16_0123012301230123:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -280,24 +280,24 @@ entry:
define <16 x i16> @load_splat_16i16_16i16_0101010101010101(<16 x i16>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_16i16_16i16_0101010101010101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_16i16_16i16_0101010101010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_16i16_16i16_0101010101010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i16_16i16_0101010101010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastss (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -308,13 +308,13 @@ entry:
define <16 x i16> @load_splat_16i16_16i16_0123012301230123(<16 x i16>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_16i16_16i16_0123012301230123:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_splat_16i16_16i16_0123012301230123:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
entry:
@@ -325,24 +325,24 @@ entry:
define <16 x i8> @load_splat_16i8_16i8_0101010101010101(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_16i8_16i8_0101010101010101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_16i8_16i8_0101010101010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_16i8_16i8_0101010101010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastw (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i8_16i8_0101010101010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpbroadcastw (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -353,22 +353,22 @@ entry:
define <16 x i8> @load_splat_16i8_16i8_0123012301230123(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_16i8_16i8_0123012301230123:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_16i8_16i8_0123012301230123:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_16i8_16i8_0123012301230123:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i8_16i8_0123012301230123:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastss (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -379,22 +379,22 @@ entry:
define <16 x i8> @load_splat_16i8_16i8_0123456701234567(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_16i8_16i8_0123456701234567:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_16i8_16i8_0123456701234567:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_16i8_16i8_0123456701234567:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i8_16i8_0123456701234567:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
; AVX512-NEXT: retq
entry:
@@ -405,26 +405,26 @@ entry:
define <32 x i8> @load_splat_32i8_16i8_01010101010101010101010101010101(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -435,24 +435,24 @@ entry:
define <32 x i8> @load_splat_32i8_16i8_01230123012301230123012301230123(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastss (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -463,24 +463,24 @@ entry:
define <32 x i8> @load_splat_32i8_16i8_01234567012345670123456701234567(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -491,26 +491,26 @@ entry:
define <32 x i8> @load_splat_32i8_32i8_01010101010101010101010101010101(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_32i8_32i8_01010101010101010101010101010101:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_32i8_32i8_01010101010101010101010101010101:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_32i8_32i8_01010101010101010101010101010101:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_32i8_01010101010101010101010101010101:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
@@ -521,24 +521,24 @@ entry:
define <32 x i8> @load_splat_32i8_32i8_01230123012301230123012301230123(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_32i8_32i8_01230123012301230123012301230123:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_32i8_32i8_01230123012301230123012301230123:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vbroadcastss (%rdi), %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_32i8_32i8_01230123012301230123012301230123:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_32i8_01230123012301230123012301230123:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovaps (%rdi), %ymm0
; AVX512-NEXT: vbroadcastss %xmm0, %ymm0
; AVX512-NEXT: retq
@@ -550,13 +550,13 @@ entry:
define <32 x i8> @load_splat_32i8_32i8_01234567012345670123456701234567(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_32i8_32i8_01234567012345670123456701234567:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_splat_32i8_32i8_01234567012345670123456701234567:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
entry:
@@ -567,13 +567,13 @@ entry:
define <4 x float> @load_splat_4f32_8f32_0000(<8 x float>* %ptr) nounwind uwtable readnone ssp {
; SSE-LABEL: load_splat_4f32_8f32_0000:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: load_splat_4f32_8f32_0000:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastss (%rdi), %xmm0
; AVX-NEXT: retq
entry:
@@ -584,20 +584,20 @@ entry:
define <8 x float> @load_splat_8f32_16f32_89898989(<16 x float>* %ptr) nounwind uwtable readnone ssp {
; SSE2-LABEL: load_splat_8f32_16f32_89898989:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps 32(%rdi), %xmm0
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: load_splat_8f32_16f32_89898989:
-; SSE42: # BB#0: # %entry
+; SSE42: # %bb.0: # %entry
; SSE42-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE42-NEXT: movapd %xmm0, %xmm1
; SSE42-NEXT: retq
;
; AVX-LABEL: load_splat_8f32_16f32_89898989:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastsd 32(%rdi), %ymm0
; AVX-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/win64_sibcall.ll b/test/CodeGen/X86/win64_sibcall.ll
index 42dd4d31ca9..5a65d34a426 100644
--- a/test/CodeGen/X86/win64_sibcall.ll
+++ b/test/CodeGen/X86/win64_sibcall.ll
@@ -6,9 +6,9 @@
define void @C1(%Object addrspace(1)* %param0) gc "coreclr" {
entry:
-; WIN_X64: # BB#0:
+; WIN_X64: # %bb.0:
; WIN_X64: pushq %rax
-; LINUX: # BB#0: # %entry
+; LINUX: # %bb.0: # %entry
; LINUX: movq $0, -8(%rsp)
%this = alloca %Object addrspace(1)*
diff --git a/test/CodeGen/X86/win_chkstk.ll b/test/CodeGen/X86/win_chkstk.ll
index c7550a467a3..41fdccd9364 100644
--- a/test/CodeGen/X86/win_chkstk.ll
+++ b/test/CodeGen/X86/win_chkstk.ll
@@ -30,19 +30,19 @@ entry:
; allocation.
define i32 @main128() nounwind {
entry:
-; WIN_X32: # BB#0:
+; WIN_X32: # %bb.0:
; WIN_X32-NOT: calll __chkstk
; WIN_X32: ret
-; WIN_X64: # BB#0:
+; WIN_X64: # %bb.0:
; WIN_X64-NOT: callq __chkstk
; WIN_X64: ret
-; MINGW_X64: # BB#0:
+; MINGW_X64: # %bb.0:
; MINGW_X64-NOT: callq ___chkstk_ms
; MINGW_X64: ret
-; LINUX: # BB#0:
+; LINUX: # %bb.0:
; LINUX-NOT: call __chkstk
; LINUX: ret
%array128 = alloca [128 x i8], align 16 ; <[128 x i8]*> [#uses=0]
diff --git a/test/CodeGen/X86/win_coreclr_chkstk.ll b/test/CodeGen/X86/win_coreclr_chkstk.ll
index b4b8010ec56..8934535d6f5 100644
--- a/test/CodeGen/X86/win_coreclr_chkstk.ll
+++ b/test/CodeGen/X86/win_coreclr_chkstk.ll
@@ -8,7 +8,7 @@
define i32 @main4k() nounwind {
entry:
; WIN_X64-LABEL:main4k:
-; WIN_X64: # BB#0:
+; WIN_X64: # %bb.0:
; WIN_X64: movl $4096, %eax
; WIN_X64: movq %rcx, 8(%rsp)
; WIN_X64: movq %rdx, 16(%rsp)
@@ -19,7 +19,7 @@ entry:
; WIN_X64: movq %gs:16, %rcx
; WIN_X64: cmpq %rcx, %rdx
; WIN_X64: jae .LBB0_3
-; WIN_X64:# BB#1:
+; WIN_X64:# %bb.1:
; WIN_X64: andq $-4096, %rdx
; WIN_X64:.LBB0_2:
; WIN_X64: leaq -4096(%rcx), %rcx
diff --git a/test/CodeGen/X86/x32-cet-intrinsics.ll b/test/CodeGen/X86/x32-cet-intrinsics.ll
index 4d45014d18f..b6f6c05e4f8 100644
--- a/test/CodeGen/X86/x32-cet-intrinsics.ll
+++ b/test/CodeGen/X86/x32-cet-intrinsics.ll
@@ -3,7 +3,7 @@
define void @test_incsspd(i32 %a) local_unnamed_addr {
; CHECK-LABEL: test_incsspd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: incsspd %eax
; CHECK-NEXT: retl
@@ -16,7 +16,7 @@ declare void @llvm.x86.incsspd(i32)
define i32 @test_rdsspd(i32 %a) {
; CHECK-LABEL: test_rdsspd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: rdsspd %eax
; CHECK-NEXT: retl
@@ -29,7 +29,7 @@ declare i32 @llvm.x86.rdsspd(i32)
define void @test_saveprevssp() {
; CHECK-LABEL: test_saveprevssp:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: saveprevssp
; CHECK-NEXT: retl
entry:
@@ -41,7 +41,7 @@ declare void @llvm.x86.saveprevssp()
define void @test_rstorssp(i8* %__p) {
; CHECK-LABEL: test_rstorssp:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: rstorssp (%eax)
; CHECK-NEXT: retl
@@ -54,7 +54,7 @@ declare void @llvm.x86.rstorssp(i8*)
define void @test_wrssd(i32 %a, i8* %__p) {
; CHECK-LABEL: test_wrssd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: wrssd %eax, (%ecx)
@@ -68,7 +68,7 @@ declare void @llvm.x86.wrssd(i32, i8*)
define void @test_wrussd(i32 %a, i8* %__p) {
; CHECK-LABEL: test_wrussd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: wrussd %eax, (%ecx)
@@ -82,7 +82,7 @@ declare void @llvm.x86.wrussd(i32, i8*)
define void @test_setssbsy() {
; CHECK-LABEL: test_setssbsy:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: setssbsy
; CHECK-NEXT: retl
entry:
@@ -94,7 +94,7 @@ declare void @llvm.x86.setssbsy()
define void @test_clrssbsy(i8* %__p) {
; CHECK-LABEL: test_clrssbsy:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: clrssbsy (%eax)
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/x32-lea-1.ll b/test/CodeGen/X86/x32-lea-1.ll
index afe3581a85b..0b687579fb8 100644
--- a/test/CodeGen/X86/x32-lea-1.ll
+++ b/test/CodeGen/X86/x32-lea-1.ll
@@ -3,7 +3,7 @@
define void @foo(i32** %p) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leal -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT: addl $16, %eax
; CHECK-NEXT: movl %eax, (%edi)
diff --git a/test/CodeGen/X86/x64-cet-intrinsics.ll b/test/CodeGen/X86/x64-cet-intrinsics.ll
index f9cba0056db..de95e1916bc 100644
--- a/test/CodeGen/X86/x64-cet-intrinsics.ll
+++ b/test/CodeGen/X86/x64-cet-intrinsics.ll
@@ -3,7 +3,7 @@
define void @test_incsspd(i32 %a) local_unnamed_addr {
; CHECK-LABEL: test_incsspd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: incsspd %edi
; CHECK-NEXT: retq
entry:
@@ -15,7 +15,7 @@ declare void @llvm.x86.incsspd(i32)
define void @test_incsspq(i32 %a) local_unnamed_addr {
; CHECK-LABEL: test_incsspq:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: incsspq %rax
; CHECK-NEXT: retq
@@ -29,7 +29,7 @@ declare void @llvm.x86.incsspq(i64)
define i32 @test_rdsspd(i32 %a) {
; CHECK-LABEL: test_rdsspd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: rdsspd %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -42,7 +42,7 @@ declare i32 @llvm.x86.rdsspd(i32)
define i64 @test_rdsspq(i64 %a) {
; CHECK-LABEL: test_rdsspq:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: rdsspq %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -55,7 +55,7 @@ declare i64 @llvm.x86.rdsspq(i64)
define void @test_saveprevssp() {
; CHECK-LABEL: test_saveprevssp:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: saveprevssp
; CHECK-NEXT: retq
entry:
@@ -67,7 +67,7 @@ declare void @llvm.x86.saveprevssp()
define void @test_rstorssp(i8* %__p) {
; CHECK-LABEL: test_rstorssp:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: rstorssp (%rdi)
; CHECK-NEXT: retq
entry:
@@ -79,7 +79,7 @@ declare void @llvm.x86.rstorssp(i8*)
define void @test_wrssd(i32 %a, i8* %__p) {
; CHECK-LABEL: test_wrssd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: wrssd %edi, (%rsi)
; CHECK-NEXT: retq
entry:
@@ -91,7 +91,7 @@ declare void @llvm.x86.wrssd(i32, i8*)
define void @test_wrssq(i64 %a, i8* %__p) {
; CHECK-LABEL: test_wrssq:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: wrssq %rdi, (%rsi)
; CHECK-NEXT: retq
entry:
@@ -103,7 +103,7 @@ declare void @llvm.x86.wrssq(i64, i8*)
define void @test_wrussd(i32 %a, i8* %__p) {
; CHECK-LABEL: test_wrussd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: wrussd %edi, (%rsi)
; CHECK-NEXT: retq
entry:
@@ -115,7 +115,7 @@ declare void @llvm.x86.wrussd(i32, i8*)
define void @test_wrussq(i64 %a, i8* %__p) {
; CHECK-LABEL: test_wrussq:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: wrussq %rdi, (%rsi)
; CHECK-NEXT: retq
entry:
@@ -127,7 +127,7 @@ declare void @llvm.x86.wrussq(i64, i8*)
define void @test_setssbsy() {
; CHECK-LABEL: test_setssbsy:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: setssbsy
; CHECK-NEXT: retq
entry:
@@ -139,7 +139,7 @@ declare void @llvm.x86.setssbsy()
define void @test_clrssbsy(i8* %__p) {
; CHECK-LABEL: test_clrssbsy:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: clrssbsy (%rdi)
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/x86-64-intrcc-nosse.ll b/test/CodeGen/X86/x86-64-intrcc-nosse.ll
index ab84088c344..7b39ab64db8 100644
--- a/test/CodeGen/X86/x86-64-intrcc-nosse.ll
+++ b/test/CodeGen/X86/x86-64-intrcc-nosse.ll
@@ -8,7 +8,7 @@
; Clobbered SSE must not be saved when the target doesn't support SSE
define x86_intrcc void @test_isr_sse_clobbers(%struct.interrupt_frame* %frame, i64 %ecode) {
; CHECK-LABEL: test_isr_sse_clobbers:
- ; CHECK: # BB#0:
+ ; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: cld
; CHECK-NEXT: #APP
diff --git a/test/CodeGen/X86/x86-fold-pshufb.ll b/test/CodeGen/X86/x86-fold-pshufb.ll
index c250bef08e4..a07593390d0 100644
--- a/test/CodeGen/X86/x86-fold-pshufb.ll
+++ b/test/CodeGen/X86/x86-fold-pshufb.ll
@@ -7,7 +7,7 @@
define <2 x i64> @fold_pshufb() {
; CHECK-LABEL: fold_pshufb:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0]
; CHECK-NEXT: retq
entry:
@@ -23,7 +23,7 @@ entry:
define <2 x i64> @pr24562() {
; CHECK-LABEL: pr24562:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 2b2ef74a682..0c0ca56ea3a 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -5,7 +5,7 @@
define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
; AVX1-LABEL: load_factorf64_4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovupd (%rdi), %ymm0
; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
; AVX1-NEXT: vmovupd 64(%rdi), %ymm2
@@ -22,7 +22,7 @@ define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
; AVX1-NEXT: retq
;
; AVX-LABEL: load_factorf64_4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
; AVX-NEXT: vmovupd 32(%rdi), %ymm1
; AVX-NEXT: vmovupd 64(%rdi), %ymm2
@@ -50,7 +50,7 @@ define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
; AVX1-LABEL: load_factorf64_2:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovupd (%rdi), %ymm0
; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
; AVX1-NEXT: vmovupd 64(%rdi), %ymm2
@@ -65,7 +65,7 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
; AVX1-NEXT: retq
;
; AVX-LABEL: load_factorf64_2:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
; AVX-NEXT: vmovupd 32(%rdi), %ymm1
; AVX-NEXT: vmovupd 64(%rdi), %ymm2
@@ -87,7 +87,7 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
; AVX1-LABEL: load_factorf64_1:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovupd (%rdi), %ymm0
; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],mem[0,1]
@@ -97,7 +97,7 @@ define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
; AVX1-NEXT: retq
;
; AVX-LABEL: load_factorf64_1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
; AVX-NEXT: vmovupd 32(%rdi), %ymm1
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],mem[0,1]
@@ -114,7 +114,7 @@ define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX1-LABEL: load_factori64_4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovups (%rdi), %ymm0
; AVX1-NEXT: vmovups 32(%rdi), %ymm1
; AVX1-NEXT: vmovups 64(%rdi), %ymm2
@@ -141,7 +141,7 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX1-NEXT: retq
;
; AVX-LABEL: load_factori64_4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqu (%rdi), %ymm0
; AVX-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX-NEXT: vmovdqu 64(%rdi), %ymm2
@@ -171,7 +171,7 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
; AVX1-LABEL: store_factorf64_4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
@@ -188,7 +188,7 @@ define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x doubl
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_factorf64_4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
@@ -205,7 +205,7 @@ define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x doubl
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_factorf64_4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX512-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
@@ -229,7 +229,7 @@ define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x doubl
define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <4 x i64> %v2, <4 x i64> %v3) {
; AVX1-LABEL: store_factori64_4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
@@ -246,7 +246,7 @@ define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_factori64_4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
@@ -263,7 +263,7 @@ define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_factori64_4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX512-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
@@ -288,7 +288,7 @@ define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <
define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32 x i8> %x3, <32 x i8> %x4, <128 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf32_i8_stride4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
@@ -325,7 +325,7 @@ define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf32_i8_stride4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
@@ -346,7 +346,7 @@ define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_store_vf32_i8_stride4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
@@ -374,7 +374,7 @@ ret void
define void @interleaved_store_vf16_i8_stride4(<16 x i8> %x1, <16 x i8> %x2, <16 x i8> %x3, <16 x i8> %x4, <64 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf16_i8_stride4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
@@ -391,7 +391,7 @@ define void @interleaved_store_vf16_i8_stride4(<16 x i8> %x1, <16 x i8> %x2, <16
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf16_i8_stride4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
@@ -408,7 +408,7 @@ define void @interleaved_store_vf16_i8_stride4(<16 x i8> %x1, <16 x i8> %x2, <16
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_store_vf16_i8_stride4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
@@ -432,7 +432,7 @@ ret void
define <8 x i8> @interleaved_load_vf8_i8_stride4(<32 x i8>* %ptr) {
; AVX1-LABEL: interleaved_load_vf8_i8_stride4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -460,7 +460,7 @@ define <8 x i8> @interleaved_load_vf8_i8_stride4(<32 x i8>* %ptr) {
; AVX1-NEXT: retq
;
; AVX-LABEL: interleaved_load_vf8_i8_stride4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqu (%rdi), %ymm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm2
@@ -500,7 +500,7 @@ define <8 x i8> @interleaved_load_vf8_i8_stride4(<32 x i8>* %ptr) {
define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
; AVX1-LABEL: interleaved_load_vf16_i8_stride4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -551,7 +551,7 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_load_vf16_i8_stride4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
@@ -602,7 +602,7 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_load_vf16_i8_stride4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -669,7 +669,7 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
; AVX1-LABEL: interleaved_load_vf32_i8_stride4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm10
; AVX1-NEXT: vmovdqa 32(%rdi), %ymm13
; AVX1-NEXT: vmovdqa 64(%rdi), %ymm2
@@ -769,7 +769,7 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_load_vf32_i8_stride4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm11
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-NEXT: vmovdqa 64(%rdi), %ymm7
@@ -867,7 +867,7 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_load_vf32_i8_stride4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm7
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
@@ -984,7 +984,7 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
define void @interleaved_store_vf8_i8_stride4(<8 x i8> %x1, <8 x i8> %x2, <8 x i8> %x3, <8 x i8> %x4, <32 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf8_i8_stride4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0
@@ -1000,7 +1000,7 @@ define void @interleaved_store_vf8_i8_stride4(<8 x i8> %x1, <8 x i8> %x2, <8 x i
; AVX1-NEXT: retq
;
; AVX-LABEL: interleaved_store_vf8_i8_stride4:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX-NEXT: vpshufb %xmm4, %xmm0, %xmm0
@@ -1023,7 +1023,7 @@ ret void
define <32 x i8> @interleaved_load_vf32_i8_stride3(<96 x i8>* %ptr){
; AVX1-LABEL: interleaved_load_vf32_i8_stride3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
@@ -1068,7 +1068,7 @@ define <32 x i8> @interleaved_load_vf32_i8_stride3(<96 x i8>* %ptr){
; AVX1-NEXT: retq
;
; AVX-LABEL: interleaved_load_vf32_i8_stride3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX-NEXT: vmovdqa 32(%rdi), %xmm2
@@ -1101,7 +1101,7 @@ define <32 x i8> @interleaved_load_vf32_i8_stride3(<96 x i8>* %ptr){
define <16 x i8> @interleaved_load_vf16_i8_stride3(<48 x i8>* %ptr){
; AVX1-LABEL: interleaved_load_vf16_i8_stride3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
@@ -1123,7 +1123,7 @@ define <16 x i8> @interleaved_load_vf16_i8_stride3(<48 x i8>* %ptr){
; AVX1-NEXT: retq
;
; AVX-LABEL: interleaved_load_vf16_i8_stride3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX-NEXT: vmovdqa 32(%rdi), %xmm2
@@ -1154,7 +1154,7 @@ define <16 x i8> @interleaved_load_vf16_i8_stride3(<48 x i8>* %ptr){
define <8 x i8> @interleaved_load_vf8_i8_stride3(<24 x i8>* %ptr){
; AVX1-LABEL: interleaved_load_vf8_i8_stride3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u,2,u,5,u]
@@ -1172,7 +1172,7 @@ define <8 x i8> @interleaved_load_vf8_i8_stride3(<24 x i8>* %ptr){
; AVX1-NEXT: retq
;
; AVX-LABEL: interleaved_load_vf8_i8_stride3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u,2,u,5,u]
@@ -1199,7 +1199,7 @@ define <8 x i8> @interleaved_load_vf8_i8_stride3(<24 x i8>* %ptr){
define void @interleaved_store_vf8_i8_stride3(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <24 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf8_i8_stride3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
@@ -1216,7 +1216,7 @@ define void @interleaved_store_vf8_i8_stride3(<8 x i8> %a, <8 x i8> %b, <8 x i8>
; AVX1-NEXT: retq
;
; AVX-LABEL: interleaved_store_vf8_i8_stride3:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0
@@ -1240,7 +1240,7 @@ ret void
define void @interleaved_store_vf16_i8_stride3(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <48 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf16_i8_stride3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
@@ -1264,7 +1264,7 @@ define void @interleaved_store_vf16_i8_stride3(<16 x i8> %a, <16 x i8> %b, <16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf16_i8_stride3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX2-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX2-NEXT: vpalignr {{.*#+}} xmm3 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
@@ -1288,7 +1288,7 @@ define void @interleaved_store_vf16_i8_stride3(<16 x i8> %a, <16 x i8> %b, <16 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_store_vf16_i8_stride3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX512-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT: vpalignr {{.*#+}} xmm3 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
@@ -1320,7 +1320,7 @@ ret void
define void @interleaved_store_vf32_i8_stride3(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <96 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf32_i8_stride3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
@@ -1357,7 +1357,7 @@ define void @interleaved_store_vf32_i8_stride3(<32 x i8> %a, <32 x i8> %b, <32 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf32_i8_stride3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
@@ -1380,7 +1380,7 @@ define void @interleaved_store_vf32_i8_stride3(<32 x i8> %a, <32 x i8> %b, <32 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_store_vf32_i8_stride3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX512-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vpalignr {{.*#+}} ymm3 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
@@ -1410,7 +1410,7 @@ ret void
define void @interleaved_store_vf64_i8_stride3(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <192 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf64_i8_stride3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm6[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
@@ -1478,7 +1478,7 @@ define void @interleaved_store_vf64_i8_stride3(<64 x i8> %a, <64 x i8> %b, <64 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf64_i8_stride3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
@@ -1518,7 +1518,7 @@ define void @interleaved_store_vf64_i8_stride3(<64 x i8> %a, <64 x i8> %b, <64 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_store_vf64_i8_stride3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21,38,39,40,41,42,43,44,45,46,47,32,33,34,35,36,37,54,55,56,57,58,59,60,61,62,63,48,49,50,51,52,53]
; AVX512-NEXT: vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,42,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57,58]
; AVX512-NEXT: vpalignr {{.*#+}} zmm3 = zmm0[5,6,7,8,9,10,11,12,13,14,15],zmm2[0,1,2,3,4],zmm0[21,22,23,24,25,26,27,28,29,30,31],zmm2[16,17,18,19,20],zmm0[37,38,39,40,41,42,43,44,45,46,47],zmm2[32,33,34,35,36],zmm0[53,54,55,56,57,58,59,60,61,62,63],zmm2[48,49,50,51,52]
@@ -1560,7 +1560,7 @@ ret void
define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){
; AVX1-LABEL: interleaved_load_vf64_i8_stride3:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %xmm11
; AVX1-NEXT: vmovdqu 16(%rdi), %xmm10
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm8
@@ -1643,7 +1643,7 @@ define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_load_vf64_i8_stride3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %xmm0
; AVX2-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX2-NEXT: vmovdqu 32(%rdi), %xmm2
@@ -1686,7 +1686,7 @@ define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_load_vf64_i8_stride3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqu (%rdi), %xmm0
; AVX512-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX512-NEXT: vmovdqu 32(%rdi), %xmm2
@@ -1736,7 +1736,7 @@ ret <64 x i8> %add2
define void @interleaved_store_vf64_i8_stride4(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c,<64 x i8> %d, <256 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf64_i8_stride4:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: subq $24, %rsp
; AVX1-NEXT: .cfi_def_cfa_offset 32
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
@@ -1820,7 +1820,7 @@ define void @interleaved_store_vf64_i8_stride4(<64 x i8> %a, <64 x i8> %b, <64 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf64_i8_stride4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm8 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[16],ymm3[16],ymm1[17],ymm3[17],ymm1[18],ymm3[18],ymm1[19],ymm3[19],ymm1[20],ymm3[20],ymm1[21],ymm3[21],ymm1[22],ymm3[22],ymm1[23],ymm3[23]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
@@ -1857,7 +1857,7 @@ define void @interleaved_store_vf64_i8_stride4(<64 x i8> %a, <64 x i8> %b, <64 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_store_vf64_i8_stride4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpunpcklbw {{.*#+}} zmm4 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512-NEXT: vpunpckhbw {{.*#+}} zmm0 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm2[0],zmm3[0],zmm2[1],zmm3[1],zmm2[2],zmm3[2],zmm2[3],zmm3[3],zmm2[4],zmm3[4],zmm2[5],zmm3[5],zmm2[6],zmm3[6],zmm2[7],zmm3[7],zmm2[16],zmm3[16],zmm2[17],zmm3[17],zmm2[18],zmm3[18],zmm2[19],zmm3[19],zmm2[20],zmm3[20],zmm2[21],zmm3[21],zmm2[22],zmm3[22],zmm2[23],zmm3[23],zmm2[32],zmm3[32],zmm2[33],zmm3[33],zmm2[34],zmm3[34],zmm2[35],zmm3[35],zmm2[36],zmm3[36],zmm2[37],zmm3[37],zmm2[38],zmm3[38],zmm2[39],zmm3[39],zmm2[48],zmm3[48],zmm2[49],zmm3[49],zmm2[50],zmm3[50],zmm2[51],zmm3[51],zmm2[52],zmm3[52],zmm2[53],zmm3[53],zmm2[54],zmm3[54],zmm2[55],zmm3[55]
diff --git a/test/CodeGen/X86/x86-interleaved-check.ll b/test/CodeGen/X86/x86-interleaved-check.ll
index cd1518bc216..0a77b868506 100644
--- a/test/CodeGen/X86/x86-interleaved-check.ll
+++ b/test/CodeGen/X86/x86-interleaved-check.ll
@@ -5,7 +5,7 @@
define void @validate() {
; AVX-LABEL: validate:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
entry:
%0 = bitcast i8 addrspace(1)* undef to <96 x i8> addrspace(1)*
%wide.vec = load <96 x i8>, <96 x i8> addrspace(1)* %0, align 1
diff --git a/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll b/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
index 8eb299d9360..9c4cb671f4c 100644
--- a/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
+++ b/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
@@ -9,7 +9,7 @@
;; (that are modified by the function) should be preserved (%rdx and %xmm1).
define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 {
; CHECK-LABEL: bar:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rdx
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
diff --git a/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll b/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
index 26dd9d46641..0c41c3ec6c1 100644
--- a/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
+++ b/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
@@ -7,7 +7,7 @@ define <4 x float> @foo(<4 x float> %val, <4 x float> %test) nounwind {
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-LABEL: foo:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpeqps %xmm1, %xmm0
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
@@ -27,7 +27,7 @@ define void @foo1(<4 x float> %val, <4 x float> %test, <4 x double>* %p) nounwin
; CHECK-NEXT: .long 1 ## 0x1
; CHECK-NEXT: .long 1 ## 0x1
; CHECK-LABEL: foo1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpeqps %xmm1, %xmm0
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -51,7 +51,7 @@ define void @foo2(<4 x float>* noalias %result) nounwind {
; CHECK-NEXT: .long 1086324736 ## float 6
; CHECK-NEXT: .long 1088421888 ## float 7
; CHECK-LABEL: foo2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
; CHECK-NEXT: movaps %xmm0, (%rdi)
; CHECK-NEXT: retq
@@ -69,7 +69,7 @@ define <4 x float> @foo3(<4 x float> %val, <4 x float> %test) nounwind {
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 0 ## 0x0
; CHECK-LABEL: foo3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: cmpeqps %xmm1, %xmm0
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
@@ -88,7 +88,7 @@ define void @foo4(<4 x float>* noalias %result) nounwind {
; CHECK-NEXT: .long 1124073472 ## float 128
; CHECK-NEXT: .long 1132396544 ## float 255
; CHECK-LABEL: foo4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,1.270000e+02,1.280000e+02,2.550000e+02]
; CHECK-NEXT: movaps %xmm0, (%rdi)
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/x86-shifts.ll b/test/CodeGen/X86/x86-shifts.ll
index 9ab54891254..f6191866edd 100644
--- a/test/CodeGen/X86/x86-shifts.ll
+++ b/test/CodeGen/X86/x86-shifts.ll
@@ -6,7 +6,7 @@
define <4 x i32> @shl4(<4 x i32> %A) nounwind {
; X32-LABEL: shl4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: pslld $2, %xmm1
; X32-NEXT: paddd %xmm0, %xmm0
@@ -14,7 +14,7 @@ define <4 x i32> @shl4(<4 x i32> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: pslld $2, %xmm1
; X64-NEXT: paddd %xmm0, %xmm0
@@ -29,7 +29,7 @@ entry:
define <4 x i32> @shr4(<4 x i32> %A) nounwind {
; X32-LABEL: shr4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psrld $2, %xmm1
; X32-NEXT: psrld $1, %xmm0
@@ -37,7 +37,7 @@ define <4 x i32> @shr4(<4 x i32> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shr4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrld $2, %xmm1
; X64-NEXT: psrld $1, %xmm0
@@ -52,7 +52,7 @@ entry:
define <4 x i32> @sra4(<4 x i32> %A) nounwind {
; X32-LABEL: sra4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psrad $2, %xmm1
; X32-NEXT: psrad $1, %xmm0
@@ -60,7 +60,7 @@ define <4 x i32> @sra4(<4 x i32> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: sra4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrad $2, %xmm1
; X64-NEXT: psrad $1, %xmm0
@@ -75,7 +75,7 @@ entry:
define <2 x i64> @shl2(<2 x i64> %A) nounwind {
; X32-LABEL: shl2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psllq $2, %xmm1
; X32-NEXT: psllq $9, %xmm0
@@ -83,7 +83,7 @@ define <2 x i64> @shl2(<2 x i64> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psllq $2, %xmm1
; X64-NEXT: psllq $9, %xmm0
@@ -98,7 +98,7 @@ entry:
define <2 x i64> @shr2(<2 x i64> %A) nounwind {
; X32-LABEL: shr2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psrlq $8, %xmm1
; X32-NEXT: psrlq $1, %xmm0
@@ -106,7 +106,7 @@ define <2 x i64> @shr2(<2 x i64> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shr2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrlq $8, %xmm1
; X64-NEXT: psrlq $1, %xmm0
@@ -122,7 +122,7 @@ entry:
define <8 x i16> @shl8(<8 x i16> %A) nounwind {
; X32-LABEL: shl8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psllw $2, %xmm1
; X32-NEXT: paddw %xmm0, %xmm0
@@ -130,7 +130,7 @@ define <8 x i16> @shl8(<8 x i16> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psllw $2, %xmm1
; X64-NEXT: paddw %xmm0, %xmm0
@@ -145,7 +145,7 @@ entry:
define <8 x i16> @shr8(<8 x i16> %A) nounwind {
; X32-LABEL: shr8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psrlw $2, %xmm1
; X32-NEXT: psrlw $1, %xmm0
@@ -153,7 +153,7 @@ define <8 x i16> @shr8(<8 x i16> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shr8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrlw $2, %xmm1
; X64-NEXT: psrlw $1, %xmm0
@@ -168,7 +168,7 @@ entry:
define <8 x i16> @sra8(<8 x i16> %A) nounwind {
; X32-LABEL: sra8:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psraw $2, %xmm1
; X32-NEXT: psraw $1, %xmm0
@@ -176,7 +176,7 @@ define <8 x i16> @sra8(<8 x i16> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: sra8:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psraw $2, %xmm1
; X64-NEXT: psraw $1, %xmm0
@@ -194,7 +194,7 @@ entry:
define <8 x i16> @sll8_nosplat(<8 x i16> %A) nounwind {
; X32-LABEL: sll8_nosplat:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,64,4,4,4,4]
; X32-NEXT: pmullw %xmm0, %xmm1
; X32-NEXT: pmullw {{\.LCPI.*}}, %xmm0
@@ -202,7 +202,7 @@ define <8 x i16> @sll8_nosplat(<8 x i16> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: sll8_nosplat:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,64,4,4,4,4]
; X64-NEXT: pmullw %xmm0, %xmm1
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
@@ -218,7 +218,7 @@ entry:
define <2 x i64> @shr2_nosplat(<2 x i64> %A) nounwind {
; X32-LABEL: shr2_nosplat:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm2
; X32-NEXT: psrlq $8, %xmm2
; X32-NEXT: movdqa %xmm0, %xmm1
@@ -230,7 +230,7 @@ define <2 x i64> @shr2_nosplat(<2 x i64> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shr2_nosplat:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psrlq $8, %xmm2
; X64-NEXT: movdqa %xmm0, %xmm1
@@ -252,7 +252,7 @@ entry:
define <2 x i32> @shl2_other(<2 x i32> %A) nounwind {
; X32-LABEL: shl2_other:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psllq $2, %xmm1
; X32-NEXT: psllq $9, %xmm0
@@ -260,7 +260,7 @@ define <2 x i32> @shl2_other(<2 x i32> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl2_other:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psllq $2, %xmm1
; X64-NEXT: psllq $9, %xmm0
@@ -275,7 +275,7 @@ entry:
define <2 x i32> @shr2_other(<2 x i32> %A) nounwind {
; X32-LABEL: shr2_other:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psrlq $8, %xmm1
@@ -284,7 +284,7 @@ define <2 x i32> @shr2_other(<2 x i32> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shr2_other:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrlq $8, %xmm1
@@ -300,13 +300,13 @@ entry:
define <16 x i8> @shl9(<16 x i8> %A) nounwind {
; X32-LABEL: shl9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psllw $3, %xmm0
; X32-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: shl9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllw $3, %xmm0
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -316,13 +316,13 @@ define <16 x i8> @shl9(<16 x i8> %A) nounwind {
define <16 x i8> @shr9(<16 x i8> %A) nounwind {
; X32-LABEL: shr9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlw $3, %xmm0
; X32-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: shr9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlw $3, %xmm0
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -332,14 +332,14 @@ define <16 x i8> @shr9(<16 x i8> %A) nounwind {
define <16 x i8> @sra_v16i8_7(<16 x i8> %A) nounwind {
; X32-LABEL: sra_v16i8_7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pxor %xmm1, %xmm1
; X32-NEXT: pcmpgtb %xmm0, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: sra_v16i8_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: pcmpgtb %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
@@ -350,7 +350,7 @@ define <16 x i8> @sra_v16i8_7(<16 x i8> %A) nounwind {
define <16 x i8> @sra_v16i8(<16 x i8> %A) nounwind {
; X32-LABEL: sra_v16i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlw $3, %xmm0
; X32-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -359,7 +359,7 @@ define <16 x i8> @sra_v16i8(<16 x i8> %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: sra_v16i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlw $3, %xmm0
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
diff --git a/test/CodeGen/X86/x86-shrink-wrapping.ll b/test/CodeGen/X86/x86-shrink-wrapping.ll
index 519f0d0924e..9d856c6442b 100644
--- a/test/CodeGen/X86/x86-shrink-wrapping.ll
+++ b/test/CodeGen/X86/x86-shrink-wrapping.ll
@@ -989,16 +989,16 @@ attributes #4 = { "no-frame-pointer-elim"="true" }
; looking for the nearest common post-dominator of an "unreachable" block.
; CHECK-LABEL: infiniteLoopNoSuccessor:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; Make sure the prologue happens in the entry block.
; CHECK-NEXT: pushq %rbp
; ...
; Make sure we don't shrink-wrap.
-; CHECK: ## BB#1
+; CHECK: ## %bb.1
; CHECK-NOT: pushq %rbp
; ...
; Make sure the epilogue happens in the exit block.
-; CHECK: ## BB#5
+; CHECK: ## %bb.5
; CHECK: popq %rbp
; CHECK-NEXT: retq
define void @infiniteLoopNoSuccessor() #5 {
diff --git a/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll b/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
index d3a12862a9e..e7b3a5b4990 100644
--- a/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
+++ b/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define <4 x float> @test_mm_broadcast_ss(float* readonly %__a){
; CHECK-LABEL: test_mm_broadcast_ss:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
; CHECK-NEXT: retq
entry:
@@ -21,7 +21,7 @@ declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*)
define <4 x double> @test_mm256_broadcast_sd(double* readonly %__a) {
; CHECK-LABEL: test_mm256_broadcast_sd:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0
; CHECK-NEXT: retq
entry:
@@ -33,7 +33,7 @@ declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*)
define <8 x float> @test_mm256_broadcast_ss(float* readonly %__a) {
; CHECK-LABEL: test_mm256_broadcast_ss:
-; CHECK: ## BB#0: ## %entry
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: vbroadcastss (%rdi), %ymm0
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll b/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
index 9dc0d9f7113..98fd3d90cbe 100644
--- a/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
+++ b/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
@@ -7,7 +7,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define <4 x i64> @broadcast128(<2 x i64> %src) {
; CHECK-LABEL: broadcast128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/x87-schedule.ll b/test/CodeGen/X86/x87-schedule.ll
index 2b134aaae58..767f70d1190 100644
--- a/test/CodeGen/X86/x87-schedule.ll
+++ b/test/CodeGen/X86/x87-schedule.ll
@@ -13,70 +13,70 @@
define void @test_f2xm1() optsize {
; GENERIC-LABEL: test_f2xm1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: f2xm1
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_f2xm1:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: f2xm1 # sched: [99:49.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_f2xm1:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: f2xm1 # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_f2xm1:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: f2xm1 # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_f2xm1:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: f2xm1 # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_f2xm1:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: f2xm1 # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_f2xm1:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: f2xm1 # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_f2xm1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: f2xm1 # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_f2xm1:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: f2xm1 # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_f2xm1:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: f2xm1 # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -87,70 +87,70 @@ define void @test_f2xm1() optsize {
define void @test_fabs() optsize {
; GENERIC-LABEL: test_fabs:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fabs
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fabs:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fabs # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fabs:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fabs # sched: [1:0.50]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fabs:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fabs # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fabs:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fabs # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fabs:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fabs # sched: [1:0.33]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fabs:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fabs # sched: [1:0.33]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fabs:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fabs # sched: [1:0.33]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fabs:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fabs # sched: [1:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fabs:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fabs # sched: [2:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -161,7 +161,7 @@ define void @test_fabs() optsize {
define void @test_fadd(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fadd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -173,7 +173,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fadd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -185,7 +185,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fadd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -197,7 +197,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fadd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -209,7 +209,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fadd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -221,7 +221,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fadd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -233,7 +233,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fadd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -245,7 +245,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fadd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -257,7 +257,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fadd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -269,7 +269,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fadd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -285,7 +285,7 @@ define void @test_fadd(float *%a0, double *%a1) optsize {
define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_faddp_fiadd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -297,7 +297,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_faddp_fiadd:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -309,7 +309,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_faddp_fiadd:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -321,7 +321,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_faddp_fiadd:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -333,7 +333,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_faddp_fiadd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -345,7 +345,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_faddp_fiadd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -357,7 +357,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_faddp_fiadd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -369,7 +369,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_faddp_fiadd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -381,7 +381,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_faddp_fiadd:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -393,7 +393,7 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_faddp_fiadd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -412,70 +412,70 @@ define void @test_faddp_fiadd(i16 *%a0, i32 *%a1) optsize {
define void @test_fchs() optsize {
; GENERIC-LABEL: test_fchs:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fchs
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fchs:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fchs # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fchs:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fchs # sched: [1:0.50]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fchs:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fchs # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fchs:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fchs # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fchs:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fchs # sched: [1:0.33]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fchs:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fchs # sched: [1:0.33]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fchs:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fchs # sched: [1:0.33]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fchs:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fchs # sched: [1:0.50]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fchs:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fchs # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -486,7 +486,7 @@ define void @test_fchs() optsize {
define void @test_fclex_fnclex() optsize {
; GENERIC-LABEL: test_fclex_fnclex:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: wait
; GENERIC-NEXT: fnclex
@@ -495,7 +495,7 @@ define void @test_fclex_fnclex() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fclex_fnclex:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: fnclex # sched: [25:12.50]
@@ -504,7 +504,7 @@ define void @test_fclex_fnclex() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fclex_fnclex:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: fnclex # sched: [100:1.00]
@@ -513,7 +513,7 @@ define void @test_fclex_fnclex() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fclex_fnclex:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: fnclex # sched: [100:0.33]
@@ -522,7 +522,7 @@ define void @test_fclex_fnclex() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fclex_fnclex:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: wait # sched: [1:0.50]
; HASWELL-NEXT: fnclex # sched: [1:1.25]
@@ -531,7 +531,7 @@ define void @test_fclex_fnclex() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fclex_fnclex:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: fnclex # sched: [4:1.00]
@@ -540,7 +540,7 @@ define void @test_fclex_fnclex() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fclex_fnclex:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: fnclex # sched: [4:1.00]
@@ -549,7 +549,7 @@ define void @test_fclex_fnclex() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fclex_fnclex:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: fnclex # sched: [4:1.00]
@@ -558,7 +558,7 @@ define void @test_fclex_fnclex() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fclex_fnclex:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: wait # sched: [100:0.17]
; BTVER2-NEXT: fnclex # sched: [100:0.17]
@@ -567,7 +567,7 @@ define void @test_fclex_fnclex() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fclex_fnclex:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: fnclex # sched: [100:?]
@@ -580,7 +580,7 @@ define void @test_fclex_fnclex() optsize {
define void @test_fcmov() optsize {
; GENERIC-LABEL: test_fcmov:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fcmovb %st(1), %st(0)
; GENERIC-NEXT: fcmovbe %st(1), %st(0)
@@ -594,7 +594,7 @@ define void @test_fcmov() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcmov:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fcmovb %st(1), %st(0)
; ATOM-NEXT: fcmovbe %st(1), %st(0)
@@ -608,7 +608,7 @@ define void @test_fcmov() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcmov:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fcmovb %st(1), %st(0)
; SLM-NEXT: fcmovbe %st(1), %st(0)
@@ -622,7 +622,7 @@ define void @test_fcmov() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcmov:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fcmovb %st(1), %st(0) # sched: [3:2.00]
; SANDY-NEXT: fcmovbe %st(1), %st(0) # sched: [3:2.00]
@@ -636,7 +636,7 @@ define void @test_fcmov() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fcmov:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fcmovb %st(1), %st(0)
; HASWELL-NEXT: fcmovbe %st(1), %st(0)
@@ -650,7 +650,7 @@ define void @test_fcmov() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fcmov:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fcmovb %st(1), %st(0)
; BROADWELL-NEXT: fcmovbe %st(1), %st(0)
@@ -664,7 +664,7 @@ define void @test_fcmov() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcmov:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fcmovb %st(1), %st(0)
; SKYLAKE-NEXT: fcmovbe %st(1), %st(0)
@@ -678,7 +678,7 @@ define void @test_fcmov() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcmov:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fcmovb %st(1), %st(0)
; SKX-NEXT: fcmovbe %st(1), %st(0)
@@ -692,7 +692,7 @@ define void @test_fcmov() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fcmov:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fcmovb %st(1), %st(0)
; BTVER2-NEXT: fcmovbe %st(1), %st(0)
@@ -706,7 +706,7 @@ define void @test_fcmov() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcmov:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fcmovb %st(1), %st(0) # sched: [100:?]
; ZNVER1-NEXT: fcmovbe %st(1), %st(0) # sched: [100:?]
@@ -724,7 +724,7 @@ define void @test_fcmov() optsize {
define void @test_fcom(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fcom:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -736,7 +736,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcom:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -748,7 +748,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcom:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -760,7 +760,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcom:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -772,7 +772,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fcom:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -784,7 +784,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fcom:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -796,7 +796,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcom:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -808,7 +808,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcom:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -820,7 +820,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fcom:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -832,7 +832,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcom:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -848,7 +848,7 @@ define void @test_fcom(float *%a0, double *%a1) optsize {
define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fcomp_fcompp:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -861,7 +861,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcomp_fcompp:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -874,7 +874,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcomp_fcompp:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -887,7 +887,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcomp_fcompp:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -900,7 +900,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fcomp_fcompp:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -913,7 +913,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fcomp_fcompp:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -926,7 +926,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcomp_fcompp:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -939,7 +939,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcomp_fcompp:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -952,7 +952,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fcomp_fcompp:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -965,7 +965,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcomp_fcompp:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -982,7 +982,7 @@ define void @test_fcomp_fcompp(float *%a0, double *%a1) optsize {
define void @test_fcomi_fcomip() optsize {
; GENERIC-LABEL: test_fcomi_fcomip:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fcomi %st(3)
; GENERIC-NEXT: fcompi %st(3)
@@ -990,7 +990,7 @@ define void @test_fcomi_fcomip() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcomi_fcomip:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fcomi %st(3) # sched: [9:4.50]
; ATOM-NEXT: fcompi %st(3) # sched: [9:4.50]
@@ -998,7 +998,7 @@ define void @test_fcomi_fcomip() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcomi_fcomip:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fcomi %st(3) # sched: [3:1.00]
; SLM-NEXT: fcompi %st(3) # sched: [3:1.00]
@@ -1006,7 +1006,7 @@ define void @test_fcomi_fcomip() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcomi_fcomip:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fcomi %st(3) # sched: [3:1.00]
; SANDY-NEXT: fcompi %st(3) # sched: [3:1.00]
@@ -1014,7 +1014,7 @@ define void @test_fcomi_fcomip() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fcomi_fcomip:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fcomi %st(3) # sched: [1:0.50]
; HASWELL-NEXT: fcompi %st(3) # sched: [1:0.50]
@@ -1022,7 +1022,7 @@ define void @test_fcomi_fcomip() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fcomi_fcomip:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fcomi %st(3) # sched: [3:1.00]
; BROADWELL-NEXT: fcompi %st(3) # sched: [3:1.00]
@@ -1030,7 +1030,7 @@ define void @test_fcomi_fcomip() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcomi_fcomip:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fcomi %st(3) # sched: [3:1.00]
; SKYLAKE-NEXT: fcompi %st(3) # sched: [3:1.00]
@@ -1038,7 +1038,7 @@ define void @test_fcomi_fcomip() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcomi_fcomip:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fcomi %st(3) # sched: [3:1.00]
; SKX-NEXT: fcompi %st(3) # sched: [3:1.00]
@@ -1046,7 +1046,7 @@ define void @test_fcomi_fcomip() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fcomi_fcomip:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fcomi %st(3) # sched: [3:1.00]
; BTVER2-NEXT: fcompi %st(3) # sched: [3:1.00]
@@ -1054,7 +1054,7 @@ define void @test_fcomi_fcomip() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcomi_fcomip:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fcomi %st(3) # sched: [9:0.50]
; ZNVER1-NEXT: fcompi %st(3) # sched: [9:0.50]
@@ -1066,70 +1066,70 @@ define void @test_fcomi_fcomip() optsize {
define void @test_fcos() optsize {
; GENERIC-LABEL: test_fcos:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fcos
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fcos:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fcos # sched: [174:87.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fcos:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fcos # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fcos:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fcos # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fcos:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fcos # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fcos:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fcos # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fcos:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fcos # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fcos:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fcos # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fcos:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fcos # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fcos:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fcos # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -1140,70 +1140,70 @@ define void @test_fcos() optsize {
define void @test_fdecstp() optsize {
; GENERIC-LABEL: test_fdecstp:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fdecstp
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdecstp:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fdecstp # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdecstp:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fdecstp # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdecstp:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fdecstp # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fdecstp:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fdecstp # sched: [2:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fdecstp:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fdecstp # sched: [2:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdecstp:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fdecstp # sched: [2:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdecstp:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fdecstp # sched: [2:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fdecstp:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fdecstp # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdecstp:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fdecstp # sched: [11:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -1214,7 +1214,7 @@ define void @test_fdecstp() optsize {
define void @test_fdiv(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fdiv:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -1226,7 +1226,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdiv:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -1238,7 +1238,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdiv:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -1250,7 +1250,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdiv:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -1262,7 +1262,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fdiv:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -1274,7 +1274,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fdiv:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -1286,7 +1286,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdiv:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -1298,7 +1298,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdiv:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -1310,7 +1310,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fdiv:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -1322,7 +1322,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdiv:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -1338,7 +1338,7 @@ define void @test_fdiv(float *%a0, double *%a1) optsize {
define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fdivp_fidiv:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -1350,7 +1350,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdivp_fidiv:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -1362,7 +1362,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdivp_fidiv:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -1374,7 +1374,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdivp_fidiv:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -1386,7 +1386,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fdivp_fidiv:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -1398,7 +1398,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fdivp_fidiv:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -1410,7 +1410,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdivp_fidiv:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -1422,7 +1422,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdivp_fidiv:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -1434,7 +1434,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fdivp_fidiv:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -1446,7 +1446,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdivp_fidiv:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -1462,7 +1462,7 @@ define void @test_fdivp_fidiv(i16 *%a0, i32 *%a1) optsize {
define void @test_fdivr(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fdivr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -1474,7 +1474,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdivr:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -1486,7 +1486,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdivr:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -1498,7 +1498,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdivr:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -1510,7 +1510,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fdivr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -1522,7 +1522,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fdivr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -1534,7 +1534,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdivr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -1546,7 +1546,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdivr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -1558,7 +1558,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fdivr:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -1570,7 +1570,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdivr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -1586,7 +1586,7 @@ define void @test_fdivr(float *%a0, double *%a1) optsize {
define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fdivrp_fidivr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -1598,7 +1598,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fdivrp_fidivr:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -1610,7 +1610,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fdivrp_fidivr:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -1622,7 +1622,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fdivrp_fidivr:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -1634,7 +1634,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fdivrp_fidivr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -1646,7 +1646,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fdivrp_fidivr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -1658,7 +1658,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fdivrp_fidivr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -1670,7 +1670,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fdivrp_fidivr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -1682,7 +1682,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fdivrp_fidivr:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -1694,7 +1694,7 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fdivrp_fidivr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -1710,70 +1710,70 @@ define void @test_fdivrp_fidivr(i16 *%a0, i32 *%a1) optsize {
define void @test_ffree() optsize {
; GENERIC-LABEL: test_ffree:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: ffree %st(0)
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_ffree:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: ffree %st(0) # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_ffree:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: ffree %st(0) # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_ffree:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: ffree %st(0) # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_ffree:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: ffree %st(0) # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_ffree:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: ffree %st(0) # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_ffree:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: ffree %st(0) # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_ffree:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: ffree %st(0) # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_ffree:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: ffree %st(0) # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ffree:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: ffree %st(0) # sched: [11:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -1784,7 +1784,7 @@ define void @test_ffree() optsize {
define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_ficom:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -1796,7 +1796,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_ficom:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -1808,7 +1808,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_ficom:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -1820,7 +1820,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_ficom:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -1832,7 +1832,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_ficom:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -1844,7 +1844,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_ficom:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -1856,7 +1856,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_ficom:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -1868,7 +1868,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_ficom:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -1880,7 +1880,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_ficom:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -1892,7 +1892,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ficom:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -1908,7 +1908,7 @@ define void @test_ficom(i16 *%a0, i32 *%a1) optsize {
define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; GENERIC-LABEL: test_fild:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1920,7 +1920,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fild:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [1:1.00]
@@ -1932,7 +1932,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fild:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [3:1.00]
@@ -1944,7 +1944,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fild:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
@@ -1956,7 +1956,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fild:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [1:0.50]
@@ -1968,7 +1968,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fild:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
@@ -1980,7 +1980,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fild:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
@@ -1992,7 +1992,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fild:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50]
@@ -2004,7 +2004,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fild:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:1.00]
@@ -2016,7 +2016,7 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fild:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [8:0.50]
@@ -2032,70 +2032,70 @@ define void @test_fild(i16 *%a0, i32 *%a1, i64 *%a2) optsize {
define void @test_fincstp() optsize {
; GENERIC-LABEL: test_fincstp:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fincstp
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fincstp:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fincstp # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fincstp:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fincstp # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fincstp:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fincstp # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fincstp:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fincstp # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fincstp:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fincstp # sched: [1:0.50]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fincstp:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fincstp # sched: [1:0.50]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fincstp:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fincstp # sched: [1:0.50]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fincstp:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fincstp # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fincstp:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fincstp # sched: [11:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -2106,7 +2106,7 @@ define void @test_fincstp() optsize {
define void @test_finit_fninit() optsize {
; GENERIC-LABEL: test_finit_fninit:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: wait
; GENERIC-NEXT: fninit
@@ -2115,7 +2115,7 @@ define void @test_finit_fninit() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_finit_fninit:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: fninit # sched: [63:31.50]
@@ -2124,7 +2124,7 @@ define void @test_finit_fninit() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_finit_fninit:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: fninit # sched: [100:1.00]
@@ -2133,7 +2133,7 @@ define void @test_finit_fninit() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_finit_fninit:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: fninit # sched: [5:1.33]
@@ -2142,7 +2142,7 @@ define void @test_finit_fninit() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_finit_fninit:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: wait # sched: [1:0.50]
; HASWELL-NEXT: fninit # sched: [1:?]
@@ -2151,7 +2151,7 @@ define void @test_finit_fninit() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_finit_fninit:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: fninit # sched: [75:6.00]
@@ -2160,7 +2160,7 @@ define void @test_finit_fninit() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_finit_fninit:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: fninit # sched: [75:6.00]
@@ -2169,7 +2169,7 @@ define void @test_finit_fninit() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_finit_fninit:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: fninit # sched: [75:6.00]
@@ -2178,7 +2178,7 @@ define void @test_finit_fninit() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_finit_fninit:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: wait # sched: [100:0.17]
; BTVER2-NEXT: fninit # sched: [100:0.17]
@@ -2187,7 +2187,7 @@ define void @test_finit_fninit() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_finit_fninit:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: fninit # sched: [100:?]
@@ -2209,7 +2209,7 @@ define void @test_finit_fninit() optsize {
define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; GENERIC-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fld1
; GENERIC-NEXT: fldl2e
@@ -2221,7 +2221,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fld1 # sched: [6:3.00]
; ATOM-NEXT: fldl2e # sched: [10:5.00]
@@ -2233,7 +2233,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fld1 # sched: [1:?]
; SLM-NEXT: fldl2e # sched: [100:1.00]
@@ -2245,7 +2245,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fld1 # sched: [1:?]
; SANDY-NEXT: fldl2e # sched: [100:0.33]
@@ -2257,7 +2257,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fld1 # sched: [1:?]
; HASWELL-NEXT: fldl2e # sched: [100:0.25]
@@ -2269,7 +2269,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fld1 # sched: [1:?]
; BROADWELL-NEXT: fldl2e # sched: [100:0.25]
@@ -2281,7 +2281,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fld1 # sched: [1:?]
; SKYLAKE-NEXT: fldl2e # sched: [100:0.25]
@@ -2293,7 +2293,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fld1 # sched: [1:?]
; SKX-NEXT: fldl2e # sched: [100:0.25]
@@ -2305,7 +2305,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fld1 # sched: [1:?]
; BTVER2-NEXT: fldl2e # sched: [100:0.17]
@@ -2317,7 +2317,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fld1 # sched: [11:1.00]
; ZNVER1-NEXT: fldl2e # sched: [100:?]
@@ -2333,7 +2333,7 @@ define void @test_fld1_fldl2e_fldl2t_fldlg2_fldln2_fldpi_fldz() optsize {
define void @test_fmul(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fmul:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -2345,7 +2345,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fmul:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -2357,7 +2357,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fmul:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -2369,7 +2369,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fmul:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -2381,7 +2381,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fmul:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -2393,7 +2393,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fmul:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -2405,7 +2405,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fmul:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -2417,7 +2417,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fmul:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -2429,7 +2429,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fmul:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -2441,7 +2441,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fmul:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -2457,7 +2457,7 @@ define void @test_fmul(float *%a0, double *%a1) optsize {
define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fmulp_fimul:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -2469,7 +2469,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fmulp_fimul:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -2481,7 +2481,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fmulp_fimul:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -2493,7 +2493,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fmulp_fimul:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -2505,7 +2505,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fmulp_fimul:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -2517,7 +2517,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fmulp_fimul:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -2529,7 +2529,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fmulp_fimul:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -2541,7 +2541,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fmulp_fimul:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -2553,7 +2553,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fmulp_fimul:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -2565,7 +2565,7 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fmulp_fimul:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -2581,70 +2581,70 @@ define void @test_fmulp_fimul(i16 *%a0, i32 *%a1) optsize {
define void @test_fnop() optsize {
; GENERIC-LABEL: test_fnop:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fnop
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fnop:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fnop # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fnop:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fnop # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fnop:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fnop # sched: [1:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fnop:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fnop # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fnop:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fnop # sched: [1:0.50]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fnop:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fnop # sched: [1:0.50]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fnop:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fnop # sched: [1:0.50]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fnop:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fnop # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fnop:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fnop # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -2655,70 +2655,70 @@ define void @test_fnop() optsize {
define void @test_fpatan() optsize {
; GENERIC-LABEL: test_fpatan:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fpatan
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fpatan:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fpatan # sched: [183:91.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fpatan:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fpatan # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fpatan:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fpatan # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fpatan:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fpatan # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fpatan:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fpatan # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fpatan:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fpatan # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fpatan:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fpatan # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fpatan:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fpatan # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fpatan:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fpatan # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -2729,7 +2729,7 @@ define void @test_fpatan() optsize {
define void @test_fprem_fprem1() optsize {
; GENERIC-LABEL: test_fprem_fprem1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fprem
; GENERIC-NEXT: fprem1
@@ -2737,7 +2737,7 @@ define void @test_fprem_fprem1() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fprem_fprem1:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fprem # sched: [55:27.50]
; ATOM-NEXT: fprem1 # sched: [71:35.50]
@@ -2745,7 +2745,7 @@ define void @test_fprem_fprem1() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fprem_fprem1:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fprem # sched: [100:1.00]
; SLM-NEXT: fprem1 # sched: [100:1.00]
@@ -2753,7 +2753,7 @@ define void @test_fprem_fprem1() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fprem_fprem1:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fprem # sched: [100:0.33]
; SANDY-NEXT: fprem1 # sched: [100:0.33]
@@ -2761,7 +2761,7 @@ define void @test_fprem_fprem1() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fprem_fprem1:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fprem # sched: [19:?]
; HASWELL-NEXT: fprem1 # sched: [19:?]
@@ -2769,7 +2769,7 @@ define void @test_fprem_fprem1() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fprem_fprem1:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fprem # sched: [100:0.25]
; BROADWELL-NEXT: fprem1 # sched: [100:0.25]
@@ -2777,7 +2777,7 @@ define void @test_fprem_fprem1() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fprem_fprem1:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fprem # sched: [100:0.25]
; SKYLAKE-NEXT: fprem1 # sched: [100:0.25]
@@ -2785,7 +2785,7 @@ define void @test_fprem_fprem1() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fprem_fprem1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fprem # sched: [100:0.25]
; SKX-NEXT: fprem1 # sched: [100:0.25]
@@ -2793,7 +2793,7 @@ define void @test_fprem_fprem1() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fprem_fprem1:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fprem # sched: [100:0.17]
; BTVER2-NEXT: fprem1 # sched: [100:0.17]
@@ -2801,7 +2801,7 @@ define void @test_fprem_fprem1() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fprem_fprem1:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fprem # sched: [100:?]
; ZNVER1-NEXT: fprem1 # sched: [100:?]
@@ -2813,70 +2813,70 @@ define void @test_fprem_fprem1() optsize {
define void @test_fptan() optsize {
; GENERIC-LABEL: test_fptan:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fptan
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fptan:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fptan # sched: [168:84.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fptan:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fptan # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fptan:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fptan # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fptan:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fptan # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fptan:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fptan # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fptan:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fptan # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fptan:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fptan # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fptan:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fptan # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fptan:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fptan # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -2887,70 +2887,70 @@ define void @test_fptan() optsize {
define void @test_frndint() optsize {
; GENERIC-LABEL: test_frndint:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: frndint
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_frndint:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: frndint # sched: [46:23.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_frndint:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: frndint # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_frndint:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: frndint # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_frndint:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: frndint # sched: [11:?]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_frndint:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: frndint # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_frndint:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: frndint # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_frndint:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: frndint # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_frndint:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: frndint # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_frndint:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: frndint # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -2965,70 +2965,70 @@ define void @test_frndint() optsize {
define void @test_fscale() optsize {
; GENERIC-LABEL: test_fscale:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fscale
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fscale:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fscale # sched: [77:38.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fscale:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fscale # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fscale:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fscale # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fscale:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fscale # sched: [75:?]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fscale:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fscale # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fscale:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fscale # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fscale:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fscale # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fscale:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fscale # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fscale:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fscale # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -3039,70 +3039,70 @@ define void @test_fscale() optsize {
define void @test_fsin() optsize {
; GENERIC-LABEL: test_fsin:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fsin
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsin:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fsin # sched: [174:87.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsin:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fsin # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsin:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fsin # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fsin:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fsin # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fsin:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fsin # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsin:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fsin # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsin:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fsin # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fsin:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fsin # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsin:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fsin # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -3113,70 +3113,70 @@ define void @test_fsin() optsize {
define void @test_fsincos() optsize {
; GENERIC-LABEL: test_fsincos:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fsincos
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsincos:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fsincos # sched: [174:87.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsincos:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fsincos # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsincos:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fsincos # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fsincos:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fsincos # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fsincos:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fsincos # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsincos:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fsincos # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsincos:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fsincos # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fsincos:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fsincos # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsincos:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fsincos # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -3187,70 +3187,70 @@ define void @test_fsincos() optsize {
define void @test_fsqrt() optsize {
; GENERIC-LABEL: test_fsqrt:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fsqrt
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsqrt:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fsqrt # sched: [71:35.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsqrt:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fsqrt # sched: [15:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsqrt:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fsqrt # sched: [14:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fsqrt:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fsqrt # sched: [15:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fsqrt:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fsqrt # sched: [15:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsqrt:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fsqrt # sched: [15:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsqrt:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fsqrt # sched: [15:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fsqrt:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fsqrt # sched: [21:21.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsqrt:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fsqrt # sched: [20:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -3273,7 +3273,7 @@ define void @test_fsqrt() optsize {
define void @test_fsub(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fsub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -3285,7 +3285,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsub:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -3297,7 +3297,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsub:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -3309,7 +3309,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsub:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -3321,7 +3321,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fsub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -3333,7 +3333,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fsub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -3345,7 +3345,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -3357,7 +3357,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -3369,7 +3369,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fsub:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -3381,7 +3381,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -3397,7 +3397,7 @@ define void @test_fsub(float *%a0, double *%a1) optsize {
define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fsubp_fisub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -3409,7 +3409,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsubp_fisub:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -3421,7 +3421,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsubp_fisub:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -3433,7 +3433,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsubp_fisub:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -3445,7 +3445,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fsubp_fisub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -3457,7 +3457,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fsubp_fisub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -3469,7 +3469,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsubp_fisub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -3481,7 +3481,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsubp_fisub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -3493,7 +3493,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fsubp_fisub:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -3505,7 +3505,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsubp_fisub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -3521,7 +3521,7 @@ define void @test_fsubp_fisub(i16 *%a0, i32 *%a1) optsize {
define void @test_fsubr(float *%a0, double *%a1) optsize {
; GENERIC-LABEL: test_fsubr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -3533,7 +3533,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsubr:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -3545,7 +3545,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsubr:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -3557,7 +3557,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsubr:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -3569,7 +3569,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fsubr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -3581,7 +3581,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fsubr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -3593,7 +3593,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsubr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -3605,7 +3605,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsubr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -3617,7 +3617,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fsubr:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -3629,7 +3629,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsubr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -3645,7 +3645,7 @@ define void @test_fsubr(float *%a0, double *%a1) optsize {
define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; GENERIC-LABEL: test_fsubrp_fisubr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GENERIC-NEXT: #APP
@@ -3657,7 +3657,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fsubrp_fisubr:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:1.00]
; ATOM-NEXT: #APP
@@ -3669,7 +3669,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fsubrp_fisubr:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [3:1.00]
; SLM-NEXT: #APP
@@ -3681,7 +3681,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fsubrp_fisubr:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SANDY-NEXT: #APP
@@ -3693,7 +3693,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fsubrp_fisubr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [1:0.50]
; HASWELL-NEXT: #APP
@@ -3705,7 +3705,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fsubrp_fisubr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; BROADWELL-NEXT: #APP
@@ -3717,7 +3717,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fsubrp_fisubr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
@@ -3729,7 +3729,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fsubrp_fisubr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:0.50]
; SKX-NEXT: #APP
@@ -3741,7 +3741,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fsubrp_fisubr:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [5:1.00]
; BTVER2-NEXT: #APP
@@ -3753,7 +3753,7 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fsubrp_fisubr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50]
; ZNVER1-NEXT: #APP
@@ -3769,70 +3769,70 @@ define void @test_fsubrp_fisubr(i16 *%a0, i32 *%a1) optsize {
define void @test_ftst() optsize {
; GENERIC-LABEL: test_ftst:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: ftst
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_ftst:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: ftst # sched: [9:4.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_ftst:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: ftst # sched: [3:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_ftst:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: ftst # sched: [3:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_ftst:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: ftst # sched: [1:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_ftst:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: ftst # sched: [3:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_ftst:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: ftst # sched: [3:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_ftst:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: ftst # sched: [3:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_ftst:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: ftst # sched: [3:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ftst:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: ftst # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -3843,7 +3843,7 @@ define void @test_ftst() optsize {
define void @test_fucom_fucomp_fucompp() optsize {
; GENERIC-LABEL: test_fucom_fucomp_fucompp:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fucom %st(1)
; GENERIC-NEXT: fucom %st(3)
@@ -3854,7 +3854,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fucom_fucomp_fucompp:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fucom %st(1) # sched: [1:1.00]
; ATOM-NEXT: fucom %st(3) # sched: [1:1.00]
@@ -3865,7 +3865,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fucom_fucomp_fucompp:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fucom %st(1) # sched: [3:1.00]
; SLM-NEXT: fucom %st(3) # sched: [3:1.00]
@@ -3876,7 +3876,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fucom_fucomp_fucompp:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fucom %st(1) # sched: [1:1.00]
; SANDY-NEXT: fucom %st(3) # sched: [1:1.00]
@@ -3887,7 +3887,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fucom_fucomp_fucompp:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fucom %st(1) # sched: [1:1.00]
; HASWELL-NEXT: fucom %st(3) # sched: [1:1.00]
@@ -3898,7 +3898,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fucom_fucomp_fucompp:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fucom %st(1) # sched: [1:1.00]
; BROADWELL-NEXT: fucom %st(3) # sched: [1:1.00]
@@ -3909,7 +3909,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fucom_fucomp_fucompp:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fucom %st(1) # sched: [1:1.00]
; SKYLAKE-NEXT: fucom %st(3) # sched: [1:1.00]
@@ -3920,7 +3920,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fucom_fucomp_fucompp:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fucom %st(1) # sched: [1:1.00]
; SKX-NEXT: fucom %st(3) # sched: [1:1.00]
@@ -3931,7 +3931,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fucom_fucomp_fucompp:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fucom %st(1) # sched: [3:1.00]
; BTVER2-NEXT: fucom %st(3) # sched: [3:1.00]
@@ -3942,7 +3942,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fucom_fucomp_fucompp:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fucom %st(1) # sched: [1:1.00]
; ZNVER1-NEXT: fucom %st(3) # sched: [1:1.00]
@@ -3957,7 +3957,7 @@ define void @test_fucom_fucomp_fucompp() optsize {
define void @test_fucomi_fucomip() optsize {
; GENERIC-LABEL: test_fucomi_fucomip:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fucomi %st(3)
; GENERIC-NEXT: fucompi %st(3)
@@ -3965,7 +3965,7 @@ define void @test_fucomi_fucomip() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fucomi_fucomip:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fucomi %st(3) # sched: [9:4.50]
; ATOM-NEXT: fucompi %st(3) # sched: [9:4.50]
@@ -3973,7 +3973,7 @@ define void @test_fucomi_fucomip() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fucomi_fucomip:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fucomi %st(3) # sched: [3:1.00]
; SLM-NEXT: fucompi %st(3) # sched: [3:1.00]
@@ -3981,7 +3981,7 @@ define void @test_fucomi_fucomip() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fucomi_fucomip:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fucomi %st(3) # sched: [3:1.00]
; SANDY-NEXT: fucompi %st(3) # sched: [3:1.00]
@@ -3989,7 +3989,7 @@ define void @test_fucomi_fucomip() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fucomi_fucomip:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fucomi %st(3) # sched: [1:0.50]
; HASWELL-NEXT: fucompi %st(3) # sched: [1:0.50]
@@ -3997,7 +3997,7 @@ define void @test_fucomi_fucomip() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fucomi_fucomip:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fucomi %st(3) # sched: [3:1.00]
; BROADWELL-NEXT: fucompi %st(3) # sched: [3:1.00]
@@ -4005,7 +4005,7 @@ define void @test_fucomi_fucomip() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fucomi_fucomip:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fucomi %st(3) # sched: [3:1.00]
; SKYLAKE-NEXT: fucompi %st(3) # sched: [3:1.00]
@@ -4013,7 +4013,7 @@ define void @test_fucomi_fucomip() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fucomi_fucomip:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fucomi %st(3) # sched: [3:1.00]
; SKX-NEXT: fucompi %st(3) # sched: [3:1.00]
@@ -4021,7 +4021,7 @@ define void @test_fucomi_fucomip() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fucomi_fucomip:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fucomi %st(3) # sched: [3:1.00]
; BTVER2-NEXT: fucompi %st(3) # sched: [3:1.00]
@@ -4029,7 +4029,7 @@ define void @test_fucomi_fucomip() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fucomi_fucomip:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fucomi %st(3) # sched: [9:0.50]
; ZNVER1-NEXT: fucompi %st(3) # sched: [9:0.50]
@@ -4041,70 +4041,70 @@ define void @test_fucomi_fucomip() optsize {
define void @test_fwait() optsize {
; GENERIC-LABEL: test_fwait:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: wait
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fwait:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: wait # sched: [1:0.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fwait:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: wait # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fwait:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: wait # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fwait:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: wait # sched: [1:0.50]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fwait:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: wait # sched: [2:0.50]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fwait:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: wait # sched: [2:0.50]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fwait:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: wait # sched: [2:0.50]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fwait:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: wait # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fwait:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: wait # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -4115,70 +4115,70 @@ define void @test_fwait() optsize {
define void @test_fxam() optsize {
; GENERIC-LABEL: test_fxam:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fxam
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fxam:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fxam # sched: [1:1.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fxam:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fxam # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fxam:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fxam # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fxam:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fxam # sched: [1:2.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fxam:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fxam # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fxam:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fxam # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fxam:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fxam # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fxam:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fxam # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fxam:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fxam # sched: [1:1.00]
; ZNVER1-NEXT: #NO_APP
@@ -4189,7 +4189,7 @@ define void @test_fxam() optsize {
define void @test_fxch() optsize {
; GENERIC-LABEL: test_fxch:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fxch %st(1)
; GENERIC-NEXT: fxch %st(3)
@@ -4197,7 +4197,7 @@ define void @test_fxch() optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fxch:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fxch %st(1) # sched: [1:1.00]
; ATOM-NEXT: fxch %st(3) # sched: [1:1.00]
@@ -4205,7 +4205,7 @@ define void @test_fxch() optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fxch:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fxch %st(1) # sched: [1:0.50]
; SLM-NEXT: fxch %st(3) # sched: [1:0.50]
@@ -4213,7 +4213,7 @@ define void @test_fxch() optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fxch:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fxch %st(1) # sched: [1:0.33]
; SANDY-NEXT: fxch %st(3) # sched: [1:0.33]
@@ -4221,7 +4221,7 @@ define void @test_fxch() optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fxch:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fxch %st(1) # sched: [17:4.00]
; HASWELL-NEXT: fxch %st(3) # sched: [17:4.00]
@@ -4229,7 +4229,7 @@ define void @test_fxch() optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fxch:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fxch %st(1) # sched: [14:4.00]
; BROADWELL-NEXT: fxch %st(3) # sched: [14:4.00]
@@ -4237,7 +4237,7 @@ define void @test_fxch() optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fxch:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fxch %st(1) # sched: [17:4.00]
; SKYLAKE-NEXT: fxch %st(3) # sched: [17:4.00]
@@ -4245,7 +4245,7 @@ define void @test_fxch() optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fxch:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fxch %st(1) # sched: [17:4.00]
; SKX-NEXT: fxch %st(3) # sched: [17:4.00]
@@ -4253,7 +4253,7 @@ define void @test_fxch() optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fxch:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fxch %st(1) # sched: [1:0.17]
; BTVER2-NEXT: fxch %st(3) # sched: [1:0.17]
@@ -4261,7 +4261,7 @@ define void @test_fxch() optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fxch:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fxch %st(1) # sched: [1:0.25]
; ZNVER1-NEXT: fxch %st(3) # sched: [1:0.25]
@@ -4273,7 +4273,7 @@ define void @test_fxch() optsize {
define void @test_fxrstor_fxsave(i8* %a0) optsize {
; GENERIC-LABEL: test_fxrstor_fxsave:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: movl {{[0-9]+}}(%esp), %eax
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fxrstor (%eax)
@@ -4282,7 +4282,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fxrstor_fxsave:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
; ATOM-NEXT: #APP
; ATOM-NEXT: fxrstor (%eax) # sched: [141:70.50]
@@ -4291,7 +4291,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fxrstor_fxsave:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [3:1.00]
; SLM-NEXT: #APP
; SLM-NEXT: fxrstor (%eax) # sched: [100:1.00]
@@ -4300,7 +4300,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fxrstor_fxsave:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SANDY-NEXT: #APP
; SANDY-NEXT: fxrstor (%eax) # sched: [5:2.00]
@@ -4309,7 +4309,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fxrstor_fxsave:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [1:0.50]
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fxrstor (%eax) # sched: [59:16.50]
@@ -4318,7 +4318,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fxrstor_fxsave:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fxrstor (%eax) # sched: [63:16.50]
@@ -4327,7 +4327,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fxrstor_fxsave:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fxrstor (%eax) # sched: [63:16.50]
@@ -4336,7 +4336,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fxrstor_fxsave:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
; SKX-NEXT: #APP
; SKX-NEXT: fxrstor (%eax) # sched: [63:16.50]
@@ -4345,7 +4345,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fxrstor_fxsave:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:1.00]
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fxrstor (%eax) # sched: [100:0.17]
@@ -4354,7 +4354,7 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fxrstor_fxsave:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fxrstor (%eax) # sched: [100:?]
@@ -4367,70 +4367,70 @@ define void @test_fxrstor_fxsave(i8* %a0) optsize {
define void @test_fxtract() optsize {
; GENERIC-LABEL: test_fxtract:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fxtract
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fxtract:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fxtract # sched: [25:12.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fxtract:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fxtract # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fxtract:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fxtract # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fxtract:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fxtract # sched: [15:?]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fxtract:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fxtract # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fxtract:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fxtract # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fxtract:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fxtract # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fxtract:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fxtract # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fxtract:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fxtract # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -4441,70 +4441,70 @@ define void @test_fxtract() optsize {
define void @test_fyl2x() optsize {
; GENERIC-LABEL: test_fyl2x:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fyl2x
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fyl2x:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fyl2x # sched: [146:73.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fyl2x:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fyl2x # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fyl2x:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fyl2x # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fyl2x:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fyl2x # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fyl2x:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fyl2x # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fyl2x:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fyl2x # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fyl2x:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fyl2x # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fyl2x:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fyl2x # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fyl2x:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fyl2x # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
@@ -4515,70 +4515,70 @@ define void @test_fyl2x() optsize {
define void @test_fyl2xp1() optsize {
; GENERIC-LABEL: test_fyl2xp1:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: #APP
; GENERIC-NEXT: fyl2xp1
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: retl
;
; ATOM-LABEL: test_fyl2xp1:
-; ATOM: # BB#0:
+; ATOM: # %bb.0:
; ATOM-NEXT: #APP
; ATOM-NEXT: fyl2xp1 # sched: [147:73.50]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: retl # sched: [79:39.50]
;
; SLM-LABEL: test_fyl2xp1:
-; SLM: # BB#0:
+; SLM: # %bb.0:
; SLM-NEXT: #APP
; SLM-NEXT: fyl2xp1 # sched: [100:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: retl # sched: [4:1.00]
;
; SANDY-LABEL: test_fyl2xp1:
-; SANDY: # BB#0:
+; SANDY: # %bb.0:
; SANDY-NEXT: #APP
; SANDY-NEXT: fyl2xp1 # sched: [100:0.33]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: retl # sched: [5:1.00]
;
; HASWELL-LABEL: test_fyl2xp1:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: #APP
; HASWELL-NEXT: fyl2xp1 # sched: [100:0.25]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: retl # sched: [5:0.50]
;
; BROADWELL-LABEL: test_fyl2xp1:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: #APP
; BROADWELL-NEXT: fyl2xp1 # sched: [100:0.25]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: retl # sched: [6:0.50]
;
; SKYLAKE-LABEL: test_fyl2xp1:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: #APP
; SKYLAKE-NEXT: fyl2xp1 # sched: [100:0.25]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: retl # sched: [6:0.50]
;
; SKX-LABEL: test_fyl2xp1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: #APP
; SKX-NEXT: fyl2xp1 # sched: [100:0.25]
; SKX-NEXT: #NO_APP
; SKX-NEXT: retl # sched: [6:0.50]
;
; BTVER2-LABEL: test_fyl2xp1:
-; BTVER2: # BB#0:
+; BTVER2: # %bb.0:
; BTVER2-NEXT: #APP
; BTVER2-NEXT: fyl2xp1 # sched: [100:0.17]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: retl # sched: [4:1.00]
;
; ZNVER1-LABEL: test_fyl2xp1:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: #APP
; ZNVER1-NEXT: fyl2xp1 # sched: [100:?]
; ZNVER1-NEXT: #NO_APP
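
The per-CPU checks above all follow one pattern: each x87 instruction is wrapped in inline assembly (hence the #APP/#NO_APP markers around it) so the scheduler's [latency:throughput] comments can be verified for every subtarget. The diff context elides the actual test bodies; a minimal sketch of what such a body presumably looks like, with the inline-asm form inferred from the markers rather than taken from the source:

define void @test_fyl2xp1() optsize {
  ; single instruction under test, emitted via side-effecting inline asm
  tail call void asm sideeffect "fyl2xp1", ""()
  ret void
}
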
diff --git a/test/CodeGen/X86/xaluo.ll b/test/CodeGen/X86/xaluo.ll
index 25fd21d80c6..7d4cd220248 100644
--- a/test/CodeGen/X86/xaluo.ll
+++ b/test/CodeGen/X86/xaluo.ll
@@ -9,14 +9,14 @@
; SADDO reg, reg
define zeroext i1 @saddoi8(i8 signext %v1, i8 signext %v2, i8* %res) {
; SDAG-LABEL: saddoi8:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addb %sil, %dil
; SDAG-NEXT: seto %al
; SDAG-NEXT: movb %dil, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi8:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addb %sil, %dil
; FAST-NEXT: seto %al
; FAST-NEXT: movb %dil, (%rdx)
@@ -25,7 +25,7 @@ define zeroext i1 @saddoi8(i8 signext %v1, i8 signext %v2, i8* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addb %sil, %dil
; KNL-NEXT: seto %al
; KNL-NEXT: movb %dil, (%rdx)
@@ -39,14 +39,14 @@ define zeroext i1 @saddoi8(i8 signext %v1, i8 signext %v2, i8* %res) {
define zeroext i1 @saddoi16(i16 %v1, i16 %v2, i16* %res) {
; SDAG-LABEL: saddoi16:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addw %si, %di
; SDAG-NEXT: seto %al
; SDAG-NEXT: movw %di, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi16:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addw %si, %di
; FAST-NEXT: seto %al
; FAST-NEXT: movw %di, (%rdx)
@@ -55,7 +55,7 @@ define zeroext i1 @saddoi16(i16 %v1, i16 %v2, i16* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addw %si, %di
; KNL-NEXT: seto %al
; KNL-NEXT: movw %di, (%rdx)
@@ -69,14 +69,14 @@ define zeroext i1 @saddoi16(i16 %v1, i16 %v2, i16* %res) {
define zeroext i1 @saddoi32(i32 %v1, i32 %v2, i32* %res) {
; SDAG-LABEL: saddoi32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addl %esi, %edi
; SDAG-NEXT: seto %al
; SDAG-NEXT: movl %edi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addl %esi, %edi
; FAST-NEXT: seto %al
; FAST-NEXT: movl %edi, (%rdx)
@@ -85,7 +85,7 @@ define zeroext i1 @saddoi32(i32 %v1, i32 %v2, i32* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addl %esi, %edi
; KNL-NEXT: seto %al
; KNL-NEXT: movl %edi, (%rdx)
@@ -99,14 +99,14 @@ define zeroext i1 @saddoi32(i32 %v1, i32 %v2, i32* %res) {
define zeroext i1 @saddoi64(i64 %v1, i64 %v2, i64* %res) {
; SDAG-LABEL: saddoi64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addq %rsi, %rdi
; SDAG-NEXT: seto %al
; SDAG-NEXT: movq %rdi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addq %rsi, %rdi
; FAST-NEXT: seto %al
; FAST-NEXT: movq %rdi, (%rdx)
@@ -115,7 +115,7 @@ define zeroext i1 @saddoi64(i64 %v1, i64 %v2, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addq %rsi, %rdi
; KNL-NEXT: seto %al
; KNL-NEXT: movq %rdi, (%rdx)
@@ -130,14 +130,14 @@ define zeroext i1 @saddoi64(i64 %v1, i64 %v2, i64* %res) {
; SADDO reg, 1 | INC
define zeroext i1 @saddoinci8(i8 %v1, i8* %res) {
; SDAG-LABEL: saddoinci8:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: incb %dil
; SDAG-NEXT: seto %al
; SDAG-NEXT: movb %dil, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoinci8:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: incb %dil
; FAST-NEXT: seto %al
; FAST-NEXT: movb %dil, (%rsi)
@@ -146,7 +146,7 @@ define zeroext i1 @saddoinci8(i8 %v1, i8* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoinci8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: incb %dil
; KNL-NEXT: seto %al
; KNL-NEXT: movb %dil, (%rsi)
@@ -160,14 +160,14 @@ define zeroext i1 @saddoinci8(i8 %v1, i8* %res) {
define zeroext i1 @saddoinci16(i16 %v1, i16* %res) {
; SDAG-LABEL: saddoinci16:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: incw %di
; SDAG-NEXT: seto %al
; SDAG-NEXT: movw %di, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoinci16:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: incw %di
; FAST-NEXT: seto %al
; FAST-NEXT: movw %di, (%rsi)
@@ -176,7 +176,7 @@ define zeroext i1 @saddoinci16(i16 %v1, i16* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoinci16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: incw %di
; KNL-NEXT: seto %al
; KNL-NEXT: movw %di, (%rsi)
@@ -190,14 +190,14 @@ define zeroext i1 @saddoinci16(i16 %v1, i16* %res) {
define zeroext i1 @saddoinci32(i32 %v1, i32* %res) {
; SDAG-LABEL: saddoinci32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: incl %edi
; SDAG-NEXT: seto %al
; SDAG-NEXT: movl %edi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoinci32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: incl %edi
; FAST-NEXT: seto %al
; FAST-NEXT: movl %edi, (%rsi)
@@ -206,7 +206,7 @@ define zeroext i1 @saddoinci32(i32 %v1, i32* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoinci32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: incl %edi
; KNL-NEXT: seto %al
; KNL-NEXT: movl %edi, (%rsi)
@@ -220,14 +220,14 @@ define zeroext i1 @saddoinci32(i32 %v1, i32* %res) {
define zeroext i1 @saddoinci64(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoinci64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: incq %rdi
; SDAG-NEXT: seto %al
; SDAG-NEXT: movq %rdi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoinci64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: incq %rdi
; FAST-NEXT: seto %al
; FAST-NEXT: movq %rdi, (%rsi)
@@ -236,7 +236,7 @@ define zeroext i1 @saddoinci64(i64 %v1, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoinci64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: incq %rdi
; KNL-NEXT: seto %al
; KNL-NEXT: movq %rdi, (%rsi)
@@ -252,7 +252,7 @@ define zeroext i1 @saddoinci64(i64 %v1, i64* %res) {
; FIXME: DAG doesn't optimize immediates on the LHS.
define zeroext i1 @saddoi64imm1(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoi64imm1:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl $2, %ecx
; SDAG-NEXT: addq %rdi, %rcx
; SDAG-NEXT: seto %al
@@ -260,7 +260,7 @@ define zeroext i1 @saddoi64imm1(i64 %v1, i64* %res) {
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64imm1:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addq $2, %rdi
; FAST-NEXT: seto %al
; FAST-NEXT: movq %rdi, (%rsi)
@@ -269,7 +269,7 @@ define zeroext i1 @saddoi64imm1(i64 %v1, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi64imm1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl $2, %ecx
; KNL-NEXT: addq %rdi, %rcx
; KNL-NEXT: seto %al
@@ -285,14 +285,14 @@ define zeroext i1 @saddoi64imm1(i64 %v1, i64* %res) {
; Check boundary conditions for large immediates.
define zeroext i1 @saddoi64imm2(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoi64imm2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addq $-2147483648, %rdi ## imm = 0x80000000
; SDAG-NEXT: seto %al
; SDAG-NEXT: movq %rdi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64imm2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addq $-2147483648, %rdi ## imm = 0x80000000
; FAST-NEXT: seto %al
; FAST-NEXT: movq %rdi, (%rsi)
@@ -301,7 +301,7 @@ define zeroext i1 @saddoi64imm2(i64 %v1, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi64imm2:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addq $-2147483648, %rdi ## imm = 0x80000000
; KNL-NEXT: seto %al
; KNL-NEXT: movq %rdi, (%rsi)
@@ -315,7 +315,7 @@ define zeroext i1 @saddoi64imm2(i64 %v1, i64* %res) {
define zeroext i1 @saddoi64imm3(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoi64imm3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movabsq $-21474836489, %rcx ## imm = 0xFFFFFFFAFFFFFFF7
; SDAG-NEXT: addq %rdi, %rcx
; SDAG-NEXT: seto %al
@@ -323,7 +323,7 @@ define zeroext i1 @saddoi64imm3(i64 %v1, i64* %res) {
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64imm3:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movabsq $-21474836489, %rax ## imm = 0xFFFFFFFAFFFFFFF7
; FAST-NEXT: addq %rdi, %rax
; FAST-NEXT: seto %cl
@@ -333,7 +333,7 @@ define zeroext i1 @saddoi64imm3(i64 %v1, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi64imm3:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movabsq $-21474836489, %rcx ## imm = 0xFFFFFFFAFFFFFFF7
; KNL-NEXT: addq %rdi, %rcx
; KNL-NEXT: seto %al
@@ -348,14 +348,14 @@ define zeroext i1 @saddoi64imm3(i64 %v1, i64* %res) {
define zeroext i1 @saddoi64imm4(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoi64imm4:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addq $2147483647, %rdi ## imm = 0x7FFFFFFF
; SDAG-NEXT: seto %al
; SDAG-NEXT: movq %rdi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64imm4:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addq $2147483647, %rdi ## imm = 0x7FFFFFFF
; FAST-NEXT: seto %al
; FAST-NEXT: movq %rdi, (%rsi)
@@ -364,7 +364,7 @@ define zeroext i1 @saddoi64imm4(i64 %v1, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi64imm4:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addq $2147483647, %rdi ## imm = 0x7FFFFFFF
; KNL-NEXT: seto %al
; KNL-NEXT: movq %rdi, (%rsi)
@@ -378,7 +378,7 @@ define zeroext i1 @saddoi64imm4(i64 %v1, i64* %res) {
define zeroext i1 @saddoi64imm5(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoi64imm5:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl $2147483648, %ecx ## imm = 0x80000000
; SDAG-NEXT: addq %rdi, %rcx
; SDAG-NEXT: seto %al
@@ -386,7 +386,7 @@ define zeroext i1 @saddoi64imm5(i64 %v1, i64* %res) {
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64imm5:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl $2147483648, %eax ## imm = 0x80000000
; FAST-NEXT: addq %rdi, %rax
; FAST-NEXT: seto %cl
@@ -396,7 +396,7 @@ define zeroext i1 @saddoi64imm5(i64 %v1, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoi64imm5:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl $2147483648, %ecx ## imm = 0x80000000
; KNL-NEXT: addq %rdi, %rcx
; KNL-NEXT: seto %al
@@ -412,14 +412,14 @@ define zeroext i1 @saddoi64imm5(i64 %v1, i64* %res) {
; UADDO
define zeroext i1 @uaddoi32(i32 %v1, i32 %v2, i32* %res) {
; SDAG-LABEL: uaddoi32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addl %esi, %edi
; SDAG-NEXT: setb %al
; SDAG-NEXT: movl %edi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoi32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addl %esi, %edi
; FAST-NEXT: setb %al
; FAST-NEXT: movl %edi, (%rdx)
@@ -428,7 +428,7 @@ define zeroext i1 @uaddoi32(i32 %v1, i32 %v2, i32* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoi32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: movl %edi, (%rdx)
@@ -442,14 +442,14 @@ define zeroext i1 @uaddoi32(i32 %v1, i32 %v2, i32* %res) {
define zeroext i1 @uaddoi64(i64 %v1, i64 %v2, i64* %res) {
; SDAG-LABEL: uaddoi64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addq %rsi, %rdi
; SDAG-NEXT: setb %al
; SDAG-NEXT: movq %rdi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoi64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addq %rsi, %rdi
; FAST-NEXT: setb %al
; FAST-NEXT: movq %rdi, (%rdx)
@@ -458,7 +458,7 @@ define zeroext i1 @uaddoi64(i64 %v1, i64 %v2, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoi64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addq %rsi, %rdi
; KNL-NEXT: setb %al
; KNL-NEXT: movq %rdi, (%rdx)
@@ -473,14 +473,14 @@ define zeroext i1 @uaddoi64(i64 %v1, i64 %v2, i64* %res) {
; UADDO reg, 1 | NOT INC
define zeroext i1 @uaddoinci8(i8 %v1, i8* %res) {
; SDAG-LABEL: uaddoinci8:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addb $1, %dil
; SDAG-NEXT: setb %al
; SDAG-NEXT: movb %dil, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoinci8:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addb $1, %dil
; FAST-NEXT: setb %al
; FAST-NEXT: movb %dil, (%rsi)
@@ -489,7 +489,7 @@ define zeroext i1 @uaddoinci8(i8 %v1, i8* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoinci8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addb $1, %dil
; KNL-NEXT: setb %al
; KNL-NEXT: movb %dil, (%rsi)
@@ -503,14 +503,14 @@ define zeroext i1 @uaddoinci8(i8 %v1, i8* %res) {
define zeroext i1 @uaddoinci16(i16 %v1, i16* %res) {
; SDAG-LABEL: uaddoinci16:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addw $1, %di
; SDAG-NEXT: setb %al
; SDAG-NEXT: movw %di, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoinci16:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addw $1, %di
; FAST-NEXT: setb %al
; FAST-NEXT: movw %di, (%rsi)
@@ -519,7 +519,7 @@ define zeroext i1 @uaddoinci16(i16 %v1, i16* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoinci16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addw $1, %di
; KNL-NEXT: setb %al
; KNL-NEXT: movw %di, (%rsi)
@@ -533,14 +533,14 @@ define zeroext i1 @uaddoinci16(i16 %v1, i16* %res) {
define zeroext i1 @uaddoinci32(i32 %v1, i32* %res) {
; SDAG-LABEL: uaddoinci32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addl $1, %edi
; SDAG-NEXT: setb %al
; SDAG-NEXT: movl %edi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoinci32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addl $1, %edi
; FAST-NEXT: setb %al
; FAST-NEXT: movl %edi, (%rsi)
@@ -549,7 +549,7 @@ define zeroext i1 @uaddoinci32(i32 %v1, i32* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoinci32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addl $1, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: movl %edi, (%rsi)
@@ -563,14 +563,14 @@ define zeroext i1 @uaddoinci32(i32 %v1, i32* %res) {
define zeroext i1 @uaddoinci64(i64 %v1, i64* %res) {
; SDAG-LABEL: uaddoinci64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addq $1, %rdi
; SDAG-NEXT: setb %al
; SDAG-NEXT: movq %rdi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoinci64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addq $1, %rdi
; FAST-NEXT: setb %al
; FAST-NEXT: movq %rdi, (%rsi)
@@ -579,7 +579,7 @@ define zeroext i1 @uaddoinci64(i64 %v1, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoinci64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addq $1, %rdi
; KNL-NEXT: setb %al
; KNL-NEXT: movq %rdi, (%rsi)
@@ -594,14 +594,14 @@ define zeroext i1 @uaddoinci64(i64 %v1, i64* %res) {
; SSUBO
define zeroext i1 @ssuboi32(i32 %v1, i32 %v2, i32* %res) {
; SDAG-LABEL: ssuboi32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: subl %esi, %edi
; SDAG-NEXT: seto %al
; SDAG-NEXT: movl %edi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: ssuboi32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: subl %esi, %edi
; FAST-NEXT: seto %al
; FAST-NEXT: movl %edi, (%rdx)
@@ -610,7 +610,7 @@ define zeroext i1 @ssuboi32(i32 %v1, i32 %v2, i32* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: ssuboi32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: subl %esi, %edi
; KNL-NEXT: seto %al
; KNL-NEXT: movl %edi, (%rdx)
@@ -624,14 +624,14 @@ define zeroext i1 @ssuboi32(i32 %v1, i32 %v2, i32* %res) {
define zeroext i1 @ssuboi64(i64 %v1, i64 %v2, i64* %res) {
; SDAG-LABEL: ssuboi64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: subq %rsi, %rdi
; SDAG-NEXT: seto %al
; SDAG-NEXT: movq %rdi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: ssuboi64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: subq %rsi, %rdi
; FAST-NEXT: seto %al
; FAST-NEXT: movq %rdi, (%rdx)
@@ -640,7 +640,7 @@ define zeroext i1 @ssuboi64(i64 %v1, i64 %v2, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: ssuboi64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: subq %rsi, %rdi
; KNL-NEXT: seto %al
; KNL-NEXT: movq %rdi, (%rdx)
@@ -655,14 +655,14 @@ define zeroext i1 @ssuboi64(i64 %v1, i64 %v2, i64* %res) {
; USUBO
define zeroext i1 @usuboi32(i32 %v1, i32 %v2, i32* %res) {
; SDAG-LABEL: usuboi32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: subl %esi, %edi
; SDAG-NEXT: setb %al
; SDAG-NEXT: movl %edi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: usuboi32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: subl %esi, %edi
; FAST-NEXT: setb %al
; FAST-NEXT: movl %edi, (%rdx)
@@ -671,7 +671,7 @@ define zeroext i1 @usuboi32(i32 %v1, i32 %v2, i32* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: usuboi32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: subl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: movl %edi, (%rdx)
@@ -685,14 +685,14 @@ define zeroext i1 @usuboi32(i32 %v1, i32 %v2, i32* %res) {
define zeroext i1 @usuboi64(i64 %v1, i64 %v2, i64* %res) {
; SDAG-LABEL: usuboi64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: subq %rsi, %rdi
; SDAG-NEXT: setb %al
; SDAG-NEXT: movq %rdi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: usuboi64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: subq %rsi, %rdi
; FAST-NEXT: setb %al
; FAST-NEXT: movq %rdi, (%rdx)
@@ -701,7 +701,7 @@ define zeroext i1 @usuboi64(i64 %v1, i64 %v2, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: usuboi64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: subq %rsi, %rdi
; KNL-NEXT: setb %al
; KNL-NEXT: movq %rdi, (%rdx)
@@ -718,7 +718,7 @@ define zeroext i1 @usuboi64(i64 %v1, i64 %v2, i64* %res) {
;
define i32 @saddoselecti32(i32 %v1, i32 %v2) {
; SDAG-LABEL: saddoselecti32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: addl %esi, %eax
; SDAG-NEXT: cmovol %edi, %esi
@@ -726,7 +726,7 @@ define i32 @saddoselecti32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoselecti32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: addl %esi, %eax
; FAST-NEXT: cmovol %edi, %esi
@@ -734,7 +734,7 @@ define i32 @saddoselecti32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoselecti32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: addl %esi, %eax
; KNL-NEXT: cmovol %edi, %esi
@@ -748,7 +748,7 @@ define i32 @saddoselecti32(i32 %v1, i32 %v2) {
define i64 @saddoselecti64(i64 %v1, i64 %v2) {
; SDAG-LABEL: saddoselecti64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq %rdi, %rax
; SDAG-NEXT: addq %rsi, %rax
; SDAG-NEXT: cmovoq %rdi, %rsi
@@ -756,7 +756,7 @@ define i64 @saddoselecti64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoselecti64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq %rdi, %rax
; FAST-NEXT: addq %rsi, %rax
; FAST-NEXT: cmovoq %rdi, %rsi
@@ -764,7 +764,7 @@ define i64 @saddoselecti64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddoselecti64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: addq %rsi, %rax
; KNL-NEXT: cmovoq %rdi, %rsi
@@ -778,7 +778,7 @@ define i64 @saddoselecti64(i64 %v1, i64 %v2) {
define i32 @uaddoselecti32(i32 %v1, i32 %v2) {
; SDAG-LABEL: uaddoselecti32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: addl %esi, %eax
; SDAG-NEXT: cmovbl %edi, %esi
@@ -786,7 +786,7 @@ define i32 @uaddoselecti32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoselecti32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: addl %esi, %eax
; FAST-NEXT: cmovbl %edi, %esi
@@ -794,7 +794,7 @@ define i32 @uaddoselecti32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoselecti32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: addl %esi, %eax
; KNL-NEXT: cmovbl %edi, %esi
@@ -808,7 +808,7 @@ define i32 @uaddoselecti32(i32 %v1, i32 %v2) {
define i64 @uaddoselecti64(i64 %v1, i64 %v2) {
; SDAG-LABEL: uaddoselecti64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq %rdi, %rax
; SDAG-NEXT: addq %rsi, %rax
; SDAG-NEXT: cmovbq %rdi, %rsi
@@ -816,7 +816,7 @@ define i64 @uaddoselecti64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoselecti64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq %rdi, %rax
; FAST-NEXT: addq %rsi, %rax
; FAST-NEXT: cmovbq %rdi, %rsi
@@ -824,7 +824,7 @@ define i64 @uaddoselecti64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoselecti64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: addq %rsi, %rax
; KNL-NEXT: cmovbq %rdi, %rsi
@@ -838,21 +838,21 @@ define i64 @uaddoselecti64(i64 %v1, i64 %v2) {
define i32 @ssuboselecti32(i32 %v1, i32 %v2) {
; SDAG-LABEL: ssuboselecti32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: cmovol %edi, %esi
; SDAG-NEXT: movl %esi, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: ssuboselecti32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: cmovol %edi, %esi
; FAST-NEXT: movl %esi, %eax
; FAST-NEXT: retq
;
; KNL-LABEL: ssuboselecti32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: cmovol %edi, %esi
; KNL-NEXT: movl %esi, %eax
@@ -865,21 +865,21 @@ define i32 @ssuboselecti32(i32 %v1, i32 %v2) {
define i64 @ssuboselecti64(i64 %v1, i64 %v2) {
; SDAG-LABEL: ssuboselecti64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpq %rsi, %rdi
; SDAG-NEXT: cmovoq %rdi, %rsi
; SDAG-NEXT: movq %rsi, %rax
; SDAG-NEXT: retq
;
; FAST-LABEL: ssuboselecti64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpq %rsi, %rdi
; FAST-NEXT: cmovoq %rdi, %rsi
; FAST-NEXT: movq %rsi, %rax
; FAST-NEXT: retq
;
; KNL-LABEL: ssuboselecti64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpq %rsi, %rdi
; KNL-NEXT: cmovoq %rdi, %rsi
; KNL-NEXT: movq %rsi, %rax
@@ -892,21 +892,21 @@ define i64 @ssuboselecti64(i64 %v1, i64 %v2) {
define i32 @usuboselecti32(i32 %v1, i32 %v2) {
; SDAG-LABEL: usuboselecti32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: cmovbl %edi, %esi
; SDAG-NEXT: movl %esi, %eax
; SDAG-NEXT: retq
;
; FAST-LABEL: usuboselecti32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: cmovbl %edi, %esi
; FAST-NEXT: movl %esi, %eax
; FAST-NEXT: retq
;
; KNL-LABEL: usuboselecti32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: cmovbl %edi, %esi
; KNL-NEXT: movl %esi, %eax
@@ -919,21 +919,21 @@ define i32 @usuboselecti32(i32 %v1, i32 %v2) {
define i64 @usuboselecti64(i64 %v1, i64 %v2) {
; SDAG-LABEL: usuboselecti64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpq %rsi, %rdi
; SDAG-NEXT: cmovbq %rdi, %rsi
; SDAG-NEXT: movq %rsi, %rax
; SDAG-NEXT: retq
;
; FAST-LABEL: usuboselecti64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpq %rsi, %rdi
; FAST-NEXT: cmovbq %rdi, %rsi
; FAST-NEXT: movq %rsi, %rax
; FAST-NEXT: retq
;
; KNL-LABEL: usuboselecti64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpq %rsi, %rdi
; KNL-NEXT: cmovbq %rdi, %rsi
; KNL-NEXT: movq %rsi, %rax
@@ -949,10 +949,10 @@ define i64 @usuboselecti64(i64 %v1, i64 %v2) {
;
define zeroext i1 @saddobri32(i32 %v1, i32 %v2) {
; SDAG-LABEL: saddobri32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addl %esi, %edi
; SDAG-NEXT: jo LBB31_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB31_1: ## %overflow
@@ -960,10 +960,10 @@ define zeroext i1 @saddobri32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: saddobri32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addl %esi, %edi
; FAST-NEXT: jo LBB31_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -975,10 +975,10 @@ define zeroext i1 @saddobri32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddobri32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addl %esi, %edi
; KNL-NEXT: jo LBB31_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB31_1: ## %overflow
@@ -998,10 +998,10 @@ continue:
define zeroext i1 @saddobri64(i64 %v1, i64 %v2) {
; SDAG-LABEL: saddobri64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addq %rsi, %rdi
; SDAG-NEXT: jo LBB32_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB32_1: ## %overflow
@@ -1009,10 +1009,10 @@ define zeroext i1 @saddobri64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: saddobri64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addq %rsi, %rdi
; FAST-NEXT: jo LBB32_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1024,10 +1024,10 @@ define zeroext i1 @saddobri64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: saddobri64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addq %rsi, %rdi
; KNL-NEXT: jo LBB32_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB32_1: ## %overflow
@@ -1047,10 +1047,10 @@ continue:
define zeroext i1 @uaddobri32(i32 %v1, i32 %v2) {
; SDAG-LABEL: uaddobri32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addl %esi, %edi
; SDAG-NEXT: jb LBB33_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB33_1: ## %overflow
@@ -1058,10 +1058,10 @@ define zeroext i1 @uaddobri32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddobri32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addl %esi, %edi
; FAST-NEXT: jb LBB33_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1073,10 +1073,10 @@ define zeroext i1 @uaddobri32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddobri32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addl %esi, %edi
; KNL-NEXT: jb LBB33_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB33_1: ## %overflow
@@ -1096,10 +1096,10 @@ continue:
define zeroext i1 @uaddobri64(i64 %v1, i64 %v2) {
; SDAG-LABEL: uaddobri64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: addq %rsi, %rdi
; SDAG-NEXT: jb LBB34_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB34_1: ## %overflow
@@ -1107,10 +1107,10 @@ define zeroext i1 @uaddobri64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddobri64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: addq %rsi, %rdi
; FAST-NEXT: jb LBB34_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1122,10 +1122,10 @@ define zeroext i1 @uaddobri64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddobri64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: addq %rsi, %rdi
; KNL-NEXT: jb LBB34_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB34_1: ## %overflow
@@ -1145,10 +1145,10 @@ continue:
define zeroext i1 @ssubobri32(i32 %v1, i32 %v2) {
; SDAG-LABEL: ssubobri32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: jo LBB35_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB35_1: ## %overflow
@@ -1156,10 +1156,10 @@ define zeroext i1 @ssubobri32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: ssubobri32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: jo LBB35_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1171,10 +1171,10 @@ define zeroext i1 @ssubobri32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: ssubobri32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: jo LBB35_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB35_1: ## %overflow
@@ -1194,10 +1194,10 @@ continue:
define zeroext i1 @ssubobri64(i64 %v1, i64 %v2) {
; SDAG-LABEL: ssubobri64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpq %rsi, %rdi
; SDAG-NEXT: jo LBB36_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB36_1: ## %overflow
@@ -1205,10 +1205,10 @@ define zeroext i1 @ssubobri64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: ssubobri64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpq %rsi, %rdi
; FAST-NEXT: jo LBB36_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1220,10 +1220,10 @@ define zeroext i1 @ssubobri64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: ssubobri64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpq %rsi, %rdi
; KNL-NEXT: jo LBB36_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB36_1: ## %overflow
@@ -1243,10 +1243,10 @@ continue:
define zeroext i1 @usubobri32(i32 %v1, i32 %v2) {
; SDAG-LABEL: usubobri32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpl %esi, %edi
; SDAG-NEXT: jb LBB37_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB37_1: ## %overflow
@@ -1254,10 +1254,10 @@ define zeroext i1 @usubobri32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: usubobri32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpl %esi, %edi
; FAST-NEXT: jb LBB37_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1269,10 +1269,10 @@ define zeroext i1 @usubobri32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: usubobri32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: jb LBB37_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB37_1: ## %overflow
@@ -1292,10 +1292,10 @@ continue:
define zeroext i1 @usubobri64(i64 %v1, i64 %v2) {
; SDAG-LABEL: usubobri64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: cmpq %rsi, %rdi
; SDAG-NEXT: jb LBB38_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB38_1: ## %overflow
@@ -1303,10 +1303,10 @@ define zeroext i1 @usubobri64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: usubobri64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: cmpq %rsi, %rdi
; FAST-NEXT: jb LBB38_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -1318,10 +1318,10 @@ define zeroext i1 @usubobri64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: usubobri64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: cmpq %rsi, %rdi
; KNL-NEXT: jb LBB38_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB38_1: ## %overflow
@@ -1341,7 +1341,7 @@ continue:
define {i64, i1} @uaddoovf(i64 %a, i64 %b) {
; SDAG-LABEL: uaddoovf:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movzbl %dil, %ecx
; SDAG-NEXT: movzbl %sil, %eax
; SDAG-NEXT: addq %rcx, %rax
@@ -1349,7 +1349,7 @@ define {i64, i1} @uaddoovf(i64 %a, i64 %b) {
; SDAG-NEXT: retq
;
; FAST-LABEL: uaddoovf:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movzbl %dil, %ecx
; FAST-NEXT: movzbl %sil, %eax
; FAST-NEXT: addq %rcx, %rax
@@ -1357,7 +1357,7 @@ define {i64, i1} @uaddoovf(i64 %a, i64 %b) {
; FAST-NEXT: retq
;
; KNL-LABEL: uaddoovf:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movzbl %dil, %ecx
; KNL-NEXT: movzbl %sil, %eax
; KNL-NEXT: addq %rcx, %rax
@@ -1371,21 +1371,21 @@ define {i64, i1} @uaddoovf(i64 %a, i64 %b) {
define {i64, i1} @usuboovf(i64 %a, i64 %b) {
; SDAG-LABEL: usuboovf:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: notq %rsi
; SDAG-NEXT: xorl %edx, %edx
; SDAG-NEXT: movq %rsi, %rax
; SDAG-NEXT: retq
;
; FAST-LABEL: usuboovf:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: notq %rsi
; FAST-NEXT: xorl %edx, %edx
; FAST-NEXT: movq %rsi, %rax
; FAST-NEXT: retq
;
; KNL-LABEL: usuboovf:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: notq %rsi
; KNL-NEXT: xorl %edx, %edx
; KNL-NEXT: movq %rsi, %rax
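
The saddo*/uaddo*/ssubo*/usubo* functions above check how the add/sub overflow intrinsics lower to flag-setting arithmetic: add/sub/cmp followed by seto/setb for the materialized bit, cmovo/cmovb for the select forms, and jo/jb for the branch forms. A minimal sketch of the IR shape these checks exercise, assuming the standard llvm.*.with.overflow signatures (the _sketch name is hypothetical; the elided test bodies are of this form):

define zeroext i1 @saddoi32_sketch(i32 %v1, i32 %v2, i32* %res) {
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0   ; wrapped sum, stored through %res
  %obit = extractvalue {i32, i1} %t, 1  ; overflow bit, returned (the seto in the checks)
  store i32 %val, i32* %res
  ret i1 %obit
}
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
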
diff --git a/test/CodeGen/X86/xchg-nofold.ll b/test/CodeGen/X86/xchg-nofold.ll
index 939fa040422..b6020413175 100644
--- a/test/CodeGen/X86/xchg-nofold.ll
+++ b/test/CodeGen/X86/xchg-nofold.ll
@@ -8,13 +8,13 @@
; CHECK-LABEL: _Z3fooRSt6atomicIbEb
define zeroext i1 @_Z3fooRSt6atomicIbEb(%"struct.std::atomic"* nocapture dereferenceable(1) %a, i1 returned zeroext %b) nounwind {
; CHECK-LABEL: _Z3fooRSt6atomicIbEb:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: shrq $3, %rax
; CHECK-NEXT: movb 2147450880(%rax), %al
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je .LBB0_3
-; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: movl %edi, %ecx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: cmpb %al, %cl
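
The shrq $3 plus the load at offset 2147450880 (0x7FFF8000) in the checks above is presumably an AddressSanitizer shadow probe guarding the atomic access: a nonzero shadow byte falls through to the finer-grained andl $7 / cmpb check. A rough sketch of that address computation in IR, assuming the conventional x86-64 shadow mapping shadow = (addr >> 3) + 0x7FFF8000:

define i8 @shadow_byte_sketch(i64 %addr) {
  %s = lshr i64 %addr, 3            ; 8 application bytes per shadow byte
  %off = add i64 %s, 2147450880     ; 0x7FFF8000, assumed shadow base
  %p = inttoptr i64 %off to i8*
  %b = load i8, i8* %p              ; zero means the whole granule is addressable
  ret i8 %b
}
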
diff --git a/test/CodeGen/X86/xmulo.ll b/test/CodeGen/X86/xmulo.ll
index 03f284d87a6..3788d9c2d39 100644
--- a/test/CodeGen/X86/xmulo.ll
+++ b/test/CodeGen/X86/xmulo.ll
@@ -5,7 +5,7 @@
define {i64, i1} @t1() nounwind {
; SDAG-LABEL: t1:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl $8, %ecx
; SDAG-NEXT: movl $9, %eax
; SDAG-NEXT: mulq %rcx
@@ -13,7 +13,7 @@ define {i64, i1} @t1() nounwind {
; SDAG-NEXT: retq
;
; FAST-LABEL: t1:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl $8, %ecx
; FAST-NEXT: movl $9, %eax
; FAST-NEXT: mulq %rcx
@@ -21,7 +21,7 @@ define {i64, i1} @t1() nounwind {
; FAST-NEXT: retq
;
; KNL-LABEL: t1:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl $8, %ecx
; KNL-NEXT: movl $9, %eax
; KNL-NEXT: mulq %rcx
@@ -33,7 +33,7 @@ define {i64, i1} @t1() nounwind {
define {i64, i1} @t2() nounwind {
; SDAG-LABEL: t2:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: xorl %ecx, %ecx
; SDAG-NEXT: movl $9, %eax
; SDAG-NEXT: mulq %rcx
@@ -41,7 +41,7 @@ define {i64, i1} @t2() nounwind {
; SDAG-NEXT: retq
;
; FAST-LABEL: t2:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: xorl %ecx, %ecx
; FAST-NEXT: movl $9, %eax
; FAST-NEXT: mulq %rcx
@@ -49,7 +49,7 @@ define {i64, i1} @t2() nounwind {
; FAST-NEXT: retq
;
; KNL-LABEL: t2:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: xorl %ecx, %ecx
; KNL-NEXT: movl $9, %eax
; KNL-NEXT: mulq %rcx
@@ -61,7 +61,7 @@ define {i64, i1} @t2() nounwind {
define {i64, i1} @t3() nounwind {
; SDAG-LABEL: t3:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq $-1, %rcx
; SDAG-NEXT: movl $9, %eax
; SDAG-NEXT: mulq %rcx
@@ -69,7 +69,7 @@ define {i64, i1} @t3() nounwind {
; SDAG-NEXT: retq
;
; FAST-LABEL: t3:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq $-1, %rcx
; FAST-NEXT: movl $9, %eax
; FAST-NEXT: mulq %rcx
@@ -77,7 +77,7 @@ define {i64, i1} @t3() nounwind {
; FAST-NEXT: retq
;
; KNL-LABEL: t3:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq $-1, %rcx
; KNL-NEXT: movl $9, %eax
; KNL-NEXT: mulq %rcx
@@ -90,7 +90,7 @@ define {i64, i1} @t3() nounwind {
; SMULO
define zeroext i1 @smuloi8(i8 %v1, i8 %v2, i8* %res) {
; SDAG-LABEL: smuloi8:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: imulb %sil
; SDAG-NEXT: seto %cl
@@ -99,7 +99,7 @@ define zeroext i1 @smuloi8(i8 %v1, i8 %v2, i8* %res) {
; SDAG-NEXT: retq
;
; FAST-LABEL: smuloi8:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: imulb %sil
; FAST-NEXT: seto %cl
@@ -109,7 +109,7 @@ define zeroext i1 @smuloi8(i8 %v1, i8 %v2, i8* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: smuloi8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: imulb %sil
; KNL-NEXT: seto %cl
@@ -125,14 +125,14 @@ define zeroext i1 @smuloi8(i8 %v1, i8 %v2, i8* %res) {
define zeroext i1 @smuloi16(i16 %v1, i16 %v2, i16* %res) {
; SDAG-LABEL: smuloi16:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: imulw %si, %di
; SDAG-NEXT: seto %al
; SDAG-NEXT: movw %di, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: smuloi16:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: imulw %si, %di
; FAST-NEXT: seto %al
; FAST-NEXT: movw %di, (%rdx)
@@ -141,7 +141,7 @@ define zeroext i1 @smuloi16(i16 %v1, i16 %v2, i16* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: smuloi16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: imulw %si, %di
; KNL-NEXT: seto %al
; KNL-NEXT: movw %di, (%rdx)
@@ -155,14 +155,14 @@ define zeroext i1 @smuloi16(i16 %v1, i16 %v2, i16* %res) {
define zeroext i1 @smuloi32(i32 %v1, i32 %v2, i32* %res) {
; SDAG-LABEL: smuloi32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: imull %esi, %edi
; SDAG-NEXT: seto %al
; SDAG-NEXT: movl %edi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: smuloi32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: imull %esi, %edi
; FAST-NEXT: seto %al
; FAST-NEXT: movl %edi, (%rdx)
@@ -171,7 +171,7 @@ define zeroext i1 @smuloi32(i32 %v1, i32 %v2, i32* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: smuloi32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: imull %esi, %edi
; KNL-NEXT: seto %al
; KNL-NEXT: movl %edi, (%rdx)
@@ -185,14 +185,14 @@ define zeroext i1 @smuloi32(i32 %v1, i32 %v2, i32* %res) {
define zeroext i1 @smuloi64(i64 %v1, i64 %v2, i64* %res) {
; SDAG-LABEL: smuloi64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: imulq %rsi, %rdi
; SDAG-NEXT: seto %al
; SDAG-NEXT: movq %rdi, (%rdx)
; SDAG-NEXT: retq
;
; FAST-LABEL: smuloi64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: imulq %rsi, %rdi
; FAST-NEXT: seto %al
; FAST-NEXT: movq %rdi, (%rdx)
@@ -201,7 +201,7 @@ define zeroext i1 @smuloi64(i64 %v1, i64 %v2, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: smuloi64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: imulq %rsi, %rdi
; KNL-NEXT: seto %al
; KNL-NEXT: movq %rdi, (%rdx)
@@ -216,7 +216,7 @@ define zeroext i1 @smuloi64(i64 %v1, i64 %v2, i64* %res) {
; UMULO
define zeroext i1 @umuloi8(i8 %v1, i8 %v2, i8* %res) {
; SDAG-LABEL: umuloi8:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: mulb %sil
; SDAG-NEXT: seto %cl
@@ -225,7 +225,7 @@ define zeroext i1 @umuloi8(i8 %v1, i8 %v2, i8* %res) {
; SDAG-NEXT: retq
;
; FAST-LABEL: umuloi8:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: mulb %sil
; FAST-NEXT: seto %cl
@@ -235,7 +235,7 @@ define zeroext i1 @umuloi8(i8 %v1, i8 %v2, i8* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: umuloi8:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: mulb %sil
; KNL-NEXT: seto %cl
@@ -251,7 +251,7 @@ define zeroext i1 @umuloi8(i8 %v1, i8 %v2, i8* %res) {
define zeroext i1 @umuloi16(i16 %v1, i16 %v2, i16* %res) {
; SDAG-LABEL: umuloi16:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq %rdx, %rcx
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: mulw %si
@@ -261,7 +261,7 @@ define zeroext i1 @umuloi16(i16 %v1, i16 %v2, i16* %res) {
; SDAG-NEXT: retq
;
; FAST-LABEL: umuloi16:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq %rdx, %rcx
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: mulw %si
@@ -272,7 +272,7 @@ define zeroext i1 @umuloi16(i16 %v1, i16 %v2, i16* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: umuloi16:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq %rdx, %rcx
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: mulw %si
@@ -289,7 +289,7 @@ define zeroext i1 @umuloi16(i16 %v1, i16 %v2, i16* %res) {
define zeroext i1 @umuloi32(i32 %v1, i32 %v2, i32* %res) {
; SDAG-LABEL: umuloi32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq %rdx, %rcx
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: mull %esi
@@ -299,7 +299,7 @@ define zeroext i1 @umuloi32(i32 %v1, i32 %v2, i32* %res) {
; SDAG-NEXT: retq
;
; FAST-LABEL: umuloi32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq %rdx, %rcx
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: mull %esi
@@ -310,7 +310,7 @@ define zeroext i1 @umuloi32(i32 %v1, i32 %v2, i32* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: umuloi32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq %rdx, %rcx
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: mull %esi
@@ -327,7 +327,7 @@ define zeroext i1 @umuloi32(i32 %v1, i32 %v2, i32* %res) {
define zeroext i1 @umuloi64(i64 %v1, i64 %v2, i64* %res) {
; SDAG-LABEL: umuloi64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq %rdx, %rcx
; SDAG-NEXT: movq %rdi, %rax
; SDAG-NEXT: mulq %rsi
@@ -337,7 +337,7 @@ define zeroext i1 @umuloi64(i64 %v1, i64 %v2, i64* %res) {
; SDAG-NEXT: retq
;
; FAST-LABEL: umuloi64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq %rdx, %rcx
; FAST-NEXT: movq %rdi, %rax
; FAST-NEXT: mulq %rsi
@@ -348,7 +348,7 @@ define zeroext i1 @umuloi64(i64 %v1, i64 %v2, i64* %res) {
; FAST-NEXT: retq
;
; KNL-LABEL: umuloi64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq %rdx, %rcx
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: mulq %rsi
@@ -368,7 +368,7 @@ define zeroext i1 @umuloi64(i64 %v1, i64 %v2, i64* %res) {
;
define i32 @smuloselecti32(i32 %v1, i32 %v2) {
; SDAG-LABEL: smuloselecti32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: imull %esi, %eax
; SDAG-NEXT: cmovol %edi, %esi
@@ -376,7 +376,7 @@ define i32 @smuloselecti32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: smuloselecti32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: imull %esi, %eax
; FAST-NEXT: cmovol %edi, %esi
@@ -384,7 +384,7 @@ define i32 @smuloselecti32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: smuloselecti32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: imull %esi, %eax
; KNL-NEXT: cmovol %edi, %esi
@@ -398,7 +398,7 @@ define i32 @smuloselecti32(i32 %v1, i32 %v2) {
define i64 @smuloselecti64(i64 %v1, i64 %v2) {
; SDAG-LABEL: smuloselecti64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq %rdi, %rax
; SDAG-NEXT: imulq %rsi, %rax
; SDAG-NEXT: cmovoq %rdi, %rsi
@@ -406,7 +406,7 @@ define i64 @smuloselecti64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: smuloselecti64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq %rdi, %rax
; FAST-NEXT: imulq %rsi, %rax
; FAST-NEXT: cmovoq %rdi, %rsi
@@ -414,7 +414,7 @@ define i64 @smuloselecti64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: smuloselecti64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: imulq %rsi, %rax
; KNL-NEXT: cmovoq %rdi, %rsi
@@ -428,7 +428,7 @@ define i64 @smuloselecti64(i64 %v1, i64 %v2) {
define i32 @umuloselecti32(i32 %v1, i32 %v2) {
; SDAG-LABEL: umuloselecti32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: mull %esi
; SDAG-NEXT: cmovol %edi, %esi
@@ -436,7 +436,7 @@ define i32 @umuloselecti32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: umuloselecti32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: mull %esi
; FAST-NEXT: cmovol %edi, %esi
@@ -444,7 +444,7 @@ define i32 @umuloselecti32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: umuloselecti32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: mull %esi
; KNL-NEXT: cmovol %edi, %esi
@@ -458,7 +458,7 @@ define i32 @umuloselecti32(i32 %v1, i32 %v2) {
define i64 @umuloselecti64(i64 %v1, i64 %v2) {
; SDAG-LABEL: umuloselecti64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq %rdi, %rax
; SDAG-NEXT: mulq %rsi
; SDAG-NEXT: cmovoq %rdi, %rsi
@@ -466,7 +466,7 @@ define i64 @umuloselecti64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: umuloselecti64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq %rdi, %rax
; FAST-NEXT: mulq %rsi
; FAST-NEXT: cmovoq %rdi, %rsi
@@ -474,7 +474,7 @@ define i64 @umuloselecti64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: umuloselecti64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: mulq %rsi
; KNL-NEXT: cmovoq %rdi, %rsi
@@ -491,10 +491,10 @@ define i64 @umuloselecti64(i64 %v1, i64 %v2) {
;
define zeroext i1 @smulobri32(i32 %v1, i32 %v2) {
; SDAG-LABEL: smulobri32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: imull %esi, %edi
; SDAG-NEXT: jo LBB15_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB15_1: ## %overflow
@@ -502,10 +502,10 @@ define zeroext i1 @smulobri32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: smulobri32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: imull %esi, %edi
; FAST-NEXT: jo LBB15_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -517,10 +517,10 @@ define zeroext i1 @smulobri32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: smulobri32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: imull %esi, %edi
; KNL-NEXT: jo LBB15_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB15_1: ## %overflow
@@ -540,10 +540,10 @@ continue:
define zeroext i1 @smulobri64(i64 %v1, i64 %v2) {
; SDAG-LABEL: smulobri64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: imulq %rsi, %rdi
; SDAG-NEXT: jo LBB16_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB16_1: ## %overflow
@@ -551,10 +551,10 @@ define zeroext i1 @smulobri64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: smulobri64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: imulq %rsi, %rdi
; FAST-NEXT: jo LBB16_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -566,10 +566,10 @@ define zeroext i1 @smulobri64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: smulobri64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: imulq %rsi, %rdi
; KNL-NEXT: jo LBB16_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB16_1: ## %overflow
@@ -589,11 +589,11 @@ continue:
define zeroext i1 @umulobri32(i32 %v1, i32 %v2) {
; SDAG-LABEL: umulobri32:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl %edi, %eax
; SDAG-NEXT: mull %esi
; SDAG-NEXT: jo LBB17_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB17_1: ## %overflow
@@ -601,11 +601,11 @@ define zeroext i1 @umulobri32(i32 %v1, i32 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: umulobri32:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: mull %esi
; FAST-NEXT: jo LBB17_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -617,11 +617,11 @@ define zeroext i1 @umulobri32(i32 %v1, i32 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: umulobri32:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl %edi, %eax
; KNL-NEXT: mull %esi
; KNL-NEXT: jo LBB17_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB17_1: ## %overflow
@@ -641,11 +641,11 @@ continue:
define zeroext i1 @umulobri64(i64 %v1, i64 %v2) {
; SDAG-LABEL: umulobri64:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movq %rdi, %rax
; SDAG-NEXT: mulq %rsi
; SDAG-NEXT: jo LBB18_1
-; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: ## %bb.2: ## %continue
; SDAG-NEXT: movb $1, %al
; SDAG-NEXT: retq
; SDAG-NEXT: LBB18_1: ## %overflow
@@ -653,11 +653,11 @@ define zeroext i1 @umulobri64(i64 %v1, i64 %v2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: umulobri64:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movq %rdi, %rax
; FAST-NEXT: mulq %rsi
; FAST-NEXT: jo LBB18_1
-; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: ## %bb.2: ## %continue
; FAST-NEXT: movb $1, %al
; FAST-NEXT: andb $1, %al
; FAST-NEXT: movzbl %al, %eax
@@ -669,11 +669,11 @@ define zeroext i1 @umulobri64(i64 %v1, i64 %v2) {
; FAST-NEXT: retq
;
; KNL-LABEL: umulobri64:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: mulq %rsi
; KNL-NEXT: jo LBB18_1
-; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: ## %bb.2: ## %continue
; KNL-NEXT: movb $1, %al
; KNL-NEXT: retq
; KNL-NEXT: LBB18_1: ## %overflow
@@ -693,7 +693,7 @@ continue:
define i1 @bug27873(i64 %c1, i1 %c2) {
; SDAG-LABEL: bug27873:
-; SDAG: ## BB#0:
+; SDAG: ## %bb.0:
; SDAG-NEXT: movl $160, %ecx
; SDAG-NEXT: movq %rdi, %rax
; SDAG-NEXT: mulq %rcx
@@ -702,7 +702,7 @@ define i1 @bug27873(i64 %c1, i1 %c2) {
; SDAG-NEXT: retq
;
; FAST-LABEL: bug27873:
-; FAST: ## BB#0:
+; FAST: ## %bb.0:
; FAST-NEXT: movl $160, %ecx
; FAST-NEXT: movq %rdi, %rax
; FAST-NEXT: mulq %rcx
@@ -711,7 +711,7 @@ define i1 @bug27873(i64 %c1, i1 %c2) {
; FAST-NEXT: retq
;
; KNL-LABEL: bug27873:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: movl $160, %ecx
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: mulq %rcx
diff --git a/test/CodeGen/X86/xop-ifma.ll b/test/CodeGen/X86/xop-ifma.ll
index 83291095b87..594058f6c53 100644
--- a/test/CodeGen/X86/xop-ifma.ll
+++ b/test/CodeGen/X86/xop-ifma.ll
@@ -4,7 +4,7 @@
define <8 x i16> @test_mul_v8i16_add_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
; XOP-LABEL: test_mul_v8i16_add_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = mul <8 x i16> %a0, %a1
@@ -14,7 +14,7 @@ define <8 x i16> @test_mul_v8i16_add_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i1
define <16 x i16> @test_mul_v16i16_add_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2) {
; XOP-AVX1-LABEL: test_mul_v16i16_add_v16i16:
-; XOP-AVX1: # BB#0:
+; XOP-AVX1: # %bb.0:
; XOP-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; XOP-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOP-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
@@ -24,7 +24,7 @@ define <16 x i16> @test_mul_v16i16_add_v16i16(<16 x i16> %a0, <16 x i16> %a1, <1
; XOP-AVX1-NEXT: retq
;
; XOP-AVX2-LABEL: test_mul_v16i16_add_v16i16:
-; XOP-AVX2: # BB#0:
+; XOP-AVX2: # %bb.0:
; XOP-AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; XOP-AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; XOP-AVX2-NEXT: retq
@@ -35,7 +35,7 @@ define <16 x i16> @test_mul_v16i16_add_v16i16(<16 x i16> %a0, <16 x i16> %a1, <1
define <4 x i32> @test_mul_v4i32_add_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; XOP-LABEL: test_mul_v4i32_add_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = mul <4 x i32> %a0, %a1
@@ -45,7 +45,7 @@ define <4 x i32> @test_mul_v4i32_add_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i3
define <8 x i32> @test_mul_v8i32_add_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
; XOP-AVX1-LABEL: test_mul_v8i32_add_v8i32:
-; XOP-AVX1: # BB#0:
+; XOP-AVX1: # %bb.0:
; XOP-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; XOP-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOP-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
@@ -55,7 +55,7 @@ define <8 x i32> @test_mul_v8i32_add_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i3
; XOP-AVX1-NEXT: retq
;
; XOP-AVX2-LABEL: test_mul_v8i32_add_v8i32:
-; XOP-AVX2: # BB#0:
+; XOP-AVX2: # %bb.0:
; XOP-AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; XOP-AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; XOP-AVX2-NEXT: retq
@@ -66,7 +66,7 @@ define <8 x i32> @test_mul_v8i32_add_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i3
define <4 x i64> @test_mulx_v4i32_add_v4i64(<4 x i32> %a0, <4 x i32> %a1, <4 x i64> %a2) {
; XOP-AVX1-LABEL: test_mulx_v4i32_add_v4i64:
-; XOP-AVX1: # BB#0:
+; XOP-AVX1: # %bb.0:
; XOP-AVX1-NEXT: vpmovsxdq %xmm0, %xmm3
; XOP-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; XOP-AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
@@ -80,7 +80,7 @@ define <4 x i64> @test_mulx_v4i32_add_v4i64(<4 x i32> %a0, <4 x i32> %a1, <4 x i
; XOP-AVX1-NEXT: retq
;
; XOP-AVX2-LABEL: test_mulx_v4i32_add_v4i64:
-; XOP-AVX2: # BB#0:
+; XOP-AVX2: # %bb.0:
; XOP-AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; XOP-AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
; XOP-AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
@@ -95,7 +95,7 @@ define <4 x i64> @test_mulx_v4i32_add_v4i64(<4 x i32> %a0, <4 x i32> %a1, <4 x i
define <2 x i64> @test_pmuldq_lo_v4i32_add_v2i64(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
; XOP-LABEL: test_pmuldq_lo_v4i32_add_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmacsdql %xmm2, %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
@@ -105,7 +105,7 @@ define <2 x i64> @test_pmuldq_lo_v4i32_add_v2i64(<4 x i32> %a0, <4 x i32> %a1, <
define <2 x i64> @test_pmuldq_hi_v4i32_add_v2i64(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
; XOP-LABEL: test_pmuldq_hi_v4i32_add_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmacsdqh %xmm2, %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
@@ -117,7 +117,7 @@ define <2 x i64> @test_pmuldq_hi_v4i32_add_v2i64(<4 x i32> %a0, <4 x i32> %a1, <
define <4 x i32> @test_pmaddwd_v8i16_add_v4i32(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
; XOP-LABEL: test_pmaddwd_v8i16_add_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpmadcswd %xmm2, %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
diff --git a/test/CodeGen/X86/xop-intrinsics-fast-isel.ll b/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
index 2da37e4d2b9..911ab945c5d 100644
--- a/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
@@ -6,12 +6,12 @@
define <2 x i64> @test_mm_maccs_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maccs_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacssww %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maccs_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacssww %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -25,12 +25,12 @@ declare <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16>, <8 x i16>, <8 x i16>) nounw
define <2 x i64> @test_mm_macc_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_macc_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_macc_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -44,12 +44,12 @@ declare <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16>, <8 x i16>, <8 x i16>) nounwi
define <2 x i64> @test_mm_maccsd_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maccsd_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacsswd %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maccsd_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsswd %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -63,12 +63,12 @@ declare <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounw
define <2 x i64> @test_mm_maccd_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maccd_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacswd %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maccd_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacswd %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -82,12 +82,12 @@ declare <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwi
define <2 x i64> @test_mm_maccs_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maccs_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacssdd %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maccs_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacssdd %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -101,12 +101,12 @@ declare <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32>, <4 x i32>, <4 x i32>) nounw
define <2 x i64> @test_mm_macc_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_macc_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_macc_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -120,12 +120,12 @@ declare <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwi
define <2 x i64> @test_mm_maccslo_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maccslo_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacssdql %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maccslo_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacssdql %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -137,12 +137,12 @@ declare <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32>, <4 x i32>, <2 x i64>) noun
define <2 x i64> @test_mm_macclo_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_macclo_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacsdql %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_macclo_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsdql %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -154,12 +154,12 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32>, <4 x i32>, <2 x i64>) nounw
define <2 x i64> @test_mm_maccshi_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maccshi_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacssdqh %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maccshi_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacssdqh %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -171,12 +171,12 @@ declare <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32>, <4 x i32>, <2 x i64>) noun
define <2 x i64> @test_mm_macchi_epi32(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_macchi_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmacsdqh %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_macchi_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmacsdqh %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -188,12 +188,12 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounw
define <2 x i64> @test_mm_maddsd_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maddsd_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmadcsswd %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maddsd_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmadcsswd %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -207,12 +207,12 @@ declare <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16>, <8 x i16>, <4 x i32>) noun
define <2 x i64> @test_mm_maddd_epi16(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maddd_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmadcswd %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maddd_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmadcswd %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -226,12 +226,12 @@ declare <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16>, <8 x i16>, <4 x i32>) nounw
define <2 x i64> @test_mm_haddw_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddw_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddbw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddw_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddbw %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -243,12 +243,12 @@ declare <8 x i16> @llvm.x86.xop.vphaddbw(<16 x i8>) nounwind readnone
define <2 x i64> @test_mm_haddd_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddd_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddbd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddd_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddbd %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -260,12 +260,12 @@ declare <4 x i32> @llvm.x86.xop.vphaddbd(<16 x i8>) nounwind readnone
define <2 x i64> @test_mm_haddq_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddq_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddbq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddq_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddbq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -276,12 +276,12 @@ declare <2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8>) nounwind readnone
define <2 x i64> @test_mm_haddd_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddd_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddwd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddd_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddwd %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -293,12 +293,12 @@ declare <4 x i32> @llvm.x86.xop.vphaddwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_mm_haddq_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddq_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddwq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddq_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddwq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -309,12 +309,12 @@ declare <2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16>) nounwind readnone
define <2 x i64> @test_mm_haddq_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddq_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphadddq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddq_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphadddq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -325,12 +325,12 @@ declare <2 x i64> @llvm.x86.xop.vphadddq(<4 x i32>) nounwind readnone
define <2 x i64> @test_mm_haddw_epu8(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddw_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddubw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddw_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddubw %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -342,12 +342,12 @@ declare <8 x i16> @llvm.x86.xop.vphaddubw(<16 x i8>) nounwind readnone
define <2 x i64> @test_mm_haddd_epu8(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddd_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddubd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddd_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddubd %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -359,12 +359,12 @@ declare <4 x i32> @llvm.x86.xop.vphaddubd(<16 x i8>) nounwind readnone
define <2 x i64> @test_mm_haddq_epu8(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddq_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddubq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddq_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddubq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -375,12 +375,12 @@ declare <2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8>) nounwind readnone
define <2 x i64> @test_mm_haddd_epu16(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddd_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphadduwd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddd_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphadduwd %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -393,12 +393,12 @@ declare <4 x i32> @llvm.x86.xop.vphadduwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_mm_haddq_epu16(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddq_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphadduwq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddq_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphadduwq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -409,12 +409,12 @@ declare <2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16>) nounwind readnone
define <2 x i64> @test_mm_haddq_epu32(<2 x i64> %a0) {
; X32-LABEL: test_mm_haddq_epu32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddudq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_haddq_epu32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddudq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -425,12 +425,12 @@ declare <2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32>) nounwind readnone
define <2 x i64> @test_mm_hsubw_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_hsubw_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubbw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hsubw_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubbw %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -442,12 +442,12 @@ declare <8 x i16> @llvm.x86.xop.vphsubbw(<16 x i8>) nounwind readnone
define <2 x i64> @test_mm_hsubd_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_hsubd_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubwd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hsubd_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubwd %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -459,12 +459,12 @@ declare <4 x i32> @llvm.x86.xop.vphsubwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_mm_hsubq_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_hsubq_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubdq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_hsubq_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubdq %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -475,7 +475,7 @@ declare <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32>) nounwind readnone
define <2 x i64> @test_mm_cmov_si128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_cmov_si128:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; X32-NEXT: vpxor %xmm3, %xmm2, %xmm3
; X32-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -484,7 +484,7 @@ define <2 x i64> @test_mm_cmov_si128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmov_si128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; X64-NEXT: vpxor %xmm3, %xmm2, %xmm3
; X64-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -498,7 +498,7 @@ declare <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
define <4 x i64> @test_mm256_cmov_si256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; X32-LABEL: test_mm256_cmov_si256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm3, %xmm3, %xmm3
; X32-NEXT: vcmptrueps %ymm3, %ymm3, %ymm3
; X32-NEXT: vxorps %ymm3, %ymm2, %ymm3
@@ -508,7 +508,7 @@ define <4 x i64> @test_mm256_cmov_si256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cmov_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm3, %xmm3, %xmm3
; X64-NEXT: vcmptrueps %ymm3, %ymm3, %ymm3
; X64-NEXT: vxorps %ymm3, %ymm2, %ymm3
@@ -523,12 +523,12 @@ declare <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64>, <4 x i64>, <4 x i64>) noun
define <2 x i64> @test_mm_perm_epi8(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_perm_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_perm_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -542,12 +542,12 @@ declare <16 x i8> @llvm.x86.xop.vpperm(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
define <2 x i64> @test_mm_rot_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_rot_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vprotb %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_rot_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vprotb %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -560,12 +560,12 @@ declare <16 x i8> @llvm.x86.xop.vprotb(<16 x i8>, <16 x i8>) nounwind readnone
define <2 x i64> @test_mm_rot_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_rot_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vprotw %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_rot_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vprotw %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -578,12 +578,12 @@ declare <8 x i16> @llvm.x86.xop.vprotw(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_rot_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_rot_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vprotd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_rot_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vprotd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -596,12 +596,12 @@ declare <4 x i32> @llvm.x86.xop.vprotd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_mm_rot_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_rot_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vprotq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_rot_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vprotq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vprotq(<2 x i64> %a0, <2 x i64> %a1)
@@ -611,12 +611,12 @@ declare <2 x i64> @llvm.x86.xop.vprotq(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_mm_roti_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_roti_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vprotb $1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_roti_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vprotb $1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -628,12 +628,12 @@ declare <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8>, i8) nounwind readnone
define <2 x i64> @test_mm_roti_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_roti_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vprotw $50, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_roti_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vprotw $50, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -645,12 +645,12 @@ declare <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16>, i8) nounwind readnone
define <2 x i64> @test_mm_roti_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_roti_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vprotd $226, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_roti_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vprotd $226, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -662,12 +662,12 @@ declare <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32>, i8) nounwind readnone
define <2 x i64> @test_mm_roti_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_roti_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vprotq $100, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_roti_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vprotq $100, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64> %a0, i8 100)
@@ -677,12 +677,12 @@ declare <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64>, i8) nounwind readnone
define <2 x i64> @test_mm_shl_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_shl_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shl_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -695,12 +695,12 @@ declare <16 x i8> @llvm.x86.xop.vpshlb(<16 x i8>, <16 x i8>) nounwind readnone
define <2 x i64> @test_mm_shl_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_shl_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shl_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -713,12 +713,12 @@ declare <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_shl_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_shl_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shl_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -731,12 +731,12 @@ declare <4 x i32> @llvm.x86.xop.vpshld(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_mm_shl_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_shl_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_shl_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64> %a0, <2 x i64> %a1)
@@ -746,12 +746,12 @@ declare <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_mm_sha_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sha_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshab %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sha_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshab %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -764,12 +764,12 @@ declare <16 x i8> @llvm.x86.xop.vpshab(<16 x i8>, <16 x i8>) nounwind readnone
define <2 x i64> @test_mm_sha_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sha_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sha_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -782,12 +782,12 @@ declare <8 x i16> @llvm.x86.xop.vpshaw(<8 x i16>, <8 x i16>) nounwind readnone
define <2 x i64> @test_mm_sha_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sha_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshad %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sha_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshad %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -800,12 +800,12 @@ declare <4 x i32> @llvm.x86.xop.vpshad(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_mm_sha_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_sha_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sha_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64> %a0, <2 x i64> %a1)
@@ -815,12 +815,12 @@ declare <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_mm_com_epu8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_com_epu8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcomltub %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_com_epu8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltub %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -833,12 +833,12 @@ declare <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8>, <16 x i8>, i8) nounwind readn
define <2 x i64> @test_mm_com_epu16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_com_epu16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcomltuw %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_com_epu16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltuw %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -851,12 +851,12 @@ declare <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16>, <8 x i16>, i8) nounwind readn
define <2 x i64> @test_mm_com_epu32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_com_epu32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcomltud %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_com_epu32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltud %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -869,12 +869,12 @@ declare <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32>, <4 x i32>, i8) nounwind readn
define <2 x i64> @test_mm_com_epu64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_com_epu64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcomltuq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_com_epu64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltuq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
@@ -884,12 +884,12 @@ declare <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64>, <2 x i64>, i8) nounwind readn
define <2 x i64> @test_mm_com_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_com_epi8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcomltb %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_com_epi8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltb %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -902,12 +902,12 @@ declare <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8>, <16 x i8>, i8) nounwind readno
define <2 x i64> @test_mm_com_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_com_epi16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcomltw %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_com_epi16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltw %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -920,12 +920,12 @@ declare <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16>, <8 x i16>, i8) nounwind readno
define <2 x i64> @test_mm_com_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_com_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcomltd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_com_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -938,12 +938,12 @@ declare <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32>, <4 x i32>, i8) nounwind readno
define <2 x i64> @test_mm_com_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_com_epi64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcomltq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_com_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcomltq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
@@ -953,12 +953,12 @@ declare <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64>, <2 x i64>, i8) nounwind readno
define <2 x double> @test_mm_permute2_pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_permute2_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2pd $0, %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_permute2_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2pd $0, %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> %a2, i8 0)
@@ -968,12 +968,12 @@ declare <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double>, <2 x double>, <2 x i
define <4 x double> @test_mm256_permute2_pd(<4 x double> %a0, <4 x double> %a1, <4 x i64> %a2) {
; X32-LABEL: test_mm256_permute2_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2pd $0, %ymm2, %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permute2_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2pd $0, %ymm2, %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x i64> %a2, i8 0)
@@ -983,12 +983,12 @@ declare <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double>, <4 x double>, <4
define <4 x float> @test_mm_permute2_ps(<4 x float> %a0, <4 x float> %a1, <2 x i64> %a2) {
; X32-LABEL: test_mm_permute2_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2ps $0, %xmm2, %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_permute2_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2ps $0, %xmm2, %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg2 = bitcast <2 x i64> %a2 to <4 x i32>
@@ -999,12 +999,12 @@ declare <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float>, <4 x float>, <4 x i32>
define <8 x float> @test_mm256_permute2_ps(<8 x float> %a0, <8 x float> %a1, <4 x i64> %a2) {
; X32-LABEL: test_mm256_permute2_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2ps $0, %ymm2, %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_permute2_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2ps $0, %ymm2, %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg2 = bitcast <4 x i64> %a2 to <8 x i32>
@@ -1015,12 +1015,12 @@ declare <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float>, <8 x float>, <8 x
define <4 x float> @test_mm_frcz_ss(<4 x float> %a0) {
; X32-LABEL: test_mm_frcz_ss:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vfrczss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_frcz_ss:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vfrczss %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %a0)
@@ -1030,12 +1030,12 @@ declare <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float>) nounwind readnone
define <2 x double> @test_mm_frcz_sd(<2 x double> %a0) {
; X32-LABEL: test_mm_frcz_sd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vfrczsd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_frcz_sd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vfrczsd %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %a0)
@@ -1045,12 +1045,12 @@ declare <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double>) nounwind readnone
define <4 x float> @test_mm_frcz_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_frcz_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vfrczps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_frcz_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vfrczps %xmm0, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float> %a0)
@@ -1060,12 +1060,12 @@ declare <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float>) nounwind readnone
define <2 x double> @test_mm_frcz_pd(<2 x double> %a0) {
; X32-LABEL: test_mm_frcz_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vfrczpd %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_frcz_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vfrczpd %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double> %a0)
@@ -1075,12 +1075,12 @@ declare <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double>) nounwind readnone
define <8 x float> @test_mm256_frcz_ps(<8 x float> %a0) {
; X32-LABEL: test_mm256_frcz_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vfrczps %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_frcz_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vfrczps %ymm0, %ymm0
; X64-NEXT: retq
%res = call <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float> %a0)
@@ -1090,12 +1090,12 @@ declare <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_mm256_frcz_pd(<4 x double> %a0) {
; X32-LABEL: test_mm256_frcz_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vfrczpd %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_frcz_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vfrczpd %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double> %a0)
diff --git a/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll b/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
index 2369beffb6b..c5493368ab1 100644
--- a/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
+++ b/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
@@ -3,7 +3,7 @@
define <2 x double> @test_int_x86_xop_vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $1, %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 1) ; [#uses=1]
@@ -11,7 +11,7 @@ define <2 x double> @test_int_x86_xop_vpermil2pd(<2 x double> %a0, <2 x double>
}
define <2 x double> @test_int_x86_xop_vpermil2pd_mr(<2 x double> %a0, <2 x double>* %a1, <2 x double> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_mr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $1, %xmm1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <2 x double>, <2 x double>* %a1
@@ -20,7 +20,7 @@ define <2 x double> @test_int_x86_xop_vpermil2pd_mr(<2 x double> %a0, <2 x doubl
}
define <2 x double> @test_int_x86_xop_vpermil2pd_rm(<2 x double> %a0, <2 x double> %a1, <2 x double>* %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_rm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $1, (%rdi), %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <2 x double>, <2 x double>* %a2
@@ -31,7 +31,7 @@ declare <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double>, <2 x double>, <2 x d
define <4 x double> @test_int_x86_xop_vpermil2pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $2, %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 2) ;
@@ -39,7 +39,7 @@ define <4 x double> @test_int_x86_xop_vpermil2pd_256(<4 x double> %a0, <4 x doub
}
define <4 x double> @test_int_x86_xop_vpermil2pd_256_mr(<4 x double> %a0, <4 x double>* %a1, <4 x double> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_256_mr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $2, %ymm1, (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %a1
@@ -48,7 +48,7 @@ define <4 x double> @test_int_x86_xop_vpermil2pd_256_mr(<4 x double> %a0, <4 x d
}
define <4 x double> @test_int_x86_xop_vpermil2pd_256_rm(<4 x double> %a0, <4 x double> %a1, <4 x double>* %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_256_rm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $2, (%rdi), %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %a2
@@ -59,7 +59,7 @@ declare <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double>, <4 x double>, <4
define <4 x float> @test_int_x86_xop_vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2ps $3, %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 3) ;
@@ -69,7 +69,7 @@ declare <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float>, <4 x float>, <4 x floa
define <8 x float> @test_int_x86_xop_vpermil2ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2ps $4, %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 4) ;
@@ -79,7 +79,7 @@ declare <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float>, <8 x float>, <8 x
define <16 x i8> @test_int_x86_xop_vpcomeqb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomeqb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomeqb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomeqb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -87,7 +87,7 @@ define <16 x i8> @test_int_x86_xop_vpcomeqb(<16 x i8> %a0, <16 x i8> %a1) {
}
define <16 x i8> @test_int_x86_xop_vpcomeqb_mem(<16 x i8> %a0, <16 x i8>* %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomeqb_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomeqb (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <16 x i8>, <16 x i8>* %a1
@@ -98,7 +98,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomeqb(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomeqw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomeqw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomeqw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomeqw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -108,7 +108,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomeqw(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomeqd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomeqd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomeqd(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -118,7 +118,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomeqd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomeqq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomeqq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomeqq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -128,7 +128,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomeqq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomequb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomequb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomequb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomequb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -138,7 +138,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomequb(<16 x i8>, <16 x i8>) nounwind readnon
define <4 x i32> @test_int_x86_xop_vpcomequd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomequd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomequd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomequd(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -148,7 +148,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomequd(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x i64> @test_int_x86_xop_vpcomequq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomequq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomequq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomequq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -158,7 +158,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomequq(<2 x i64>, <2 x i64>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomequw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomequw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomequw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomequw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -168,7 +168,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomequw(<8 x i16>, <8 x i16>) nounwind readnon
define <16 x i8> @test_int_x86_xop_vpcomfalseb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomfalseb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomfalseb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomfalseb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -178,7 +178,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomfalseb(<16 x i8>, <16 x i8>) nounwind readn
define <4 x i32> @test_int_x86_xop_vpcomfalsed(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomfalsed:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomfalsed %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomfalsed(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -188,7 +188,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomfalsed(<4 x i32>, <4 x i32>) nounwind readn
define <2 x i64> @test_int_x86_xop_vpcomfalseq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomfalseq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomfalseq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomfalseq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -198,7 +198,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomfalseq(<2 x i64>, <2 x i64>) nounwind readn
define <16 x i8> @test_int_x86_xop_vpcomfalseub(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomfalseub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomfalseub %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomfalseub(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -208,7 +208,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomfalseub(<16 x i8>, <16 x i8>) nounwind read
define <4 x i32> @test_int_x86_xop_vpcomfalseud(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomfalseud:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomfalseud %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomfalseud(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -218,7 +218,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomfalseud(<4 x i32>, <4 x i32>) nounwind read
define <2 x i64> @test_int_x86_xop_vpcomfalseuq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomfalseuq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomfalseuq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomfalseuq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -228,7 +228,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomfalseuq(<2 x i64>, <2 x i64>) nounwind read
define <8 x i16> @test_int_x86_xop_vpcomfalseuw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomfalseuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomfalseuw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomfalseuw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -238,7 +238,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomfalseuw(<8 x i16>, <8 x i16>) nounwind read
define <8 x i16> @test_int_x86_xop_vpcomfalsew(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomfalsew:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomfalsew %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomfalsew(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -248,7 +248,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomfalsew(<8 x i16>, <8 x i16>) nounwind readn
define <16 x i8> @test_int_x86_xop_vpcomgeb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgeb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgeb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomgeb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -258,7 +258,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomgeb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomged(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomged:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomged %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomged(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -268,7 +268,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomged(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomgeq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgeq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgeq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomgeq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -278,7 +278,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomgeq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomgeub(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgeub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgeub %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomgeub(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -288,7 +288,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomgeub(<16 x i8>, <16 x i8>) nounwind readnon
define <4 x i32> @test_int_x86_xop_vpcomgeud(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgeud:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgeud %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomgeud(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -298,7 +298,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomgeud(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x i64> @test_int_x86_xop_vpcomgeuq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgeuq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgeuq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomgeuq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -308,7 +308,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomgeuq(<2 x i64>, <2 x i64>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomgeuw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgeuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgeuw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomgeuw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -318,7 +318,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomgeuw(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomgew(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgew:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgew %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomgew(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -328,7 +328,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomgew(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomgtb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgtb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgtb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomgtb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -338,7 +338,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomgtb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomgtd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgtd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomgtd(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -348,7 +348,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomgtd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomgtq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgtq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgtq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomgtq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -358,7 +358,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomgtq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomgtub(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgtub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgtub %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomgtub(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -368,7 +368,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomgtub(<16 x i8>, <16 x i8>) nounwind readnon
define <4 x i32> @test_int_x86_xop_vpcomgtud(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgtud:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgtud %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomgtud(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -378,7 +378,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomgtud(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x i64> @test_int_x86_xop_vpcomgtuq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgtuq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgtuq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomgtuq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -388,7 +388,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomgtuq(<2 x i64>, <2 x i64>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomgtuw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgtuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgtuw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomgtuw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -398,7 +398,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomgtuw(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomgtw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomgtw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomgtw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomgtw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -408,7 +408,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomgtw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomleb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomleb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomleb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomleb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -418,7 +418,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomleb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomled(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomled:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomled %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomled(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -428,7 +428,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomled(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomleq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomleq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomleq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomleq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -438,7 +438,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomleq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomleub(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomleub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomleub %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomleub(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -448,7 +448,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomleub(<16 x i8>, <16 x i8>) nounwind readnon
define <4 x i32> @test_int_x86_xop_vpcomleud(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomleud:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomleud %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomleud(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -458,7 +458,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomleud(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x i64> @test_int_x86_xop_vpcomleuq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomleuq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomleuq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomleuq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -468,7 +468,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomleuq(<2 x i64>, <2 x i64>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomleuw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomleuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomleuw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomleuw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -478,7 +478,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomleuw(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomlew(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomlew:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomlew %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomlew(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -488,7 +488,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomlew(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomltb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomltb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomltb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -498,7 +498,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomltb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomltd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomltd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomltd(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -508,7 +508,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomltd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomltq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomltq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomltq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -518,7 +518,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomltq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomltub(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomltub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltub %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomltub(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -528,7 +528,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomltub(<16 x i8>, <16 x i8>) nounwind readnon
define <4 x i32> @test_int_x86_xop_vpcomltud(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomltud:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltud %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomltud(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -538,7 +538,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomltud(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x i64> @test_int_x86_xop_vpcomltuq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomltuq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltuq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomltuq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -548,7 +548,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomltuq(<2 x i64>, <2 x i64>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomltuw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomltuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltuw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomltuw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -558,7 +558,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomltuw(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomltw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomltw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomltw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -568,7 +568,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomltw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomneb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomneb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomneqb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomneb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -578,7 +578,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomneb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomned(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomned:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomneqd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomned(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -588,7 +588,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomned(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomneq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomneq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomneqq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomneq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -598,7 +598,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomneq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomneub(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomneub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomnequb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomneub(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -608,7 +608,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomneub(<16 x i8>, <16 x i8>) nounwind readnon
define <4 x i32> @test_int_x86_xop_vpcomneud(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomneud:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomnequd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomneud(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -618,7 +618,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomneud(<4 x i32>, <4 x i32>) nounwind readnon
define <2 x i64> @test_int_x86_xop_vpcomneuq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomneuq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomnequq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomneuq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -628,7 +628,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomneuq(<2 x i64>, <2 x i64>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomneuw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomneuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomnequw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomneuw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -638,7 +638,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomneuw(<8 x i16>, <8 x i16>) nounwind readnon
define <8 x i16> @test_int_x86_xop_vpcomnew(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomnew:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomneqw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomnew(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -648,7 +648,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomnew(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomtrueb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomtrueb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomtrueb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomtrueb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -658,7 +658,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomtrueb(<16 x i8>, <16 x i8>) nounwind readno
define <4 x i32> @test_int_x86_xop_vpcomtrued(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomtrued:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomtrued %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomtrued(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -668,7 +668,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomtrued(<4 x i32>, <4 x i32>) nounwind readno
define <2 x i64> @test_int_x86_xop_vpcomtrueq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomtrueq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomtrueq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomtrueq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -678,7 +678,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomtrueq(<2 x i64>, <2 x i64>) nounwind readno
define <16 x i8> @test_int_x86_xop_vpcomtrueub(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomtrueub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomtrueub %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomtrueub(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -688,7 +688,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomtrueub(<16 x i8>, <16 x i8>) nounwind readn
define <4 x i32> @test_int_x86_xop_vpcomtrueud(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomtrueud:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomtrueud %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomtrueud(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -698,7 +698,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomtrueud(<4 x i32>, <4 x i32>) nounwind readn
define <2 x i64> @test_int_x86_xop_vpcomtrueuq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomtrueuq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomtrueuq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomtrueuq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -708,7 +708,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomtrueuq(<2 x i64>, <2 x i64>) nounwind readn
define <8 x i16> @test_int_x86_xop_vpcomtrueuw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomtrueuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomtrueuw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomtrueuw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -718,7 +718,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomtrueuw(<8 x i16>, <8 x i16>) nounwind readn
define <8 x i16> @test_int_x86_xop_vpcomtruew(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomtruew:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomtruew %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomtruew(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -728,7 +728,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomtruew(<8 x i16>, <8 x i16>) nounwind readno
define <2 x i64> @test_int_x86_xop_vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) ;
@@ -738,7 +738,7 @@ declare <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
define <4 x i64> @test_int_x86_xop_vpcmov_256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) ;
@@ -746,7 +746,7 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256(<4 x i64> %a0, <4 x i64> %a1, <4 x
}
define <4 x i64> @test_int_x86_xop_vpcmov_256_mr(<4 x i64> %a0, <4 x i64>* %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256_mr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm1, (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %a1
@@ -755,7 +755,7 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256_mr(<4 x i64> %a0, <4 x i64>* %a1,
}
define <4 x i64> @test_int_x86_xop_vpcmov_256_rm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64>* %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256_rm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov (%rdi), %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %a2
diff --git a/test/CodeGen/X86/xop-intrinsics-x86_64.ll b/test/CodeGen/X86/xop-intrinsics-x86_64.ll
index 76286a26ffa..d4c5420f20d 100644
--- a/test/CodeGen/X86/xop-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/xop-intrinsics-x86_64.ll
@@ -3,7 +3,7 @@
define <2 x double> @test_int_x86_xop_vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $1, %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> %a2, i8 1) ; [#uses=1]
@@ -11,7 +11,7 @@ define <2 x double> @test_int_x86_xop_vpermil2pd(<2 x double> %a0, <2 x double>
}
define <2 x double> @test_int_x86_xop_vpermil2pd_mr(<2 x double> %a0, <2 x double>* %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_mr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $1, %xmm1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <2 x double>, <2 x double>* %a1
@@ -20,7 +20,7 @@ define <2 x double> @test_int_x86_xop_vpermil2pd_mr(<2 x double> %a0, <2 x doubl
}
define <2 x double> @test_int_x86_xop_vpermil2pd_rm(<2 x double> %a0, <2 x double> %a1, <2 x i64>* %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_rm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $1, (%rdi), %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <2 x i64>, <2 x i64>* %a2
@@ -31,7 +31,7 @@ declare <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double>, <2 x double>, <2 x i
define <4 x double> @test_int_x86_xop_vpermil2pd_256(<4 x double> %a0, <4 x double> %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $2, %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x i64> %a2, i8 2) ;
@@ -39,7 +39,7 @@ define <4 x double> @test_int_x86_xop_vpermil2pd_256(<4 x double> %a0, <4 x doub
}
define <4 x double> @test_int_x86_xop_vpermil2pd_256_mr(<4 x double> %a0, <4 x double>* %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_256_mr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $2, %ymm1, (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %a1
@@ -48,7 +48,7 @@ define <4 x double> @test_int_x86_xop_vpermil2pd_256_mr(<4 x double> %a0, <4 x d
}
define <4 x double> @test_int_x86_xop_vpermil2pd_256_rm(<4 x double> %a0, <4 x double> %a1, <4 x i64>* %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2pd_256_rm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2pd $2, (%rdi), %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %a2
@@ -59,7 +59,7 @@ declare <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double>, <4 x double>, <4
define <4 x float> @test_int_x86_xop_vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2ps $3, %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x i32> %a2, i8 3) ;
@@ -69,7 +69,7 @@ declare <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float>, <4 x float>, <4 x i32>
define <8 x float> @test_int_x86_xop_vpermil2ps_256(<8 x float> %a0, <8 x float> %a1, <8 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpermil2ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermil2ps $4, %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a1, <8 x i32> %a2, i8 4) ;
@@ -79,7 +79,7 @@ declare <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float>, <8 x float>, <8 x
define <2 x i64> @test_int_x86_xop_vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = xor <2 x i64> %a2, <i64 -1, i64 -1>
@@ -91,7 +91,7 @@ define <2 x i64> @test_int_x86_xop_vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64
define <4 x i64> @test_int_x86_xop_vpcmov_256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = xor <4 x i64> %a2, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -102,7 +102,7 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256(<4 x i64> %a0, <4 x i64> %a1, <4 x
}
define <4 x i64> @test_int_x86_xop_vpcmov_256_mr(<4 x i64> %a0, <4 x i64>* %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256_mr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm1, (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %a1
@@ -114,7 +114,7 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256_mr(<4 x i64> %a0, <4 x i64>* %a1,
}
define <4 x i64> @test_int_x86_xop_vpcmov_256_rm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64>* %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256_rm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov (%rdi), %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %a2
@@ -127,7 +127,7 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256_rm(<4 x i64> %a0, <4 x i64> %a1, <
define <4 x i32> @test_int_x86_xop_vphaddbd(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddbd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddbd %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vphaddbd(<16 x i8> %a0) ;
@@ -137,7 +137,7 @@ declare <4 x i32> @llvm.x86.xop.vphaddbd(<16 x i8>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vphaddbq(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddbq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddbq %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8> %a0) ;
@@ -147,7 +147,7 @@ declare <2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vphaddbw(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddbw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddbw %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vphaddbw(<16 x i8> %a0) ;
@@ -157,7 +157,7 @@ declare <8 x i16> @llvm.x86.xop.vphaddbw(<16 x i8>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vphadddq(<4 x i32> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphadddq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphadddq %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vphadddq(<4 x i32> %a0) ;
@@ -167,7 +167,7 @@ declare <2 x i64> @llvm.x86.xop.vphadddq(<4 x i32>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vphaddubd(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddubd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddubd %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vphaddubd(<16 x i8> %a0) ;
@@ -177,7 +177,7 @@ declare <4 x i32> @llvm.x86.xop.vphaddubd(<16 x i8>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vphaddubq(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddubq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddubq %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8> %a0) ;
@@ -187,7 +187,7 @@ declare <2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vphaddubw(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddubw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddubw %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vphaddubw(<16 x i8> %a0) ;
@@ -197,7 +197,7 @@ declare <8 x i16> @llvm.x86.xop.vphaddubw(<16 x i8>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vphaddudq(<4 x i32> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddudq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddudq %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32> %a0) ;
@@ -207,7 +207,7 @@ declare <2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vphadduwd(<8 x i16> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphadduwd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphadduwd %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vphadduwd(<8 x i16> %a0) ;
@@ -217,7 +217,7 @@ declare <4 x i32> @llvm.x86.xop.vphadduwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vphadduwq(<8 x i16> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphadduwq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphadduwq %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16> %a0) ;
@@ -227,7 +227,7 @@ declare <2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vphaddwd(<8 x i16> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddwd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddwd %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vphaddwd(<8 x i16> %a0) ;
@@ -237,7 +237,7 @@ declare <4 x i32> @llvm.x86.xop.vphaddwd(<8 x i16>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vphaddwq(<8 x i16> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddwq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddwq %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16> %a0) ;
@@ -247,7 +247,7 @@ declare <2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vphsubbw(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphsubbw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubbw %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vphsubbw(<16 x i8> %a0) ;
@@ -257,7 +257,7 @@ declare <8 x i16> @llvm.x86.xop.vphsubbw(<16 x i8>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vphsubdq(<4 x i32> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphsubdq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubdq %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32> %a0) ;
@@ -265,7 +265,7 @@ define <2 x i64> @test_int_x86_xop_vphsubdq(<4 x i32> %a0) {
}
define <2 x i64> @test_int_x86_xop_vphsubdq_mem(<4 x i32>* %a0) {
; CHECK-LABEL: test_int_x86_xop_vphsubdq_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubdq (%rdi), %xmm0
; CHECK-NEXT: retq
%vec = load <4 x i32>, <4 x i32>* %a0
@@ -276,7 +276,7 @@ declare <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vphsubwd(<8 x i16> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphsubwd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubwd %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vphsubwd(<8 x i16> %a0) ;
@@ -284,7 +284,7 @@ define <4 x i32> @test_int_x86_xop_vphsubwd(<8 x i16> %a0) {
}
define <4 x i32> @test_int_x86_xop_vphsubwd_mem(<8 x i16>* %a0) {
; CHECK-LABEL: test_int_x86_xop_vphsubwd_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubwd (%rdi), %xmm0
; CHECK-NEXT: retq
%vec = load <8 x i16>, <8 x i16>* %a0
@@ -295,7 +295,7 @@ declare <4 x i32> @llvm.x86.xop.vphsubwd(<8 x i16>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpmacsdd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacsdd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) ;
@@ -305,7 +305,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwi
define <2 x i64> @test_int_x86_xop_vpmacsdqh(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacsdqh:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacsdqh %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) ;
@@ -315,7 +315,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounw
define <2 x i64> @test_int_x86_xop_vpmacsdql(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacsdql:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacsdql %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) ;
@@ -325,7 +325,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32>, <4 x i32>, <2 x i64>) nounw
define <4 x i32> @test_int_x86_xop_vpmacssdd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacssdd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacssdd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) ;
@@ -335,7 +335,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32>, <4 x i32>, <4 x i32>) nounw
define <2 x i64> @test_int_x86_xop_vpmacssdqh(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacssdqh:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacssdqh %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) ;
@@ -345,7 +345,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32>, <4 x i32>, <2 x i64>) noun
define <2 x i64> @test_int_x86_xop_vpmacssdql(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacssdql:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacssdql %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) ;
@@ -355,7 +355,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32>, <4 x i32>, <2 x i64>) noun
define <4 x i32> @test_int_x86_xop_vpmacsswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacsswd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacsswd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) ;
@@ -365,7 +365,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounw
define <8 x i16> @test_int_x86_xop_vpmacssww(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacssww:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacssww %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) ;
@@ -375,7 +375,7 @@ declare <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16>, <8 x i16>, <8 x i16>) nounw
define <4 x i32> @test_int_x86_xop_vpmacswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacswd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacswd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) ;
@@ -385,7 +385,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwi
define <8 x i16> @test_int_x86_xop_vpmacsww(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmacsww:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) ;
@@ -395,7 +395,7 @@ declare <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16>, <8 x i16>, <8 x i16>) nounwi
define <4 x i32> @test_int_x86_xop_vpmadcsswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmadcsswd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmadcsswd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) ;
@@ -405,7 +405,7 @@ declare <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16>, <8 x i16>, <4 x i32>) noun
define <4 x i32> @test_int_x86_xop_vpmadcswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmadcswd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmadcswd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) ;
@@ -413,7 +413,7 @@ define <4 x i32> @test_int_x86_xop_vpmadcswd(<8 x i16> %a0, <8 x i16> %a1, <4 x
}
define <4 x i32> @test_int_x86_xop_vpmadcswd_mem(<8 x i16> %a0, <8 x i16>* %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpmadcswd_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmadcswd %xmm1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <8 x i16>, <8 x i16>* %a1
@@ -424,7 +424,7 @@ declare <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16>, <8 x i16>, <4 x i32>) nounw
define <16 x i8> @test_int_x86_xop_vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpperm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) ;
@@ -432,7 +432,7 @@ define <16 x i8> @test_int_x86_xop_vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8
}
define <16 x i8> @test_int_x86_xop_vpperm_rm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %a2) {
; CHECK-LABEL: test_int_x86_xop_vpperm_rm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpperm (%rdi), %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <16 x i8>, <16 x i8>* %a2
@@ -441,7 +441,7 @@ define <16 x i8> @test_int_x86_xop_vpperm_rm(<16 x i8> %a0, <16 x i8> %a1, <16 x
}
define <16 x i8> @test_int_x86_xop_vpperm_mr(<16 x i8> %a0, <16 x i8>* %a1, <16 x i8> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpperm_mr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpperm %xmm1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <16 x i8>, <16 x i8>* %a1
@@ -452,7 +452,7 @@ declare <16 x i8> @llvm.x86.xop.vpperm(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
define <16 x i8> @test_int_x86_xop_vprotb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vprotb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vprotb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -462,7 +462,7 @@ declare <16 x i8> @llvm.x86.xop.vprotb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vprotd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vprotd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vprotd(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -472,7 +472,7 @@ declare <4 x i32> @llvm.x86.xop.vprotd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vprotq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vprotq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vprotq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -482,7 +482,7 @@ declare <2 x i64> @llvm.x86.xop.vprotq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vprotw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vprotw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vprotw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -492,7 +492,7 @@ declare <8 x i16> @llvm.x86.xop.vprotw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vprotbi(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vprotbi:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotb $1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8> %a0, i8 1) ;
@@ -502,7 +502,7 @@ declare <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8>, i8) nounwind readnone
define <4 x i32> @test_int_x86_xop_vprotdi(<4 x i32> %a0) {
; CHECK-LABEL: test_int_x86_xop_vprotdi:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotd $254, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32> %a0, i8 -2) ;
@@ -512,7 +512,7 @@ declare <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32>, i8) nounwind readnone
define <2 x i64> @test_int_x86_xop_vprotqi(<2 x i64> %a0) {
; CHECK-LABEL: test_int_x86_xop_vprotqi:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotq $3, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64> %a0, i8 3) ;
@@ -522,7 +522,7 @@ declare <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64>, i8) nounwind readnone
define <8 x i16> @test_int_x86_xop_vprotwi(<8 x i16> %a0) {
; CHECK-LABEL: test_int_x86_xop_vprotwi:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vprotw $252, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16> %a0, i8 -4) ;
@@ -532,7 +532,7 @@ declare <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16>, i8) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpshab(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshab:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshab %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpshab(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -542,7 +542,7 @@ declare <16 x i8> @llvm.x86.xop.vpshab(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpshad(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshad:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshad %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpshad(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -552,7 +552,7 @@ declare <4 x i32> @llvm.x86.xop.vpshad(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpshaq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshaq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -562,7 +562,7 @@ declare <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpshaw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshaw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpshaw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -572,7 +572,7 @@ declare <8 x i16> @llvm.x86.xop.vpshaw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpshlb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshlb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpshlb(<16 x i8> %a0, <16 x i8> %a1) ;
@@ -582,7 +582,7 @@ declare <16 x i8> @llvm.x86.xop.vpshlb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpshld(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshld:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshld %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpshld(<4 x i32> %a0, <4 x i32> %a1) ;
@@ -592,7 +592,7 @@ declare <4 x i32> @llvm.x86.xop.vpshld(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpshlq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshlq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64> %a0, <2 x i64> %a1) ;
@@ -602,7 +602,7 @@ declare <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpshlw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshlw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16> %a0, <8 x i16> %a1) ;
@@ -610,7 +610,7 @@ define <8 x i16> @test_int_x86_xop_vpshlw(<8 x i16> %a0, <8 x i16> %a1) {
}
define <8 x i16> @test_int_x86_xop_vpshlw_rm(<8 x i16> %a0, <8 x i16>* %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshlw_rm:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshlw (%rdi), %xmm0, %xmm0
; CHECK-NEXT: retq
%vec = load <8 x i16>, <8 x i16>* %a1
@@ -619,7 +619,7 @@ define <8 x i16> @test_int_x86_xop_vpshlw_rm(<8 x i16> %a0, <8 x i16>* %a1) {
}
define <8 x i16> @test_int_x86_xop_vpshlw_mr(<8 x i16>* %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpshlw_mr:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshlw %xmm0, (%rdi), %xmm0
; CHECK-NEXT: retq
%vec = load <8 x i16>, <8 x i16>* %a0
@@ -630,7 +630,7 @@ declare <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x float> @test_int_x86_xop_vfrcz_ss(<4 x float> %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_ss:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczss %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %a0) ;
@@ -638,7 +638,7 @@ define <4 x float> @test_int_x86_xop_vfrcz_ss(<4 x float> %a0) {
}
define <4 x float> @test_int_x86_xop_vfrcz_ss_mem(float* %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_ss_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczss (%rdi), %xmm0
; CHECK-NEXT: retq
%elem = load float, float* %a0
@@ -650,7 +650,7 @@ declare <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float>) nounwind readnone
define <2 x double> @test_int_x86_xop_vfrcz_sd(<2 x double> %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_sd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczsd %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %a0) ;
@@ -658,7 +658,7 @@ define <2 x double> @test_int_x86_xop_vfrcz_sd(<2 x double> %a0) {
}
define <2 x double> @test_int_x86_xop_vfrcz_sd_mem(double* %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_sd_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczsd (%rdi), %xmm0
; CHECK-NEXT: retq
%elem = load double, double* %a0
@@ -670,7 +670,7 @@ declare <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double>) nounwind readnone
define <2 x double> @test_int_x86_xop_vfrcz_pd(<2 x double> %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczpd %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double> %a0) ;
@@ -678,7 +678,7 @@ define <2 x double> @test_int_x86_xop_vfrcz_pd(<2 x double> %a0) {
}
define <2 x double> @test_int_x86_xop_vfrcz_pd_mem(<2 x double>* %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_pd_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczpd (%rdi), %xmm0
; CHECK-NEXT: retq
%vec = load <2 x double>, <2 x double>* %a0
@@ -689,7 +689,7 @@ declare <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double>) nounwind readnone
define <4 x double> @test_int_x86_xop_vfrcz_pd_256(<4 x double> %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_pd_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczpd %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double> %a0) ;
@@ -697,7 +697,7 @@ define <4 x double> @test_int_x86_xop_vfrcz_pd_256(<4 x double> %a0) {
}
define <4 x double> @test_int_x86_xop_vfrcz_pd_256_mem(<4 x double>* %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_pd_256_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczpd (%rdi), %ymm0
; CHECK-NEXT: retq
%vec = load <4 x double>, <4 x double>* %a0
@@ -708,7 +708,7 @@ declare <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double>) nounwind readnone
define <4 x float> @test_int_x86_xop_vfrcz_ps(<4 x float> %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczps %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float> %a0) ;
@@ -716,7 +716,7 @@ define <4 x float> @test_int_x86_xop_vfrcz_ps(<4 x float> %a0) {
}
define <4 x float> @test_int_x86_xop_vfrcz_ps_mem(<4 x float>* %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_ps_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczps (%rdi), %xmm0
; CHECK-NEXT: retq
%vec = load <4 x float>, <4 x float>* %a0
@@ -727,7 +727,7 @@ declare <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float>) nounwind readnone
define <8 x float> @test_int_x86_xop_vfrcz_ps_256(<8 x float> %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_ps_256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczps %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float> %a0) ;
@@ -735,7 +735,7 @@ define <8 x float> @test_int_x86_xop_vfrcz_ps_256(<8 x float> %a0) {
}
define <8 x float> @test_int_x86_xop_vfrcz_ps_256_mem(<8 x float>* %a0) {
; CHECK-LABEL: test_int_x86_xop_vfrcz_ps_256_mem:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vfrczps (%rdi), %ymm0
; CHECK-NEXT: retq
%vec = load <8 x float>, <8 x float>* %a0
@@ -746,7 +746,7 @@ declare <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomb(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomb:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %a0, <16 x i8> %a1, i8 0) ;
@@ -756,7 +756,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8>, <16 x i8>, i8) nounwind readno
define <8 x i16> @test_int_x86_xop_vpcomw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %a0, <8 x i16> %a1, i8 0) ;
@@ -766,7 +766,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16>, <8 x i16>, i8) nounwind readno
define <4 x i32> @test_int_x86_xop_vpcomd(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %a0, <4 x i32> %a1, i8 0) ;
@@ -776,7 +776,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32>, <4 x i32>, i8) nounwind readno
define <2 x i64> @test_int_x86_xop_vpcomq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %a0, <2 x i64> %a1, i8 0) ;
@@ -786,7 +786,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64>, <2 x i64>, i8) nounwind readno
define <16 x i8> @test_int_x86_xop_vpcomub(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomub:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltub %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %a0, <16 x i8> %a1, i8 0) ;
@@ -796,7 +796,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8>, <16 x i8>, i8) nounwind readn
define <8 x i16> @test_int_x86_xop_vpcomuw(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomuw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltuw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %a0, <8 x i16> %a1, i8 0) ;
@@ -806,7 +806,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16>, <8 x i16>, i8) nounwind readn
define <4 x i32> @test_int_x86_xop_vpcomud(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomud:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltud %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %a0, <4 x i32> %a1, i8 0) ;
@@ -816,7 +816,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32>, <4 x i32>, i8) nounwind readn
define <2 x i64> @test_int_x86_xop_vpcomuq(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_int_x86_xop_vpcomuq:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcomltuq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %a0, <2 x i64> %a1, i8 0) ;
diff --git a/test/CodeGen/X86/xop-mask-comments.ll b/test/CodeGen/X86/xop-mask-comments.ll
index 665bcaae777..c8aa85c425a 100644
--- a/test/CodeGen/X86/xop-mask-comments.ll
+++ b/test/CodeGen/X86/xop-mask-comments.ll
@@ -8,12 +8,12 @@
define <16 x i8> @vpperm_shuffle_unary(<16 x i8> %a0) {
; X32-LABEL: vpperm_shuffle_unary:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpperm {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: retl
;
; X64-LABEL: vpperm_shuffle_unary:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpperm {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a0, <16 x i8> <i8 31, i8 14, i8 29, i8 12, i8 27, i8 10, i8 25, i8 8, i8 23, i8 6, i8 21, i8 4, i8 19, i8 2, i8 17, i8 0>)
@@ -22,12 +22,12 @@ define <16 x i8> @vpperm_shuffle_unary(<16 x i8> %a0) {
define <16 x i8> @vpperm_shuffle_unary_undef(<16 x i8> %a0) {
; X32-LABEL: vpperm_shuffle_unary_undef:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpperm {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X32-NEXT: retl
;
; X64-LABEL: vpperm_shuffle_unary_undef:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpperm {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> undef, <16 x i8> <i8 31, i8 14, i8 29, i8 12, i8 27, i8 10, i8 25, i8 8, i8 23, i8 6, i8 21, i8 4, i8 19, i8 2, i8 17, i8 0>)
@@ -36,12 +36,12 @@ define <16 x i8> @vpperm_shuffle_unary_undef(<16 x i8> %a0) {
define <16 x i8> @vpperm_shuffle_unary_zero(<16 x i8> %a0) {
; X32-LABEL: vpperm_shuffle_unary_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpperm {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3],zero,xmm0[1],zero
; X32-NEXT: retl
;
; X64-LABEL: vpperm_shuffle_unary_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpperm {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3],zero,xmm0[1],zero
; X64-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a0, <16 x i8> <i8 31, i8 14, i8 29, i8 12, i8 27, i8 10, i8 25, i8 8, i8 23, i8 6, i8 21, i8 4, i8 19, i8 130, i8 17, i8 128>)
@@ -50,12 +50,12 @@ define <16 x i8> @vpperm_shuffle_unary_zero(<16 x i8> %a0) {
define <16 x i8> @vpperm_shuffle_binary(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: vpperm_shuffle_binary:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpperm {{.*#+}} xmm0 = xmm1[15],xmm0[14],xmm1[13],xmm0[12],xmm1[11],xmm0[10],xmm1[9],xmm0[8],xmm1[7],xmm0[6],xmm1[5],xmm0[4],xmm1[3],xmm0[2],xmm1[1],xmm0[0]
; X32-NEXT: retl
;
; X64-LABEL: vpperm_shuffle_binary:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpperm {{.*#+}} xmm0 = xmm1[15],xmm0[14],xmm1[13],xmm0[12],xmm1[11],xmm0[10],xmm1[9],xmm0[8],xmm1[7],xmm0[6],xmm1[5],xmm0[4],xmm1[3],xmm0[2],xmm1[1],xmm0[0]
; X64-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 31, i8 14, i8 29, i8 12, i8 27, i8 10, i8 25, i8 8, i8 23, i8 6, i8 21, i8 4, i8 19, i8 2, i8 17, i8 0>)
@@ -64,12 +64,12 @@ define <16 x i8> @vpperm_shuffle_binary(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @vpperm_shuffle_binary_zero(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: vpperm_shuffle_binary_zero:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpperm {{.*#+}} xmm0 = xmm1[15],xmm0[14],xmm1[13],xmm0[12],xmm1[11],xmm0[10],xmm1[9],xmm0[8],xmm1[7],xmm0[6],xmm1[5],xmm0[4],zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: vpperm_shuffle_binary_zero:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpperm {{.*#+}} xmm0 = xmm1[15],xmm0[14],xmm1[13],xmm0[12],xmm1[11],xmm0[10],xmm1[9],xmm0[8],xmm1[7],xmm0[6],xmm1[5],xmm0[4],zero,zero,zero,zero
; X64-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> <i8 31, i8 14, i8 29, i8 12, i8 27, i8 10, i8 25, i8 8, i8 23, i8 6, i8 21, i8 4, i8 147, i8 130, i8 145, i8 128>)
@@ -79,12 +79,12 @@ define <16 x i8> @vpperm_shuffle_binary_zero(<16 x i8> %a0, <16 x i8> %a1) {
; we can't decode vpperm's other permute ops
define <16 x i8> @vpperm_shuffle_general(<16 x i8> %a0, <16 x i8> %a1) {
; X32-LABEL: vpperm_shuffle_general:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: vpperm_shuffle_general:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = tail call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a0, <16 x i8> <i8 31, i8 14, i8 29, i8 12, i8 27, i8 10, i8 25, i8 8, i8 23, i8 6, i8 21, i8 4, i8 179, i8 162, i8 177, i8 160>)
@@ -99,13 +99,13 @@ define <16 x i8> @vpperm_shuffle_general(<16 x i8> %a0, <16 x i8> %a1) {
; be a quicker (and smaller) alternative.
define <2 x double> @vpermil2pd_21(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: vpermil2pd_21:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X32-NEXT: retl
;
; X64-LABEL: vpermil2pd_21:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-NEXT: retq
@@ -115,12 +115,12 @@ define <2 x double> @vpermil2pd_21(<2 x double> %a0, <2 x double> %a1) {
define <4 x double> @vpermil2pd256_0062(<4 x double> %a0, <4 x double> %a1) {
; X32-LABEL: vpermil2pd256_0062:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2pd {{.*#+}} ymm0 = ymm0[0,0],ymm1[2],ymm0[2]
; X32-NEXT: retl
;
; X64-LABEL: vpermil2pd256_0062:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2pd {{.*#+}} ymm0 = ymm0[0,0],ymm1[2],ymm0[2]
; X64-NEXT: retq
%1 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x i64> <i64 0, i64 0, i64 4, i64 0>, i8 0)
@@ -129,12 +129,12 @@ define <4 x double> @vpermil2pd256_0062(<4 x double> %a0, <4 x double> %a1) {
define <4 x double> @vpermil2pd256_zz73(<4 x double> %a0, <4 x double> %a1) {
; X32-LABEL: vpermil2pd256_zz73:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2pd {{.*#+}} ymm0 = zero,zero,ymm1[3],ymm0[3]
; X32-NEXT: retl
;
; X64-LABEL: vpermil2pd256_zz73:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2pd {{.*#+}} ymm0 = zero,zero,ymm1[3],ymm0[3]
; X64-NEXT: retq
%1 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x i64> <i64 0, i64 0, i64 14, i64 10>, i8 3)
@@ -143,12 +143,12 @@ define <4 x double> @vpermil2pd256_zz73(<4 x double> %a0, <4 x double> %a1) {
define <4 x float> @vpermil2ps_0561(<4 x float> %a0, <4 x float> %a1) {
; X32-LABEL: vpermil2ps_0561:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[1]
; X32-NEXT: retl
;
; X64-LABEL: vpermil2ps_0561:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[1]
; X64-NEXT: retq
%1 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 1>, i8 0)
@@ -157,12 +157,12 @@ define <4 x float> @vpermil2ps_0561(<4 x float> %a0, <4 x float> %a1) {
define <8 x float> @vpermil2ps256_098144FE(<8 x float> %a0, <8 x float> %a1) {
; X32-LABEL: vpermil2ps256_098144FE:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm0[0],ymm1[1,0],ymm0[1,4,4],ymm1[7,6]
; X32-NEXT: retl
;
; X64-LABEL: vpermil2ps256_098144FE:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm0[0],ymm1[1,0],ymm0[1,4,4],ymm1[7,6]
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 5, i32 4, i32 1, i32 0, i32 0, i32 7, i32 6>, i8 0)
@@ -171,12 +171,12 @@ define <8 x float> @vpermil2ps256_098144FE(<8 x float> %a0, <8 x float> %a1) {
define <8 x float> @vpermil2ps256_0zz8BzzA(<8 x float> %a0, <8 x float> %a1) {
; X32-LABEL: vpermil2ps256_0zz8BzzA:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm0[0],zero,zero,ymm1[0,7],zero,zero,ymm1[6]
; X32-NEXT: retl
;
; X64-LABEL: vpermil2ps256_0zz8BzzA:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm0[0],zero,zero,ymm1[0,7],zero,zero,ymm1[6]
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 8, i32 4, i32 7, i32 8, i32 8, i32 6>, i8 2)
diff --git a/test/CodeGen/X86/xop-pcmov.ll b/test/CodeGen/X86/xop-pcmov.ll
index 77aefe993b2..4e8abc0d4b6 100644
--- a/test/CodeGen/X86/xop-pcmov.ll
+++ b/test/CodeGen/X86/xop-pcmov.ll
@@ -4,7 +4,7 @@
define <4 x double> @pcmov_4f64(<4 x double> %a, <4 x double> %b, <4 x double> %m) {
; CHECK-LABEL: pcmov_4f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = bitcast <4 x double> %m to <4 x i64>
@@ -20,7 +20,7 @@ define <4 x double> @pcmov_4f64(<4 x double> %a, <4 x double> %b, <4 x double> %
define <2 x double> @pcmov_2f64(<2 x double> %a, <2 x double> %b, <2 x double> %m) {
; CHECK-LABEL: pcmov_2f64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = bitcast <2 x double> %m to <2 x i64>
@@ -36,7 +36,7 @@ define <2 x double> @pcmov_2f64(<2 x double> %a, <2 x double> %b, <2 x double> %
define <8 x float> @pcmov_8f32(<8 x float> %a, <8 x float> %b, <8 x float> %m) {
; CHECK-LABEL: pcmov_8f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = bitcast <8 x float> %m to <8 x i32>
@@ -52,7 +52,7 @@ define <8 x float> @pcmov_8f32(<8 x float> %a, <8 x float> %b, <8 x float> %m) {
define <4 x float> @pcmov_4f32(<4 x float> %a, <4 x float> %b, <4 x float> %m) {
; CHECK-LABEL: pcmov_4f32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = bitcast <4 x float> %m to <4 x i32>
@@ -68,7 +68,7 @@ define <4 x float> @pcmov_4f32(<4 x float> %a, <4 x float> %b, <4 x float> %m) {
define <4 x i64> @pcmov_4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %m) {
; CHECK-LABEL: pcmov_4i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = and <4 x i64> %a, %m
@@ -80,7 +80,7 @@ define <4 x i64> @pcmov_4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %m) {
define <2 x i64> @pcmov_2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %m) {
; CHECK-LABEL: pcmov_2i64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = and <2 x i64> %a, %m
@@ -92,7 +92,7 @@ define <2 x i64> @pcmov_2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %m) {
define <8 x i32> @pcmov_8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %m) {
; CHECK-LABEL: pcmov_8i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = and <8 x i32> %a, %m
@@ -104,7 +104,7 @@ define <8 x i32> @pcmov_8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %m) {
define <4 x i32> @pcmov_4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %m) {
; CHECK-LABEL: pcmov_4i32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = and <4 x i32> %a, %m
@@ -116,7 +116,7 @@ define <4 x i32> @pcmov_4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %m) {
define <16 x i16> @pcmov_16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %m) {
; CHECK-LABEL: pcmov_16i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = and <16 x i16> %a, %m
@@ -128,7 +128,7 @@ define <16 x i16> @pcmov_16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %m) {
define <8 x i16> @pcmov_8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %m) {
; CHECK-LABEL: pcmov_8i16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = and <8 x i16> %a, %m
@@ -140,7 +140,7 @@ define <8 x i16> @pcmov_8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %m) {
define <32 x i8> @pcmov_32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %m) {
; CHECK-LABEL: pcmov_32i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = and <32 x i8> %a, %m
@@ -152,7 +152,7 @@ define <32 x i8> @pcmov_32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %m) {
define <16 x i8> @pcmov_16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %m) {
; CHECK-LABEL: pcmov_16i8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = and <16 x i8> %a, %m
diff --git a/test/CodeGen/X86/xor-icmp.ll b/test/CodeGen/X86/xor-icmp.ll
index b7f0edb24ad..6cdc3186cd4 100644
--- a/test/CodeGen/X86/xor-icmp.ll
+++ b/test/CodeGen/X86/xor-icmp.ll
@@ -5,23 +5,23 @@
define i32 @t(i32 %a, i32 %b) nounwind ssp {
; X32-LABEL: t:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: xorb {{[0-9]+}}(%esp), %al
; X32-NEXT: testb $64, %al
; X32-NEXT: je .LBB0_1
-; X32-NEXT: # BB#2: # %bb1
+; X32-NEXT: # %bb.2: # %bb1
; X32-NEXT: jmp bar # TAILCALL
; X32-NEXT: .LBB0_1: # %bb
; X32-NEXT: jmp foo # TAILCALL
;
; X64-LABEL: t:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: xorl %esi, %edi
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: btl $14, %edi
; X64-NEXT: jae .LBB0_1
-; X64-NEXT: # BB#2: # %bb1
+; X64-NEXT: # %bb.2: # %bb1
; X64-NEXT: jmp bar # TAILCALL
; X64-NEXT: .LBB0_1: # %bb
; X64-NEXT: jmp foo # TAILCALL
@@ -48,27 +48,27 @@ declare i32 @bar(...)
define i32 @t2(i32 %x, i32 %y) nounwind ssp {
; X32-LABEL: t2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: sete %al
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: sete %cl
; X32-NEXT: cmpb %al, %cl
; X32-NEXT: je .LBB1_1
-; X32-NEXT: # BB#2: # %bb
+; X32-NEXT: # %bb.2: # %bb
; X32-NEXT: jmp foo # TAILCALL
; X32-NEXT: .LBB1_1: # %return
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: testl %edi, %edi
; X64-NEXT: sete %al
; X64-NEXT: testl %esi, %esi
; X64-NEXT: sete %cl
; X64-NEXT: cmpb %al, %cl
; X64-NEXT: je .LBB1_1
-; X64-NEXT: # BB#2: # %bb
+; X64-NEXT: # %bb.2: # %bb
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: jmp foo # TAILCALL
; X64-NEXT: .LBB1_1: # %return
diff --git a/test/CodeGen/X86/xor-select-i1-combine.ll b/test/CodeGen/X86/xor-select-i1-combine.ll
index c9383282a0c..8ba7f7d931d 100644
--- a/test/CodeGen/X86/xor-select-i1-combine.ll
+++ b/test/CodeGen/X86/xor-select-i1-combine.ll
@@ -6,7 +6,7 @@
define i32 @main(i8 %small) {
; CHECK-LABEL: main:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: movl $m, %eax
; CHECK-NEXT: movl $n, %ecx
@@ -24,7 +24,7 @@ entry:
define i32 @main2(i8 %small) {
; CHECK-LABEL: main2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $m, %eax
; CHECK-NEXT: movl $n, %ecx
; CHECK-NEXT: testb $1, %dil
diff --git a/test/CodeGen/X86/zext-shl.ll b/test/CodeGen/X86/zext-shl.ll
index 7722f46d753..1b9c813bc1e 100644
--- a/test/CodeGen/X86/zext-shl.ll
+++ b/test/CodeGen/X86/zext-shl.ll
@@ -3,7 +3,7 @@
define i32 @t1(i8 zeroext %x) nounwind {
; CHECK-LABEL: t1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: shll $5, %eax
; CHECK-NEXT: retl
@@ -15,7 +15,7 @@ define i32 @t1(i8 zeroext %x) nounwind {
define i32 @t2(i8 zeroext %x) nounwind {
; CHECK-LABEL: t2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: shrl $3, %eax
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/zext-trunc.ll b/test/CodeGen/X86/zext-trunc.ll
index e51a77abc92..2052f7bcd6a 100644
--- a/test/CodeGen/X86/zext-trunc.ll
+++ b/test/CodeGen/X86/zext-trunc.ll
@@ -4,7 +4,7 @@
define i64 @foo(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leal (%rdi,%rsi), %eax
; CHECK-NEXT: retq
%c = add i64 %a, %b
diff --git a/test/DebugInfo/COFF/asan-module-ctor.ll b/test/DebugInfo/COFF/asan-module-ctor.ll
index 31e68adfb56..65b21ee83ef 100644
--- a/test/DebugInfo/COFF/asan-module-ctor.ll
+++ b/test/DebugInfo/COFF/asan-module-ctor.ll
@@ -10,7 +10,7 @@
; The module ctor has no debug info. All we have to do is not crash.
; X86: _asan.module_ctor:
; X86-NEXT: L{{.*}}:
-; X86: # BB
+; X86: # %bb.
; X86-NEXT: calll ___asan_init_v3
; X86-NEXT: retl
diff --git a/test/DebugInfo/COFF/inlining-header.ll b/test/DebugInfo/COFF/inlining-header.ll
index 48069731b68..865047a0157 100644
--- a/test/DebugInfo/COFF/inlining-header.ll
+++ b/test/DebugInfo/COFF/inlining-header.ll
@@ -26,7 +26,7 @@
; ASM: _main: # @main
; ASM: Lfunc_begin0:
; ASM: .cv_func_id 0
-; ASM: # BB#0: # %entry
+; ASM: # %bb.0: # %entry
; ASM: .cv_file 1 "D:\\src\\llvm\\build\\t.cpp"
; ASM: .cv_loc 0 1 9 5 is_stmt 0 # t.cpp:9:5
; ASM: incl "?x@@3HC"
diff --git a/test/DebugInfo/COFF/local-variable-gap.ll b/test/DebugInfo/COFF/local-variable-gap.ll
index d0e2188098f..d6f1f856237 100644
--- a/test/DebugInfo/COFF/local-variable-gap.ll
+++ b/test/DebugInfo/COFF/local-variable-gap.ll
@@ -38,7 +38,7 @@
; ASM: movl %esi, %ecx
; ASM: testl %eax, %eax
; ASM: jne .LBB0_5
-; ASM: # BB#2: # %if.end
+; ASM: # %bb.2: # %if.end
; ASM: #DEBUG_VALUE: p <- %esi
; ASM: callq use
; ASM: jmp .LBB0_4
diff --git a/test/DebugInfo/COFF/local-variables.ll b/test/DebugInfo/COFF/local-variables.ll
index e34b7d129d0..840b3734de9 100644
--- a/test/DebugInfo/COFF/local-variables.ll
+++ b/test/DebugInfo/COFF/local-variables.ll
@@ -26,14 +26,14 @@
; ASM: .cv_file 1 "D:\\src\\llvm\\build\\t.cpp"
; ASM: .cv_loc 0 1 7 0 is_stmt 0 # t.cpp:7:0
; ASM: .seh_proc f
-; ASM: # BB#0: # %entry
+; ASM: # %bb.0: # %entry
; ASM: subq $56, %rsp
; ASM: movl %ecx, 52(%rsp)
; ASM: [[prologue_end:\.Ltmp.*]]:
; ASM: .cv_loc 0 1 8 7 # t.cpp:8:7
; ASM: testl %ecx, %ecx
; ASM: je .LBB0_2
-; ASM: # BB#1: # %if.then
+; ASM: # %bb.1: # %if.then
; ASM: [[if_start:\.Ltmp.*]]:
; ASM: .cv_loc 0 1 9 9 # t.cpp:9:9
; ASM: movl $42, 40(%rsp)
diff --git a/test/DebugInfo/COFF/multifile.ll b/test/DebugInfo/COFF/multifile.ll
index ba7fc82bf1f..a3dec02d55e 100644
--- a/test/DebugInfo/COFF/multifile.ll
+++ b/test/DebugInfo/COFF/multifile.ll
@@ -17,7 +17,7 @@
; 10 }
; X86-LABEL: _f:
-; X86: # BB
+; X86: # %bb.
; X86: .cv_file 1 "D:\\one.c" "70B51F534D80639D033AE92C6A856AF6" 1
; X86: .cv_loc 0 1 1 0 is_stmt 0 # one.c:1:0
; X86: calll _g
@@ -106,7 +106,7 @@
; X64-NEXT: .L{{.*}}:{{$}}
; X64: .cv_file 1 "D:\\input.c" "70B51F534D80639D033AE92C6A856AF6" 1
; X64: .cv_loc 0 1 3 0 is_stmt 0 # input.c:3:0
-; X64: # BB
+; X64: # %bb.
; X64: subq $40, %rsp
; X64: .cv_file 2 "D:\\one.c" "70B51F534D80639D033AE92C6A856AF6" 1
; X64: .cv_loc 0 2 1 0 # one.c:1:0
diff --git a/test/DebugInfo/COFF/multifunction.ll b/test/DebugInfo/COFF/multifunction.ll
index 4d14a61ceb3..c759ed7a460 100644
--- a/test/DebugInfo/COFF/multifunction.ll
+++ b/test/DebugInfo/COFF/multifunction.ll
@@ -23,7 +23,7 @@
; X86-LABEL: _x:
-; X86: # BB
+; X86: # %bb.
; X86: .cv_file 1 "D:\\source.c"
; X86: .cv_loc 0 1 4 42 is_stmt 0 # source.c:4:42
; X86: calll _z
@@ -32,7 +32,7 @@
; X86: [[END_OF_X:.?Lfunc_end.*]]:
;
; X86-LABEL: _y:
-; X86: # BB
+; X86: # %bb.
; X86: .cv_loc 1 1 8 52 # source.c:8:52
; X86: calll _z
; X86: .cv_loc 1 1 9 53 # source.c:9:53
@@ -40,7 +40,7 @@
; X86: [[END_OF_Y:.?Lfunc_end.*]]:
;
; X86-LABEL: _f:
-; X86: # BB
+; X86: # %bb.
; X86: .cv_loc 2 1 12 62 # source.c:12:62
; X86: calll _x
; X86: .cv_loc 2 1 13 63 # source.c:13:63
@@ -287,7 +287,7 @@
; X64-NEXT: .L{{.*}}:
; X64: .cv_file 1 "D:\\source.c"
; X64: .cv_loc 0 1 3 0 is_stmt 0 # source.c:3:0
-; X64: # BB
+; X64: # %bb.
; X64: subq $40, %rsp
; X64: .cv_loc 0 1 4 42 # source.c:4:42
; X64-NEXT: callq z
@@ -299,7 +299,7 @@
; X64-LABEL: y:
; X64-NEXT: .L{{.*}}:
; X64: .cv_loc 1 1 7 0 # source.c:7:0
-; X64: # BB
+; X64: # %bb.
; X64: subq $40, %rsp
; X64: .cv_loc 1 1 8 52 # source.c:8:52
; X64-NEXT: callq z
@@ -311,7 +311,7 @@
; X64-LABEL: f:
; X64-NEXT: .L{{.*}}:
; X64: .cv_loc 2 1 11 0 # source.c:11:0
-; X64: # BB
+; X64: # %bb.
; X64: subq $40, %rsp
; X64: .cv_loc 2 1 12 62 # source.c:12:62
; X64-NEXT: callq x
diff --git a/test/DebugInfo/COFF/pieces.ll b/test/DebugInfo/COFF/pieces.ll
index 5c5b5a1da37..129732806b9 100644
--- a/test/DebugInfo/COFF/pieces.ll
+++ b/test/DebugInfo/COFF/pieces.ll
@@ -37,7 +37,7 @@
; ASM-LABEL: loop_csr: # @loop_csr
; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] 0
; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] 0
-; ASM: # BB#2: # %for.body.preheader
+; ASM: # %bb.2: # %for.body.preheader
; ASM: xorl %edi, %edi
; ASM: xorl %esi, %esi
; ASM: .p2align 4, 0x90
diff --git a/test/DebugInfo/COFF/register-variables.ll b/test/DebugInfo/COFF/register-variables.ll
index c00d3d3b62e..73cd2bc2c53 100644
--- a/test/DebugInfo/COFF/register-variables.ll
+++ b/test/DebugInfo/COFF/register-variables.ll
@@ -23,7 +23,7 @@
; ASM: f: # @f
; ASM: .Lfunc_begin0:
-; ASM: # BB#0: # %entry
+; ASM: # %bb.0: # %entry
; ASM: pushq %rsi
; ASM: subq $32, %rsp
; ASM: #DEBUG_VALUE: f:p <- %ecx
@@ -38,7 +38,7 @@
; ASM: testl %esi, %esi
; ASM: je .LBB0_2
; ASM: [[after_je:\.Ltmp.*]]:
-; ASM: # BB#1: # %if.then
+; ASM: # %bb.1: # %if.then
; ASM-DAG: #DEBUG_VALUE: inlineinc:a <- %eax
; ASM-DAG: #DEBUG_VALUE: a <- %eax
; ASM-DAG: #DEBUG_VALUE: f:p <- %esi
diff --git a/test/DebugInfo/COFF/simple.ll b/test/DebugInfo/COFF/simple.ll
index 90a973b4c3f..c1a9f1af29f 100644
--- a/test/DebugInfo/COFF/simple.ll
+++ b/test/DebugInfo/COFF/simple.ll
@@ -16,7 +16,7 @@
; 5 }
; X86-LABEL: _f:
-; X86: # BB
+; X86: # %bb.
; X86: .cv_file 1 "D:\\test.c" "F310AB26998CA831CBDF169E4EECACFA" 1
; X86: .cv_loc 0 1 4 2 is_stmt 0 # test.c:4:2
; X86: calll _g
@@ -131,7 +131,7 @@
; X64-NEXT: .L{{.*}}:{{$}}
; X64: .cv_file 1 "D:\\test.c" "F310AB26998CA831CBDF169E4EECACFA" 1
; X64: .cv_loc 0 1 3 0 is_stmt 0 # test.c:3:0
-; X64: # BB
+; X64: # %bb.
; X64: subq $40, %rsp
; X64: .cv_loc 0 1 4 2 # test.c:4:2
; X64-NEXT: callq g
diff --git a/test/DebugInfo/MIR/X86/live-debug-values-3preds.mir b/test/DebugInfo/MIR/X86/live-debug-values-3preds.mir
index f8bfe7b4a26..2041abaf7c3 100644
--- a/test/DebugInfo/MIR/X86/live-debug-values-3preds.mir
+++ b/test/DebugInfo/MIR/X86/live-debug-values-3preds.mir
@@ -28,8 +28,8 @@
# CHECK: ![[Y_VAR:[0-9]+]] = !DILocalVariable(name: "y", {{.*}})
# CHECK: ![[Z_VAR:[0-9]+]] = !DILocalVariable(name: "z", {{.*}})
-# DBG_VALUE for variables "x", "y" and "z" are extended into BB#9 from its
-# predecessors BB#0, BB#2 and BB#8.
+# DBG_VALUE for variables "x", "y" and "z" are extended into %bb.9 from its
+# predecessors %bb.0, %bb.2 and %bb.8.
# CHECK: bb.9.for.end:
# CHECK-DAG: DBG_VALUE debug-use %edi, debug-use %noreg, ![[X_VAR]], !DIExpression(), debug-location !{{[0-9]+}}
# CHECK-DAG: DBG_VALUE debug-use %esi, debug-use %noreg, ![[Y_VAR]], !DIExpression(), debug-location !{{[0-9]+}}
diff --git a/test/DebugInfo/MIR/X86/live-debug-values.mir b/test/DebugInfo/MIR/X86/live-debug-values.mir
index eeb6ad5e98b..7b6f0e7f374 100644
--- a/test/DebugInfo/MIR/X86/live-debug-values.mir
+++ b/test/DebugInfo/MIR/X86/live-debug-values.mir
@@ -30,8 +30,8 @@
# llvm/test/DebugInfo/live-debug-values.ll and present here for testing under
# MIR->MIR serialization.
-# DBG_VALUE for variable "n" is extended into BB#5 from its predecessors BB#3
-# and BB#4.
+# DBG_VALUE for variable "n" is extended into %bb.5 from its predecessors %bb.3
+# and %bb.4.
# CHECK: ![[N_VAR:[0-9]+]] = !DILocalVariable(name: "n",{{.*}})
#
# CHECK: bb.5.if.end.7:
diff --git a/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir b/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
index e3928725291..7660a031690 100644
--- a/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
+++ b/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
@@ -154,10 +154,10 @@ body: |
#
# CHECKDBG-LABEL: ********** EMITTING LIVE DEBUG VARIABLES **********
# CHECKDBG-NEXT: !"argc,5" [0B;0e):0 Loc0=%edi
-# CHECKDBG-NEXT: [0B;0e):0 BB#0-160B
+# CHECKDBG-NEXT: [0B;0e):0 %bb.0-160B
# CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=%rsi
-# CHECKDBG-NEXT: [0B;0e):0 BB#0-160B
+# CHECKDBG-NEXT: [0B;0e):0 %bb.0-160B
# CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=%2
-# CHECKDBG-NEXT: [16r;64r):0 BB#0-160B
+# CHECKDBG-NEXT: [16r;64r):0 %bb.0-160B
# CHECKDBG-NEXT: !"a1,8" [32r;80r):0 Loc0=%3
-# CHECKDBG-NEXT: [32r;80r):0 BB#0-160B
+# CHECKDBG-NEXT: [32r;80r):0 %bb.0-160B
diff --git a/test/DebugInfo/SystemZ/variable-loc.s b/test/DebugInfo/SystemZ/variable-loc.s
index 77705a593f3..6940b1be4d7 100644
--- a/test/DebugInfo/SystemZ/variable-loc.s
+++ b/test/DebugInfo/SystemZ/variable-loc.s
@@ -45,7 +45,7 @@ main: # @main
.cfi_startproc
.Lfunc_begin0:
.loc 2 18 0 # :18:0
-# BB#0: # %entry
+# %bb.0: # %entry
stmg %r12, %r15, 96(%r15)
.Ltmp2:
.cfi_offset %r12, -64
diff --git a/test/DebugInfo/X86/dbg-value-transfer-order.ll b/test/DebugInfo/X86/dbg-value-transfer-order.ll
index 6c55da986ff..7ef994609f3 100644
--- a/test/DebugInfo/X86/dbg-value-transfer-order.ll
+++ b/test/DebugInfo/X86/dbg-value-transfer-order.ll
@@ -28,7 +28,7 @@
; CHECK: movl $32, %ecx
; CHECK: testl {{.*}}
; CHECK: jne .LBB0_3
-; CHECK: # BB#2: # %if.then
+; CHECK: # %bb.2: # %if.then
; CHECK: callq if_then
; CHECK: movl %eax, %ecx
; CHECK: .LBB0_3: # %if.end
diff --git a/test/DebugInfo/X86/live-debug-values.ll b/test/DebugInfo/X86/live-debug-values.ll
index 206b2d62f27..ac6c7c46118 100644
--- a/test/DebugInfo/X86/live-debug-values.ll
+++ b/test/DebugInfo/X86/live-debug-values.ll
@@ -27,8 +27,8 @@
; This case will also produce multiple locations but only the debug range
; extension is tested here.
-; DBG_VALUE for variable "n" is extended into BB#5 from its predecessors BB#3
-; and BB#4.
+; DBG_VALUE for variable "n" is extended into %bb.5 from its predecessors %bb.3
+; and %bb.4.
; CHECK: .LBB0_5:
; CHECK-NEXT: #DEBUG_VALUE: main:n <- %ebx
; Other register values have been clobbered.
diff --git a/test/ExecutionEngine/RuntimeDyld/PowerPC/ppc32_elf_rel_addr16.s b/test/ExecutionEngine/RuntimeDyld/PowerPC/ppc32_elf_rel_addr16.s
index 896aba5e673..94a7dd1c510 100644
--- a/test/ExecutionEngine/RuntimeDyld/PowerPC/ppc32_elf_rel_addr16.s
+++ b/test/ExecutionEngine/RuntimeDyld/PowerPC/ppc32_elf_rel_addr16.s
@@ -7,7 +7,7 @@
.type lookup,@function
lookup: # @lookup
.Lfunc_begin0:
-# BB#0:
+# %bb.0:
stw 31, -4(1)
stwu 1, -16(1)
insn_hi:
diff --git a/test/ExecutionEngine/RuntimeDyld/X86/COFF_x86_64.s b/test/ExecutionEngine/RuntimeDyld/X86/COFF_x86_64.s
index 3d19c3721d9..4d6b2d9b89a 100644
--- a/test/ExecutionEngine/RuntimeDyld/X86/COFF_x86_64.s
+++ b/test/ExecutionEngine/RuntimeDyld/X86/COFF_x86_64.s
@@ -18,7 +18,7 @@ __real400921f9f01b866e:
F: # @F
.Ltmp0:
.seh_proc F
-# BB#0: # %entry
+# %bb.0: # %entry
.Ltmp1:
.seh_endprologue
# rtdyld-check: decode_operand(inst1, 4) = __real400921f9f01b866e - next_pc(inst1)
diff --git a/test/ExecutionEngine/RuntimeDyld/X86/ELF_x64-64_PIC_relocations.s b/test/ExecutionEngine/RuntimeDyld/X86/ELF_x64-64_PIC_relocations.s
index 80d37d45d7e..62e9a3fb838 100644
--- a/test/ExecutionEngine/RuntimeDyld/X86/ELF_x64-64_PIC_relocations.s
+++ b/test/ExecutionEngine/RuntimeDyld/X86/ELF_x64-64_PIC_relocations.s
@@ -21,7 +21,7 @@
.align 16, 0x90
.type foo,@function
foo: # @foo
-# BB#0:
+# %bb.0:
movq G@GOTPCREL(%rip), %rax
movl (%rax), %eax
retq
diff --git a/test/ExecutionEngine/RuntimeDyld/X86/ELF_x86_64_StubBuf.s b/test/ExecutionEngine/RuntimeDyld/X86/ELF_x86_64_StubBuf.s
index 0099fd832a6..71463dd3434 100644
--- a/test/ExecutionEngine/RuntimeDyld/X86/ELF_x86_64_StubBuf.s
+++ b/test/ExecutionEngine/RuntimeDyld/X86/ELF_x86_64_StubBuf.s
@@ -11,7 +11,7 @@
.align 4, 0x90
_f: ## @f
.cfi_startproc
-## BB#0: ## %entry
+## %bb.0: ## %entry
pushq %rax
Ltmp0:
.cfi_def_cfa_offset 16
diff --git a/test/Instrumentation/AddressSanitizer/X86/asm_mov.s b/test/Instrumentation/AddressSanitizer/X86/asm_mov.s
index 14fc056d72f..ecb30b63f11 100644
--- a/test/Instrumentation/AddressSanitizer/X86/asm_mov.s
+++ b/test/Instrumentation/AddressSanitizer/X86/asm_mov.s
@@ -19,7 +19,7 @@
# CHECK: movb %al, (%rdi)
mov1b: # @mov1b
.cfi_startproc
-# BB#0:
+# %bb.0:
#APP
movb (%rsi), %al
movb %al, (%rdi)
@@ -48,7 +48,7 @@ mov1b: # @mov1b
# CHECK: movaps %xmm0, (%rdi)
mov16b: # @mov16b
.cfi_startproc
-# BB#0:
+# %bb.0:
#APP
movaps (%rsi), %xmm0
movaps %xmm0, (%rdi)
diff --git a/test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s b/test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s
index 5d5de5d2a13..a6290a4ab79 100644
--- a/test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s
+++ b/test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s
@@ -11,7 +11,7 @@
# CHECK-NOT: callq __asan_report_store1@PLT
mov1b: # @mov1b
.cfi_startproc
-# BB#0:
+# %bb.0:
#APP
movb (%rsi), %al
movb %al, (%rdi)
diff --git a/test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s b/test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s
index 093c96b0efa..9743d83e471 100644
--- a/test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s
+++ b/test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s
@@ -31,7 +31,7 @@
# CHECK: movq %rax, (%rdx)
swap: # @swap
.cfi_startproc
-# BB#0:
+# %bb.0:
push rbx
.Ltmp0:
.cfi_def_cfa_offset 16
diff --git a/test/MC/AArch64/arm64-ilp32.s b/test/MC/AArch64/arm64-ilp32.s
index 3e9f44abd70..182d2d4c9e7 100644
--- a/test/MC/AArch64/arm64-ilp32.s
+++ b/test/MC/AArch64/arm64-ilp32.s
@@ -8,7 +8,7 @@
.align 2
.type foo,@function
foo: // @foo
-// BB#0: // %entry
+// %bb.0: // %entry
sub sp, sp, #16 // =16
// CHECK-ILP32: 0000000000000004 R_AARCH64_P32_ADR_PREL_PG_HI21 sizes
// CHECK-ILP32: 0000000000000008 R_AARCH64_P32_ADD_ABS_LO12_NC sizes
diff --git a/test/MC/AArch64/arm64-leaf-compact-unwind.s b/test/MC/AArch64/arm64-leaf-compact-unwind.s
index 2ff7fe82e9b..2278ab7c248 100644
--- a/test/MC/AArch64/arm64-leaf-compact-unwind.s
+++ b/test/MC/AArch64/arm64-leaf-compact-unwind.s
@@ -70,7 +70,7 @@
.align 2
_foo1: ; @foo1
.cfi_startproc
-; BB#0: ; %entry
+; %bb.0: ; %entry
add w0, w0, #42 ; =#42
ret
.cfi_endproc
@@ -79,7 +79,7 @@ _foo1: ; @foo1
.align 2
_foo2: ; @foo2
.cfi_startproc
-; BB#0: ; %entry
+; %bb.0: ; %entry
sub sp, sp, #144 ; =#144
Ltmp2:
.cfi_def_cfa_offset 144
@@ -91,7 +91,7 @@ LBB1_1: ; %for.body
add x9, x9, #1 ; =#1
cmp w9, #36 ; =#36
b.ne LBB1_1
-; BB#2:
+; %bb.2:
mov x9, xzr
mov w0, wzr
LBB1_3: ; %for.body4
@@ -101,7 +101,7 @@ LBB1_3: ; %for.body4
cmp w9, #144 ; =#144
add w0, w10, w0
b.ne LBB1_3
-; BB#4: ; %for.end9
+; %bb.4: ; %for.end9
add sp, sp, #144 ; =#144
ret
.cfi_endproc
@@ -110,7 +110,7 @@ LBB1_3: ; %for.body4
.align 2
_foo3: ; @foo3
.cfi_startproc
-; BB#0: ; %entry
+; %bb.0: ; %entry
stp x26, x25, [sp, #-64]!
stp x24, x23, [sp, #16]
stp x22, x21, [sp, #32]
@@ -191,7 +191,7 @@ Lloh1:
.align 2
_foo4: ; @foo4
.cfi_startproc
-; BB#0: ; %entry
+; %bb.0: ; %entry
stp x28, x27, [sp, #-16]!
sub sp, sp, #512 ; =#512
Ltmp12:
@@ -211,7 +211,7 @@ LBB3_1: ; %for.body
add x9, x9, #1 ; =#1
cmp w9, #128 ; =#128
b.ne LBB3_1
-; BB#2: ; %for.cond2.preheader
+; %bb.2: ; %for.cond2.preheader
mov x9, xzr
mov w0, wzr
add x8, x8, w5, sxtw #2
@@ -222,7 +222,7 @@ LBB3_3: ; %for.body4
cmp w9, #512 ; =#512
add w0, w10, w0
b.ne LBB3_3
-; BB#4: ; %for.end11
+; %bb.4: ; %for.end11
add sp, sp, #512 ; =#512
ldp x28, x27, [sp], #16
ret
diff --git a/test/MC/AArch64/basic-pic.s b/test/MC/AArch64/basic-pic.s
index a10874dcca0..79e03c2e299 100644
--- a/test/MC/AArch64/basic-pic.s
+++ b/test/MC/AArch64/basic-pic.s
@@ -8,7 +8,7 @@
.type get_globalvar,@function
get_globalvar: // @get_globalvar
.cfi_startproc
-// BB#0:
+// %bb.0:
adrp x0, :got:var
ldr x0, [x0, #:got_lo12:var]
ldr w0, [x0]
@@ -24,7 +24,7 @@ get_globalvar: // @get_globalvar
.type get_globalvaraddr,@function
get_globalvaraddr: // @get_globalvaraddr
.cfi_startproc
-// BB#0:
+// %bb.0:
adrp x0, :got:var
ldr x0, [x0, #:got_lo12:var]
ret
@@ -38,7 +38,7 @@ get_globalvaraddr: // @get_globalvaraddr
.type get_hiddenvar,@function
get_hiddenvar: // @get_hiddenvar
.cfi_startproc
-// BB#0:
+// %bb.0:
adrp x0, hiddenvar
ldr w0, [x0, #:lo12:hiddenvar]
ret
@@ -52,7 +52,7 @@ get_hiddenvar: // @get_hiddenvar
.type get_hiddenvaraddr,@function
get_hiddenvaraddr: // @get_hiddenvaraddr
.cfi_startproc
-// BB#0:
+// %bb.0:
adrp x0, hiddenvar
add x0, x0, #:lo12:hiddenvar
ret
@@ -66,7 +66,7 @@ get_hiddenvaraddr: // @get_hiddenvaraddr
.type get_func,@function
get_func: // @get_func
.cfi_startproc
-// BB#0:
+// %bb.0:
adrp x0, :got:get_func
ldr x0, [x0, #:got_lo12:get_func]
ret
diff --git a/test/MC/AArch64/elf-extern.s b/test/MC/AArch64/elf-extern.s
index 14c26c1b997..5e3b314d570 100644
--- a/test/MC/AArch64/elf-extern.s
+++ b/test/MC/AArch64/elf-extern.s
@@ -9,7 +9,7 @@
.type check_extern,@function
check_extern: // @check_extern
.cfi_startproc
-// BB#0:
+// %bb.0:
sub sp, sp, #16
.Ltmp2:
.cfi_def_cfa sp, 16
diff --git a/test/MC/AArch64/inline-asm-modifiers.s b/test/MC/AArch64/inline-asm-modifiers.s
index 1dc5fe60d3b..71c9379e375 100644
--- a/test/MC/AArch64/inline-asm-modifiers.s
+++ b/test/MC/AArch64/inline-asm-modifiers.s
@@ -5,7 +5,7 @@
.globl test_inline_modifier_L
.type test_inline_modifier_L,@function
test_inline_modifier_L: // @test_inline_modifier_L
-// BB#0:
+// %bb.0:
//APP
add x0, x0, #:lo12:var_simple
//NO_APP
@@ -38,7 +38,7 @@ test_inline_modifier_L: // @test_inline_modifier_L
.globl test_inline_modifier_G
.type test_inline_modifier_G,@function
test_inline_modifier_G: // @test_inline_modifier_G
-// BB#0:
+// %bb.0:
//APP
add x0, x0, #:dtprel_hi12:var_tlsld, lsl #12
//NO_APP
@@ -55,7 +55,7 @@ test_inline_modifier_G: // @test_inline_modifier_G
.globl test_inline_modifier_A
.type test_inline_modifier_A,@function
test_inline_modifier_A: // @test_inline_modifier_A
-// BB#0:
+// %bb.0:
//APP
adrp x0, var_simple
//NO_APP
@@ -79,7 +79,7 @@ test_inline_modifier_A: // @test_inline_modifier_A
.globl test_inline_modifier_wx
.type test_inline_modifier_wx,@function
test_inline_modifier_wx: // @test_inline_modifier_wx
-// BB#0:
+// %bb.0:
mov w2, w0
//APP
add w2, w2, w2
@@ -115,7 +115,7 @@ test_inline_modifier_wx: // @test_inline_modifier_wx
.globl test_inline_modifier_bhsdq
.type test_inline_modifier_bhsdq,@function
test_inline_modifier_bhsdq: // @test_inline_modifier_bhsdq
-// BB#0:
+// %bb.0:
//APP
ldr b0, [sp]
//NO_APP
@@ -153,7 +153,7 @@ test_inline_modifier_bhsdq: // @test_inline_modifier_bhsdq
.globl test_inline_modifier_c
.type test_inline_modifier_c,@function
test_inline_modifier_c: // @test_inline_modifier_c
-// BB#0:
+// %bb.0:
//APP
adr x0, 3
//NO_APP
diff --git a/test/MC/AArch64/jump-table.s b/test/MC/AArch64/jump-table.s
index 578ebf4e660..c4b879e6ef0 100644
--- a/test/MC/AArch64/jump-table.s
+++ b/test/MC/AArch64/jump-table.s
@@ -6,11 +6,11 @@
.type test_jumptable,@function
test_jumptable: // @test_jumptable
.cfi_startproc
-// BB#0:
+// %bb.0:
ubfx w1, w0, #0, #32
cmp w0, #4
b.hi .LBB0_3
-// BB#1:
+// %bb.1:
adrp x0, .LJTI0_0
add x0, x0, #:lo12:.LJTI0_0
ldr x0, [x0, x1, lsl #3]
diff --git a/test/MC/ARM/2010-11-30-reloc-movt.s b/test/MC/ARM/2010-11-30-reloc-movt.s
index dc6960ba6b8..9f0553af182 100644
--- a/test/MC/ARM/2010-11-30-reloc-movt.s
+++ b/test/MC/ARM/2010-11-30-reloc-movt.s
@@ -17,7 +17,7 @@
.align 2
.type barf,%function
barf: @ @barf
-@ BB#0: @ %entry
+@ %bb.0: @ %entry
push {r11, lr}
movw r0, :lower16:a
movt r0, :upper16:a
diff --git a/test/MC/ARM/elf-eflags-eabi.s b/test/MC/ARM/elf-eflags-eabi.s
index fe0b6c071e6..bc4cc3b9942 100644
--- a/test/MC/ARM/elf-eflags-eabi.s
+++ b/test/MC/ARM/elf-eflags-eabi.s
@@ -6,7 +6,7 @@
.align 2
.type barf,%function
barf: @ @barf
-@ BB#0: @ %entry
+@ %bb.0: @ %entry
b foo
@@@ make sure the EF_ARM_EABIMASK comes out OK
diff --git a/test/MC/ARM/elf-movt.s b/test/MC/ARM/elf-movt.s
index 858e4aa41b2..d1e6bd7916d 100644
--- a/test/MC/ARM/elf-movt.s
+++ b/test/MC/ARM/elf-movt.s
@@ -10,7 +10,7 @@
.align 2
.type barf,%function
barf: @ @barf
-@ BB#0: @ %entry
+@ %bb.0: @ %entry
movw r0, :lower16:GOT-(.LPC0_2+8)
movt r0, :upper16:GOT-(.LPC0_2+8)
.LPC0_2:
diff --git a/test/MC/AsmParser/seh-directive-errors.s b/test/MC/AsmParser/seh-directive-errors.s
index 65476fe2dee..07d0a4a6179 100644
--- a/test/MC/AsmParser/seh-directive-errors.s
+++ b/test/MC/AsmParser/seh-directive-errors.s
@@ -68,7 +68,7 @@ g:
.p2align 4, 0x90
h: # @h
.seh_proc h
-# BB#0: # %entry
+# %bb.0: # %entry
subq $72, %rsp
.seh_stackalloc 72
movaps %xmm7, 48(%rsp) # 16-byte Spill
diff --git a/test/MC/COFF/basic-coff-64.s b/test/MC/COFF/basic-coff-64.s
index 1fa9280e0ca..5fe710fdba4 100644
--- a/test/MC/COFF/basic-coff-64.s
+++ b/test/MC/COFF/basic-coff-64.s
@@ -11,7 +11,7 @@
.globl _main
.align 16, 0x90
_main: # @main
-# BB#0: # %entry
+# %bb.0: # %entry
subl $4, %esp
movl $.L_.str, (%esp)
call _printf
diff --git a/test/MC/COFF/basic-coff.s b/test/MC/COFF/basic-coff.s
index 6aa247bdd29..575a937c957 100644
--- a/test/MC/COFF/basic-coff.s
+++ b/test/MC/COFF/basic-coff.s
@@ -11,7 +11,7 @@
.globl _main
.align 16, 0x90
_main: # @main
-# BB#0: # %entry
+# %bb.0: # %entry
subl $4, %esp
movl $L_.str, (%esp)
call _printf
diff --git a/test/MC/COFF/cv-def-range.s b/test/MC/COFF/cv-def-range.s
index 2b0d4b754d4..7afdac23ea6 100644
--- a/test/MC/COFF/cv-def-range.s
+++ b/test/MC/COFF/cv-def-range.s
@@ -17,7 +17,7 @@ Lfunc_begin0:
.cv_file 1 "\\usr\\local\\google\\home\\majnemer\\llvm\\src\\<stdin>"
.cv_func_id 0
.cv_loc 0 1 3 0 is_stmt 0 # <stdin>:3:0
-# BB#0: # %entry
+# %bb.0: # %entry
pushl %ebp
movl %esp, %ebp
subl $8, %esp
diff --git a/test/MC/COFF/cv-empty-linetable.s b/test/MC/COFF/cv-empty-linetable.s
index 568d55a3657..865bc7556be 100644
--- a/test/MC/COFF/cv-empty-linetable.s
+++ b/test/MC/COFF/cv-empty-linetable.s
@@ -14,7 +14,7 @@
.p2align 4, 0x90
_f: # @f
Lfunc_begin0:
-# BB#0: # %entry
+# %bb.0: # %entry
.cv_file 1 "cv-empty-linetable.s"
.cv_func_id 1
.cv_loc 1 1 3 15 is_stmt 0
diff --git a/test/MC/COFF/cv-inline-linetable-unreachable.s b/test/MC/COFF/cv-inline-linetable-unreachable.s
index d894fc758fb..ab184602c86 100644
--- a/test/MC/COFF/cv-inline-linetable-unreachable.s
+++ b/test/MC/COFF/cv-inline-linetable-unreachable.s
@@ -18,7 +18,7 @@ Lfunc_begin0:
.cv_func_id 0
.cv_inline_site_id 1 within 0 inlined_at 1 1 1
.cv_loc 0 1 7 0 is_stmt 0 # <stdin>:7:0
-# BB#0: # %entry
+# %bb.0: # %entry
pushl %ebp
movl %esp, %ebp
.cv_loc 1 1 4 3 # <stdin>:4:3
diff --git a/test/MC/COFF/cv-inline-linetable.s b/test/MC/COFF/cv-inline-linetable.s
index 2c89f9836c4..61a42d92f40 100644
--- a/test/MC/COFF/cv-inline-linetable.s
+++ b/test/MC/COFF/cv-inline-linetable.s
@@ -19,7 +19,7 @@ Lfunc_begin0:
.cv_inline_site_id 1 within 0 inlined_at 1 15 3
.cv_inline_site_id 2 within 1 inlined_at 1 10 3
.cv_loc 0 1 13 0 is_stmt 0 # t.cpp:13:0
-# BB#0: # %entry
+# %bb.0: # %entry
pushl %eax
.cv_loc 0 1 14 5 # t.cpp:14:5
addl $6, "?x@@3HC"
diff --git a/test/MC/COFF/diff.s b/test/MC/COFF/diff.s
index 58cc7fe532b..d68e628577b 100644
--- a/test/MC/COFF/diff.s
+++ b/test/MC/COFF/diff.s
@@ -27,7 +27,7 @@ Y:
.globl _foobar
.align 16, 0x90
_foobar: # @foobar
-# BB#0:
+# %bb.0:
ret
.data
diff --git a/test/MC/COFF/seh-linkonce.s b/test/MC/COFF/seh-linkonce.s
index 5631b748c00..3352d68b3cf 100644
--- a/test/MC/COFF/seh-linkonce.s
+++ b/test/MC/COFF/seh-linkonce.s
@@ -11,7 +11,7 @@
weak_func: # @weak_func
.Ltmp0:
.seh_proc weak_func
-# BB#0: # %entry
+# %bb.0: # %entry
pushq %rbp
.Ltmp1:
.seh_pushreg 5
diff --git a/test/MC/COFF/seh-section-2.s b/test/MC/COFF/seh-section-2.s
index 9a7156d4d80..650c5b5105b 100644
--- a/test/MC/COFF/seh-section-2.s
+++ b/test/MC/COFF/seh-section-2.s
@@ -13,7 +13,7 @@
f: # @f
.Ltmp0:
.seh_proc f
-# BB#0:
+# %bb.0:
subq $40, %rsp
.Ltmp1:
.seh_stackalloc 40
@@ -37,7 +37,7 @@ f: # @f
g: # @g
.Ltmp4:
.seh_proc g
-# BB#0:
+# %bb.0:
.Ltmp5:
.seh_endprologue
retq
diff --git a/test/MC/COFF/simple-fixups.s b/test/MC/COFF/simple-fixups.s
index 9d960084272..c1556afb5c0 100644
--- a/test/MC/COFF/simple-fixups.s
+++ b/test/MC/COFF/simple-fixups.s
@@ -13,7 +13,7 @@
.globl _foo
.align 16, 0x90
_foo: # @foo
-# BB#0: # %e
+# %bb.0: # %e
.align 16, 0x90
LBB0_1: # %i
# =>This Inner Loop Header: Depth=1
@@ -26,7 +26,7 @@ LBB0_1: # %i
.globl _bar
.align 16, 0x90
_bar: # @bar
-# BB#0: # %e
+# %bb.0: # %e
.align 16, 0x90
LBB1_1: # %i
# =>This Inner Loop Header: Depth=1
@@ -39,7 +39,7 @@ LBB1_1: # %i
.globl _baz
.align 16, 0x90
_baz: # @baz
-# BB#0: # %e
+# %bb.0: # %e
subl $4, %esp
Ltmp0:
call _baz
diff --git a/test/MC/COFF/symbol-alias.s b/test/MC/COFF/symbol-alias.s
index 71ccec31b82..ad3b6b23ece 100644
--- a/test/MC/COFF/symbol-alias.s
+++ b/test/MC/COFF/symbol-alias.s
@@ -13,7 +13,7 @@
.globl _foo
.align 16, 0x90
_foo: # @foo
-# BB#0: # %entry
+# %bb.0: # %entry
ret
.data
diff --git a/test/MC/COFF/symbol-fragment-offset-64.s b/test/MC/COFF/symbol-fragment-offset-64.s
index 03077ce9429..94e7d07965b 100644
--- a/test/MC/COFF/symbol-fragment-offset-64.s
+++ b/test/MC/COFF/symbol-fragment-offset-64.s
@@ -11,7 +11,7 @@
.globl _main
.align 16, 0x90
_main: # @main
-# BB#0: # %entry
+# %bb.0: # %entry
subl $4, %esp
movl $.L_.str0, (%esp)
callq _printf
diff --git a/test/MC/COFF/symbol-fragment-offset.s b/test/MC/COFF/symbol-fragment-offset.s
index c592fa4c0e7..5875bf47f92 100644
--- a/test/MC/COFF/symbol-fragment-offset.s
+++ b/test/MC/COFF/symbol-fragment-offset.s
@@ -11,7 +11,7 @@
.globl _main
.align 16, 0x90
_main: # @main
-# BB#0: # %entry
+# %bb.0: # %entry
subl $4, %esp
movl $L_.str0, (%esp)
calll _printf
diff --git a/test/MC/COFF/weak.s b/test/MC/COFF/weak.s
index e0d077840f1..567a590deb2 100644
--- a/test/MC/COFF/weak.s
+++ b/test/MC/COFF/weak.s
@@ -12,12 +12,12 @@
.globl _main
.align 16, 0x90
_main: # @main
-# BB#0: # %entry
+# %bb.0: # %entry
subl $4, %esp
movl $_test_weak, %eax
testl %eax, %eax
je LBB0_2
-# BB#1: # %if.then
+# %bb.1: # %if.then
call _test_weak
movl $1, %eax
addl $4, %esp
diff --git a/test/MC/ELF/ARM/clang-section.s b/test/MC/ELF/ARM/clang-section.s
index 0b0d27c4ceb..73bae69aa26 100644
--- a/test/MC/ELF/ARM/clang-section.s
+++ b/test/MC/ELF/ARM/clang-section.s
@@ -23,12 +23,12 @@
.code 32 @ @foo
foo:
.fnstart
-@ BB#0: @ %entry
+@ %bb.0: @ %entry
ldr r0, .LCPI0_0
ldr r0, [r0]
mov pc, lr
.p2align 2
-@ BB#1:
+@ %bb.1:
.LCPI0_0:
.long b
.Lfunc_end0:
@@ -43,7 +43,7 @@ foo:
.code 32 @ @goo
goo:
.fnstart
-@ BB#0: @ %entry
+@ %bb.0: @ %entry
.save {r11, lr}
push {r11, lr}
ldr r0, .LCPI1_0
@@ -52,7 +52,7 @@ goo:
pop {r11, lr}
mov pc, lr
.p2align 2
-@ BB#1:
+@ %bb.1:
.LCPI1_0:
.long _ZL1g
.LCPI1_1:
@@ -69,12 +69,12 @@ goo:
.code 32 @ @hoo
hoo:
.fnstart
-@ BB#0: @ %entry
+@ %bb.0: @ %entry
ldr r0, .LCPI2_0
ldr r0, [r0]
mov pc, lr
.p2align 2
-@ BB#1:
+@ %bb.1:
.LCPI2_0:
.long b
.Lfunc_end2:
diff --git a/test/MC/ELF/basic-elf-32.s b/test/MC/ELF/basic-elf-32.s
index 1036b04a747..d4aab27d29c 100644
--- a/test/MC/ELF/basic-elf-32.s
+++ b/test/MC/ELF/basic-elf-32.s
@@ -5,7 +5,7 @@
.align 16, 0x90
.type main,@function
main: # @main
-# BB#0:
+# %bb.0:
subl $4, %esp
movl $.L.str1, (%esp)
calll puts
diff --git a/test/MC/ELF/basic-elf-64.s b/test/MC/ELF/basic-elf-64.s
index b93f9aebd3a..01f020bd64b 100644
--- a/test/MC/ELF/basic-elf-64.s
+++ b/test/MC/ELF/basic-elf-64.s
@@ -5,7 +5,7 @@
.align 16, 0x90
.type main,@function
main: # @main
-# BB#0:
+# %bb.0:
subq $8, %rsp
movl $.L.str1, %edi
callq puts
diff --git a/test/MC/ELF/call-abs.s b/test/MC/ELF/call-abs.s
index 81265a1b075..145b9a7da2f 100644
--- a/test/MC/ELF/call-abs.s
+++ b/test/MC/ELF/call-abs.s
@@ -4,7 +4,7 @@
.globl f
.type f,@function
f: # @f
-# BB#0: # %entry
+# %bb.0: # %entry
subl $4, %esp
calll 42
incl %eax
diff --git a/test/MC/ELF/fde.s b/test/MC/ELF/fde.s
index 52ee33f16fb..09be34b5303 100644
--- a/test/MC/ELF/fde.s
+++ b/test/MC/ELF/fde.s
@@ -10,7 +10,7 @@
__cxx_global_var_init: # @__cxx_global_var_init
.cfi_startproc
.Lfunc_begin0:
-# BB#0: # %entry
+# %bb.0: # %entry
pushq %rbp
.Ltmp2:
.cfi_def_cfa_offset 16
diff --git a/test/MC/MachO/debug_frame.s b/test/MC/MachO/debug_frame.s
index d185127f4b1..509c57ac2c2 100644
--- a/test/MC/MachO/debug_frame.s
+++ b/test/MC/MachO/debug_frame.s
@@ -16,7 +16,7 @@ _proc:
_f: ## @f
Ltmp0:
.cfi_startproc
-## BB#0: ## %entry
+## %bb.0: ## %entry
movl $42, %eax
ret
Ltmp1:
diff --git a/test/MC/Mips/do_switch1.s b/test/MC/Mips/do_switch1.s
index 69742e9091a..e589351030e 100644
--- a/test/MC/Mips/do_switch1.s
+++ b/test/MC/Mips/do_switch1.s
@@ -22,7 +22,7 @@ main: # @main
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
addiu $sp, $sp, -8
addiu $1, $zero, 2
sw $1, 4($sp)
diff --git a/test/MC/Mips/do_switch2.s b/test/MC/Mips/do_switch2.s
index 0c8ad4ad896..d82e8f502b0 100644
--- a/test/MC/Mips/do_switch2.s
+++ b/test/MC/Mips/do_switch2.s
@@ -21,7 +21,7 @@ main: # @main
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
lui $2, %hi(_gp_disp)
addiu $2, $2, %lo(_gp_disp)
addiu $sp, $sp, -8
diff --git a/test/MC/Mips/do_switch3.s b/test/MC/Mips/do_switch3.s
index 7ed4f7c5219..b35ff126179 100644
--- a/test/MC/Mips/do_switch3.s
+++ b/test/MC/Mips/do_switch3.s
@@ -21,7 +21,7 @@ main: # @main
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
daddiu $sp, $sp, -16
lui $1, %hi(%neg(%gp_rel(main)))
daddu $2, $1, $25
diff --git a/test/MC/Mips/elf-N64.s b/test/MC/Mips/elf-N64.s
index bf6ebd73091..6be46ddd8a9 100644
--- a/test/MC/Mips/elf-N64.s
+++ b/test/MC/Mips/elf-N64.s
@@ -29,7 +29,7 @@ main: # @main
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
daddiu $sp, $sp, -16
sd $ra, 8($sp) # 8-byte Folded Spill
sd $gp, 0($sp) # 8-byte Folded Spill
diff --git a/test/MC/Mips/elf-gprel-32-64.s b/test/MC/Mips/elf-gprel-32-64.s
index 2f5ac6652a3..b590c97b278 100644
--- a/test/MC/Mips/elf-gprel-32-64.s
+++ b/test/MC/Mips/elf-gprel-32-64.s
@@ -34,7 +34,7 @@ test: # @test
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
lui $1, %hi(%neg(%gp_rel(test)))
daddu $2, $1, $25
sltiu $1, $4, 4
diff --git a/test/MC/Mips/elf-relsym.s b/test/MC/Mips/elf-relsym.s
index d19065e0cd7..53d863bde97 100644
--- a/test/MC/Mips/elf-relsym.s
+++ b/test/MC/Mips/elf-relsym.s
@@ -40,7 +40,7 @@ foo1: # @foo1
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
lui $2, %hi(_gp_disp)
addiu $2, $2, %lo(_gp_disp)
addu $1, $2, $25
diff --git a/test/MC/Mips/elf-tls.s b/test/MC/Mips/elf-tls.s
index d50f62c2099..23a52e1ef05 100644
--- a/test/MC/Mips/elf-tls.s
+++ b/test/MC/Mips/elf-tls.s
@@ -27,7 +27,7 @@ f1: # @f1
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
lui $2, %hi(_gp_disp)
addiu $2, $2, %lo(_gp_disp)
addiu $sp, $sp, -24
@@ -59,7 +59,7 @@ f2: # @f2
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
lui $2, %hi(_gp_disp)
addiu $2, $2, %lo(_gp_disp)
addiu $sp, $sp, -24
@@ -91,7 +91,7 @@ f3: # @f3
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
lui $2, %hi(_gp_disp)
addiu $2, $2, %lo(_gp_disp)
addiu $sp, $sp, -24
diff --git a/test/MC/Mips/mips_gprel16.s b/test/MC/Mips/mips_gprel16.s
index a6e09c6c7b0..72c6fa710c2 100644
--- a/test/MC/Mips/mips_gprel16.s
+++ b/test/MC/Mips/mips_gprel16.s
@@ -26,7 +26,7 @@ testvar1: # @testvar1
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
// CHECK: lw ${{[0-9]+}}, 0($gp)
lw $1, %gp_rel(var1)($gp)
jr $ra
@@ -50,7 +50,7 @@ testvar2: # @testvar2
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
// CHECK: lw ${{[0-9]+}}, 4($gp)
lw $1, %gp_rel(var2)($gp)
jr $ra
diff --git a/test/MC/Mips/r-mips-got-disp.s b/test/MC/Mips/r-mips-got-disp.s
index 3cadc228422..b75cac59217 100644
--- a/test/MC/Mips/r-mips-got-disp.s
+++ b/test/MC/Mips/r-mips-got-disp.s
@@ -22,7 +22,7 @@ main: # @main
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
daddiu $sp, $sp, -16
sd $ra, 8($sp) # 8-byte Folded Spill
sd $gp, 0($sp) # 8-byte Folded Spill
diff --git a/test/MC/Mips/xgot.s b/test/MC/Mips/xgot.s
index 3380a856375..9c64db749fd 100644
--- a/test/MC/Mips/xgot.s
+++ b/test/MC/Mips/xgot.s
@@ -31,7 +31,7 @@ fill: # @fill
.set noreorder
.set nomacro
.set noat
-# BB#0: # %entry
+# %bb.0: # %entry
lui $2, %hi(_gp_disp)
addiu $2, $2, %lo(_gp_disp)
addiu $sp, $sp, -24
diff --git a/test/MC/PowerPC/tls-gd-obj.s b/test/MC/PowerPC/tls-gd-obj.s
index fb4ab8b351e..66c8fa0138b 100644
--- a/test/MC/PowerPC/tls-gd-obj.s
+++ b/test/MC/PowerPC/tls-gd-obj.s
@@ -18,7 +18,7 @@ main: # @main
.quad 0
.text
.L.main:
-# BB#0: # %entry
+# %bb.0: # %entry
addis 3, 2, a@got@tlsgd@ha
addi 3, 3, a@got@tlsgd@l
li 4, 0
diff --git a/test/MC/PowerPC/tls-ie-obj.s b/test/MC/PowerPC/tls-ie-obj.s
index f7de644630c..f3bb7ee0335 100644
--- a/test/MC/PowerPC/tls-ie-obj.s
+++ b/test/MC/PowerPC/tls-ie-obj.s
@@ -17,7 +17,7 @@ main: # @main
.quad 0
.text
.L.main:
-# BB#0: # %entry
+# %bb.0: # %entry
li 3, 0
addis 4, 2, a@got@tprel@ha
ld 4, a@got@tprel@l(4)
diff --git a/test/MC/PowerPC/tls-ld-obj.s b/test/MC/PowerPC/tls-ld-obj.s
index 1fa371dfac2..3538d70a30a 100644
--- a/test/MC/PowerPC/tls-ld-obj.s
+++ b/test/MC/PowerPC/tls-ld-obj.s
@@ -17,7 +17,7 @@ main: # @main
.quad 0
.text
.L.main:
-# BB#0: # %entry
+# %bb.0: # %entry
addis 3, 2, a@got@tlsld@ha
addi 3, 3, a@got@tlsld@l
li 4, 0
diff --git a/test/MC/X86/compact-unwind.s b/test/MC/X86/compact-unwind.s
index 82be239d79c..70fc018f788 100644
--- a/test/MC/X86/compact-unwind.s
+++ b/test/MC/X86/compact-unwind.s
@@ -13,7 +13,7 @@
.globl _test0
_test0: ## @test0
.cfi_startproc
-## BB#0: ## %entry
+## %bb.0: ## %entry
pushq %rbp
Ltmp0:
.cfi_def_cfa_offset 16
@@ -43,7 +43,7 @@ Ltmp4:
.globl _test1
_test1: ## @test1
.cfi_startproc
-## BB#0: ## %entry
+## %bb.0: ## %entry
pushq %rbp
Ltmp10:
.cfi_def_cfa_offset 16
diff --git a/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-fullinfo.s b/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-fullinfo.s
index 7b5ca07d7e4..8e296d251c0 100644
--- a/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-fullinfo.s
+++ b/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-fullinfo.s
@@ -22,7 +22,7 @@ _Z1av: # @_Z1av
.file 1 "tiny.cc"
.loc 1 1 0 # tiny.cc:1:0
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -45,7 +45,7 @@ _Z1bv: # @_Z1bv
.Lfunc_begin1:
.loc 1 2 0 # tiny.cc:2:0
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -68,7 +68,7 @@ main: # @main
.Lfunc_begin2:
.loc 1 4 0 # tiny.cc:4:0
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -84,7 +84,7 @@ main: # @main
.Ltmp5:
.loc 1 6 7 is_stmt 0 # tiny.cc:6:7
jne .LBB2_2
-# BB#1:
+# %bb.1:
.loc 1 0 7 # tiny.cc:0:7
movabsq $_Z1av, %rax
.Ltmp6:
diff --git a/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-lineinfo.s b/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-lineinfo.s
index 155f5978b46..e44770896fd 100644
--- a/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-lineinfo.s
+++ b/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-lineinfo.s
@@ -22,7 +22,7 @@ _Z1av: # @_Z1av
.file 1 "tiny.cc"
.loc 1 1 0 # tiny.cc:1:0
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -44,7 +44,7 @@ _Z1bv: # @_Z1bv
.Lfunc_begin1:
.loc 1 2 0 # tiny.cc:2:0
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -66,7 +66,7 @@ main: # @main
.Lfunc_begin2:
.loc 1 4 0 # tiny.cc:4:0
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -81,7 +81,7 @@ main: # @main
cmpl $1, -8(%rbp)
.loc 1 6 7 is_stmt 0 # tiny.cc:6:7
jne .LBB2_2
-# BB#1:
+# %bb.1:
.loc 1 0 7 # tiny.cc:0:7
movabsq $_Z1av, %rax
.loc 1 7 9 is_stmt 1 # tiny.cc:7:9
diff --git a/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-nolineinfo.s b/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-nolineinfo.s
index 2d3cf2f484e..a7eaeb0e654 100644
--- a/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-nolineinfo.s
+++ b/test/tools/llvm-cfi-verify/X86/Inputs/unprotected-nolineinfo.s
@@ -19,7 +19,7 @@
.type _Z1av,@function
_Z1av: # @_Z1av
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -36,7 +36,7 @@ _Z1av: # @_Z1av
.type _Z1bv,@function
_Z1bv: # @_Z1bv
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -53,7 +53,7 @@ _Z1bv: # @_Z1bv
.type main,@function
main: # @main
.cfi_startproc
-# BB#0:
+# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
@@ -65,7 +65,7 @@ main: # @main
movq %rsi, -16(%rbp)
cmpl $1, -8(%rbp)
jne .LBB2_2
-# BB#1:
+# %bb.1:
movabsq $_Z1av, %rax
movq %rax, -24(%rbp)
jmp .LBB2_3
diff --git a/test/tools/llvm-dwarfdump/X86/brief.s b/test/tools/llvm-dwarfdump/X86/brief.s
index d77700d6655..98835d43560 100644
--- a/test/tools/llvm-dwarfdump/X86/brief.s
+++ b/test/tools/llvm-dwarfdump/X86/brief.s
@@ -38,7 +38,7 @@ Lfunc_begin0:
.file 1 "brief.c"
.loc 1 1 0 ## brief.c:1:0
.cfi_startproc
-## BB#0: ## %entry
+## %bb.0: ## %entry
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
diff --git a/test/tools/llvm-dwarfdump/X86/debugloc.s b/test/tools/llvm-dwarfdump/X86/debugloc.s
index 9dc266a1264..749f3aa9b3a 100644
--- a/test/tools/llvm-dwarfdump/X86/debugloc.s
+++ b/test/tools/llvm-dwarfdump/X86/debugloc.s
@@ -28,7 +28,7 @@ foo: # @foo
.file 1 "test.c"
.loc 1 1 0 # test.c:1:0
.cfi_startproc
-# BB#0:
+# %bb.0:
#DEBUG_VALUE: foo:i <- %RDI
.loc 1 2 3 prologue_end # test.c:2:3
movq %rdi, %rax
@@ -47,7 +47,7 @@ bar: # @bar
.Lfunc_begin1:
.loc 1 5 0 # test.c:5:0
.cfi_startproc
-# BB#0:
+# %bb.0:
#DEBUG_VALUE: bar:i <- %RDI
.loc 1 6 3 prologue_end # test.c:6:3
movq %rdi, %rax
diff --git a/test/tools/llvm-dwarfdump/X86/lookup.s b/test/tools/llvm-dwarfdump/X86/lookup.s
index d095b8b388a..652844447c1 100644
--- a/test/tools/llvm-dwarfdump/X86/lookup.s
+++ b/test/tools/llvm-dwarfdump/X86/lookup.s
@@ -45,7 +45,7 @@ Lfunc_begin0:
.file 1 "foo.c"
.loc 1 1 0 ## foo.c:1:0
.cfi_startproc
-## BB#0: ## %entry
+## %bb.0: ## %entry
pushq %rbp
Lcfi0:
.cfi_def_cfa_offset 16
diff --git a/test/tools/llvm-dwarfdump/X86/verify_debug_info.s b/test/tools/llvm-dwarfdump/X86/verify_debug_info.s
index 27110e0794c..f1944102240 100644
--- a/test/tools/llvm-dwarfdump/X86/verify_debug_info.s
+++ b/test/tools/llvm-dwarfdump/X86/verify_debug_info.s
@@ -26,7 +26,7 @@ Lfunc_begin0:
.file 1 "basic.c"
.loc 1 1 0 ## basic.c:1:0
.cfi_startproc
-## BB#0: ## %entry
+## %bb.0: ## %entry
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
diff --git a/test/tools/llvm-dwarfdump/X86/verify_die_ranges.s b/test/tools/llvm-dwarfdump/X86/verify_die_ranges.s
index 44280c4a499..0c148e6b341 100644
--- a/test/tools/llvm-dwarfdump/X86/verify_die_ranges.s
+++ b/test/tools/llvm-dwarfdump/X86/verify_die_ranges.s
@@ -14,7 +14,7 @@ Lfunc_begin0:
.file 1 "basic.c"
.loc 1 1 0 ## basic.c:1:0
.cfi_startproc
-## BB#0: ## %entry
+## %bb.0: ## %entry
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16